diff --git a/go.mod b/go.mod
index 28204870737..944dffcbfab 100644
--- a/go.mod
+++ b/go.mod
@@ -4,22 +4,22 @@ go 1.22.1

 require (
 	cloud.google.com/go/compute/metadata v0.2.3
-	github.com/aws/aws-sdk-go-v2 v1.25.2
-	github.com/aws/aws-sdk-go-v2/config v1.27.6
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.6
-	github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.8
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2
-	github.com/aws/aws-sdk-go-v2/service/dynamodb v1.30.3
-	github.com/aws/smithy-go v1.20.1
+	github.com/aws/aws-sdk-go-v2 v1.26.1
+	github.com/aws/aws-sdk-go-v2/config v1.27.10
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.10
+	github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.13
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1
+	github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1
+	github.com/aws/smithy-go v1.20.2
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/containers/image/v5 v5.30.0
 	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/evanphx/json-patch v5.6.0+incompatible
-	github.com/go-git/go-git/v5 v5.11.0
-	github.com/go-openapi/runtime v0.27.2
-	github.com/go-openapi/strfmt v0.22.2
+	github.com/go-git/go-git/v5 v5.12.0
+	github.com/go-openapi/runtime v0.28.0
+	github.com/go-openapi/strfmt v0.23.0
 	github.com/gocql/gocql v1.6.0
 	github.com/godbus/dbus/v5 v5.1.0
 	github.com/google/go-cmp v0.6.0
@@ -27,13 +27,13 @@ require (
 	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed
 	github.com/magiconair/properties v1.8.7
 	github.com/mitchellh/mapstructure v1.5.0
-	github.com/onsi/ginkgo/v2 v2.16.0
-	github.com/onsi/gomega v1.31.1
+	github.com/onsi/ginkgo/v2 v2.17.1
+	github.com/onsi/gomega v1.32.0
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.19.0
 	github.com/scylladb/go-set v1.0.2
 	github.com/scylladb/gocqlx/v2 v2.8.0
-	github.com/scylladb/scylla-manager/v3 v3.2.6
+	github.com/scylladb/scylla-manager/v3 v3.2.7
 	github.com/scylladb/scylladb-swagger-go-client v0.2.0
 	github.com/shurcooL/githubv4 v0.0.0-20240120211514-18a1ae0e79dc
 	github.com/spf13/cobra v1.8.0
@@ -43,53 +43,65 @@ require (
 	go.uber.org/config v1.4.0
 	golang.org/x/oauth2 v0.18.0
 	golang.org/x/sys v0.18.0
-	google.golang.org/grpc v1.62.1
+	google.golang.org/grpc v1.63.0
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.29.2
-	k8s.io/apiextensions-apiserver v0.29.2
-	k8s.io/apimachinery v0.29.2
-	k8s.io/apiserver v0.29.2
-	k8s.io/cli-runtime v0.29.2
-	k8s.io/client-go v0.29.2
-	k8s.io/code-generator v0.29.2
-	k8s.io/component-base v0.29.2
-	k8s.io/component-helpers v0.29.2
-	k8s.io/cri-api v0.29.2
+	k8s.io/api v0.29.3
+	k8s.io/apiextensions-apiserver v0.29.3
+	k8s.io/apimachinery v0.29.3
+	k8s.io/apiserver v0.29.3
+	k8s.io/cli-runtime v0.29.3
+	k8s.io/client-go v0.29.3
+	k8s.io/code-generator v0.29.3
+	k8s.io/component-base v0.29.3
+	k8s.io/component-helpers v0.29.3
+	k8s.io/cri-api v0.29.3
 	k8s.io/klog/v2 v2.120.1
-	k8s.io/kubectl v0.29.2
-	k8s.io/kubelet v0.29.2
-	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
+	k8s.io/kubectl v0.29.3
+	k8s.io/kubelet v0.29.3
+	k8s.io/utils v0.0.0-20240310230437-4693a0247e57
 	sigs.k8s.io/controller-tools v0.11.3
 	sigs.k8s.io/yaml v1.4.0
 )

 require (
-	cloud.google.com/go/compute v1.25.0 // indirect
+	cloud.google.com/go/compute v1.25.1 // indirect
 	dario.cat/mergo v1.0.0 // indirect
+	github.com/Azure/azure-pipeline-go v0.2.3 // indirect
+	github.com/Azure/azure-storage-blob-go v0.15.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
+	github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
+	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
+	github.com/Azure/go-autorest/logger v0.2.1 // indirect
+	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
 	github.com/ProtonMail/go-crypto v1.0.0 // indirect
+	github.com/Unknwon/goconfig v1.0.0 // indirect
+	github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 // indirect
+	github.com/abbot/go-http-auth v0.4.0 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go v1.50.14 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 // indirect
+	github.com/aws/aws-sdk-go v1.51.14 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.28.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cloudflare/circl v1.3.7 // indirect
 	github.com/containers/storage v1.53.0 // indirect
+	github.com/coreos/go-semver v0.3.1 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/emicklei/go-restful/v3 v3.11.3 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.0 // indirect
 	github.com/emirpasic/gods v1.18.1 // indirect
 	github.com/fatih/color v1.14.1 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
@@ -98,27 +110,29 @@ require (
 	github.com/go-git/go-billy/v5 v5.5.0 // indirect
 	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
-	github.com/go-openapi/analysis v0.22.3 // indirect
-	github.com/go-openapi/errors v0.21.1 // indirect
-	github.com/go-openapi/jsonpointer v0.20.3 // indirect
-	github.com/go-openapi/jsonreference v0.20.5 // indirect
-	github.com/go-openapi/loads v0.21.6 // indirect
-	github.com/go-openapi/spec v0.20.15 // indirect
-	github.com/go-openapi/swag v0.22.10 // indirect
-	github.com/go-openapi/validate v0.23.2 // indirect
+	github.com/go-openapi/analysis v0.23.0 // indirect
+	github.com/go-openapi/errors v0.22.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonreference v0.21.0 // indirect
+	github.com/go-openapi/loads v0.22.0 // indirect
+	github.com/go-openapi/spec v0.21.0 // indirect
+	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/validate v0.24.0 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 	github.com/gobuffalo/flect v1.0.2 // indirect
 	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/golang/snappy v0.0.4 // indirect
 	github.com/google/btree v1.1.2 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
-	github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 // indirect
+	github.com/google/pprof v0.0.0-20240402174815-29b9bb013b0f // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/google/uuid v1.6.0 // indirect
+	github.com/googleapis/gax-go/v2 v2.12.3 // indirect
 	github.com/gorilla/websocket v1.5.1 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -130,13 +144,16 @@ require (
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/lnquy/cron v1.1.1 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-colorable v0.1.13 // indirect
-	github.com/mattn/go-isatty v0.0.17 // indirect
+	github.com/mattn/go-ieproxy v0.0.11 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
+	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
@@ -145,40 +162,52 @@ require (
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+	github.com/ncw/swift v1.0.53 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pjbgf/sha1cd v0.3.0 // indirect
-	github.com/prometheus/client_model v0.6.0 // indirect
-	github.com/prometheus/common v0.50.0 // indirect
+	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/common v0.52.2 // indirect
 	github.com/prometheus/procfs v0.13.0 // indirect
+	github.com/rclone/rclone v1.66.0 // indirect
+	github.com/rfjakob/eme v1.1.2 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
+	github.com/robfig/cron/v3 v3.0.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/scylladb/go-log v0.0.7 // indirect
 	github.com/scylladb/go-reflectx v1.0.1 // indirect
 	github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4 // indirect
-	github.com/sergi/go-diff v1.3.1 // indirect
+	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
 	github.com/shurcooL/graphql v0.0.0-20230722043721-ed46e5a46466 // indirect
-	github.com/skeema/knownhosts v1.2.1 // indirect
+	github.com/sirupsen/logrus v1.9.3 // indirect
+	github.com/skeema/knownhosts v1.2.2 // indirect
+	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
+	github.com/smartystreets/goconvey v1.8.1 // indirect
+	github.com/stretchr/objx v0.5.2 // indirect
 	github.com/xanzy/ssh-agent v0.3.3 // indirect
 	github.com/xlab/treeprint v1.2.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/otel v1.24.0 // indirect
 	go.opentelemetry.io/otel/metric v1.24.0 // indirect
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
-	go.starlark.net v0.0.0-20240123142251-f86470692795 // indirect
+	go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a // indirect
 	go.uber.org/multierr v1.11.0 // indirect
+	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/crypto v0.21.0 // indirect
 	golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
 	golang.org/x/mod v0.16.0 // indirect
-	golang.org/x/net v0.22.0 // indirect
+	golang.org/x/net v0.23.0 // indirect
 	golang.org/x/sync v0.6.0 // indirect
 	golang.org/x/term v0.18.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/tools v0.19.0 // indirect
+	google.golang.org/api v0.172.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
 	gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
@@ -186,11 +215,15 @@ require (
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
 	k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect
-	k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
+	k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/kustomize/api v0.16.0 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 )

-replace github.com/gocql/gocql => github.com/scylladb/gocql v1.12.0
+replace (
+	github.com/gocql/gocql => github.com/scylladb/gocql v1.13.0
+	github.com/rclone/rclone => github.com/scylladb/rclone v1.54.1-0.20240312172628-afe1fd2aa65e
+	google.golang.org/api => github.com/scylladb/google-api-go-client v0.34.1-patched
+)
diff --git a/go.sum b/go.sum
index 046415325af..f2778098da0 100644
--- a/go.sum
+++ b/go.sum
@@ -1,69 +1,676 @@
-cloud.google.com/go/compute v1.25.0 h1:H1/4SqSUhjPFE7L5ddzHOfY2bCAvjwNRZPNl6Ni5oYU=
-cloud.google.com/go/compute v1.25.0/go.mod h1:GR7F0ZPZH8EhChlMo9FkLd7eUTwEymjqQagxzilIxIE=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
+cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
+cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
+cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
+cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
+cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
+cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
+cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
+cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
+cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
+cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
+cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
+cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
+cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
+cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
+cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
+cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
+cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
+cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
+cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
+cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
+cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
+cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
+cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
+cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
+cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
+cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
+cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE=
+cloud.google.com/go/compute v1.25.1 h1:ZRpHJedLtTpKgr3RV1Fx23NuaAEN1Zfx9hw1u4aJdjU=
+cloud.google.com/go/compute v1.25.1/go.mod h1:oopOIR53ly6viBYxaDhBfJwzUAxf1zE//uf3IB011ls=
+cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
+cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
+cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
+cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
+cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
+cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
+cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
+cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
+cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
+cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
+cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
+cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
+cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
+cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
+cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
+cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
+cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
+cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
+cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc=
+cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
+cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
+cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
+cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
+cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
+cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
+cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
+cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
+cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
+cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
+cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
+cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
+cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
+cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
+cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
+cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
+cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
+cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
+cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
+cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
+cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo=
+cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
+cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
+cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
+cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
+cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
+cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
+cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
+cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
+cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
+cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
+cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
+cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
+cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
+cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
+cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
+cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
+cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
+cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
+cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
+cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
+cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
+cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= 
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= -github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= +github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= +github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/Unknwon/goconfig v0.0.0-20200908083735-df7de6a44db8 h1:1TrMV1HmBApBbM+Hy7RCKZD6UlYWYIPPfoeXomG7+zE= -github.com/Unknwon/goconfig v0.0.0-20200908083735-df7de6a44db8/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= +github.com/Unknwon/goconfig v1.0.0 h1:9IAu/BYbSLQi8puFjUQApZTxIHqSwrj5d8vpP8vTq4A= +github.com/Unknwon/goconfig v1.0.0/go.mod h1:wngxua9XCNjvHjDiTiV26DaKDT+0c63QR6H5hjVUUxw= +github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 h1:hhdWprfSpFbN7lz3W1gM40vOgvSh1WCSMxYD6gGB4Hs= +github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3/go.mod h1:XaUnRxSCYgL3kkgX0QHIV0D+znljPIDImxlv2kbGv0Y= github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0= github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/aws/aws-sdk-go v1.50.14 
h1:m1bxKtd1lJpNnl+Owah0+UPRuS9f3GFvxBPgc8RiodE= -github.com/aws/aws-sdk-go v1.50.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.25.2 h1:/uiG1avJRgLGiQM9X3qJM8+Qa6KRGK5rRPuXE0HUM+w= -github.com/aws/aws-sdk-go-v2 v1.25.2/go.mod h1:Evoc5AsmtveRt1komDwIsjHFyrP5tDuF1D1U+6z6pNo= -github.com/aws/aws-sdk-go-v2/config v1.27.6 h1:WmoH1aPrxwcqAZTTnETjKr+fuvqzKd4hRrKxQUiuKP4= -github.com/aws/aws-sdk-go-v2/config v1.27.6/go.mod h1:W9RZFF2pL+OhnUSZsQS/eDMWD8v+R+yWgjj3nSlrXVU= -github.com/aws/aws-sdk-go-v2/credentials v1.17.6 h1:akhj/nSC6SEx3OmiYGG/7mAyXMem9ZNVVf+DXkikcTk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.6/go.mod h1:chJZuJ7TkW4kiMwmldOJOEueBoSkUb4ynZS1d9dhygo= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.8 h1:WcK5HEs8ktLHy5uQbJ8MjVqMnE39uQ0H6fGj2eemL8E= -github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.8/go.mod h1:ZB0LL+WO1hmIlHCMQ4PIs7+QyJI/4SUbo+6ljwWsb0U= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 h1:AK0J8iYBFeUk2Ax7O8YpLtFsfhdOByh2QIkHmigpRYk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2/go.mod h1:iRlGzMix0SExQEviAyptRWRGdYNo3+ufW/lCzvKVTUc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 h1:bNo4LagzUKbjdxE0tIcR9pMzLR2U/Tgie1Hq1HQ3iH8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2/go.mod h1:wRQv0nN6v9wDXuWThpovGQjqF1HFdcgWjporw14lS8k= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 h1:EtOU5jsPdIQNP+6Q2C5e3d65NKT1PeCiQk+9OdzO12Q= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2/go.mod h1:tyF5sKccmDz0Bv4NrstEr+/9YkSPJHrcO7UsUKf7pWM= +github.com/aws/aws-sdk-go v1.51.14 h1:qedX6zZEO1a+5kra+D4ythOYR3TgaROC0hTPxhTFh8I= +github.com/aws/aws-sdk-go v1.51.14/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.26.1 h1:5554eUqIYVWpU0YmeeYZ0wU64H2VLBs8TlhRB2L+EkA= +github.com/aws/aws-sdk-go-v2 v1.26.1/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= +github.com/aws/aws-sdk-go-v2/config v1.27.10 h1:PS+65jThT0T/snC5WjyfHHyUgG+eBoupSDV+f838cro= +github.com/aws/aws-sdk-go-v2/config v1.27.10/go.mod h1:BePM7Vo4OBpHreKRUMuDXX+/+JWP38FLkzl5m27/Jjs= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10 h1:qDZ3EA2lv1KangvQB6y258OssCHD0xvaGiEDkG4X/10= +github.com/aws/aws-sdk-go-v2/credentials v1.17.10/go.mod h1:6t3sucOaYDwDssHQa0ojH1RpmVmF5/jArkye1b2FKMI= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.13 h1:loQ4VSt3hTm9n8ST9jveArwmhqAc5aiRJXlxLPxCNTw= +github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.13/go.mod h1:RjdeQvzJuUf9jWj+ta+7l3VnVpDZ+RmtP/p+QdwRIpI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 h1:FVJ0r5XTHSmIHJV6KuDmdYhEpvlHpiSd38RQWhut5J4= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1/go.mod h1:zusuAeqezXzAB24LGuzuekqMAEgWkVYukBec3kr3jUg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 h1:aw39xVGeRWlWx9EzGVnhOR4yOjQDHPQ6o6NmBlscyQg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5/go.mod h1:FSaRudD0dXiMPK2UjknVwwTYyZMRsHv3TtkabsZih5I= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 h1:PG1F3OD1szkuQPzDw3CIQsRIrtTlUC3lP84taWzHlq0= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5/go.mod h1:jU1li6RFryMz+so64PpKtudI+QzbKoIEivqdf6LNpOc= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.30.3 
h1:redziOZeT6YVgJfTS3c/dIG0KDbT+x4eAsAKuCHro+s= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.30.3/go.mod h1:BzzW6QegtSMnC1BhD+lagiUDSRYjRTOhXAb1mLfEaMg= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1 h1:kZR1TZ0VYcRK2LFiFt61EReplssCq9SZO4gVSYV1Aww= -github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1/go.mod h1:ifHRXsCyLVIdvDaAScQnM7jtsXtoBZFmyZiLMex8FTA= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 h1:EyBZibRTVAs6ECHZOw5/wlylS9OcTzwyjeQMudmREjE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1/go.mod h1:JKpmtYhhPs7D97NL/ltqz7yCkERFW5dOlHyVl66ZYF8= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.3 h1:/MpYoYvgshlGMFmSyfzGWf6HKoEo/DrKBoHxXR3vh+U= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.3/go.mod h1:1Pf5vPqk8t9pdYB3dmUMRE/0m8u0IHHg8ESSiutJd0I= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.4 h1:jRiWxyuVO8PlkN72wDMVn/haVH4SDCBkUt0Lf/dxd7s= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.4/go.mod h1:Ru7vg1iQ7cR4i7SZ/JTLYN9kaXtbL69UdgG0OQWQxW0= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 h1:utEGkfdQ4L6YW/ietH7111ZYglLJvS+sLriHJ1NBJEQ= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.1/go.mod h1:RsYqzYr2F2oPDdpy+PdhephuZxTfjHQe7SOBcZGoAU8= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 h1:9/GylMS45hGGFCcMrUZDVayQE1jYSIN6da9jo7RAYIw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1/go.mod h1:YjAPFn4kGFqKC54VsHs5fn5B6d+PCY2tziEa3U/GB5Y= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.3 h1:TkiFkSVX990ryWIMBCT4kPqZEgThQe1xPU/AQXavtvU= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.3/go.mod h1:xYNauIUqSuvzlPVb3VB5no/n48YGhmlInD3Uh0Co8Zc= -github.com/aws/smithy-go v1.20.1 h1:4SZlSlMr36UEqC7XOyRVb27XMeZubNcBNN+9IgEPIQw= -github.com/aws/smithy-go v1.20.1/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 h1:dZXY07Dm59TxAjJcUfNMJHLDI/gLMxTRZefn2jFAVsw= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1/go.mod h1:lVLqEtX+ezgtfalyJs7Peb0uv9dEpAQP5yuq2O26R44= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 h1:hSwDD19/e01z3pfyx+hDeX5T/0Sn+ZEnnTO5pVWKWx8= +github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4/go.mod h1:61CuGwE7jYn0g2gl7K3qoT4vCY59ZQEixkPu8PN5IrE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 h1:6tayEze2Y+hiL3kdnEUxSPsP+pJsUfwLSFspFl1ru9Q= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6/go.mod h1:qVNb/9IOVsLCZh0x2lnagrBwQ9fxajUpXS7OZfIsKn0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 h1:ogRAwT1/gxJBcSWDMZlgyFUM962F51A5CRhDLbxLdmo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7/go.mod h1:YCsIZhXfRPLFFCl5xxY+1T9RKzOKjCut+28JSX2DnAk= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 h1:WzFol5Cd+yDxPAdnzTA5LmpHYSWinhmSj4rQChV0ee8= +github.com/aws/aws-sdk-go-v2/service/sso v1.20.4/go.mod h1:qGzynb/msuZIE8I75DVRCUXw3o3ZyBmUvMwQ2t/BrGM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 h1:Jux+gDDyi1Lruk+KHF91tK2KCuY61kzoCpvtvJJBtOE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4/go.mod h1:mUYPBhaF2lGiukDEjJX2BLRRKTmoUSitGDUgM4tRxak= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 
h1:cwIxeBttqPN3qkaAjcEcsh8NYr8n2HZPkcKgPAi1phU= +github.com/aws/aws-sdk-go-v2/service/sts v1.28.6/go.mod h1:FZf1/nKNEkHdGGJP/cI2MoIMquumuRK6ol3QQJNDxmw= +github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= +github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY= @@ -74,25 +681,60 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded h1:WcPFZzCIqGt/TdFJHsOiX5dIlB/MUzrftltMhpjzfA8= +github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= +github.com/buengese/sgzip v0.1.1 h1:ry+T8l1mlmiWEsDrH/YHZnCVWD2S3im1KLsyO+8ZmTU= +github.com/buengese/sgzip v0.1.1/go.mod h1:i5ZiXGF3fhV7gL1xaRRL1nDnmpNj0X061FQzOS8VMas= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK84APFuMvxqsk3tEIaKH/z4Rpu3g= github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE= +github.com/calebcase/tmpfile v1.0.2 h1:1AGuhKiUu4J6wxz6lxuF6ck3f8G2kaV6KSEny0RGCig= +github.com/calebcase/tmpfile v1.0.2/go.mod h1:iErLeG/iqJr8LaQ/gYRv4GXdqssi3jg4iSzvrA06/lw= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= -github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/colinmarc/hdfs/v2 v2.2.0 h1:4AaIlTq+/sWmeqYhI0dX8bD4YrMQM990tRjm636FkGM= +github.com/colinmarc/hdfs/v2 v2.2.0/go.mod h1:Wss6n3mtaZyRwWaqtSH+6ge01qT0rw9dJJmvoUnIQ/E= github.com/containers/image/v5 v5.30.0 h1:CmHeSwI6W2kTRWnUsxATDFY5TEX4b58gPkaQcEyrLIA= github.com/containers/image/v5 v5.30.0/go.mod h1:gSD8MVOyqBspc0ynLsuiMR9qmt8UQ4jpVImjmK0uXfk= github.com/containers/storage v1.53.0 h1:VSES3C/u1pxjTJIXvLrSmyP7OBtDky04oGu07UvdTEA= github.com/containers/storage v1.53.0/go.mod h1:pujcoOSc+upx15Jirdkebhtd8uJiLwbSd/mYT6zDJK8= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= @@ -100,60 +742,95 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible h1:DtumzkLk2zZ2SeElEr+VNz+zV7l+BTe509cV4sKPXbM= +github.com/dropbox/dropbox-sdk-go-unofficial v5.6.0+incompatible/go.mod h1:lr+LhMM3F6Y3lW1T9j2U5l7QeuWm87N9+PPXo3yH4qY= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/emicklei/go-restful/v3 v3.11.3 h1:yagOQz/38xJmcNeZJtrUcKjkHRltIaIFXKWeG1SkWGE= -github.com/emicklei/go-restful/v3 v3.11.3/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.1-0.20230524094728-9239064ad72f/go.mod h1:sfYdkwUW4BA3PbKjySwjJy+O4Pu0h62rlqCMHNk+K+Q= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= github.com/fatih/color v1.14.1/go.mod 
h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/fatih/set v0.2.1 h1:nn2CaJyknWE/6txyUDGwysr3G5QC6xWB/PtVjPBbeaA= github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= -github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= +github.com/gabriel-vasile/mimetype v1.1.1 h1:qbN9MPuRf3bstHu9zkI9jDWNfH//9+9kHxr9oRBBBOA= +github.com/gabriel-vasile/mimetype v1.1.1/go.mod h1:6CDPel/o/3/s4+bp6kIbsWATq8pmgOisOPG40CJa6To= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= -github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod 
h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/analysis v0.22.3 h1:JfHesJsGyiNUlcDuuE1wg9QVQmXd6iB/TKCmHz9+P2U= -github.com/go-openapi/analysis v0.22.3/go.mod h1:y2vjQNdRVOg0gG88n7gBGKWm3yjNOlaTVkxBLHaNlts= -github.com/go-openapi/errors v0.21.1 h1:rVisxQPdETctjlYntm0Ek4dKf68nAQocCloCT50vWuI= -github.com/go-openapi/errors v0.21.1/go.mod h1:LyiY9bgc7AVVh6wtVvMYEyoj3KJYNoRw92mmvnMWgj8= -github.com/go-openapi/jsonpointer v0.20.3 h1:jykzYWS/kyGtsHfRt6aV8JTB9pcQAXPIA7qlZ5aRlyk= -github.com/go-openapi/jsonpointer v0.20.3/go.mod h1:c7l0rjoouAuIxCm8v/JWKRgMjDG/+/7UBWsXMrv6PsM= -github.com/go-openapi/jsonreference v0.20.5 h1:hutI+cQI+HbSQaIGSfsBsYI0pHk+CATf8Fk5gCSj0yI= -github.com/go-openapi/jsonreference v0.20.5/go.mod h1:thAqAp31UABtI+FQGKAQfmv7DbFpKNUlva2UPCxKu2Y= -github.com/go-openapi/loads v0.21.6 h1:qo9Ow4mbOe+epbJcFxPSYKVvPgHT+vvZRNC2BRatEeE= -github.com/go-openapi/loads v0.21.6/go.mod h1:eEquguZx+S9eigxJ7QhrzfhW1Me47n54wlHX9RK8to4= -github.com/go-openapi/runtime v0.27.2 h1:AOvytl8s9DzL7B27r6dZ4sqjVOJT6/3LzKeZoDIAh+g= -github.com/go-openapi/runtime v0.27.2/go.mod h1:a5AkfzISU/Iwq51ZiQLM+oNRDwqC9RtlSt57xUSyZhg= -github.com/go-openapi/spec v0.20.15 h1:8bDcVxF607pTh9NpPwgsH4J5Uhh5mV5XoWnkurdiY+U= -github.com/go-openapi/spec v0.20.15/go.mod h1:o0upgqg5uYFG7O5mADrDVmSG3Wa6y6OLhwiCqQ+sTv4= -github.com/go-openapi/strfmt v0.22.2 h1:DPYOrm6gexCfZZfXUaXFS4+Jw6HAaIIG0SZ5630f8yw= -github.com/go-openapi/strfmt v0.22.2/go.mod h1:HB/b7TCm91rno75Dembc1dFW/0FPLk5CEXsoF9ReNc4= -github.com/go-openapi/swag v0.22.10 h1:4y86NVn7Z2yYd6pfS4Z+Nyh3aAUL3Nul+LMbhFKy0gA= -github.com/go-openapi/swag v0.22.10/go.mod h1:Cnn8BYtRlx6BNE3DPN86f/xkapGIcLWzh3CLEb4C1jI= -github.com/go-openapi/validate v0.23.2 h1:dSV8fmCwFwTE6TYGVmWtpWN9aOTsidzcBsB2qPohZYI= -github.com/go-openapi/validate v0.23.2/go.mod h1:FencnMQqop3HPZk+wIkLsQHgOKP1EDAgF2LZDW7fWr8= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod 
h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobs/pretty v0.0.0-20180724170744-09732c25a95b h1:/vQ+oYKu+JoyaMPDsv5FzwuL2wwWBgBbtj/YLCi4LuA= @@ -162,46 +839,123 @@ github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q= -github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240402174815-29b9bb013b0f h1:f00RU+zOX+B3rLAmMMkzHUF2h1z4DeYR9tTCvEq2REY= +github.com/google/pprof v0.0.0-20240402174815-29b9bb013b0f/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod 
h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= @@ -209,20 +963,45 @@ github.com/grafana/grafana-api-golang-client v0.27.0 h1:zIwMXcbCB4n588i3O2N6HfNc github.com/grafana/grafana-api-golang-client v0.27.0/go.mod h1:uNLZEmgKtTjHBtCQMwNn3qsx2mpMb8zU+7T4Xv3NR9Y= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache 
v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hbollon/go-edlib v1.6.0 h1:ga7AwwVIvP8mHm9GsPueC0d71cfRU/52hmPJ7Tprv4E=
github.com/hbollon/go-edlib v1.6.0/go.mod h1:wnt6o6EIVEzUfgbUZY7BerzQ2uvzp354qmS2xaLkrhM=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/iguanesolutions/go-systemd/v5 v5.0.0 h1:E4OUiBdmlD1IsClS6cmRIdzWBW8T8UBitCqYem7A1KY=
+github.com/iguanesolutions/go-systemd/v5 v5.0.0/go.mod h1:VPlzL6z0rXd3HU7oLkMoEqTWBhHClInYX9rP2U/+giI=
github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
+github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJzodkA=
+github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
+github.com/jlaffaye/ftp v0.0.0-20201112195030-9aae4d151126 h1:ly2C51IMpCCV8RpTDRXgzG/L9iZXb8ePEixaew/HwBs=
+github.com/jlaffaye/ftp v0.0.0-20201112195030-9aae4d151126/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -233,14 +1012,34 @@ github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2E
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 h1:G+9t9cEtnC9jFiTxyptEKuNIAbiN5ZCQzX2a74lj3xg=
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004/go.mod h1:KmHnJWQrgEvbuy0vcvj00gtMqbvNn1L+3YUZLK/B92c=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
+github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348 h1:Lrn8srO9JDBCf2iPjqy62stl49UDwoOxZ9/NGVi+fnk=
+github.com/koofr/go-httpclient v0.0.0-20200420163713-93aa7c75b348/go.mod h1:JBLy//Q5jzU3XSMxdONTD5EIj1LhTPktosxG2Bw1iho=
+github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a h1:02cx9xF4W2FQ1oh8CK9dWV5BnZK2mUtcbr9xR+bZiKk=
+github.com/koofr/go-koofrclient v0.0.0-20190724113126-8e5366da203a/go.mod h1:MRAz4Gsxd+OzrZ0owwrUHc0zLESL+1Y5syqK/sJxK2A=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -251,19 +1050,27 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lnquy/cron v1.1.1 h1:iaDX1ublgQ9LBhA8l9BVU+FrTE1PPSPAuvAdhgdnXgA=
github.com/lnquy/cron v1.1.1/go.mod h1:hu2Y7H68/8oKk6T4+K4qdbopbnaP4rGltK3ylWiiDss=
+github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
+github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
-github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo=
+github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
+github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
@@ -287,72 +1094,100 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU=
-github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1 h1:nAjWYc03awJAjsozNehdGZsm5LP7AhLOvjgbS8zN1tk=
+github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
+github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks=
+github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM=
-github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
-github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo=
-github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0=
+github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
+github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
+github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
+github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 h1:XeOYlK9W1uCmhjJSsY78Mcuh7MVkNjTzmHx1yBzizSU=
+github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14/go.mod h1:jVblp62SafmidSkvWrXyxAme3gaTfEtWwRPGz5cpvHg=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
-github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos=
-github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
-github.com/prometheus/common v0.50.0 h1:YSZE6aa9+luNa2da6/Tik0q0A5AbR+U003TItK57CPQ=
-github.com/prometheus/common v0.50.0/go.mod h1:wHFBCEVWVmHMUpg7pYcOm2QUR/ocQdYSJVQJKnHc3xQ=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.52.2 h1:LW8Vk7BccEdONfrJBDffQGRtpSzi5CQaRZGtboOO2ck=
+github.com/prometheus/common v0.52.2/go.mod h1:lrWtQx+iDfn2mbH5GUzlH9TSHyfZpHkSiG1W7y3sF2Q=
github.com/prometheus/procfs v0.13.0 h1:GqzLlQyfsPbaEHaQkO7tbDlriv/4o5Hudv6OXHGKX7o=
github.com/prometheus/procfs v0.13.0/go.mod h1:cd4PFCR54QLnGKPaKGA6l+cfuNXtht43ZKY6tow0Y1g=
-github.com/rclone/rclone v1.51.0 h1:tna+E5mF9BxC6ZVpAfy/k2/cDHej/5JtPA0Ao2BB58I=
-github.com/rclone/rclone v1.51.0/go.mod h1:H4jaCoYf6554GT/f8HZ6IwnNbkCXI9HyXwMn8+FLivs=
-github.com/rfjakob/eme v1.1.1 h1:t+CgvcOn+eDvj2xdglxsSnkgg8LM8jwdxnV7OnsrTn0=
-github.com/rfjakob/eme v1.1.1/go.mod h1:U2bmx0hDj8EyDdcxmD5t3XHDnBFnyNNc22n1R4008eM=
+github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
+github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
+github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/scylladb/go-log v0.0.7 h1:gtIfCRaUcYpoJN17bpNkdX7tEoyXtRH6neTuihWSrT0=
github.com/scylladb/go-log v0.0.7/go.mod h1:kzHqijtjSAIQEoqJZ6c181WyeYa8+0nHiRperB5op0s=
github.com/scylladb/go-reflectx v1.0.1 h1:b917wZM7189pZdlND9PbIJ6NQxfDPfBvUaQ7cjj1iZQ=
github.com/scylladb/go-reflectx v1.0.1/go.mod h1:rWnOfDIRWBGN0miMLIcoPt/Dhi2doCMZqwMCJ3KupFc=
github.com/scylladb/go-set v1.0.2 h1:SkvlMCKhP0wyyct6j+0IHJkBkSZL+TDzZ4E7f7BCcRE=
github.com/scylladb/go-set v1.0.2/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs=
-github.com/scylladb/gocql v1.12.0 h1:KaP25dC2Mu0H382M8KZmkQp1XuasgBG97bBhFeFKVyk=
-github.com/scylladb/gocql v1.12.0/go.mod h1:ZLEJ0EVE5JhmtxIW2stgHq/v1P4fWap0qyyXSKyV8K0=
+github.com/scylladb/gocql v1.13.0 h1:QOr2yYuJuAfsoYPICxTj1RPm3Qm7qllSb3Q9B7WFEgU=
+github.com/scylladb/gocql v1.13.0/go.mod h1:ZLEJ0EVE5JhmtxIW2stgHq/v1P4fWap0qyyXSKyV8K0=
github.com/scylladb/gocqlx/v2 v2.8.0 h1:f/oIgoEPjKDKd+RIoeHqexsIQVIbalVmT+axwvUqQUg=
github.com/scylladb/gocqlx/v2 v2.8.0/go.mod h1:4/+cga34PVqjhgSoo5Nr2fX1MQIqZB5eCE5DK4xeDig=
-github.com/scylladb/scylla-manager/v3 v3.2.6 h1:kw0qkwSIOaz4YxvloejPSxl9gN5txG1/34mKwOzbuPI=
-github.com/scylladb/scylla-manager/v3 v3.2.6/go.mod h1:t8hRJ4bZwS1MyvV0khOVlm8gj880Gceh2WpzlkVF1so=
+github.com/scylladb/google-api-go-client v0.34.1-patched h1:DW+T0HA+74o6FDr3TFzVwgESabOB1eTwb4woE6oUziY=
+github.com/scylladb/google-api-go-client v0.34.1-patched/go.mod h1:RriRmS2wJXH+2yd9PRTEcR380U9AXmurWwznqVhzsSc=
+github.com/scylladb/rclone v1.54.1-0.20240312172628-afe1fd2aa65e h1:lJRphCtu+nKd+mfo8whOTeFkgjMWvk8iCSlqgibKSa8=
+github.com/scylladb/rclone v1.54.1-0.20240312172628-afe1fd2aa65e/go.mod h1:JGZp4EvCUK+6AM1Fe1dye5xvihTc/Bk0WnHHSCJOePM=
+github.com/scylladb/scylla-manager/v3 v3.2.7 h1:uQJYXOULBbBQVOER+YCy54kYNWV4z4MOZCMTV7wpgbk=
+github.com/scylladb/scylla-manager/v3 v3.2.7/go.mod h1:LlVeRoeGfDntpq8jsLBvF2Qeahy2zzZO18xr5Kh4ytc=
github.com/scylladb/scylladb-swagger-go-client v0.2.0 h1:WRzrS07NSQSwxkKz67UnzssOEtRb2Ri4yUEi/xOMLQ0=
github.com/scylladb/scylladb-swagger-go-client v0.2.0/go.mod h1:64059ZeUsLRLVJoTh7AT5elhBwyJudXKGHKxXerSoBo=
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4 h1:8qmTC5ByIXO3GP/IzBkxcZ/99VITvnIETDhdFz/om7A=
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
-github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
-github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/shurcooL/githubv4 v0.0.0-20240120211514-18a1ae0e79dc h1:vH0NQbIDk+mJLvBliNGfcQgUmhlniWBDXC79oRxfZA0=
github.com/shurcooL/githubv4 v0.0.0-20240120211514-18a1ae0e79dc/go.mod h1:zqMwyHmnN/eDOZOdiTohqIUKUrTFX62PNlu7IJdu0q8=
github.com/shurcooL/graphql v0.0.0-20230722043721-ed46e5a46466 h1:17JxqqJY66GmZVHkmAsGEkcIu0oCe3AM420QDgGwZx0=
@@ -360,178 +1195,597 @@ github.com/shurcooL/graphql v0.0.0-20230722043721-ed46e5a46466/go.mod h1:9dIRpgI
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ=
-github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
+github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
+github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
+github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
+github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
+github.com/spacemonkeygo/monkit/v3 v3.0.7 h1:LsGdIXl8mccqJrYEh4Uf4sLVGu/g0tjhNqQzdn9MzVk=
+github.com/spacemonkeygo/monkit/v3 v3.0.7/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8 h1:IGJQmLBLYBdAknj21W3JsVof0yjEXfy1Q0K3YZebDOg=
+github.com/t3rm1n4l/go-mega v0.0.0-20200416171014-ffad7fcb44b8/go.mod h1:XWL4vDyd3JKmJx+hZWUVgCNmmhZ2dTBcaNDcxH465s0=
+github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
+github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
+github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk=
+github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yunify/qingstor-sdk-go/v3 v3.2.0 h1:9sB2WZMgjwSUNZhrgvaNGazVltoFUUfuS9f0uCWtTr8=
+github.com/yunify/qingstor-sdk-go/v3 v3.2.0/go.mod h1:KciFNuMu6F4WLk9nGwwK69sCGKLCdd9f97ac/wfumS4=
+github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g=
+github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI=
+go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE=
go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw=
-go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
-go.starlark.net v0.0.0-20240123142251-f86470692795 h1:LmbG8Pq7KDGkglKVn8VpZOZj6vb9b8nKEGcg9l03epM=
-go.starlark.net v0.0.0-20240123142251-f86470692795/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a h1:Oe+v9w90BBIxQZ4U39+axR8KxrBbxqnRudPPcBIlP3o=
+go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/config v1.4.0 h1:upnMPpMm6WlbZtXoasNkK4f0FhxwS+W4Iqz5oNznehQ=
go.uber.org/config v1.4.0/go.mod h1:aCyrMHmUAc/s2h9sv1koP84M9ZF/4K+g2oleyESO/Ig=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
+golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220526153639-5463443f8c37/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
+golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191104232314-dc038396d1f0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/api v0.167.0 h1:CKHrQD1BLRii6xdkatBDXyKzM0mkawt2QP+H3LtPmSE=
-google.golang.org/api v0.167.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 h1:IR+hp6ypxjH24bkMfEJ0yHR21+gwPWdV+/IBrPQyn3k=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
+google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
+google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
+google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI=
+google.golang.org/genproto
v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak= +google.golang.org/genproto 
v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod 
h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -549,34 +1803,42 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= -k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= -k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg= -k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8= -k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= -k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= -k8s.io/apiserver v0.29.2 h1:+Z9S0dSNr+CjnVXQePG8TcBWHr3Q7BmAr7NraHvsMiQ= -k8s.io/apiserver v0.29.2/go.mod h1:B0LieKVoyU7ykQvPFm7XSdIHaCHSzCzQWPFa5bqbeMQ= -k8s.io/cli-runtime v0.29.2 h1:smfsOcT4QujeghsNjECKN3lwyX9AwcFU0nvJ7sFN3ro= -k8s.io/cli-runtime v0.29.2/go.mod h1:KLisYYfoqeNfO+MkTWvpqIyb1wpJmmFJhioA0xd4MW8= -k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg= -k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA= -k8s.io/code-generator v0.29.2 h1:c9/iw2KnNpw2IRV+wwuG/Wns2TjPSgjWzbbjTevyiHI= -k8s.io/code-generator v0.29.2/go.mod h1:FwFi3C9jCrmbPjekhaCYcYG1n07CYiW1+PAPCockaos= -k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8= -k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM= -k8s.io/component-helpers v0.29.2 h1:1kTIanIdqUVG2nW3e2ENVEaYbZKphqPgEdCmJvk71aw= -k8s.io/component-helpers v0.29.2/go.mod h1:gFc/p60rYtpD8UCcNfPCmbokHT2uy0yDpmr/KKUMNAw= -k8s.io/cri-api v0.29.2 h1:LLSeWVC3h1nVMpV9vHiE+mO3spDYmz/C0GvxH6p6tkg= -k8s.io/cri-api v0.29.2/go.mod h1:9fQTFm+wi4FLyqrkVUoMJiUB3mE74XrVvHz8uFY/sSw= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= +k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= +k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= +k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc= +k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= +k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= +k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= +k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= +k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= +k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= +k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/code-generator v0.29.3 h1:m7E25/t9R9NvejspO2zBdyu+/Gl0Z5m7dCRc680KS14= +k8s.io/code-generator v0.29.3/go.mod h1:x47ofBhN4gxYFcxeKA1PYXeaPreAGaDN85Y/lNUsPoM= +k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo= 
+k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio= +k8s.io/component-helpers v0.29.3 h1:1dqZswuZgT2ZMixYeORyCUOAApXxgsvjVSgfoUT+P4o= +k8s.io/component-helpers v0.29.3/go.mod h1:yiDqbRQrnQY+sPju/bL7EkwDJb6LVOots53uZNMZBos= +k8s.io/cri-api v0.29.3 h1:ppKSui+hhTJW774Mou6x+/ealmzt2jmTM0vsEQVWrjI= +k8s.io/cri-api v0.29.3/go.mod h1:3X7EnhsNaQnCweGhQCJwKNHlH7wHEYuKQ19bRvXMoJY= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= @@ -584,14 +1846,52 @@ k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/kubectl v0.29.2 h1:uaDYaBhumvkwz0S2XHt36fK0v5IdNgL7HyUniwb2IUo= -k8s.io/kubectl v0.29.2/go.mod h1:BhizuYBGcKaHWyq+G7txGw2fXg576QbPrrnQdQDZgqI= -k8s.io/kubelet v0.29.2 h1:bQ2StqkUqPCFNLtGLsb3v3O2LKQHXNMju537zOGboRg= -k8s.io/kubelet v0.29.2/go.mod h1:i5orNPqW/fAMrqptbCXFW/vLBBP12TZZc41IrrvF7SY= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= -k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 h1:w6nThEmGo9zcL+xH1Tu6pjxJ3K1jXFW+V0u4peqN8ks= +k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= +k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= +k8s.io/kubelet v0.29.3 h1:X9h0ZHzc+eUeNTaksbN0ItHyvGhQ7Z0HPjnQD2oHdwU= +k8s.io/kubelet v0.29.3/go.mod h1:jDiGuTkFOUynyBKzOoC1xRSWlgAZ9UPcTYeFyjr6vas= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY= +k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= 
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/controller-tools v0.11.3 h1:T1xzLkog9saiyQSLz1XOImu4OcbdXWytc5cmYsBeBiE= sigs.k8s.io/controller-tools v0.11.3/go.mod h1:qcfX7jfcfYD/b7lAhvqAyTbt/px4GpvN88WKLFFv7p8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= @@ -606,3 +1906,9 @@ sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +storj.io/common v0.0.0-20201204143755-a03c37168cb1 h1:SwSIESeyaX3kOhZN1jeNPbegSraFTdxtWD+Dn0dT7y4= +storj.io/common v0.0.0-20201204143755-a03c37168cb1/go.mod h1:6sepaQTRLuygvA+GNPzdgRPOB1+wFfjde76KBWofbMY= +storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ= +storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo= +storj.io/uplink v1.4.1 h1:+vqMsmDArZuFC3TGW+fwJ1FaukjjSmj7dPcOhfYuHBY= +storj.io/uplink v1.4.1/go.mod h1:cN4UhPehwrdrbrtJPyvMp0sW5JbjaeDEFajQP3RX1ug= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index f494e95b15d..40d1a891605 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ 
package internal // Version is the current tagged release of the library. -const Version = "1.25.0" +const Version = "1.25.1" diff --git a/vendor/github.com/Azure/azure-pipeline-go/LICENSE b/vendor/github.com/Azure/azure-pipeline-go/LICENSE new file mode 100644 index 00000000000..d1ca00f20a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/LICENSE @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go new file mode 100644 index 00000000000..d7b866cdf95 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/core.go @@ -0,0 +1,284 @@ +package pipeline + +import ( + "context" + "github.com/mattn/go-ieproxy" + "net" + "net/http" + "os" + "time" +) + +// The Factory interface represents an object that can create its Policy object. Each HTTP request sent +// requires that this Factory create a new instance of its Policy object. +type Factory interface { + New(next Policy, po *PolicyOptions) Policy +} + +// FactoryFunc is an adapter that allows the use of an ordinary function as a Factory interface. +type FactoryFunc func(next Policy, po *PolicyOptions) PolicyFunc + +// New calls f(next,po). +func (f FactoryFunc) New(next Policy, po *PolicyOptions) Policy { + return f(next, po) +} + +// The Policy interface represents a mutable Policy object created by a Factory. The object can mutate/process +// the HTTP request and then forward it on to the next Policy object in the linked-list. The returned +// Response goes backward through the linked-list for additional processing. +// NOTE: Request is passed by value so changes do not change the caller's version of +// the request. However, Request has some fields that reference mutable objects (not strings). +// These references are copied; a deep copy is not performed. Specifically, this means that +// you should avoid modifying the objects referred to by these fields: URL, Header, Body, +// GetBody, TransferEncoding, Form, MultipartForm, Trailer, TLS, Cancel, and Response. +type Policy interface { + Do(ctx context.Context, request Request) (Response, error) +} + +// PolicyFunc is an adapter that allows the use of an ordinary function as a Policy interface. +type PolicyFunc func(ctx context.Context, request Request) (Response, error) + +// Do calls f(ctx, request). 
+func (f PolicyFunc) Do(ctx context.Context, request Request) (Response, error) { + return f(ctx, request) +} + +// Options configures a Pipeline's behavior. +type Options struct { + HTTPSender Factory // If sender is nil, then the pipeline's default client is used to send the HTTP requests. + Log LogOptions +} + +// LogLevel tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LogLevel uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LogLevel = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +// LogOptions configures the pipeline's logging mechanism & level filtering. +type LogOptions struct { + Log func(level LogLevel, message string) + + // ShouldLog is called periodically allowing you to return whether the specified LogLevel should be logged or not. + // An application can return different values over the its lifetime; this allows the application to dynamically + // alter what is logged. NOTE: This method can be called by multiple goroutines simultaneously so make sure + // you implement it in a goroutine-safe way. If nil, nothing is logged (the equivalent of returning LogNone). + // Usually, the function will be implemented simply like this: return level <= LogWarning + ShouldLog func(level LogLevel) bool +} + +type pipeline struct { + factories []Factory + options Options +} + +// The Pipeline interface represents an ordered list of Factory objects and an object implementing the HTTPSender interface. +// You construct a Pipeline by calling the pipeline.NewPipeline function. To send an HTTP request, call pipeline.NewRequest +// and then call Pipeline's Do method passing a context, the request, and a method-specific Factory (or nil). Passing a +// method-specific Factory allows this one call to Do to inject a Policy into the linked-list. The policy is injected where +// the MethodFactoryMarker (see the pipeline.MethodFactoryMarker function) is in the slice of Factory objects. +// +// When Do is called, the Pipeline object asks each Factory object to construct its Policy object and adds each Policy to a linked-list. +// THen, Do sends the Context and Request through all the Policy objects. The final Policy object sends the request over the network +// (via the HTTPSender object passed to NewPipeline) and the response is returned backwards through all the Policy objects. +// Since Pipeline and Factory objects are goroutine-safe, you typically create 1 Pipeline object and reuse it to make many HTTP requests. 
+type Pipeline interface { + Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) +} + +// NewPipeline creates a new goroutine-safe Pipeline object from the slice of Factory objects and the specified options. +func NewPipeline(factories []Factory, o Options) Pipeline { + if o.HTTPSender == nil { + o.HTTPSender = newDefaultHTTPClientFactory() + } + if o.Log.Log == nil { + o.Log.Log = func(LogLevel, string) {} // No-op logger + } + return &pipeline{factories: factories, options: o} +} + +// Do is called for each and every HTTP request. It tells each Factory to create its own (mutable) Policy object +// replacing a MethodFactoryMarker factory (if it exists) with the methodFactory passed in. Then, the Context and Request +// are sent through the pipeline of Policy objects (which can transform the Request's URL/query parameters/headers) and +// ultimately sends the transformed HTTP request over the network. +func (p *pipeline) Do(ctx context.Context, methodFactory Factory, request Request) (Response, error) { + response, err := p.newPolicies(methodFactory).Do(ctx, request) + request.close() + return response, err +} + +func (p *pipeline) newPolicies(methodFactory Factory) Policy { + // The last Policy is the one that actually sends the request over the wire and gets the response. + // It is overridable via the Options' HTTPSender field. + po := &PolicyOptions{pipeline: p} // One object shared by all policy objects + next := p.options.HTTPSender.New(nil, po) + + // Walk over the slice of Factory objects in reverse (from wire to API) + markers := 0 + for i := len(p.factories) - 1; i >= 0; i-- { + factory := p.factories[i] + if _, ok := factory.(methodFactoryMarker); ok { + markers++ + if markers > 1 { + panic("MethodFactoryMarker can only appear once in the pipeline") + } + if methodFactory != nil { + // Replace MethodFactoryMarker with passed-in methodFactory + next = methodFactory.New(next, po) + } + } else { + // Use the slice's Factory to construct its Policy + next = factory.New(next, po) + } + } + + // Each Factory has created its Policy + if markers == 0 && methodFactory != nil { + panic("Non-nil methodFactory requires MethodFactoryMarker in the pipeline") + } + return next // Return head of the Policy object linked-list +} + +// A PolicyOptions represents optional information that can be used by a node in the +// linked-list of Policy objects. A PolicyOptions is passed to the Factory's New method +// which passes it (if desired) to the Policy object it creates. Today, the Policy object +// uses the options to perform logging. But, in the future, this could be used for more. +type PolicyOptions struct { + pipeline *pipeline +} + +// ShouldLog returns true if the specified log level should be logged. +func (po *PolicyOptions) ShouldLog(level LogLevel) bool { + if po.pipeline.options.Log.ShouldLog != nil { + return po.pipeline.options.Log.ShouldLog(level) + } + return false +} + +// Log logs a string to the Pipeline's Logger. +func (po *PolicyOptions) Log(level LogLevel, msg string) { + if !po.ShouldLog(level) { + return // Short circuit message formatting if we're not logging it + } + + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + po.pipeline.options.Log.Log(level, msg) + + // If logger doesn't handle fatal/panic, we'll do it here. 
+ if level == LogFatal { + os.Exit(1) + } else if level == LogPanic { + panic(msg) + } +} + +var pipelineHTTPClient = newDefaultHTTPClient() + +func newDefaultHTTPClient() *http.Client { + // We want the Transport to have a large connection pool + return &http.Client{ + Transport: &http.Transport{ + Proxy: ieproxy.GetProxyFunc(), + // We use Dial instead of DialContext as DialContext has been reported to cause slower performance. + Dial /*Context*/ : (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).Dial, /*Context*/ + MaxIdleConns: 0, // No limit + MaxIdleConnsPerHost: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableKeepAlives: false, + DisableCompression: false, + MaxResponseHeaderBytes: 0, + //ResponseHeaderTimeout: time.Duration{}, + //ExpectContinueTimeout: time.Duration{}, + }, + } +} + +// newDefaultHTTPClientFactory creates a DefaultHTTPClientPolicyFactory object that sends HTTP requests to a Go's default http.Client. +func newDefaultHTTPClientFactory() Factory { + return FactoryFunc(func(next Policy, po *PolicyOptions) PolicyFunc { + return func(ctx context.Context, request Request) (Response, error) { + r, err := pipelineHTTPClient.Do(request.WithContext(ctx)) + if err != nil { + err = NewError(err, "HTTP request failed") + } + return NewHTTPResponse(r), err + } + }) +} + +var mfm = methodFactoryMarker{} // Singleton + +// MethodFactoryMarker returns a special marker Factory object. When Pipeline's Do method is called, any +// MethodMarkerFactory object is replaced with the specified methodFactory object. If nil is passed fro Do's +// methodFactory parameter, then the MethodFactoryMarker is ignored as the linked-list of Policy objects is created. +func MethodFactoryMarker() Factory { + return mfm +} + +type methodFactoryMarker struct { +} + +func (methodFactoryMarker) New(next Policy, po *PolicyOptions) Policy { + panic("methodFactoryMarker policy should have been replaced with a method policy") +} + +// LogSanitizer can be implemented to clean secrets from lines logged by ForceLog +// By default no implemetation is provided here, because pipeline may be used in many different +// contexts, so the correct implementation is context-dependent +type LogSanitizer interface { + SanitizeLogMessage(raw string) string +} + +var sanitizer LogSanitizer +var enableForceLog bool = true + +// SetLogSanitizer can be called to supply a custom LogSanitizer. +// There is no threadsafety or locking on the underlying variable, +// so call this function just once at startup of your application +// (Don't later try to change the sanitizer on the fly). +func SetLogSanitizer(s LogSanitizer)(){ + sanitizer = s +} + +// SetForceLogEnabled can be used to disable ForceLog +// There is no threadsafety or locking on the underlying variable, +// so call this function just once at startup of your application +// (Don't later try to change the setting on the fly). +func SetForceLogEnabled(enable bool)() { + enableForceLog = enable +} + + diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go new file mode 100644 index 00000000000..e7ce4970b8b --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog.go @@ -0,0 +1,14 @@ +package pipeline + + +// ForceLog should rarely be used. 
It forceable logs an entry to the +// Windows Event Log (on Windows) or to the SysLog (on Linux) +func ForceLog(level LogLevel, msg string) { + if !enableForceLog { + return + } + if sanitizer != nil { + msg = sanitizer.SanitizeLogMessage(msg) + } + forceLog(level, msg) +} diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go new file mode 100644 index 00000000000..819509a1e54 --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_syslog.go @@ -0,0 +1,33 @@ +// +build !windows,!nacl,!plan9 + +package pipeline + +import ( + "log" + "log/syslog" +) + +// forceLog should rarely be used. It forceable logs an entry to the +// Windows Event Log (on Windows) or to the SysLog (on Linux) +func forceLog(level LogLevel, msg string) { + if defaultLogger == nil { + return // Return fast if we failed to create the logger. + } + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + switch level { + case LogFatal: + defaultLogger.Fatal(msg) + case LogPanic: + defaultLogger.Panic(msg) + case LogError, LogWarning, LogInfo: + defaultLogger.Print(msg) + } +} + +var defaultLogger = func() *log.Logger { + l, _ := syslog.NewLogger(syslog.LOG_USER|syslog.LOG_WARNING, log.LstdFlags) + return l +}() diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go new file mode 100644 index 00000000000..5fcf40014aa --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/defaultlog_windows.go @@ -0,0 +1,61 @@ +package pipeline + +import ( + "os" + "syscall" + "unsafe" +) + +// forceLog should rarely be used. 
It forceable logs an entry to the +// Windows Event Log (on Windows) or to the SysLog (on Linux) +func forceLog(level LogLevel, msg string) { + var el eventType + switch level { + case LogError, LogFatal, LogPanic: + el = elError + case LogWarning: + el = elWarning + case LogInfo: + el = elInfo + } + // We are logging it, ensure trailing newline + if len(msg) == 0 || msg[len(msg)-1] != '\n' { + msg += "\n" // Ensure trailing newline + } + reportEvent(el, 0, msg) +} + +type eventType int16 + +const ( + elSuccess eventType = 0 + elError eventType = 1 + elWarning eventType = 2 + elInfo eventType = 4 +) + +var reportEvent = func() func(eventType eventType, eventID int32, msg string) { + advAPI32 := syscall.MustLoadDLL("advapi32.dll") // lower case to tie in with Go's sysdll registration + registerEventSource := advAPI32.MustFindProc("RegisterEventSourceW") + + sourceName, _ := os.Executable() + sourceNameUTF16, _ := syscall.UTF16PtrFromString(sourceName) + handle, _, lastErr := registerEventSource.Call(uintptr(0), uintptr(unsafe.Pointer(sourceNameUTF16))) + if lastErr == nil { // On error, logging is a no-op + return func(eventType eventType, eventID int32, msg string) {} + } + reportEvent := advAPI32.MustFindProc("ReportEventW") + return func(eventType eventType, eventID int32, msg string) { + s, _ := syscall.UTF16PtrFromString(msg) + _, _, _ = reportEvent.Call( + uintptr(handle), // HANDLE hEventLog + uintptr(eventType), // WORD wType + uintptr(0), // WORD wCategory + uintptr(eventID), // DWORD dwEventID + uintptr(0), // PSID lpUserSid + uintptr(1), // WORD wNumStrings + uintptr(0), // DWORD dwDataSize + uintptr(unsafe.Pointer(&s)), // LPCTSTR *lpStrings + uintptr(0)) // LPVOID lpRawData + } +}() diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go new file mode 100644 index 00000000000..b5ab05f4dee --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/doc.go @@ -0,0 +1,161 @@ +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package pipeline implements an HTTP request/response middleware pipeline whose +policy objects mutate an HTTP request's URL, query parameters, and/or headers before +the request is sent over the wire. + +Not all policy objects mutate an HTTP request; some policy objects simply impact the +flow of requests/responses by performing operations such as logging, retry policies, +timeouts, failure injection, and deserialization of response payloads. + +Implementing the Policy Interface + +To implement a policy, define a struct that implements the pipeline.Policy interface's Do method. Your Do +method is called when an HTTP request wants to be sent over the network. Your Do method can perform any +operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, and/or query +parameters, inject a failure, etc. Your Do method must then forward the HTTP request to next Policy object +in a linked-list ensuring that the remaining Policy objects perform their work. Ultimately, the last Policy +object sends the HTTP request over the network (by calling the HTTPSender's Do method). + +When an HTTP response comes back, each Policy object in the linked-list gets a chance to process the response +(in reverse order). 
The Policy object can log the response, retry the operation if due to a transient failure +or timeout, deserialize the response body, etc. Ultimately, the last Policy object returns the HTTP response +to the code that initiated the original HTTP request. + +Here is a template for how to define a pipeline.Policy object: + + type myPolicy struct { + node PolicyNode + // TODO: Add configuration/setting fields here (if desired)... + } + + func (p *myPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // TODO: Mutate/process the HTTP request here... + response, err := p.node.Do(ctx, request) // Forward HTTP request to next Policy & get HTTP response + // TODO: Mutate/process the HTTP response here... + return response, err // Return response/error to previous Policy + } + +Implementing the Factory Interface + +Each Policy struct definition requires a factory struct definition that implements the pipeline.Factory interface's New +method. The New method is called when application code wants to initiate a new HTTP request. Factory's New method is +passed a pipeline.PolicyNode object which contains a reference to the owning pipeline.Pipeline object (discussed later) and +a reference to the next Policy object in the linked list. The New method should create its corresponding Policy object +passing it the PolicyNode and any other configuration/settings fields appropriate for the specific Policy object. + +Here is a template for how to define a pipeline.Policy object: + + // NOTE: Once created & initialized, Factory objects should be goroutine-safe (ex: immutable); + // this allows reuse (efficient use of memory) and makes these objects usable by multiple goroutines concurrently. + type myPolicyFactory struct { + // TODO: Add any configuration/setting fields if desired... + } + + func (f *myPolicyFactory) New(node pipeline.PolicyNode) Policy { + return &myPolicy{node: node} // TODO: Also initialize any configuration/setting fields here (if desired)... + } + +Using your Factory and Policy objects via a Pipeline + +To use the Factory and Policy objects, an application constructs a slice of Factory objects and passes +this slice to the pipeline.NewPipeline function. + + func NewPipeline(factories []pipeline.Factory, sender pipeline.HTTPSender) Pipeline + +This function also requires an object implementing the HTTPSender interface. For simple scenarios, +passing nil for HTTPSender causes a standard Go http.Client object to be created and used to actually +send the HTTP response over the network. For more advanced scenarios, you can pass your own HTTPSender +object in. This allows sharing of http.Client objects or the use of custom-configured http.Client objects +or other objects that can simulate the network requests for testing purposes. + +Now that you have a pipeline.Pipeline object, you can create a pipeline.Request object (which is a simple +wrapper around Go's standard http.Request object) and pass it to Pipeline's Do method along with passing a +context.Context for cancelling the HTTP request (if desired). + + type Pipeline interface { + Do(ctx context.Context, methodFactory pipeline.Factory, request pipeline.Request) (pipeline.Response, error) + } + +Do iterates over the slice of Factory objects and tells each one to create its corresponding +Policy object. After the linked-list of Policy objects have been created, Do calls the first +Policy object passing it the Context & HTTP request parameters. 
These parameters now flow through +all the Policy objects giving each object a chance to look at and/or mutate the HTTP request. +The last Policy object sends the message over the network. + +When the network operation completes, the HTTP response and error return values pass +back through the same Policy objects in reverse order. Most Policy objects ignore the +response/error but some log the result, retry the operation (depending on the exact +reason the operation failed), or deserialize the response's body. Your own Policy +objects can do whatever they like when processing outgoing requests or incoming responses. + +Note that after an I/O request runs to completion, the Policy objects for that request +are garbage collected. However, Pipeline object (like Factory objects) are goroutine-safe allowing +them to be created once and reused over many I/O operations. This allows for efficient use of +memory and also makes them safely usable by multiple goroutines concurrently. + +Inserting a Method-Specific Factory into the Linked-List of Policy Objects + +While Pipeline and Factory objects can be reused over many different operations, it is +common to have special behavior for a specific operation/method. For example, a method +may need to deserialize the response's body to an instance of a specific data type. +To accommodate this, the Pipeline's Do method takes an additional method-specific +Factory object. The Do method tells this Factory to create a Policy object and +injects this method-specific Policy object into the linked-list of Policy objects. + +When creating a Pipeline object, the slice of Factory objects passed must have 1 +(and only 1) entry marking where the method-specific Factory should be injected. +The Factory marker is obtained by calling the pipeline.MethodFactoryMarker() function: + + func MethodFactoryMarker() pipeline.Factory + +Creating an HTTP Request Object + +The HTTP request object passed to Pipeline's Do method is not Go's http.Request struct. +Instead, it is a pipeline.Request struct which is a simple wrapper around Go's standard +http.Request. You create a pipeline.Request object by calling the pipeline.NewRequest function: + + func NewRequest(method string, url url.URL, options pipeline.RequestOptions) (request pipeline.Request, err error) + +To this function, you must pass a pipeline.RequestOptions that looks like this: + + type RequestOptions struct { + // The readable and seekable stream to be sent to the server as the request's body. + Body io.ReadSeeker + + // The callback method (if not nil) to be invoked to report progress as the stream is uploaded in the HTTP request. + Progress ProgressReceiver + } + +The method and struct ensure that the request's body stream is a read/seekable stream. +A seekable stream is required so that upon retry, the final Policy object can seek +the stream back to the beginning before retrying the network request and re-uploading the +body. In addition, you can associate a ProgressReceiver callback function which will be +invoked periodically to report progress while bytes are being read from the body stream +and sent over the network. + +Processing the HTTP Response + +When an HTTP response comes in from the network, a reference to Go's http.Response struct is +embedded in a struct that implements the pipeline.Response interface: + + type Response interface { + Response() *http.Response + } + +This interface is returned through all the Policy objects. 
Each Policy object can call the Response +interface's Response method to examine (or mutate) the embedded http.Response object. + +A Policy object can internally define another struct (implementing the pipeline.Response interface) +that embeds an http.Response and adds additional fields and return this structure to other Policy +objects. This allows a Policy object to deserialize the body to some other struct and return the +original http.Response and the additional struct back through the Policy chain. Other Policy objects +can see the Response but cannot see the additional struct with the deserialized body. After all the +Policy objects have returned, the pipeline.Response interface is returned by Pipeline's Do method. +The caller of this method can perform a type assertion attempting to get back to the struct type +really returned by the Policy object. If the type assertion is successful, the caller now has +access to both the http.Response and the deserialized struct object.*/ +package pipeline diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go new file mode 100644 index 00000000000..5d3d4339e4f --- /dev/null +++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/error.go @@ -0,0 +1,184 @@ +package pipeline + +import ( + "fmt" + "runtime" +) + +type causer interface { + Cause() error +} + +func errorWithPC(msg string, pc uintptr) string { + s := "" + if fn := runtime.FuncForPC(pc); fn != nil { + file, line := fn.FileLine(pc) + s = fmt.Sprintf("-> %v, %v:%v\n", fn.Name(), file, line) + } + s += msg + "\n\n" + return s +} + +func getPC(callersToSkip int) uintptr { + // Get the PC of Initialize method's caller. + pc := [1]uintptr{} + _ = runtime.Callers(callersToSkip, pc[:]) + return pc[0] +} + +// ErrorNode can be an embedded field in a private error object. This field +// adds Program Counter support and a 'cause' (reference to a preceding error). +// When initializing a error type with this embedded field, initialize the +// ErrorNode field by calling ErrorNode{}.Initialize(cause). +type ErrorNode struct { + pc uintptr // Represents a Program Counter that you can get symbols for. + cause error // Refers to the preceding error (or nil) +} + +// Error returns a string with the PC's symbols or "" if the PC is invalid. +// When defining a new error type, have its Error method call this one passing +// it the string representation of the error. +func (e *ErrorNode) Error(msg string) string { + s := errorWithPC(msg, e.pc) + if e.cause != nil { + s += e.cause.Error() + "\n" + } + return s +} + +// Cause returns the error that preceded this error. +func (e *ErrorNode) Cause() error { return e.cause } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (e *ErrorNode) Unwrap() error { return e.cause } + +// Temporary returns true if the error occurred due to a temporary condition. +func (e ErrorNode) Temporary() bool { + type temporary interface { + Temporary() bool + } + + for err := e.cause; err != nil; { + if t, ok := err.(temporary); ok { + return t.Temporary() + } + + if cause, ok := err.(causer); ok { + err = cause.Cause() + } else { + err = nil + } + } + return false +} + +// Timeout returns true if the error occurred due to time expiring. 
+func (e ErrorNode) Timeout() bool {
+	type timeout interface {
+		Timeout() bool
+	}
+
+	for err := e.cause; err != nil; {
+		if t, ok := err.(timeout); ok {
+			return t.Timeout()
+		}
+
+		if cause, ok := err.(causer); ok {
+			err = cause.Cause()
+		} else {
+			err = nil
+		}
+	}
+	return false
+}
+
+// Initialize is used to initialize an embedded ErrorNode field.
+// It captures the caller's program counter and saves the cause (preceding error).
+// To initialize the field, use "ErrorNode{}.Initialize(cause, 3)". A callersToSkip
+// value of 3 is very common; but, depending on your code nesting, you may need
+// a different value.
+func (ErrorNode) Initialize(cause error, callersToSkip int) ErrorNode {
+	pc := getPC(callersToSkip)
+	return ErrorNode{pc: pc, cause: cause}
+}
+
+// Cause walks all the preceding errors and returns the originating error.
+func Cause(err error) error {
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
+
+// ErrorNodeNoCause can be an embedded field in a private error object. This field
+// adds Program Counter support.
+// When initializing an error type with this embedded field, initialize the
+// ErrorNodeNoCause field by calling ErrorNodeNoCause{}.Initialize().
+type ErrorNodeNoCause struct {
+	pc uintptr // Represents a Program Counter that you can get symbols for.
+}
+
+// Error returns a string with the PC's symbols or "" if the PC is invalid.
+// When defining a new error type, have its Error method call this one passing
+// it the string representation of the error.
+func (e *ErrorNodeNoCause) Error(msg string) string {
+	return errorWithPC(msg, e.pc)
+}
+
+// Temporary returns true if the error occurred due to a temporary condition.
+func (e ErrorNodeNoCause) Temporary() bool {
+	return false
+}
+
+// Timeout returns true if the error occurred due to time expiring.
+func (e ErrorNodeNoCause) Timeout() bool {
+	return false
+}
+
+// Initialize is used to initialize an embedded ErrorNodeNoCause field.
+// It captures the caller's program counter.
+// To initialize the field, use "ErrorNodeNoCause{}.Initialize(3)". A callersToSkip
+// value of 3 is very common; but, depending on your code nesting, you may need
+// a different value.
+func (ErrorNodeNoCause) Initialize(callersToSkip int) ErrorNodeNoCause {
+	pc := getPC(callersToSkip)
+	return ErrorNodeNoCause{pc: pc}
+}
+
+// NewError creates a simple string error (like errors.New). But, this
+// error also captures the caller's Program Counter and the preceding error (if provided).
+func NewError(cause error, msg string) error {
+	if cause != nil {
+		return &pcError{
+			ErrorNode: ErrorNode{}.Initialize(cause, 3),
+			msg:       msg,
+		}
+	}
+	return &pcErrorNoCause{
+		ErrorNodeNoCause: ErrorNodeNoCause{}.Initialize(3),
+		msg:              msg,
+	}
+}
+
+// pcError is a simple string error (like errors.New) with an ErrorNode (PC & cause).
+type pcError struct {
+	ErrorNode
+	msg string
+}
+
+// Error satisfies the error interface. It shows the error with Program Counter
+// symbols and calls Error on the preceding error so you can see the full error chain.
+func (e *pcError) Error() string { return e.ErrorNode.Error(e.msg) }
+
+// pcErrorNoCause is a simple string error (like errors.New) with an ErrorNode (PC).
+type pcErrorNoCause struct {
+	ErrorNodeNoCause
+	msg string
+}
+
+// Error satisfies the error interface. It shows the error with Program Counter symbols.
+func (e *pcErrorNoCause) Error() string { return e.ErrorNodeNoCause.Error(e.msg) }
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
new file mode 100644
index 00000000000..efa3c8ed06b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/progress.go
@@ -0,0 +1,82 @@
+package pipeline
+
+import "io"
+
+// ********** The following is common between the request body AND the response body.
+
+// ProgressReceiver defines the signature of a callback function invoked as progress is reported.
+type ProgressReceiver func(bytesTransferred int64)
+
+// ********** The following are specific to the request body (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type requestBodyProgress struct {
+	requestBody io.ReadSeeker // Seeking is required to support retries
+	pr          ProgressReceiver
+}
+
+// NewRequestBodyProgress adds progress reporting to an HTTP request's body stream.
+func NewRequestBodyProgress(requestBody io.ReadSeeker, pr ProgressReceiver) io.ReadSeeker {
+	if pr == nil {
+		panic("pr must not be nil")
+	}
+	return &requestBodyProgress{requestBody: requestBody, pr: pr}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *requestBodyProgress) Read(p []byte) (n int, err error) {
+	n, err = rbp.requestBody.Read(p)
+	if err != nil {
+		return
+	}
+	// Invokes the user's callback method to report progress
+	position, err := rbp.requestBody.Seek(0, io.SeekCurrent)
+	if err != nil {
+		panic(err)
+	}
+	rbp.pr(position)
+	return
+}
+
+func (rbp *requestBodyProgress) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+	return rbp.requestBody.Seek(offset, whence)
+}
+
+// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
+func (rbp *requestBodyProgress) Close() error {
+	if c, ok := rbp.requestBody.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
+
+// ********** The following are specific to the response body (a ReadCloser)
+
+// This struct is used when receiving a body from the network
+type responseBodyProgress struct {
+	responseBody io.ReadCloser
+	pr           ProgressReceiver
+	offset       int64
+}
+
+// NewResponseBodyProgress adds progress reporting to an HTTP response's body stream.
+func NewResponseBodyProgress(responseBody io.ReadCloser, pr ProgressReceiver) io.ReadCloser {
+	if pr == nil {
+		panic("pr must not be nil")
+	}
+	return &responseBodyProgress{responseBody: responseBody, pr: pr, offset: 0}
+}
+
+// Read reads a block of data from an inner stream and reports progress
+func (rbp *responseBodyProgress) Read(p []byte) (n int, err error) {
+	n, err = rbp.responseBody.Read(p)
+	rbp.offset += int64(n)
+
+	// Invokes the user's callback method to report progress
+	rbp.pr(rbp.offset)
+	return
+}
+
+func (rbp *responseBodyProgress) Close() error {
+	return rbp.responseBody.Close()
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
new file mode 100644
index 00000000000..1fbe72bd4dd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/request.go
@@ -0,0 +1,147 @@
+package pipeline
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// Request is a thin wrapper over an http.Request. The wrapper provides several helper methods.
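+// A hypothetical caller might construct one like this (illustrative only; the URL is made up):
+//
+//	u, _ := url.Parse("https://example.com/path")
+//	req, err := NewRequest(http.MethodPut, *u, strings.NewReader("payload"))
+//
+// Because the wrapper embeds *http.Request, the standard fields (Header, etc.) remain accessible.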
+type Request struct {
+	*http.Request
+}
+
+// NewRequest initializes a new HTTP request object with the specified method, URL, and optional body.
+func NewRequest(method string, url url.URL, body io.ReadSeeker) (request Request, err error) {
+	// Note: the url is passed by value so that any pipeline operations that modify it do so on a copy.
+
+	// This code to construct an http.Request is copied from http.NewRequest(); we intentionally omitted removeEmptyPort for now.
+	request.Request = &http.Request{
+		Method:     method,
+		URL:        &url,
+		Proto:      "HTTP/1.1",
+		ProtoMajor: 1,
+		ProtoMinor: 1,
+		Header:     make(http.Header),
+		Host:       url.Host,
+	}
+
+	if body != nil {
+		err = request.SetBody(body)
+	}
+	return
+}
+
+// SetBody sets the body and content length; it assumes body is not nil.
+func (r Request) SetBody(body io.ReadSeeker) error {
+	size, err := body.Seek(0, io.SeekEnd)
+	if err != nil {
+		return err
+	}
+
+	body.Seek(0, io.SeekStart)
+	r.ContentLength = size
+	r.Header["Content-Length"] = []string{strconv.FormatInt(size, 10)}
+
+	if size != 0 {
+		r.Body = &retryableRequestBody{body: body}
+		r.GetBody = func() (io.ReadCloser, error) {
+			_, err := body.Seek(0, io.SeekStart)
+			if err != nil {
+				return nil, err
+			}
+			return r.Body, nil
+		}
+	} else {
+		// in case the body is an empty stream, we need to use http.NoBody to explicitly provide no content
+		r.Body = http.NoBody
+		r.GetBody = func() (io.ReadCloser, error) {
+			return http.NoBody, nil
+		}
+
+		// close the user-provided empty body
+		if c, ok := body.(io.Closer); ok {
+			c.Close()
+		}
+	}
+
+	return nil
+}
+
+// Copy makes a copy of an http.Request. Specifically, it makes a deep copy
+// of its Method, URL, Host, Proto(Major/Minor), Header, ContentLength, Close,
+// RemoteAddr, and RequestURI. Copy makes a shallow copy of the Body, GetBody, TLS,
+// Cancel, Response, and ctx fields. Copy panics if any of these fields are
+// not nil: TransferEncoding, Form, PostForm, MultipartForm, or Trailer.
+func (r Request) Copy() Request {
+	if r.TransferEncoding != nil || r.Form != nil || r.PostForm != nil || r.MultipartForm != nil || r.Trailer != nil {
+		panic("Can't make a deep copy of the http.Request because at least one of the following is not nil:" +
+			"TransferEncoding, Form, PostForm, MultipartForm, or Trailer.")
+	}
+	copy := *r.Request          // Copy the request
+	urlCopy := *(r.Request.URL) // Copy the URL
+	copy.URL = &urlCopy
+	copy.Header = http.Header{} // Copy the header
+	for k, vs := range r.Header {
+		for _, value := range vs {
+			copy.Header.Add(k, value)
+		}
+	}
+	return Request{Request: &copy} // Return the copy
+}
+
+func (r Request) close() error {
+	if r.Body != nil && r.Body != http.NoBody {
+		c, ok := r.Body.(*retryableRequestBody)
+		if !ok {
+			panic("unexpected request body type (should be *retryableRequestBody)")
+		}
+		return c.realClose()
+	}
+	return nil
+}
+
+// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
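+// Retry policies are the expected callers: rewinding guarantees the full body is re-uploaded on the next attempt.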
+func (r Request) RewindBody() error {
+	if r.Body != nil && r.Body != http.NoBody {
+		s, ok := r.Body.(io.Seeker)
+		if !ok {
+			panic("unexpected request body type (should be io.Seeker)")
+		}
+
+		// Reset the stream back to the beginning
+		_, err := s.Seek(0, io.SeekStart)
+		return err
+	}
+	return nil
+}
+
+// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
+
+// This struct is used when sending a body to the network
+type retryableRequestBody struct {
+	body io.ReadSeeker // Seeking is required to support retries
+}
+
+// Read reads a block of data from the inner stream.
+func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
+	return b.body.Read(p)
+}
+
+func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
+	return b.body.Seek(offset, whence)
+}
+
+func (b *retryableRequestBody) Close() error {
+	// We don't want the underlying transport to close the request body on transient failures so this is a nop.
+	// The pipeline closes the request body upon success.
+	return nil
+}
+
+func (b *retryableRequestBody) realClose() error {
+	if c, ok := b.body.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
new file mode 100644
index 00000000000..f2dc164821d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/response.go
@@ -0,0 +1,74 @@
+package pipeline
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"sort"
+	"strings"
+)
+
+// The Response interface exposes an http.Response object as it returns through the pipeline of Policy objects.
+// This ensures that Policy objects have access to the HTTP response. However, the object this interface encapsulates
+// might be a struct with additional fields that is created by a Policy object (typically a method-specific Factory).
+// The method that injected the method-specific Factory gets this returned Response and performs a type assertion
+// to the expected struct and returns the struct to its caller.
+type Response interface {
+	Response() *http.Response
+}
+
+// This is the default struct that has the http.Response.
+// A method can replace this struct with its own struct containing an http.Response
+// field and any other additional fields.
+type httpResponse struct {
+	response *http.Response
+}
+
+// NewHTTPResponse is typically called by a Policy object to return a Response object.
+func NewHTTPResponse(response *http.Response) Response {
+	return &httpResponse{response: response}
+}
+
+// This method satisfies the public Response interface's Response method
+func (r httpResponse) Response() *http.Response {
+	return r.response
+}
+
+// WriteRequestWithResponse appends a formatted HTTP request into a Buffer. If response and/or err are
+// not nil, they are also written into the Buffer.
+func WriteRequestWithResponse(b *bytes.Buffer, request *http.Request, response *http.Response, err error) {
+	// Write the request into the buffer.
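+	// The output looks roughly like this (illustrative), one header per line with Authorization redacted:
+	//   GET https://example.com/container/blob
+	//   Authorization: REDACTED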
+	fmt.Fprint(b, "   "+request.Method+" "+request.URL.String()+"\n")
+	writeHeader(b, request.Header)
+	if response != nil {
+		fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
+		fmt.Fprint(b, "   RESPONSE Status: "+response.Status+"\n")
+		writeHeader(b, response.Header)
+	}
+	if err != nil {
+		fmt.Fprintln(b, "   --------------------------------------------------------------------------------")
+		fmt.Fprint(b, "   ERROR:\n"+err.Error()+"\n")
+	}
+}
+
+// writeHeader appends an HTTP request's or response's header into a Buffer.
+func writeHeader(b *bytes.Buffer, header map[string][]string) {
+	if len(header) == 0 {
+		b.WriteString("   (no headers)\n")
+		return
+	}
+	keys := make([]string, 0, len(header))
+	// Alphabetize the headers
+	for k := range header {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	for _, k := range keys {
+		// Redact the value of any Authorization header to prevent security information from persisting in logs
+		value := interface{}("REDACTED")
+		if !strings.EqualFold(k, "Authorization") {
+			value = header[k]
+		}
+		fmt.Fprintf(b, "   %s: %+v\n", k, value)
+	}
+}
diff --git a/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
new file mode 100644
index 00000000000..899f996b542
--- /dev/null
+++ b/vendor/github.com/Azure/azure-pipeline-go/pipeline/version.go
@@ -0,0 +1,9 @@
+package pipeline
+
+const (
+	// UserAgent is the string to be used in the user agent string when making requests.
+	UserAgent = "azure-pipeline-go/" + Version
+
+	// Version is the semantic version (see http://semver.org) of the pipeline package.
+	Version = "0.2.1"
+)
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/LICENSE b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE
new file mode 100644
index 00000000000..d1ca00f20a8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/LICENSE
@@ -0,0 +1,21 @@
+    MIT License
+
+    Copyright (c) Microsoft Corporation. All rights reserved.
+
+    Permission is hereby granted, free of charge, to any person obtaining a copy
+    of this software and associated documentation files (the "Software"), to deal
+    in the Software without restriction, including without limitation the rights
+    to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the Software is
+    furnished to do so, subject to the following conditions:
+
+    The above copyright notice and this permission notice shall be included in all
+    copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
+    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+    OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+    SOFTWARE
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
new file mode 100644
index 00000000000..25fe6842215
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/access_conditions.go
@@ -0,0 +1,65 @@
+package azblob
+
+import (
+	"time"
+)
+
+// ModifiedAccessConditions identifies standard HTTP access conditions which you optionally set.
+type ModifiedAccessConditions struct {
+	IfModifiedSince   time.Time
+	IfUnmodifiedSince time.Time
+	IfMatch           ETag
+	IfNoneMatch       ETag
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac ModifiedAccessConditions) pointers() (ims *time.Time, ius *time.Time, ime *ETag, inme *ETag) {
+	if !ac.IfModifiedSince.IsZero() {
+		ims = &ac.IfModifiedSince
+	}
+	if !ac.IfUnmodifiedSince.IsZero() {
+		ius = &ac.IfUnmodifiedSince
+	}
+	if ac.IfMatch != ETagNone {
+		ime = &ac.IfMatch
+	}
+	if ac.IfNoneMatch != ETagNone {
+		inme = &ac.IfNoneMatch
+	}
+	return
+}
+
+// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
+type ContainerAccessConditions struct {
+	ModifiedAccessConditions
+	LeaseAccessConditions
+}
+
+// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
+type BlobAccessConditions struct {
+	ModifiedAccessConditions
+	LeaseAccessConditions
+}
+
+// LeaseAccessConditions identifies lease access conditions for a container or blob which you optionally set.
+type LeaseAccessConditions struct {
+	LeaseID string
+}
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac LeaseAccessConditions) pointers() (leaseID *string) {
+	if ac.LeaseID != "" {
+		leaseID = &ac.LeaseID
+	}
+	return
+}
+
+/*
+// getInt32 is for internal infrastructure. It is used with access condition values where
+// 0 (the default setting) is meaningful. The library interprets 0 as "do not send the header",
+// and the privately-stored field in the access condition object is stored as +1 higher than desired.
+// This method returns true if the value is > 0 (explicitly set), along with the stored value - 1 (the desired value).
+func getInt32(value int32) (bool, int32) { + return value > 0, value - 1 +} +*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go new file mode 100644 index 00000000000..8d82ebe8c6a --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/bytes_writer.go @@ -0,0 +1,24 @@ +package azblob + +import ( + "errors" +) + +type bytesWriter []byte + +func newBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("Offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("Not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go new file mode 100644 index 00000000000..6beb80f8800 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/chunkwriting.go @@ -0,0 +1,215 @@ +package azblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + + guuid "github.com/google/uuid" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeeker, LeaseAccessConditions, []byte, ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) + CommitBlockList(context.Context, []string, BlobHTTPHeaders, Metadata, BlobAccessConditions, AccessTierType, BlobTagsMap, ClientProvidedKeyOptions, ImmutabilityPolicyOptions) (*BlockBlobCommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably +// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The +// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload +// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works +// well, 4 MiB or 8 MiB, and autoscale to as many goroutines within the memory limit. This gives a single dial to tweak and we can +// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). +// We can even provide a utility to dial this number in for customer networks to optimize their copies. +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamToBlockBlobOptions) (*BlockBlobCommitBlockListResponse, error) { + if err := o.defaults(); err != nil { + return nil, err + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + cp := &copier{ + ctx: ctx, + cancel: cancel, + reader: from, + to: to, + id: newID(), + o: o, + errCh: make(chan error, 1), + } + + // Send all our chunks until we get an error. + var err error + for { + if err = cp.sendChunk(); err != nil { + break + } + } + // If the error is not EOF, then we have a problem. 
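+	// (sendChunk maps io.EOF / io.ErrUnexpectedEOF from the reader to io.EOF, so io.EOF here
+	// simply means the reader was drained and every chunk was scheduled.)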
+	if err != nil && !errors.Is(err, io.EOF) {
+		cp.wg.Wait()
+		return nil, err
+	}
+
+	// Close out our upload.
+	if err := cp.close(); err != nil {
+		return nil, err
+	}
+
+	return cp.result, nil
+}
+
+// copier streams a file via chunks in parallel from a reader representing a file.
+// Do not use directly, instead use copyFromReader().
+type copier struct {
+	// ctx holds the context of a copier. It is normally a faux pas to store a Context in a struct. In this case,
+	// the copier has the lifetime of a function call, so it's fine.
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// o contains our options for uploading.
+	o UploadStreamToBlockBlobOptions
+
+	// id provides the ids for each chunk.
+	id *id
+
+	// reader is the source to be written to storage.
+	reader io.Reader
+	// to is the location we are writing our chunks to.
+	to blockWriter
+
+	// errCh is used to hold the first error from our concurrent writers.
+	errCh chan error
+	// wg provides a count of how many writers we are waiting to finish.
+	wg sync.WaitGroup
+
+	// result holds the final result from blob storage after we have submitted all chunks.
+	result *BlockBlobCommitBlockListResponse
+}
+
+type copierChunk struct {
+	buffer []byte
+	id     string
+	length int
+}
+
+// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error,
+// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier.
+func (c *copier) getErr() error {
+	select {
+	case err := <-c.errCh:
+		return err
+	default:
+	}
+	return c.ctx.Err()
+}
+
+// sendChunk reads data from our internal reader, creates a chunk, and sends it to be written via a channel.
+// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
+func (c *copier) sendChunk() error {
+	if err := c.getErr(); err != nil {
+		return err
+	}
+
+	buffer := c.o.TransferManager.Get()
+	if len(buffer) == 0 {
+		return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager")
+	}
+
+	n, err := io.ReadFull(c.reader, buffer)
+	if n > 0 {
+		// Some data was read, schedule the write.
+		id := c.id.next()
+		c.wg.Add(1)
+		c.o.TransferManager.Run(
+			func() {
+				defer c.wg.Done()
+				c.write(copierChunk{buffer: buffer, id: id, length: n})
+			},
+		)
+	} else {
+		// Return the unused buffer to the manager.
+		c.o.TransferManager.Put(buffer)
+	}
+
+	if err == nil {
+		return nil
+	} else if err == io.EOF || err == io.ErrUnexpectedEOF {
+		return io.EOF
+	}
+
+	if cerr := c.getErr(); cerr != nil {
+		return cerr
+	}
+
+	return err
+}
+
+// write uploads a chunk to blob storage.
+func (c *copier) write(chunk copierChunk) {
+	defer c.o.TransferManager.Put(chunk.buffer)
+
+	if err := c.ctx.Err(); err != nil {
+		return
+	}
+
+	_, err := c.to.StageBlock(c.ctx, chunk.id, bytes.NewReader(chunk.buffer[:chunk.length]), c.o.AccessConditions.LeaseAccessConditions, nil, c.o.ClientProvidedKeyOptions)
+	if err != nil {
+		c.errCh <- fmt.Errorf("write error: %w", err)
+		return
+	}
+}
+
+// close commits our blocks to blob storage and closes our writer.
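+// It waits for every outstanding write goroutine, surfaces any recorded error, and only then issues CommitBlockList.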
+func (c *copier) close() error { + c.wg.Wait() + + if err := c.getErr(); err != nil { + return err + } + + var err error + c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), c.o.BlobHTTPHeaders, c.o.Metadata, c.o.AccessConditions, c.o.BlobAccessTier, c.o.BlobTagsMap, c.o.ClientProvidedKeyOptions, c.o.ImmutabilityPolicyOptions) + return err +} + +// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. +type id struct { + u [64]byte + num uint32 + all []string +} + +// newID constructs a new id. +func newID() *id { + uu := guuid.New() + u := [64]byte{} + copy(u[:], uu[:]) + return &id{u: u} +} + +// next returns the next ID. +func (id *id) next() string { + defer atomic.AddUint32(&id.num, 1) + + binary.BigEndian.PutUint32(id.u[len(guuid.UUID{}):], atomic.LoadUint32(&id.num)) + str := base64.StdEncoding.EncodeToString(id.u[:]) + id.all = append(id.all, str) + + return str +} + +// issued returns all ids that have been issued. This returned value shares the internal slice so it is not safe to modify the return. +// The value is only valid until the next time next() is called. +func (id *id) issued() []string { + return id.all +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go new file mode 100644 index 00000000000..18c3c2655da --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/common_utils.go @@ -0,0 +1 @@ +package azblob diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go new file mode 100644 index 00000000000..1a6da586fd5 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/highlevel.go @@ -0,0 +1,569 @@ +package azblob + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "os" + "sync" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// CommonResponse returns the headers common to all blob REST API responses. +type CommonResponse interface { + // ETag returns the value for header ETag. + ETag() ETag + + // LastModified returns the value for header Last-Modified. + LastModified() time.Time + + // RequestID returns the value for header x-ms-request-id. + RequestID() string + + // Date returns the value for header Date. + Date() time.Time + + // Version returns the value for header x-ms-version. + Version() string + + // Response returns the raw HTTP response object. + Response() *http.Response +} + +// UploadToBlockBlobOptions identifies options used by the UploadBufferToBlockBlob and UploadFileToBlockBlob functions. +type UploadToBlockBlobOptions struct { + // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobURL. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress pipeline.ProgressReceiver + + // BlobHTTPHeaders indicates the HTTP headers to be associated with the blob. + BlobHTTPHeaders BlobHTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata Metadata + + // AccessConditions indicates the access conditions for the block blob. 
+	AccessConditions BlobAccessConditions
+
+	// BlobAccessTier indicates the tier of the blob.
+	BlobAccessTier AccessTierType
+
+	// BlobTagsMap contains the tags to associate with the blob.
+	BlobTagsMap BlobTagsMap
+
+	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+	ClientProvidedKeyOptions ClientProvidedKeyOptions
+
+	// ImmutabilityPolicyOptions indicates an immutability policy or legal hold to be placed upon finishing upload.
+	// A container with object-level immutability enabled is required.
+	ImmutabilityPolicyOptions ImmutabilityPolicyOptions
+
+	// Parallelism indicates the maximum number of blocks to upload in parallel (0=default)
+	Parallelism uint16
+}
+
+// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob.
+func uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64,
+	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
+	if o.BlockSize == 0 {
+		// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
+		if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
+			return nil, errors.New("buffer is too large to upload to a block blob")
+		}
+		// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
+		if readerSize <= BlockBlobMaxUploadBlobBytes {
+			o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
+		} else {
+			o.BlockSize = readerSize / BlockBlobMaxBlocks   // buffer / max blocks = block size to use all 50,000 blocks
+			if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
+				o.BlockSize = BlobDefaultDownloadBlockSize
+			}
+			// StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize).
+		}
+	}
+
+	if readerSize <= BlockBlobMaxUploadBlobBytes {
+		// If the size can fit in 1 Upload call, do it this way
+		var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
+		if o.Progress != nil {
+			body = pipeline.NewRequestBodyProgress(body, o.Progress)
+		}
+		return blockBlobURL.Upload(ctx, body, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions)
+	}
+
+	var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
+
+	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
+	progress := int64(0)
+	progressLock := &sync.Mutex{}
+
+	err := DoBatchTransfer(ctx, BatchTransferOptions{
+		OperationName: "uploadReaderAtToBlockBlob",
+		TransferSize:  readerSize,
+		ChunkSize:     o.BlockSize,
+		Parallelism:   o.Parallelism,
+		Operation: func(offset int64, count int64, ctx context.Context) error {
+			// This function is called once per block.
+			// It is passed this block's offset within the buffer and its count of bytes.
+			// Prepare to read the proper block/section of the buffer.
+			var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
+			blockNum := offset / o.BlockSize
+			if o.Progress != nil {
+				blockProgress := int64(0)
+				body = pipeline.NewRequestBodyProgress(body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - blockProgress
+						blockProgress = bytesTransferred
+						progressLock.Lock() // 1 goroutine at a time gets a progress report
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+
+			// Block IDs are unique values to avoid issues if 2+ clients are uploading blocks
+			// at the same time causing PutBlockList to get a mix of blocks from all the clients.
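+			// (Each ID below is a base64-encoded random UUID; CommitBlockList later references these IDs in order.)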
+			blockIDList[blockNum] = base64.StdEncoding.EncodeToString(newUUID().bytes())
+			_, err := blockBlobURL.StageBlock(ctx, blockIDList[blockNum], body, o.AccessConditions.LeaseAccessConditions, nil, o.ClientProvidedKeyOptions)
+			return err
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	// All put blocks were successful, call Put Block List to finalize the blob
+	return blockBlobURL.CommitBlockList(ctx, blockIDList, o.BlobHTTPHeaders, o.Metadata, o.AccessConditions, o.BlobAccessTier, o.BlobTagsMap, o.ClientProvidedKeyOptions, o.ImmutabilityPolicyOptions)
+}
+
+// UploadBufferToBlockBlob uploads a buffer in blocks to a block blob.
+func UploadBufferToBlockBlob(ctx context.Context, b []byte,
+	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
+	return uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), blockBlobURL, o)
+}
+
+// UploadFileToBlockBlob uploads a file in blocks to a block blob.
+func UploadFileToBlockBlob(ctx context.Context, file *os.File,
+	blockBlobURL BlockBlobURL, o UploadToBlockBlobOptions) (CommonResponse, error) {
+
+	stat, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+	return uploadReaderAtToBlockBlob(ctx, file, stat.Size(), blockBlobURL, o)
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+const BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
+
+// DownloadFromBlobOptions identifies options used by the DownloadBlobToBuffer and DownloadBlobToFile functions.
+type DownloadFromBlobOptions struct {
+	// BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize.
+	BlockSize int64
+
+	// Progress is a function that is invoked periodically as bytes are received.
+	Progress pipeline.ProgressReceiver
+
+	// AccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
+	AccessConditions BlobAccessConditions
+
+	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
+	ClientProvidedKeyOptions ClientProvidedKeyOptions
+
+	// Parallelism indicates the maximum number of blocks to download in parallel (0=default)
+	Parallelism uint16
+
+	// RetryReaderOptionsPerBlock is used when downloading each block.
+	RetryReaderOptionsPerBlock RetryReaderOptions
+}
+
+// downloadBlobToWriterAt downloads an Azure blob to an io.WriterAt in parallel.
+func downloadBlobToWriterAt(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+	writer io.WriterAt, o DownloadFromBlobOptions, initialDownloadResponse *DownloadResponse) error {
+	if o.BlockSize == 0 {
+		o.BlockSize = BlobDefaultDownloadBlockSize
+	}
+
+	if count == CountToEnd { // If size not specified, calculate it
+		if initialDownloadResponse != nil {
+			count = initialDownloadResponse.ContentLength() - offset // if we have the length, use it
+		} else {
+			// If we don't have the length at all, get it
+			dr, err := blobURL.Download(ctx, 0, CountToEnd, o.AccessConditions, false, o.ClientProvidedKeyOptions)
+			if err != nil {
+				return err
+			}
+			count = dr.ContentLength() - offset
+		}
+	}
+
+	if count <= 0 {
+		// The file is empty, there is nothing to download.
+		return nil
+	}
+
+	// Prepare and do parallel download.
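+	// Each chunk is fetched with its own ranged Download call and written at its own offset
+	// via newSectionWriter, so chunks may complete in any order (illustrative note).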
+	progress := int64(0)
+	progressLock := &sync.Mutex{}
+
+	err := DoBatchTransfer(ctx, BatchTransferOptions{
+		OperationName: "downloadBlobToWriterAt",
+		TransferSize:  count,
+		ChunkSize:     o.BlockSize,
+		Parallelism:   o.Parallelism,
+		Operation: func(chunkStart int64, count int64, ctx context.Context) error {
+			dr, err := blobURL.Download(ctx, chunkStart+offset, count, o.AccessConditions, false, o.ClientProvidedKeyOptions)
+			if err != nil {
+				return err
+			}
+			body := dr.Body(o.RetryReaderOptionsPerBlock)
+			if o.Progress != nil {
+				rangeProgress := int64(0)
+				body = pipeline.NewResponseBodyProgress(
+					body,
+					func(bytesTransferred int64) {
+						diff := bytesTransferred - rangeProgress
+						rangeProgress = bytesTransferred
+						progressLock.Lock()
+						progress += diff
+						o.Progress(progress)
+						progressLock.Unlock()
+					})
+			}
+			_, err = io.Copy(newSectionWriter(writer, chunkStart, count), body)
+			body.Close()
+			return err
+		},
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DownloadBlobToBuffer downloads an Azure blob to a buffer in parallel.
+// Offset and count are optional; pass 0 for both to download the entire blob.
+func DownloadBlobToBuffer(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+	b []byte, o DownloadFromBlobOptions) error {
+	return downloadBlobToWriterAt(ctx, blobURL, offset, count, newBytesWriter(b), o, nil)
+}
+
+// DownloadBlobToFile downloads an Azure blob to a local file.
+// The file will be truncated if its size doesn't match the blob's size.
+// Offset and count are optional; pass 0 for both to download the entire blob.
+func DownloadBlobToFile(ctx context.Context, blobURL BlobURL, offset int64, count int64,
+	file *os.File, o DownloadFromBlobOptions) error {
+	// 1. Calculate the size of the destination file
+	var size int64
+
+	if count == CountToEnd {
+		// Try to get Azure blob's size
+		props, err := blobURL.GetProperties(ctx, o.AccessConditions, o.ClientProvidedKeyOptions)
+		if err != nil {
+			return err
+		}
+		size = props.ContentLength() - offset
+	} else {
+		size = count
+	}
+
+	// 2. Compare and try to resize local file's size if it doesn't match Azure blob's size.
+	stat, err := file.Stat()
+	if err != nil {
+		return err
+	}
+	if stat.Size() != size {
+		if err = file.Truncate(size); err != nil {
+			return err
+		}
+	}
+
+	if size > 0 {
+		return downloadBlobToWriterAt(ctx, blobURL, offset, size, file, o, nil)
+	} else { // if the blob's size is 0, there is no need to download it
+		return nil
+	}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+
+// BatchTransferOptions identifies options used by DoBatchTransfer.
+type BatchTransferOptions struct {
+	TransferSize  int64
+	ChunkSize     int64
+	Parallelism   uint16
+	Operation     func(offset int64, chunkSize int64, ctx context.Context) error
+	OperationName string
+}
+
+// DoBatchTransfer helps to execute operations in a batch manner.
+// It can be used to customize batched operations for scenarios the SDK does not cover out of the box.
+func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
+	if o.ChunkSize == 0 {
+		return errors.New("ChunkSize cannot be 0")
+	}
+
+	if o.Parallelism == 0 {
+		o.Parallelism = 5 // default Parallelism
+	}
+
+	// Prepare and do parallel operations.
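+	// For example (illustrative): TransferSize = 10 MiB with ChunkSize = 4 MiB yields
+	// numChunks = 3 -- two full 4 MiB chunks plus a final 2 MiB chunk.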
+	numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
+	operationChannel := make(chan func() error, o.Parallelism) // Create the channel that releases 'Parallelism' goroutines concurrently
+	operationResponseChannel := make(chan error, numChunks)    // Holds each response
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Create the goroutines that process each operation (in parallel).
+	for g := uint16(0); g < o.Parallelism; g++ {
+		go func() {
+			for f := range operationChannel {
+				err := f()
+				operationResponseChannel <- err
+			}
+		}()
+	}
+
+	// Add each chunk's operation to the channel.
+	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+		curChunkSize := o.ChunkSize
+
+		if chunkNum == numChunks-1 { // Last chunk
+			curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
+		}
+		offset := int64(chunkNum) * o.ChunkSize
+
+		operationChannel <- func() error {
+			return o.Operation(offset, curChunkSize, ctx)
+		}
+	}
+	close(operationChannel)
+
+	// Wait for the operations to complete.
+	var firstErr error = nil
+	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
+		responseError := <-operationResponseChannel
+		// record the first error (the original error which should cause the other chunks to fail with canceled context)
+		if responseError != nil && firstErr == nil {
+			cancel() // As soon as any operation fails, cancel all remaining operation calls
+			firstErr = responseError
+		}
+	}
+	return firstErr
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////
+
+// TransferManager provides a buffer and thread pool manager for certain transfer options.
+// It is undefined behavior if code outside of this package calls any of these methods.
+type TransferManager interface {
+	// Get provides a buffer that will be used to read data into and write out to the stream.
+	// It is guaranteed by this package to not read or write beyond the size of the slice.
+	Get() []byte
+	// Put may or may not put the buffer into underlying storage, depending on settings.
+	// The buffer must not be touched after this has been called.
+	Put(b []byte)
+	// Run will use a goroutine pool entry to run a function. This blocks until a pool
+	// goroutine becomes available.
+	Run(func())
+	// Close shuts down all internal goroutines. This must be called when the TransferManager
+	// will no longer be used. Not closing it will cause a goroutine leak.
+	Close()
+}
+
+type staticBuffer struct {
+	buffers    chan []byte
+	size       int
+	threadpool chan func()
+}
+
+// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer
+// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This
+// can be shared between calls if you wish to control maximum memory and concurrency with
+// multiple concurrent calls.
+func NewStaticBuffer(size, max int) (TransferManager, error) {
+	if size < 1 || max < 1 {
+		return nil, fmt.Errorf("cannot be called with size or max set to < 1")
+	}
+
+	if size < _1MiB {
+		return nil, fmt.Errorf("cannot have size < 1MiB")
+	}
+
+	threadpool := make(chan func(), max)
+	buffers := make(chan []byte, max)
+	for i := 0; i < max; i++ {
+		go func() {
+			for f := range threadpool {
+				f()
+			}
+		}()
+
+		buffers <- make([]byte, size)
+	}
+	return staticBuffer{
+		buffers:    buffers,
+		size:       size,
+		threadpool: threadpool,
+	}, nil
+}
+
+// Get implements TransferManager.Get().
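+// Get blocks until one of the (at most max) preallocated buffers becomes available.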
+func (s staticBuffer) Get() []byte {
+	return <-s.buffers
+}
+
+// Put implements TransferManager.Put().
+func (s staticBuffer) Put(b []byte) {
+	select {
+	case s.buffers <- b:
+	default: // This shouldn't happen, but just in case they call Put() with their own buffer.
+	}
+}
+
+// Run implements TransferManager.Run().
+func (s staticBuffer) Run(f func()) {
+	s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s staticBuffer) Close() {
+	close(s.threadpool)
+	close(s.buffers)
+}
+
+type syncPool struct {
+	threadpool chan func()
+	pool       sync.Pool
+}
+
+// NewSyncPool creates a TransferManager that will use a sync.Pool
+// that can hold a non-capped number of buffers constrained by concurrency. This
+// can be shared between calls if you wish to share memory and concurrency.
+func NewSyncPool(size, concurrency int) (TransferManager, error) {
+	if size < 1 || concurrency < 1 {
+		return nil, fmt.Errorf("cannot be called with size or concurrency set to < 1")
+	}
+
+	if size < _1MiB {
+		return nil, fmt.Errorf("cannot have size < 1MiB")
+	}
+
+	threadpool := make(chan func(), concurrency)
+	for i := 0; i < concurrency; i++ {
+		go func() {
+			for f := range threadpool {
+				f()
+			}
+		}()
+	}
+
+	return &syncPool{
+		threadpool: threadpool,
+		pool: sync.Pool{
+			New: func() interface{} {
+				return make([]byte, size)
+			},
+		},
+	}, nil
+}
+
+// Get implements TransferManager.Get().
+func (s *syncPool) Get() []byte {
+	return s.pool.Get().([]byte)
+}
+
+// Put implements TransferManager.Put().
+func (s *syncPool) Put(b []byte) {
+	s.pool.Put(b)
+}
+
+// Run implements TransferManager.Run().
+func (s *syncPool) Run(f func()) {
+	s.threadpool <- f
+}
+
+// Close implements TransferManager.Close().
+func (s *syncPool) Close() {
+	close(s.threadpool)
+}
+
+const _1MiB = 1024 * 1024
+
+// UploadStreamToBlockBlobOptions is options for UploadStreamToBlockBlob.
+type UploadStreamToBlockBlobOptions struct {
+	// TransferManager provides a TransferManager that controls buffer allocation/reuse and
+	// concurrency. This overrides BufferSize and MaxBuffers if set.
+	TransferManager      TransferManager
+	transferMangerNotSet bool
+	// BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB.
+	BufferSize int
+	// MaxBuffers defines the number of simultaneous uploads that will be performed to upload the file.
+	MaxBuffers                int
+	BlobHTTPHeaders           BlobHTTPHeaders
+	Metadata                  Metadata
+	AccessConditions          BlobAccessConditions
+	BlobAccessTier            AccessTierType
+	BlobTagsMap               BlobTagsMap
+	ClientProvidedKeyOptions  ClientProvidedKeyOptions
+	ImmutabilityPolicyOptions ImmutabilityPolicyOptions
+}
+
+func (u *UploadStreamToBlockBlobOptions) defaults() error {
+	if u.TransferManager != nil {
+		return nil
+	}
+
+	if u.MaxBuffers == 0 {
+		u.MaxBuffers = 1
+	}
+
+	if u.BufferSize < _1MiB {
+		u.BufferSize = _1MiB
+	}
+
+	var err error
+	u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers)
+	if err != nil {
+		return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
+	}
+	u.transferMangerNotSet = true
+	return nil
+}
+
+// UploadStreamToBlockBlob copies the file held in io.Reader to the Blob at blockBlobURL.
+// A Context deadline or cancellation will cause this to error.
+func UploadStreamToBlockBlob(ctx context.Context, reader io.Reader, blockBlobURL BlockBlobURL, o UploadStreamToBlockBlobOptions) (CommonResponse, error) {
+	if err := o.defaults(); err != nil {
+		return nil, err
+	}
+
+	// If we used the default manager, we need to close it.
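+	// (The caller never sees the default manager, so nothing else can close it; user-supplied
+	// managers are left open so they can be shared across calls.)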
+	if o.transferMangerNotSet {
+		defer o.TransferManager.Close()
+	}
+
+	result, err := copyFromReader(ctx, reader, blockBlobURL, o)
+	if err != nil {
+		return nil, err
+	}
+
+	return result, nil
+}
+
+// UploadStreamOptions (defunct) was used internally. This will be removed or made private in a future version.
+// TODO: Remove on next minor release in v0 or before v1.
+type UploadStreamOptions struct {
+	BufferSize int
+	MaxBuffers int
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
new file mode 100644
index 00000000000..ff055865c8a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/parsing_urls.go
@@ -0,0 +1,173 @@
+package azblob
+
+import (
+	"net"
+	"net/url"
+	"strings"
+)
+
+const (
+	snapshot           = "snapshot"
+	versionId          = "versionid"
+	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
+)
+
+// A BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
+// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type BlobURLParts struct {
+	Scheme              string // Ex: "https://"
+	Host                string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
+	IPEndpointStyleInfo IPEndpointStyleInfo
+	ContainerName       string // "" if no container
+	BlobName            string // "" if no blob
+	Snapshot            string // "" if not a snapshot
+	SAS                 SASQueryParameters
+	UnparsedParams      string
+	VersionID           string // "" if not versioning enabled
+}
+
+// IPEndpointStyleInfo is used for IP endpoint style URLs when working with the Azure storage emulator.
+// Ex: "https://10.132.141.33/accountname/containername"
+type IPEndpointStyleInfo struct {
+	AccountName string // "" if not using IP endpoint style
+}
+
+// isIPEndpointStyle checks if the URL's host is an IP address; in this case, the storage account endpoint is composed as:
+// http(s)://IP(:port)/storageaccount/container/...
+// Like url.URL's Host property, host may be either "host" or "host:port".
+func isIPEndpointStyle(host string) bool {
+	if host == "" {
+		return false
+	}
+	if h, _, err := net.SplitHostPort(host); err == nil {
+		host = h
+	}
+	// For IPv6, SplitHostPort can fail because it cannot find a port.
+	// In this case, eliminate the '[' and ']' in the URL.
+	// For details about IPv6 URLs, please refer to https://tools.ietf.org/html/rfc2732
+	if host[0] == '[' && host[len(host)-1] == ']' {
+		host = host[1 : len(host)-1]
+	}
+	return net.ParseIP(host) != nil
+}
+
+// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other
+// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object.
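+// For example (illustrative), parsing
+//	https://myaccount.blob.core.windows.net/mycontainer/dir/blob.txt?snapshot=2020-01-01T00:00:00.0000000Z
+// yields ContainerName "mycontainer", BlobName "dir/blob.txt", and Snapshot "2020-01-01T00:00:00.0000000Z".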
+func NewBlobURLParts(u url.URL) BlobURLParts { + up := BlobURLParts{ + Scheme: u.Scheme, + Host: u.Host, + } + + // Find the container & blob names (if any) + if u.Path != "" { + path := u.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if isIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob + up.IPEndpointStyleInfo.AccountName = path + path = "" // No ContainerName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) + } + } + + containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if containerEndIndex == -1 { // Slash not found; path has container name & no blob name + up.ContainerName = path + } else { + up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes + up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := u.Query() + + up.Snapshot = "" // Assume no snapshot + up.VersionID = "" // Assume no versionID + if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { + up.Snapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, snapshot) + } + + if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { + up.VersionID = versionIDs[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, versionId) // delete "versionid" from paramsMap + delete(paramsMap, "versionId") // delete "versionId" from paramsMap + } + up.SAS = newSASQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up +} + +type caseInsensitiveValues url.Values // map[string][]string +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} + +// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. 
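+// A typical (illustrative) round trip: parts := NewBlobURLParts(u); parts.ContainerName = "other"; u2 := parts.URL().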
+func (up BlobURLParts) URL() url.URL {
+	path := ""
+	if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
+		path += "/" + up.IPEndpointStyleInfo.AccountName
+	}
+	// Concatenate container & blob names (if they exist)
+	if up.ContainerName != "" {
+		path += "/" + up.ContainerName
+		if up.BlobName != "" {
+			path += "/" + up.BlobName
+		}
+	}
+
+	rawQuery := up.UnparsedParams
+
+	// If no snapshot is initially provided, fill it in from the SAS query properties to help the user
+	if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() {
+		up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat)
+	}
+
+	// Concatenate blob snapshot query parameter (if it exists)
+	if up.Snapshot != "" {
+		if len(rawQuery) > 0 {
+			rawQuery += "&"
+		}
+		rawQuery += snapshot + "=" + up.Snapshot
+	}
+
+	// Concatenate blob version id query parameter (if it exists)
+	if up.VersionID != "" {
+		if len(rawQuery) > 0 {
+			rawQuery += "&"
+		}
+		rawQuery += versionId + "=" + up.VersionID
+	}
+
+	sas := up.SAS.Encode()
+	if sas != "" {
+		if len(rawQuery) > 0 {
+			rawQuery += "&"
+		}
+		rawQuery += sas
+	}
+	u := url.URL{
+		Scheme:   up.Scheme,
+		Host:     up.Host,
+		Path:     path,
+		RawQuery: rawQuery,
+	}
+	return u
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
new file mode 100644
index 00000000000..1cb90dc95df
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/request_common.go
@@ -0,0 +1,56 @@
+package azblob
+
+import (
+	"time"
+)
+
+// ClientProvidedKeyOptions contains headers which may be specified from service version 2019-02-02
+// or higher to encrypt the data on the service side with the given key. Use of customer-provided keys
+// must be done over HTTPS. As the encryption key itself is provided in the request, a secure connection
+// must be established to transfer the key.
+// Note: Azure Storage does not store or manage customer provided encryption keys. Keys are securely discarded
+// as soon as possible after they’ve been used to encrypt or decrypt the blob data.
+// https://docs.microsoft.com/en-us/azure/storage/common/storage-service-encryption
+// https://docs.microsoft.com/en-us/azure/storage/common/customer-managed-keys-overview
+type ClientProvidedKeyOptions struct {
+	// A Base64-encoded AES-256 encryption key value.
+	EncryptionKey *string
+
+	// The Base64-encoded SHA256 of the encryption key.
+	EncryptionKeySha256 *string
+
+	// Specifies the algorithm to use when encrypting data using the given key. Must be AES256.
+	EncryptionAlgorithm EncryptionAlgorithmType
+
+	// Specifies the name of the encryption scope to use to encrypt the data provided in the request
+	// https://docs.microsoft.com/en-us/azure/storage/blobs/encryption-scope-overview
+	// https://docs.microsoft.com/en-us/azure/key-vault/general/overview
+	EncryptionScope *string
+}
+
+// NewClientProvidedKeyOptions constructs a ClientProvidedKeyOptions from the given key, key hash, and encryption scope.
+// By default, the value of the encryption algorithm param is "AES256" for service version 2019-02-02 or higher.
+func NewClientProvidedKeyOptions(ek *string, eksha256 *string, es *string) (cpk ClientProvidedKeyOptions) {
+	cpk = ClientProvidedKeyOptions{}
+	cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, cpk.EncryptionScope = ek, eksha256, EncryptionAlgorithmAES256, es
+	return cpk
+}
+
+type ImmutabilityPolicyOptions struct {
+	// A container with object-level immutability enabled is required for any options.
+	// Both ImmutabilityPolicy options must be filled to set an immutability policy.
+	ImmutabilityPolicyUntilDate *time.Time
+	ImmutabilityPolicyMode      BlobImmutabilityPolicyModeType
+
+	LegalHold *bool
+}
+
+func NewImmutabilityPolicyOptions(untilDate *time.Time, policyMode BlobImmutabilityPolicyModeType, legalHold *bool) ImmutabilityPolicyOptions {
+	opt := ImmutabilityPolicyOptions{}
+	opt.ImmutabilityPolicyUntilDate, opt.ImmutabilityPolicyMode, opt.LegalHold = untilDate, policyMode, legalHold
+	return opt
+}
+
+func (pol *ImmutabilityPolicyOptions) pointers() (*time.Time, BlobImmutabilityPolicyModeType, *bool) {
+	return pol.ImmutabilityPolicyUntilDate, pol.ImmutabilityPolicyMode, pol.LegalHold
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
new file mode 100644
index 00000000000..2719b7366a7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
@@ -0,0 +1,371 @@
+package azblob
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
+type BlobSASSignatureValues struct {
+	Version                    string      `param:"sv"`  // If not specified, this defaults to SASVersion
+	Protocol                   SASProtocol `param:"spr"` // See the SASProtocol* constants
+	StartTime                  time.Time   `param:"st"`  // Not specified if IsZero
+	ExpiryTime                 time.Time   `param:"se"`  // Not specified if IsZero
+	SnapshotTime               time.Time
+	Permissions                string  `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then calling String()
+	IPRange                    IPRange `param:"sip"`
+	Identifier                 string  `param:"si"`
+	ContainerName              string
+	BlobName                   string // Use "" to create a Container SAS
+	Directory                  string // Not empty for a directory SAS (i.e. sr=d)
+	CacheControl               string // rscc
+	ContentDisposition         string // rscd
+	ContentEncoding            string // rsce
+	ContentLanguage            string // rscl
+	ContentType                string // rsct
+	BlobVersion                string // sr=bv
+	PreauthorizedAgentObjectId string
+	AgentObjectId              string
+	CorrelationId              string
+}
+
+func getDirectoryDepth(path string) string {
+	if path == "" {
+		return ""
+	}
+	return fmt.Sprint(strings.Count(path, "/") + 1)
+}
+
+// NewSASQueryParameters uses an account's StorageAccountCredential to sign these signature values, producing
+// the proper SAS query parameters.
+// See: StorageAccountCredential. 
+
+func (pol *ImmutabilityPolicyOptions) pointers() (*time.Time, BlobImmutabilityPolicyModeType, *bool) {
+	return pol.ImmutabilityPolicyUntilDate, pol.ImmutabilityPolicyMode, pol.LegalHold
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
new file mode 100644
index 00000000000..2719b7366a7
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/sas_service.go
@@ -0,0 +1,371 @@
+package azblob
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
+type BlobSASSignatureValues struct {
+	Version                    string      `param:"sv"`  // If not specified, this defaults to SASVersion
+	Protocol                   SASProtocol `param:"spr"` // See the SASProtocol* constants
+	StartTime                  time.Time   `param:"st"`  // Not specified if IsZero
+	ExpiryTime                 time.Time   `param:"se"`  // Not specified if IsZero
+	SnapshotTime               time.Time
+	Permissions                string  `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
+	IPRange                    IPRange `param:"sip"`
+	Identifier                 string  `param:"si"`
+	ContainerName              string
+	BlobName                   string // Use "" to create a Container SAS
+	Directory                  string // Not empty for a directory SAS (i.e. sr=d)
+	CacheControl               string // rscc
+	ContentDisposition         string // rscd
+	ContentEncoding            string // rsce
+	ContentLanguage            string // rscl
+	ContentType                string // rsct
+	BlobVersion                string // sr=bv
+	PreauthorizedAgentObjectId string
+	AgentObjectId              string
+	CorrelationId              string
+}
+
+func getDirectoryDepth(path string) string {
+	if path == "" {
+		return ""
+	}
+	return fmt.Sprint(strings.Count(path, "/") + 1)
+}
+
+// NewSASQueryParameters uses an account's StorageAccountCredential to sign these signature values to produce
+// the proper SAS query parameters.
+// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential.
+func (v BlobSASSignatureValues) NewSASQueryParameters(credential StorageAccountCredential) (SASQueryParameters, error) {
+	resource := "c"
+	if credential == nil {
+		return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without StorageAccountCredential")
+	}
+
+	if !v.SnapshotTime.IsZero() {
+		resource = "bs"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else if v.BlobVersion != "" {
+		resource = "bv"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else if v.Directory != "" {
+		resource = "d"
+		v.BlobName = ""
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else if v.BlobName == "" {
+		// Make sure the permission characters are in the correct order
+		perms := &ContainerSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	} else {
+		resource = "b"
+		// Make sure the permission characters are in the correct order
+		perms := &BlobSASPermissions{}
+		if err := perms.Parse(v.Permissions); err != nil {
+			return SASQueryParameters{}, err
+		}
+		v.Permissions = perms.String()
+	}
+	if v.Version == "" {
+		v.Version = SASVersion
+	}
+	startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
+
+	signedIdentifier := v.Identifier
+
+	udk := credential.getUDKParams()
+
+	if udk != nil {
+		udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
+		// I don't like this answer to combining the functions.
+		// But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
+		signedIdentifier = strings.Join([]string{
+			udk.SignedOid,
+			udk.SignedTid,
+			udkStart,
+			udkExpiry,
+			udk.SignedService,
+			udk.SignedVersion,
+			v.PreauthorizedAgentObjectId,
+			v.AgentObjectId,
+			v.CorrelationId,
+		}, "\n")
+	}
+
+	// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+	stringToSign := strings.Join([]string{
+		v.Permissions,
+		startTime,
+		expiryTime,
+		getCanonicalName(credential.AccountName(), v.ContainerName, v.BlobName, v.Directory),
+		signedIdentifier,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		resource,
+		snapshotTime,         // signed timestamp
+		v.CacheControl,       // rscc
+		v.ContentDisposition, // rscd
+		v.ContentEncoding,    // rsce
+		v.ContentLanguage,    // rscl
+		v.ContentType},       // rsct
+		"\n")
+
+	signature := credential.ComputeHMACSHA256(stringToSign)
+
+	p := SASQueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Container/Blob-specific SAS parameters
+		resource:                   resource,
+		identifier:                 v.Identifier,
+		cacheControl:               v.CacheControl,
+		contentDisposition:         v.ContentDisposition,
+		contentEncoding:            v.ContentEncoding,
+		contentLanguage:            v.ContentLanguage,
+		contentType:                v.ContentType,
+		snapshotTime:               v.SnapshotTime,
+		signedDirectoryDepth:       getDirectoryDepth(v.Directory),
+		preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId,
+		agentObjectId:              v.AgentObjectId,
+		correlationId:              v.CorrelationId,
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	// User delegation SAS specific parameters
+	if udk != nil {
+		p.signedOid = udk.SignedOid
+		p.signedTid = udk.SignedTid
+		p.signedStart = udk.SignedStart
+		p.signedExpiry = udk.SignedExpiry
+		p.signedService = udk.SignedService
+		p.signedVersion = udk.SignedVersion
+	}
+
+	return p, nil
+}
+
+// getCanonicalName computes the canonical name for a container or blob resource for SAS signing.
+func getCanonicalName(account string, containerName string, blobName string, directoryName string) string {
+	// Container: "/blob/account/containername"
+	// Blob:      "/blob/account/containername/blobname"
+	elements := []string{"/blob/", account, "/", containerName}
+	if blobName != "" {
+		elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
+	} else if directoryName != "" {
+		elements = append(elements, "/", directoryName)
+	}
+	return strings.Join(elements, "")
+}
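+
+// A hedged end-to-end sketch of signing service-SAS values with a shared key
+// credential (credential construction elided; container and blob names are illustrative):
+//
+//	perms := BlobSASPermissions{Read: true}
+//	values := BlobSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
+//		ContainerName: "mycontainer",
+//		BlobName:      "myblob.txt",
+//		Permissions:   perms.String(),
+//	}
+//	qp, err := values.NewSASQueryParameters(credential) // credential: e.g. a *SharedKeyCredential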
+
+// The ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
+// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
+// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob
+type ContainerSASPermissions struct {
+	Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag, Immutability bool
+	Execute, ModifyOwnership, ModifyPermissions                                      bool // Hierarchical Namespace only
+}
+
+// String produces the SAS permissions string for an Azure Storage container.
+// Call this method to set BlobSASSignatureValues's Permissions field.
+func (p ContainerSASPermissions) String() string {
+	var b bytes.Buffer
+	if p.Read {
+		b.WriteRune('r')
+	}
+	if p.Add {
+		b.WriteRune('a')
+	}
+	if p.Create {
+		b.WriteRune('c')
+	}
+	if p.Write {
+		b.WriteRune('w')
+	}
+	if p.Delete {
+		b.WriteRune('d')
+	}
+	if p.DeletePreviousVersion {
+		b.WriteRune('x')
+	}
+	if p.List {
+		b.WriteRune('l')
+	}
+	if p.Tag {
+		b.WriteRune('t')
+	}
+	if p.Execute {
+		b.WriteRune('e')
+	}
+	if p.ModifyOwnership {
+		b.WriteRune('o')
+	}
+	if p.ModifyPermissions {
+		b.WriteRune('p')
+	}
+	if p.Immutability {
+		b.WriteRune('i')
+	}
+	return b.String()
+}
+
+// Parse initializes the ContainerSASPermissions's fields from a string.
+func (p *ContainerSASPermissions) Parse(s string) error {
+	*p = ContainerSASPermissions{} // Clear the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'x':
+			p.DeletePreviousVersion = true
+		case 'l':
+			p.List = true
+		case 't':
+			p.Tag = true
+		case 'e':
+			p.Execute = true
+		case 'o':
+			p.ModifyOwnership = true
+		case 'p':
+			p.ModifyPermissions = true
+		case 'i':
+			p.Immutability = true
+		default:
+			return fmt.Errorf("invalid permission: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// The BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
+// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
+type BlobSASPermissions struct {
+	Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions, PermanentDelete, Immutability bool
+}
+
+// String produces the SAS permissions string for an Azure Storage blob.
+// Call this method to set BlobSASSignatureValues's Permissions field.
+func (p BlobSASPermissions) String() string {
+	var b bytes.Buffer
+	if p.Read {
+		b.WriteRune('r')
+	}
+	if p.Add {
+		b.WriteRune('a')
+	}
+	if p.Create {
+		b.WriteRune('c')
+	}
+	if p.Write {
+		b.WriteRune('w')
+	}
+	if p.Delete {
+		b.WriteRune('d')
+	}
+	if p.DeletePreviousVersion {
+		b.WriteRune('x')
+	}
+	if p.Tag {
+		b.WriteRune('t')
+	}
+	if p.List {
+		b.WriteRune('l')
+	}
+	if p.Move {
+		b.WriteRune('m')
+	}
+	if p.Execute {
+		b.WriteRune('e')
+	}
+	if p.Ownership {
+		b.WriteRune('o')
+	}
+	if p.Permissions {
+		b.WriteRune('p')
+	}
+	if p.PermanentDelete {
+		b.WriteRune('y')
+	}
+	if p.Immutability {
+		b.WriteRune('i')
+	}
+	return b.String()
+}
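+
+// Because String always emits characters in one canonical order, a Parse/String
+// round trip normalizes user-supplied permission strings (illustrative):
+//
+//	p := BlobSASPermissions{}
+//	_ = p.Parse("lrw")  // out-of-order input
+//	s := p.String()     // s == "rwl"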
+
+// Parse initializes the BlobSASPermissions's fields from a string.
+func (p *BlobSASPermissions) Parse(s string) error {
+	*p = BlobSASPermissions{} // Clear the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'x':
+			p.DeletePreviousVersion = true
+		case 't':
+			p.Tag = true
+		case 'l':
+			p.List = true
+		case 'm':
+			p.Move = true
+		case 'e':
+			p.Execute = true
+		case 'o':
+			p.Ownership = true
+		case 'p':
+			p.Permissions = true
+		case 'y':
+			p.PermanentDelete = true
+		case 'i':
+			p.Immutability = true
+		default:
+			return fmt.Errorf("invalid permission: '%v'", r)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
new file mode 100644
index 00000000000..6d86f6eb9df
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/section_writer.go
@@ -0,0 +1,47 @@
+package azblob
+
+import (
+	"errors"
+	"io"
+)
+
+type sectionWriter struct {
+	count    int64
+	offset   int64
+	position int64
+	writerAt io.WriterAt
+}
+
+func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
+	return &sectionWriter{
+		count:    count,
+		offset:   off,
+		writerAt: c,
+	}
+}
+
+func (c *sectionWriter) Write(p []byte) (int, error) {
+	remaining := c.count - c.position
+
+	if remaining <= 0 {
+		return 0, errors.New("end of section reached")
+	}
+
+	slice := p
+
+	if int64(len(slice)) > remaining {
+		slice = slice[:remaining]
+	}
+
+	n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
+	c.position += int64(n)
+	if err != nil {
+		return n, err
+	}
+
+	if len(p) > n {
+		return n, errors.New("not enough space for all bytes")
+	}
+
+	return n, nil
+}
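+
+// Behavior sketch: writes are clamped to the section's count, and a short write is
+// reported as an error. The fakeWriterAt type below is hypothetical, standing in
+// for any io.WriterAt:
+//
+//	buf := make([]byte, 8)
+//	w := newSectionWriter(fakeWriterAt(buf), 2, 4) // confine writes to buf[2:6]
+//	n, err := w.Write([]byte("hello"))             // n == 4, err != nil (truncated)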
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
new file mode 100644
index 00000000000..292710cc349
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/service_codes_blob.go
@@ -0,0 +1,198 @@
+package azblob
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes
+
+// ServiceCode values indicate a service failure.
+const (
+	// ServiceCodeAppendPositionConditionNotMet means the append position condition specified was not met.
+	ServiceCodeAppendPositionConditionNotMet ServiceCodeType = "AppendPositionConditionNotMet"
+
+	// ServiceCodeBlobAlreadyExists means the specified blob already exists.
+	ServiceCodeBlobAlreadyExists ServiceCodeType = "BlobAlreadyExists"
+
+	// ServiceCodeBlobNotFound means the specified blob does not exist.
+	ServiceCodeBlobNotFound ServiceCodeType = "BlobNotFound"
+
+	// ServiceCodeBlobOverwritten means the blob has been recreated since the previous snapshot was taken.
+	ServiceCodeBlobOverwritten ServiceCodeType = "BlobOverwritten"
+
+	// ServiceCodeBlobTierInadequateForContentLength means the specified blob tier size limit cannot be less than content length.
+	ServiceCodeBlobTierInadequateForContentLength ServiceCodeType = "BlobTierInadequateForContentLength"
+
+	// ServiceCodeBlockCountExceedsLimit means the committed block count cannot exceed the maximum limit of 50,000 blocks
+	// or that the uncommitted block count cannot exceed the maximum limit of 100,000 blocks.
+	ServiceCodeBlockCountExceedsLimit ServiceCodeType = "BlockCountExceedsLimit"
+
+	// ServiceCodeBlockListTooLong means the block list may not contain more than 50,000 blocks.
+	ServiceCodeBlockListTooLong ServiceCodeType = "BlockListTooLong"
+
+	// ServiceCodeCannotChangeToLowerTier means that a higher blob tier has already been explicitly set.
+	ServiceCodeCannotChangeToLowerTier ServiceCodeType = "CannotChangeToLowerTier"
+
+	// ServiceCodeCannotVerifyCopySource means that the service could not verify the copy source within the specified time.
+	// Examine the HTTP status code and message for more information about the failure.
+	ServiceCodeCannotVerifyCopySource ServiceCodeType = "CannotVerifyCopySource"
+
+	// ServiceCodeContainerAlreadyExists means the specified container already exists.
+	ServiceCodeContainerAlreadyExists ServiceCodeType = "ContainerAlreadyExists"
+
+	// ServiceCodeContainerBeingDeleted means the specified container is being deleted.
+	ServiceCodeContainerBeingDeleted ServiceCodeType = "ContainerBeingDeleted"
+
+	// ServiceCodeContainerDisabled means the specified container has been disabled by the administrator.
+	ServiceCodeContainerDisabled ServiceCodeType = "ContainerDisabled"
+
+	// ServiceCodeContainerNotFound means the specified container does not exist.
+	ServiceCodeContainerNotFound ServiceCodeType = "ContainerNotFound"
+
+	// ServiceCodeContentLengthLargerThanTierLimit means the blob's content length cannot exceed its tier limit.
+	ServiceCodeContentLengthLargerThanTierLimit ServiceCodeType = "ContentLengthLargerThanTierLimit"
+
+	// ServiceCodeCopyAcrossAccountsNotSupported means the copy source account and destination account must be the same.
+	ServiceCodeCopyAcrossAccountsNotSupported ServiceCodeType = "CopyAcrossAccountsNotSupported"
+
+	// ServiceCodeCopyIDMismatch means the specified copy ID did not match the copy ID for the pending copy operation.
+	ServiceCodeCopyIDMismatch ServiceCodeType = "CopyIdMismatch"
+
+	// ServiceCodeFeatureVersionMismatch means the type of blob in the container is unrecognized by this version or
+	// that the operation for AppendBlob requires at least version 2015-02-21.
+	ServiceCodeFeatureVersionMismatch ServiceCodeType = "FeatureVersionMismatch"
+
+	// ServiceCodeIncrementalCopyBlobMismatch means the specified source blob is different than the copy source of the existing incremental copy blob.
+	ServiceCodeIncrementalCopyBlobMismatch ServiceCodeType = "IncrementalCopyBlobMismatch"
+
+	// ServiceCodeFeatureEncryptionMismatch means the given customer specified encryption does not match the encryption used to encrypt the blob.
+	ServiceCodeFeatureEncryptionMismatch ServiceCodeType = "BlobCustomerSpecifiedEncryptionMismatch"
+
+	// ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed means the specified snapshot is earlier than the last snapshot copied into the incremental copy blob.
+	ServiceCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed ServiceCodeType = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
+
+	// ServiceCodeIncrementalCopySourceMustBeSnapshot means the source for incremental copy request must be a snapshot.
+	ServiceCodeIncrementalCopySourceMustBeSnapshot ServiceCodeType = "IncrementalCopySourceMustBeSnapshot"
+
+	// ServiceCodeInfiniteLeaseDurationRequired means the lease ID matched, but the specified lease must be an infinite-duration lease.
+	ServiceCodeInfiniteLeaseDurationRequired ServiceCodeType = "InfiniteLeaseDurationRequired"
+
+	// ServiceCodeInvalidBlobOrBlock means the specified blob or block content is invalid.
+	ServiceCodeInvalidBlobOrBlock ServiceCodeType = "InvalidBlobOrBlock"
+
+	// ServiceCodeInvalidBlobType means the blob type is invalid for this operation.
+	ServiceCodeInvalidBlobType ServiceCodeType = "InvalidBlobType"
+
+	// ServiceCodeInvalidBlockID means the specified block ID is invalid. The block ID must be Base64-encoded.
+	ServiceCodeInvalidBlockID ServiceCodeType = "InvalidBlockId"
+
+	// ServiceCodeInvalidBlockList means the specified block list is invalid.
+	ServiceCodeInvalidBlockList ServiceCodeType = "InvalidBlockList"
+
+	// ServiceCodeInvalidOperation means an invalid operation against a blob snapshot.
+	ServiceCodeInvalidOperation ServiceCodeType = "InvalidOperation"
+
+	// ServiceCodeInvalidPageRange means the page range specified is invalid.
+	ServiceCodeInvalidPageRange ServiceCodeType = "InvalidPageRange"
+
+	// ServiceCodeInvalidSourceBlobType means the copy source blob type is invalid for this operation.
+	ServiceCodeInvalidSourceBlobType ServiceCodeType = "InvalidSourceBlobType"
+
+	// ServiceCodeInvalidSourceBlobURL means the source URL for incremental copy request must be valid Azure Storage blob URL.
+	ServiceCodeInvalidSourceBlobURL ServiceCodeType = "InvalidSourceBlobUrl"
+
+	// ServiceCodeInvalidVersionForPageBlobOperation means that all operations on page blobs require at least version 2009-09-19.
+	ServiceCodeInvalidVersionForPageBlobOperation ServiceCodeType = "InvalidVersionForPageBlobOperation"
+
+	// ServiceCodeLeaseAlreadyPresent means there is already a lease present.
+	ServiceCodeLeaseAlreadyPresent ServiceCodeType = "LeaseAlreadyPresent"
+
+	// ServiceCodeLeaseAlreadyBroken means the lease has already been broken and cannot be broken again.
+	ServiceCodeLeaseAlreadyBroken ServiceCodeType = "LeaseAlreadyBroken"
+
+	// ServiceCodeLeaseIDMismatchWithBlobOperation means the lease ID specified did not match the lease ID for the blob.
+	ServiceCodeLeaseIDMismatchWithBlobOperation ServiceCodeType = "LeaseIdMismatchWithBlobOperation"
+
+	// ServiceCodeLeaseIDMismatchWithContainerOperation means the lease ID specified did not match the lease ID for the container.
+	ServiceCodeLeaseIDMismatchWithContainerOperation ServiceCodeType = "LeaseIdMismatchWithContainerOperation"
+
+	// ServiceCodeLeaseIDMismatchWithLeaseOperation means the lease ID specified did not match the lease ID for the blob/container.
+	ServiceCodeLeaseIDMismatchWithLeaseOperation ServiceCodeType = "LeaseIdMismatchWithLeaseOperation"
+
+	// ServiceCodeLeaseIDMissing means there is currently a lease on the blob/container and no lease ID was specified in the request.
+	ServiceCodeLeaseIDMissing ServiceCodeType = "LeaseIdMissing"
+
+	// ServiceCodeLeaseIsBreakingAndCannotBeAcquired means the lease ID matched, but the lease is currently in breaking state and cannot be acquired until it is broken.
+	ServiceCodeLeaseIsBreakingAndCannotBeAcquired ServiceCodeType = "LeaseIsBreakingAndCannotBeAcquired"
+
+	// ServiceCodeLeaseIsBreakingAndCannotBeChanged means the lease ID matched, but the lease is currently in breaking state and cannot be changed.
+	ServiceCodeLeaseIsBreakingAndCannotBeChanged ServiceCodeType = "LeaseIsBreakingAndCannotBeChanged"
+
+	// ServiceCodeLeaseIsBrokenAndCannotBeRenewed means the lease ID matched, but the lease has been broken explicitly and cannot be renewed.
+	ServiceCodeLeaseIsBrokenAndCannotBeRenewed ServiceCodeType = "LeaseIsBrokenAndCannotBeRenewed"
+
+	// ServiceCodeLeaseLost means a lease ID was specified, but the lease for the blob/container has expired.
+	ServiceCodeLeaseLost ServiceCodeType = "LeaseLost"
+
+	// ServiceCodeLeaseNotPresentWithBlobOperation means there is currently no lease on the blob.
+	ServiceCodeLeaseNotPresentWithBlobOperation ServiceCodeType = "LeaseNotPresentWithBlobOperation"
+
+	// ServiceCodeLeaseNotPresentWithContainerOperation means there is currently no lease on the container.
+	ServiceCodeLeaseNotPresentWithContainerOperation ServiceCodeType = "LeaseNotPresentWithContainerOperation"
+
+	// ServiceCodeLeaseNotPresentWithLeaseOperation means there is currently no lease on the blob/container.
+	ServiceCodeLeaseNotPresentWithLeaseOperation ServiceCodeType = "LeaseNotPresentWithLeaseOperation"
+
+	// ServiceCodeMaxBlobSizeConditionNotMet means the max blob size condition specified was not met.
+	ServiceCodeMaxBlobSizeConditionNotMet ServiceCodeType = "MaxBlobSizeConditionNotMet"
+
+	// ServiceCodeNoPendingCopyOperation means there is currently no pending copy operation.
+	ServiceCodeNoPendingCopyOperation ServiceCodeType = "NoPendingCopyOperation"
+
+	// ServiceCodeOperationNotAllowedOnIncrementalCopyBlob means the specified operation is not allowed on an incremental copy blob.
+	ServiceCodeOperationNotAllowedOnIncrementalCopyBlob ServiceCodeType = "OperationNotAllowedOnIncrementalCopyBlob"
+
+	// ServiceCodePendingCopyOperation means there is currently a pending copy operation.
+	ServiceCodePendingCopyOperation ServiceCodeType = "PendingCopyOperation"
+
+	// ServiceCodePreviousSnapshotCannotBeNewer means the prevsnapshot query parameter value cannot be newer than snapshot query parameter value.
+	ServiceCodePreviousSnapshotCannotBeNewer ServiceCodeType = "PreviousSnapshotCannotBeNewer"
+
+	// ServiceCodePreviousSnapshotNotFound means the previous snapshot is not found.
+	ServiceCodePreviousSnapshotNotFound ServiceCodeType = "PreviousSnapshotNotFound"
+
+	// ServiceCodePreviousSnapshotOperationNotSupported means that differential Get Page Ranges is not supported on the previous snapshot.
+	ServiceCodePreviousSnapshotOperationNotSupported ServiceCodeType = "PreviousSnapshotOperationNotSupported"
+
+	// ServiceCodeSequenceNumberConditionNotMet means the sequence number condition specified was not met.
+	ServiceCodeSequenceNumberConditionNotMet ServiceCodeType = "SequenceNumberConditionNotMet"
+
+	// ServiceCodeSequenceNumberIncrementTooLarge means the sequence number increment cannot be performed because it would result in overflow of the sequence number.
+	ServiceCodeSequenceNumberIncrementTooLarge ServiceCodeType = "SequenceNumberIncrementTooLarge"
+
+	// ServiceCodeSnapshotCountExceeded means the snapshot count against this blob has been exceeded.
+	ServiceCodeSnapshotCountExceeded ServiceCodeType = "SnapshotCountExceeded"
+
+	// ServiceCodeSnaphotOperationRateExceeded means the rate of snapshot operations against this blob has been exceeded.
+	ServiceCodeSnaphotOperationRateExceeded ServiceCodeType = "SnaphotOperationRateExceeded"
+
+	// ServiceCodeSnapshotsPresent means this operation is not permitted while the blob has snapshots.
+	ServiceCodeSnapshotsPresent ServiceCodeType = "SnapshotsPresent"
+
+	// ServiceCodeSourceConditionNotMet means the source condition specified using HTTP conditional header(s) is not met.
+	ServiceCodeSourceConditionNotMet ServiceCodeType = "SourceConditionNotMet"
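+
+	// A typical check against these codes (hypothetical sketch; StorageError is
+	// assumed to be this package's error interface exposing ServiceCode()):
+	//
+	//	if serr, ok := err.(StorageError); ok && serr.ServiceCode() == ServiceCodeBlobNotFound {
+	//		// treat a missing blob as a non-fatal condition
+	//	}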
+
+	// ServiceCodeSystemInUse means this blob is in use by the system.
+	ServiceCodeSystemInUse ServiceCodeType = "SystemInUse"
+
+	// ServiceCodeTargetConditionNotMet means the target condition specified using HTTP conditional header(s) is not met.
+	ServiceCodeTargetConditionNotMet ServiceCodeType = "TargetConditionNotMet"
+
+	// ServiceCodeUnauthorizedBlobOverwrite means this request is not authorized to perform blob overwrites.
+	ServiceCodeUnauthorizedBlobOverwrite ServiceCodeType = "UnauthorizedBlobOverwrite"
+
+	// ServiceCodeBlobBeingRehydrated means this operation is not permitted because the blob is being rehydrated.
+	ServiceCodeBlobBeingRehydrated ServiceCodeType = "BlobBeingRehydrated"
+
+	// ServiceCodeBlobArchived means this operation is not permitted on an archived blob.
+	ServiceCodeBlobArchived ServiceCodeType = "BlobArchived"
+
+	// ServiceCodeBlobNotArchived means this blob is currently not in the archived state.
+	ServiceCodeBlobNotArchived ServiceCodeType = "BlobNotArchived"
+)
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go
new file mode 100644
index 00000000000..b89b18bb411
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/storage_account_credential.go
@@ -0,0 +1,8 @@
+package azblob
+
+// StorageAccountCredential is a wrapper interface for SharedKeyCredential and UserDelegationCredential
+type StorageAccountCredential interface {
+	AccountName() string
+	ComputeHMACSHA256(message string) (base64String string)
+	getUDKParams() *UserDelegationKey
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
new file mode 100644
index 00000000000..0fdf038cadf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_append_blob.go
@@ -0,0 +1,161 @@
+package azblob
+
+import (
+	"context"
+	"io"
+	"net/url"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+	// AppendBlobMaxAppendBlockBytes indicates the maximum number of bytes that can be sent in a call to AppendBlock.
+	AppendBlobMaxAppendBlockBytes = 4 * 1024 * 1024 // 4MB
+
+	// AppendBlobMaxBlocks indicates the maximum number of blocks allowed in an append blob.
+	AppendBlobMaxBlocks = 50000
+)
+
+// AppendBlobURL defines a set of operations applicable to append blobs.
+type AppendBlobURL struct {
+	BlobURL
+	abClient appendBlobClient
+}
+
+// NewAppendBlobURL creates an AppendBlobURL object using the specified URL and request policy pipeline.
+func NewAppendBlobURL(url url.URL, p pipeline.Pipeline) AppendBlobURL {
+	blobClient := newBlobClient(url, p)
+	abClient := newAppendBlobClient(url, p)
+	return AppendBlobURL{BlobURL: BlobURL{blobClient: blobClient}, abClient: abClient}
+}
+
+// WithPipeline creates a new AppendBlobURL object identical to the source but with the specific request policy pipeline.
+func (ab AppendBlobURL) WithPipeline(p pipeline.Pipeline) AppendBlobURL {
+	return NewAppendBlobURL(ab.blobClient.URL(), p)
+}
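+
+// A hypothetical construction sketch (account, container, and blob names are
+// placeholders; NewPipeline and NewAnonymousCredential are this package's helpers):
+//
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net/mycontainer/log.txt")
+//	abURL := NewAppendBlobURL(*u, NewPipeline(NewAnonymousCredential(), PipelineOptions{}))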
+
+// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (ab AppendBlobURL) WithSnapshot(snapshot string) AppendBlobURL {
+	p := NewBlobURLParts(ab.URL())
+	p.Snapshot = snapshot
+	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
+}
+
+// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID returning a URL to the base blob.
+func (ab AppendBlobURL) WithVersionID(versionId string) AppendBlobURL {
+	p := NewBlobURLParts(ab.URL())
+	p.VersionID = versionId
+	return NewAppendBlobURL(p.URL(), ab.blobClient.Pipeline())
+}
+
+func (ab AppendBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+	return ab.blobClient.GetAccountInfo(ctx)
+}
+
+// Create creates a 0-length append blob. Call AppendBlock to append data to an append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (ab AppendBlobURL) Create(ctx context.Context, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*AppendBlobCreateResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
+	return ab.abClient.Create(ctx, 0, nil,
+		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
+		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch,
+		nil, // Blob ifTags
+		nil,
+		blobTagsString, // Blob tags
+		// immutability policy
+		immutabilityExpiry, immutabilityMode, legalHold,
+	)
+}
+
+// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
+func (ab AppendBlobURL) AppendBlock(ctx context.Context, body io.ReadSeeker, ac AppendBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*AppendBlobAppendBlockResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := ac.AppendPositionAccessConditions.pointers()
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return nil, err
+	}
+	return ab.abClient.AppendBlock(ctx, body, count, nil,
+		transactionalMD5,
+		nil, // CRC
+		ac.LeaseAccessConditions.pointers(),
+		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
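+
+// An append sketch (ctx, abURL, and data assumed; bytes.NewReader satisfies the
+// io.ReadSeeker requirement and starts at position 0):
+//
+//	_, err := abURL.AppendBlock(ctx, bytes.NewReader(data),
+//		AppendBlobAccessConditions{}, nil, ClientProvidedKeyOptions{})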
+
+// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
+func (ab AppendBlobURL) AppendBlockFromURL(ctx context.Context, sourceURL url.URL, offset int64, count int64, destinationAccessConditions AppendBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*AppendBlobAppendBlockFromURLResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers()
+	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
+	ifAppendPositionEqual, ifMaxSizeLessThanOrEqual := destinationAccessConditions.AppendPositionAccessConditions.pointers()
+	return ab.abClient.AppendBlockFromURL(ctx, sourceURL.String(), 0, httpRange{offset: offset, count: count}.pointers(),
+		transactionalMD5, nil, nil, nil,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		destinationAccessConditions.LeaseAccessConditions.pointers(),
+		ifMaxSizeLessThanOrEqual, ifAppendPositionEqual,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization))
+}
+
+type AppendBlobAccessConditions struct {
+	ModifiedAccessConditions
+	LeaseAccessConditions
+	AppendPositionAccessConditions
+}
+
+// AppendPositionAccessConditions identifies append blob-specific access conditions which you optionally set.
+type AppendPositionAccessConditions struct {
+	// IfAppendPositionEqual ensures that the AppendBlock operation succeeds
+	// only if the append position is equal to a value.
+	// IfAppendPositionEqual=0 means no 'IfAppendPositionEqual' header specified.
+	// IfAppendPositionEqual>0 means 'IfAppendPositionEqual' header specified with its value
+	// IfAppendPositionEqual==-1 means 'IfAppendPositionEqual' header specified with a value of 0
+	IfAppendPositionEqual int64
+
+	// IfMaxSizeLessThanOrEqual ensures that the AppendBlock operation succeeds
+	// only if the append blob's size is less than or equal to a value.
+	// IfMaxSizeLessThanOrEqual=0 means no 'IfMaxSizeLessThanOrEqual' header specified.
+	// IfMaxSizeLessThanOrEqual>0 means 'IfMaxSizeLessThanOrEqual' header specified with its value
+	// IfMaxSizeLessThanOrEqual==-1 means 'IfMaxSizeLessThanOrEqual' header specified with a value of 0
+	IfMaxSizeLessThanOrEqual int64
+}
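+
+// The sentinel encoding above means, concretely (illustrative):
+//
+//	AppendPositionAccessConditions{IfAppendPositionEqual: -1} // condition sent with value 0
+//	AppendPositionAccessConditions{IfAppendPositionEqual: 0}  // no condition sent at all
+//	AppendPositionAccessConditions{IfAppendPositionEqual: 7}  // condition sent with value 7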
+
+// pointers is for internal infrastructure. It returns the fields as pointers.
+func (ac AppendPositionAccessConditions) pointers() (iape *int64, imsltoe *int64) {
+	var zero int64 // defaults to 0
+	switch ac.IfAppendPositionEqual {
+	case -1:
+		iape = &zero
+	case 0:
+		iape = nil
+	default:
+		iape = &ac.IfAppendPositionEqual
+	}
+
+	switch ac.IfMaxSizeLessThanOrEqual {
+	case -1:
+		imsltoe = &zero
+	case 0:
+		imsltoe = nil
+	default:
+		imsltoe = &ac.IfMaxSizeLessThanOrEqual
+	}
+	return
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
new file mode 100644
index 00000000000..301d90825c1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_blob.go
@@ -0,0 +1,363 @@
+package azblob
+
+import (
+	"context"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// A BlobURL represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
+type BlobURL struct {
+	blobClient blobClient
+}
+
+type BlobTagsMap map[string]string
+
+var DefaultAccessTier = AccessTierNone
+var DefaultPremiumBlobAccessTier = PremiumPageBlobAccessTierNone
+
+// NewBlobURL creates a BlobURL object using the specified URL and request policy pipeline.
+func NewBlobURL(url url.URL, p pipeline.Pipeline) BlobURL {
+	blobClient := newBlobClient(url, p)
+	return BlobURL{blobClient: blobClient}
+}
+
+// URL returns the URL endpoint used by the BlobURL object.
+func (b BlobURL) URL() url.URL {
+	return b.blobClient.URL()
+}
+
+// String returns the URL as a string.
+func (b BlobURL) String() string {
+	u := b.URL()
+	return u.String()
+}
+
+func (b BlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+	return b.blobClient.GetAccountInfo(ctx)
+}
+
+// WithPipeline creates a new BlobURL object identical to the source but with the specified request policy pipeline.
+func (b BlobURL) WithPipeline(p pipeline.Pipeline) BlobURL {
+	return NewBlobURL(b.blobClient.URL(), p)
+}
+
+// WithSnapshot creates a new BlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (b BlobURL) WithSnapshot(snapshot string) BlobURL {
+	p := NewBlobURLParts(b.URL())
+	p.Snapshot = snapshot
+	return NewBlobURL(p.URL(), b.blobClient.Pipeline())
+}
+
+// WithVersionID creates a new BlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID returning a URL to the base blob.
+func (b BlobURL) WithVersionID(versionID string) BlobURL {
+	p := NewBlobURLParts(b.URL())
+	p.VersionID = versionID
+	return NewBlobURL(p.URL(), b.blobClient.Pipeline())
+}
+
+// ToAppendBlobURL creates an AppendBlobURL using the source's URL and pipeline.
+func (b BlobURL) ToAppendBlobURL() AppendBlobURL {
+	return NewAppendBlobURL(b.URL(), b.blobClient.Pipeline())
+}
+
+// ToBlockBlobURL creates a BlockBlobURL using the source's URL and pipeline.
+func (b BlobURL) ToBlockBlobURL() BlockBlobURL {
+	return NewBlockBlobURL(b.URL(), b.blobClient.Pipeline())
+}
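+
+// Conversion sketch: the To*BlobURL converters reuse the URL and pipeline, so a
+// generic BlobURL can be specialized once the blob type is known (u and p assumed):
+//
+//	blobURL := NewBlobURL(*u, p)
+//	bbURL := blobURL.ToBlockBlobURL() // same URL, block-blob-specific operations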
+
+// ToPageBlobURL creates a PageBlobURL using the source's URL and pipeline.
+func (b BlobURL) ToPageBlobURL() PageBlobURL {
+	return NewPageBlobURL(b.URL(), b.blobClient.Pipeline())
+}
+
+func SerializeBlobTagsHeader(blobTagsMap BlobTagsMap) *string {
+	if len(blobTagsMap) == 0 {
+		return nil
+	}
+	tags := make([]string, 0)
+	for key, val := range blobTagsMap {
+		tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val))
+	}
+	blobTagsString := strings.Join(tags, "&")
+	return &blobTagsString
+}
+
+func SerializeBlobTags(blobTagsMap BlobTagsMap) BlobTags {
+	if len(blobTagsMap) == 0 {
+		return BlobTags{}
+	}
+	blobTagSet := make([]BlobTag, 0, len(blobTagsMap))
+	for key, val := range blobTagsMap {
+		blobTagSet = append(blobTagSet, BlobTag{Key: key, Value: val})
+	}
+	return BlobTags{BlobTagSet: blobTagSet}
+}
+
+// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
+// Passing azblob.CountToEnd (0) for count will download the blob from the offset to the end.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string).
+// Therefore it is not required to pass these here.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
+func (b BlobURL) Download(ctx context.Context, offset int64, count int64, ac BlobAccessConditions, rangeGetContentMD5 bool, cpk ClientProvidedKeyOptions) (*DownloadResponse, error) {
+	var xRangeGetContentMD5 *bool
+	if rangeGetContentMD5 {
+		xRangeGetContentMD5 = &rangeGetContentMD5
+	}
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	dr, err := b.blobClient.Download(ctx, nil, nil, nil,
+		httpRange{offset: offset, count: count}.pointers(),
+		ac.LeaseAccessConditions.pointers(), xRangeGetContentMD5, nil,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+	if err != nil {
+		return nil, err
+	}
+	return &DownloadResponse{
+		b:       b,
+		r:       dr,
+		ctx:     ctx,
+		getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: dr.ETag()},
+	}, err
+}
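+
+// A full-blob download sketch (ctx and blobURL assumed); the returned body is a
+// retrying stream and should be closed by the caller:
+//
+//	resp, err := blobURL.Download(ctx, 0, CountToEnd, BlobAccessConditions{}, false, ClientProvidedKeyOptions{})
+//	if err == nil {
+//		body := resp.Body(RetryReaderOptions{})
+//		defer body.Close()
+//	}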
+
+// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
+// Note 1: deleting a blob also deletes all its snapshots.
+// Note 2: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string).
+// Therefore it is not required to pass these here.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (b BlobURL) Delete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil, BlobDeleteNone)
+}
+
+// PermanentDelete permanently deletes soft-deleted snapshots & soft-deleted version blobs and is a dangerous operation and SHOULD NOT BE USED.
+// WARNING: This operation should not be used unless you know exactly the implications. We will not provide support for this API.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
+func (b BlobURL) PermanentDelete(ctx context.Context, deleteOptions DeleteSnapshotsOptionType, ac BlobAccessConditions) (*BlobDeleteResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.Delete(ctx, nil, nil, nil, ac.LeaseAccessConditions.pointers(), deleteOptions,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil, BlobDeletePermanent)
+}
+
+// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+// Each call to this operation replaces all existing tags attached to the blob.
+// To remove all tags from the blob, call this operation with no tags set.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
+func (b BlobURL) SetTags(ctx context.Context, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, ifTags *string, blobTagsMap BlobTagsMap) (*BlobSetTagsResponse, error) {
+	tags := SerializeBlobTags(blobTagsMap)
+	return b.blobClient.SetTags(ctx, nil, nil, transactionalContentMD5, transactionalContentCrc64, nil, ifTags, nil, &tags)
+}
+
+// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
+func (b BlobURL) GetTags(ctx context.Context, ifTags *string) (*BlobTags, error) {
+	return b.blobClient.GetTags(ctx, nil, nil, nil, nil, ifTags, nil)
+}
+
+// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
+func (b BlobURL) Undelete(ctx context.Context) (*BlobUndeleteResponse, error) {
+	return b.blobClient.Undelete(ctx, nil, nil)
+}
+
+// SetTier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account
+// and on a block blob in a blob storage account (locally redundant storage only).
+// A premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob.
+// A block blob's tier determines Hot/Cool/Archive storage type. This operation does not update the blob's ETag.
+// Note: VersionId is an optional parameter which is part of request URL query params.
+// It can be explicitly set by calling the WithVersionID(versionID string) function, hence it is not required to pass it here.
+// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
+func (b BlobURL) SetTier(ctx context.Context, tier AccessTierType, lac LeaseAccessConditions, rehydratePriority RehydratePriorityType) (*BlobSetTierResponse, error) {
+	return b.blobClient.SetTier(ctx, tier, nil,
+		nil, // Blob versioning
+		nil, rehydratePriority, nil, lac.pointers(),
+		nil) // Blob ifTags
+}
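+
+// A tag round-trip sketch matching the signatures above (ctx and blobURL assumed):
+//
+//	_, err := blobURL.SetTags(ctx, nil, nil, nil, BlobTagsMap{"env": "dev"})
+//	tags, getErr := blobURL.GetTags(ctx, nil)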
+
+// GetProperties returns the blob's properties.
+// Note: Snapshot/VersionId are optional parameters which are part of request URL query params.
+// These parameters can be explicitly set by calling WithSnapshot(snapshot string)/WithVersionID(versionID string).
+// Therefore it is not required to pass these here.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
+func (b BlobURL) GetProperties(ctx context.Context, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobGetPropertiesResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.GetProperties(ctx, nil,
+		nil, // Blob versioning
+		nil, ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// SetHTTPHeaders changes a blob's HTTP headers.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
+func (b BlobURL) SetHTTPHeaders(ctx context.Context, h BlobHTTPHeaders, ac BlobAccessConditions) (*BlobSetHTTPHeadersResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.SetHTTPHeaders(ctx, nil,
+		&h.CacheControl, &h.ContentType, h.ContentMD5, &h.ContentEncoding, &h.ContentLanguage,
+		ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		&h.ContentDisposition, nil)
+}
+
+// SetMetadata changes a blob's metadata.
+// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
+func (b BlobURL) SetMetadata(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobSetMetadataResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.SetMetadata(ctx, nil, metadata, ac.LeaseAccessConditions.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// CreateSnapshot creates a read-only snapshot of a blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
+func (b BlobURL) CreateSnapshot(ctx context.Context, metadata Metadata, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*BlobCreateSnapshotResponse, error) {
+	// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
+	// because checking this would be a performance hit for a VERY unusual path, and I don't think the common case should suffer this
+	// performance hit.
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	return b.blobClient.CreateSnapshot(ctx, nil, metadata,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		ac.LeaseAccessConditions.pointers(), nil)
+}
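+
+// A snapshot sketch (ctx and blobURL assumed; Snapshot() on the response is
+// assumed to return the service-assigned timestamp of the new snapshot):
+//
+//	resp, err := blobURL.CreateSnapshot(ctx, Metadata{}, BlobAccessConditions{}, ClientProvidedKeyOptions{})
+//	if err == nil {
+//		snapURL := blobURL.WithSnapshot(resp.Snapshot())
+//		_ = snapURL
+//	}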
+
+// AcquireLease acquires a lease on the blob for write and delete operations. The lease duration must be between
+// 15 and 60 seconds, or infinite (-1).
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*BlobAcquireLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.AcquireLease(ctx, nil, &duration, &proposedID,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// RenewLease renews the blob's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobRenewLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.RenewLease(ctx, leaseID, nil,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// ReleaseLease releases the blob's previously-acquired lease.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*BlobReleaseLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.ReleaseLease(ctx, leaseID, nil,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
+// constant to break a fixed-duration lease when it expires or an infinite lease immediately.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) BreakLease(ctx context.Context, breakPeriodInSeconds int32, ac ModifiedAccessConditions) (*BlobBreakLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.BreakLease(ctx, nil, leasePeriodPointer(breakPeriodInSeconds),
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// ChangeLease changes the blob's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
+func (b BlobURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*BlobChangeLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.pointers()
+	return b.blobClient.ChangeLease(ctx, leaseID, proposedID,
+		nil, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil)
+}
+
+// LeaseBreakNaturally tells ContainerURL's or BlobURL's BreakLease method to break the lease using service semantics.
+const LeaseBreakNaturally = -1
+
+func leasePeriodPointer(period int32) (p *int32) {
+	if period != LeaseBreakNaturally {
+		p = &period
+	}
+	return p
+}
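+
+// A lease lifecycle sketch (ctx and blobURL assumed; the proposed lease ID must be
+// a GUID string, and the zero GUID below is only a placeholder):
+//
+//	acq, err := blobURL.AcquireLease(ctx, "00000000-0000-0000-0000-000000000000", 15, ModifiedAccessConditions{})
+//	if err == nil {
+//		_, _ = blobURL.ReleaseLease(ctx, acq.LeaseID(), ModifiedAccessConditions{})
+//	}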
+
+// StartCopyFromURL copies the data at the source URL to a blob.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
+func (b BlobURL) StartCopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap) (*BlobStartCopyFromURLResponse, error) {
+	srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers()
+	dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers()
+	dstLeaseID := dstac.LeaseAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	return b.blobClient.StartCopyFromURL(ctx, source.String(), nil, metadata,
+		tier, RehydratePriorityNone, srcIfModifiedSince, srcIfUnmodifiedSince,
+		srcIfMatchETag, srcIfNoneMatchETag,
+		nil, // source ifTags
+		dstIfModifiedSince, dstIfUnmodifiedSince,
+		dstIfMatchETag, dstIfNoneMatchETag,
+		nil, // Blob ifTags
+		dstLeaseID,
+		nil,
+		blobTagsString, // Blob tags
+		nil,
+		// immutability policy
+		nil, BlobImmutabilityPolicyModeNone, nil,
+	)
+}
+
+// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
+func (b BlobURL) AbortCopyFromURL(ctx context.Context, copyID string, ac LeaseAccessConditions) (*BlobAbortCopyFromURLResponse, error) {
+	return b.blobClient.AbortCopyFromURL(ctx, copyID, nil, ac.pointers(), nil)
+}
+
+// SetImmutabilityPolicy sets a temporary immutability policy with an expiration date. The expiration date must be in the future.
+// While the immutability policy is active, the blob can be read but not modified or deleted.
+// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-time-based-retention-policy-overview (Feature overview)
+// and https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-immutability-policy (REST API reference)
+// A container with object-level immutability enabled is required.
+func (b BlobURL) SetImmutabilityPolicy(ctx context.Context, expiry time.Time, mode BlobImmutabilityPolicyModeType, ifUnmodifiedSince *time.Time) (*BlobSetImmutabilityPolicyResponse, error) {
+	return b.blobClient.SetImmutabilityPolicy(ctx, nil, nil, ifUnmodifiedSince, &expiry, mode)
+}
+
+// DeleteImmutabilityPolicy deletes a temporary immutability policy with an expiration date.
+// While the immutability policy is active, the blob can be read but not modified or deleted.
+// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-time-based-retention-policy-overview (Feature overview)
+// and https://docs.microsoft.com/en-us/rest/api/storageservices/delete-blob-immutability-policy (REST API reference)
+// A container with object-level immutability enabled is required.
+func (b BlobURL) DeleteImmutabilityPolicy(ctx context.Context) (*BlobDeleteImmutabilityPolicyResponse, error) {
+	return b.blobClient.DeleteImmutabilityPolicy(ctx, nil, nil)
+}
+
+// SetLegalHold enables a temporary immutability policy that can be applied for general data protection purposes.
+// It stores the current blob version in a WORM (Write-Once Read-Many) state. While in effect, the blob can be read but not modified or deleted.
+// For more information, see https://docs.microsoft.com/en-us/azure/storage/blobs/immutable-legal-hold-overview (Feature overview)
+// and https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-legal-hold (REST API reference)
+// A container with object-level immutability enabled is required.
+func (b BlobURL) SetLegalHold(ctx context.Context, legalHold bool) (*BlobSetLegalHoldResponse, error) {
+	return b.blobClient.SetLegalHold(ctx, legalHold, nil, nil)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
new file mode 100644
index 00000000000..ae0079e8e5d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_block_blob.go
@@ -0,0 +1,182 @@
+package azblob
+
+import (
+	"context"
+	"io"
+	"net/url"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+const (
+	// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
+	BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
+
+	// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
+	BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4000MiB
+
+	// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
+	BlockBlobMaxBlocks = 50000
+)
+
+// BlockBlobURL defines a set of operations applicable to block blobs.
+type BlockBlobURL struct {
+	BlobURL
+	bbClient blockBlobClient
+}
+
+// NewBlockBlobURL creates a BlockBlobURL object using the specified URL and request policy pipeline.
+func NewBlockBlobURL(url url.URL, p pipeline.Pipeline) BlockBlobURL {
+	blobClient := newBlobClient(url, p)
+	bbClient := newBlockBlobClient(url, p)
+	return BlockBlobURL{BlobURL: BlobURL{blobClient: blobClient}, bbClient: bbClient}
+}
+
+// WithPipeline creates a new BlockBlobURL object identical to the source but with the specific request policy pipeline.
+func (bb BlockBlobURL) WithPipeline(p pipeline.Pipeline) BlockBlobURL {
+	return NewBlockBlobURL(bb.blobClient.URL(), p)
+}
+
+// WithSnapshot creates a new BlockBlobURL object identical to the source but with the specified snapshot timestamp.
+// Pass "" to remove the snapshot returning a URL to the base blob.
+func (bb BlockBlobURL) WithSnapshot(snapshot string) BlockBlobURL {
+	p := NewBlobURLParts(bb.URL())
+	p.Snapshot = snapshot
+	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
+}
+
+// WithVersionID creates a new BlockBlobURL object identical to the source but with the specified version id.
+// Pass "" to remove the version ID returning a URL to the base blob.
+func (bb BlockBlobURL) WithVersionID(versionId string) BlockBlobURL {
+	p := NewBlobURLParts(bb.URL())
+	p.VersionID = versionId
+	return NewBlockBlobURL(p.URL(), bb.blobClient.Pipeline())
+}
+
+func (bb BlockBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) {
+	return bb.blobClient.GetAccountInfo(ctx)
+}
+
+// Upload creates a new block blob or overwrites an existing block blob.
+// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
+// supported with Upload; the content of the existing blob is overwritten with the new content. To
+// perform a partial update of a block blob, use StageBlock and CommitBlockList.
+// This method panics if the stream is not at position 0.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (bb BlockBlobURL) Upload(ctx context.Context, body io.ReadSeeker, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*BlockBlobUploadResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
+	if err != nil {
+		return nil, err
+	}
+	return bb.bbClient.Upload(ctx, body, count, nil, nil,
+		&h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5,
+		&h.CacheControl, metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		tier, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil,
+		blobTagsString, // Blob tags
+		// immutability policy
+		immutabilityExpiry, immutabilityMode, legalHold,
+	)
+}
+
+// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
+func (bb BlockBlobURL) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeeker, ac LeaseAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*BlockBlobStageBlockResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return nil, err
+	}
+	return bb.bbClient.StageBlock(ctx, base64BlockID, count, body, transactionalMD5, nil, nil, ac.pointers(),
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V
+		cpk.EncryptionScope, // CPK-N
+		nil)
+}
+
+// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// If count is CountToEnd (0), then data is read from specified offset to the end.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
+func (bb BlockBlobURL) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL url.URL, offset int64, count int64, destinationAccessConditions LeaseAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*BlockBlobStageBlockFromURLResponse, error) {
+	sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers()
+	return bb.bbClient.StageBlockFromURL(ctx, base64BlockID, 0, sourceURL.String(), httpRange{offset: offset, count: count}.pointers(), nil, nil, nil,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		destinationAccessConditions.pointers(), sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization))
+}
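+
+// The canonical stage-then-commit flow (ctx, bbURL, and chunk assumed; block IDs
+// must be Base64-encoded and of equal length within one blob):
+//
+//	id := base64.StdEncoding.EncodeToString([]byte("block-0000000001"))
+//	_, err := bbURL.StageBlock(ctx, id, bytes.NewReader(chunk), LeaseAccessConditions{}, nil, ClientProvidedKeyOptions{})
+//	if err == nil {
+//		_, err = bbURL.CommitBlockList(ctx, []string{id}, BlobHTTPHeaders{}, Metadata{}, BlobAccessConditions{},
+//			DefaultAccessTier, nil, ClientProvidedKeyOptions{}, ImmutabilityPolicyOptions{})
+//	}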
+// In order to be written as part of a blob, a block must have been successfully written
+// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
+// by uploading only those blocks that have changed, then committing the new and existing
+// blocks together. Any blocks not specified in the block list are permanently deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
+func (bb BlockBlobURL) CommitBlockList(ctx context.Context, base64BlockIDs []string, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*BlockBlobCommitBlockListResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers()
+	blobTagsString := SerializeBlobTagsHeader(blobTagsMap)
+	immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers()
+	return bb.bbClient.CommitBlockList(ctx, BlockLookupList{Latest: base64BlockIDs}, nil,
+		&h.CacheControl, &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, nil, nil,
+		metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition,
+		cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK
+		cpk.EncryptionScope, // CPK-N
+		tier,
+		ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag,
+		nil, // Blob ifTags
+		nil,
+		blobTagsString, // Blob tags
+		// immutability policy
+		immutabilityExpiry, immutabilityMode, legalHold,
+	)
+}
+
+// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
+func (bb BlockBlobURL) GetBlockList(ctx context.Context, listType BlockListType, ac LeaseAccessConditions) (*BlockList, error) {
+	return bb.bbClient.GetBlockList(ctx, listType, nil, nil, ac.pointers(),
+		nil, // Blob ifTags
+		nil)
+}
+
+// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
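+//
+// Editor's note: a minimal sketch (an illustration; it assumes `srcURL` is a url.URL
+// for a source blob the service can read, and `bbURL` is an existing BlockBlobURL):
+//
+//	_, err := bbURL.CopyFromURL(ctx, srcURL, azblob.Metadata{},
+//		azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{},
+//		nil, azblob.AccessTierNone, nil, azblob.ImmutabilityPolicyOptions{}, nil)
+//	if err != nil {
+//		// handle the error
+//	}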
+func (bb BlockBlobURL) CopyFromURL(ctx context.Context, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap, immutability ImmutabilityPolicyOptions, sourceAuthorization TokenCredential) (*BlobCopyFromURLResponse, error) { + srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() + dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() + dstLeaseID := dstac.LeaseAccessConditions.pointers() + blobTagsString := SerializeBlobTagsHeader(blobTagsMap) + immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers() + return bb.blobClient.CopyFromURL(ctx, source.String(), nil, metadata, tier, + srcIfModifiedSince, srcIfUnmodifiedSince, + srcIfMatchETag, srcIfNoneMatchETag, + dstIfModifiedSince, dstIfUnmodifiedSince, + dstIfMatchETag, dstIfNoneMatchETag, + nil, // Blob ifTags + dstLeaseID, nil, srcContentMD5, + blobTagsString, // Blob tags + // immutability policy + immutabilityExpiry, immutabilityMode, legalHold, tokenCredentialPointers(sourceAuthorization)) +} + +// PutBlobFromURL synchronously creates a new Block Blob with data from the source URL up to a max length of 256MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-blob-from-url. +func (bb BlockBlobURL) PutBlobFromURL(ctx context.Context, h BlobHTTPHeaders, source url.URL, metadata Metadata, srcac ModifiedAccessConditions, dstac BlobAccessConditions, srcContentMD5 []byte, dstContentMD5 []byte, tier AccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*BlockBlobPutBlobFromURLResponse, error) { + + srcIfModifiedSince, srcIfUnmodifiedSince, srcIfMatchETag, srcIfNoneMatchETag := srcac.pointers() + dstIfModifiedSince, dstIfUnmodifiedSince, dstIfMatchETag, dstIfNoneMatchETag := dstac.ModifiedAccessConditions.pointers() + dstLeaseID := dstac.LeaseAccessConditions.pointers() + blobTagsString := SerializeBlobTagsHeader(blobTagsMap) + + return bb.bbClient.PutBlobFromURL(ctx, 0, source.String(), nil, nil, + &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, dstContentMD5, &h.CacheControl, + metadata, dstLeaseID, &h.ContentDisposition, cpk.EncryptionKey, cpk.EncryptionKeySha256, + cpk.EncryptionAlgorithm, cpk.EncryptionScope, tier, dstIfModifiedSince, dstIfUnmodifiedSince, + dstIfMatchETag, dstIfNoneMatchETag, nil, srcIfModifiedSince, srcIfUnmodifiedSince, + srcIfMatchETag, srcIfNoneMatchETag, nil, nil, srcContentMD5, blobTagsString, nil, tokenCredentialPointers(sourceAuthorization)) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go new file mode 100644 index 00000000000..8fd78619755 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_container.go @@ -0,0 +1,319 @@ +package azblob + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// A ContainerURL represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type ContainerURL struct { + client containerClient +} + +// NewContainerURL creates a ContainerURL object using the specified URL and request policy pipeline. 
+func NewContainerURL(url url.URL, p pipeline.Pipeline) ContainerURL { + client := newContainerClient(url, p) + return ContainerURL{client: client} +} + +// URL returns the URL endpoint used by the ContainerURL object. +func (c ContainerURL) URL() url.URL { + return c.client.URL() +} + +// String returns the URL as a string. +func (c ContainerURL) String() string { + u := c.URL() + return u.String() +} + +func (c ContainerURL) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { + return c.client.GetAccountInfo(ctx) +} + +// WithPipeline creates a new ContainerURL object identical to the source but with the specified request policy pipeline. +func (c ContainerURL) WithPipeline(p pipeline.Pipeline) ContainerURL { + return NewContainerURL(c.URL(), p) +} + +// NewBlobURL creates a new BlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlobURL instead of calling this object's +// NewBlobURL method. +func (c ContainerURL) NewBlobURL(blobName string) BlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlobURL(blobURL, c.client.Pipeline()) +} + +// NewAppendBlobURL creates a new AppendBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewAppendBlobURL instead of calling this object's +// NewAppendBlobURL method. +func (c ContainerURL) NewAppendBlobURL(blobName string) AppendBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewAppendBlobURL(blobURL, c.client.Pipeline()) +} + +// NewBlockBlobURL creates a new BlockBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new BlockBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the BlockBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlockBlobURL instead of calling this object's +// NewBlockBlobURL method. +func (c ContainerURL) NewBlockBlobURL(blobName string) BlockBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewBlockBlobURL(blobURL, c.client.Pipeline()) +} + +// NewPageBlobURL creates a new PageBlobURL object by concatenating blobName to the end of +// ContainerURL's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerURL. +// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewPageBlobURL instead of calling this object's +// NewPageBlobURL method. +func (c ContainerURL) NewPageBlobURL(blobName string) PageBlobURL { + blobURL := appendToURLPath(c.URL(), blobName) + return NewPageBlobURL(blobURL, c.client.Pipeline()) +} + +// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. 
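+//
+// Editor's note: a minimal sketch (an illustration; it assumes a ServiceURL `svcURL`
+// built with a valid credential pipeline):
+//
+//	containerURL := svcURL.NewContainerURL("mycontainer")
+//	_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
+//	if err != nil {
+//		// the returned StorageError carries ServiceCodeContainerAlreadyExists if the name is taken
+//	}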
+func (c ContainerURL) Create(ctx context.Context, metadata Metadata, publicAccessType PublicAccessType) (*ContainerCreateResponse, error) { + return c.client.Create(ctx, nil, metadata, publicAccessType, nil, + nil, nil, // container encryption + ) +} + +// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. +func (c ContainerURL) Delete(ctx context.Context, ac ContainerAccessConditions) (*ContainerDeleteResponse, error) { + if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { + return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") + } + + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers() + return c.client.Delete(ctx, nil, ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, nil) +} + +// GetProperties returns the container's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. +func (c ContainerURL) GetProperties(ctx context.Context, ac LeaseAccessConditions) (*ContainerGetPropertiesResponse, error) { + // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. + // This allows us to not expose a GetProperties method at all simplifying the API. + return c.client.GetProperties(ctx, nil, ac.pointers(), nil) +} + +// SetMetadata sets the container's metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. +func (c ContainerURL) SetMetadata(ctx context.Context, metadata Metadata, ac ContainerAccessConditions) (*ContainerSetMetadataResponse, error) { + if !ac.IfUnmodifiedSince.IsZero() || ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { + return nil, errors.New("the IfUnmodifiedSince, IfMatch, and IfNoneMatch must have their default values because they are ignored by the blob service") + } + ifModifiedSince, _, _, _ := ac.ModifiedAccessConditions.pointers() + return c.client.SetMetadata(ctx, nil, ac.LeaseAccessConditions.pointers(), metadata, ifModifiedSince, nil) +} + +// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. +func (c ContainerURL) GetAccessPolicy(ctx context.Context, ac LeaseAccessConditions) (*SignedIdentifiers, error) { + return c.client.GetAccessPolicy(ctx, nil, ac.pointers(), nil) +} + +// The AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage container. +// Call this method to set AccessPolicy's Permission field. 
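+//
+// Editor's note, for example (an illustration):
+//
+//	perms := azblob.AccessPolicyPermission{Read: true, List: true}
+//	_ = perms.String() // "rl"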
+func (p AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. +func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} + +// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. +func (c ContainerURL) SetAccessPolicy(ctx context.Context, accessType PublicAccessType, si []SignedIdentifier, + ac ContainerAccessConditions) (*ContainerSetAccessPolicyResponse, error) { + if ac.IfMatch != ETagNone || ac.IfNoneMatch != ETagNone { + return nil, errors.New("the IfMatch and IfNoneMatch access conditions must have their default values because they are ignored by the service") + } + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.ModifiedAccessConditions.pointers() + return c.client.SetAccessPolicy(ctx, si, nil, ac.LeaseAccessConditions.pointers(), + accessType, ifModifiedSince, ifUnmodifiedSince, nil) +} + +// AcquireLease acquires a lease on the container for delete operations. The lease duration must be between 15 to 60 seconds, or infinite (-1). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) AcquireLease(ctx context.Context, proposedID string, duration int32, ac ModifiedAccessConditions) (*ContainerAcquireLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.AcquireLease(ctx, nil, &duration, &proposedID, + ifModifiedSince, ifUnmodifiedSince, nil) +} + +// RenewLease renews the container's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) RenewLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerRenewLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.RenewLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) +} + +// ReleaseLease releases the container's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (c ContainerURL) ReleaseLease(ctx context.Context, leaseID string, ac ModifiedAccessConditions) (*ContainerReleaseLeaseResponse, error) { + ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers() + return c.client.ReleaseLease(ctx, leaseID, nil, ifModifiedSince, ifUnmodifiedSince, nil) +} + +// BreakLease breaks the container's previously-acquired lease (if it exists). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. 
+func (c ContainerURL) BreakLease(ctx context.Context, period int32, ac ModifiedAccessConditions) (*ContainerBreakLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.BreakLease(ctx, nil, leasePeriodPointer(period), ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ChangeLease changes the container's lease ID.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
+func (c ContainerURL) ChangeLease(ctx context.Context, leaseID string, proposedID string, ac ModifiedAccessConditions) (*ContainerChangeLeaseResponse, error) {
+	ifModifiedSince, ifUnmodifiedSince, _, _ := ac.pointers()
+	return c.client.ChangeLease(ctx, leaseID, proposedID, nil, ifModifiedSince, ifUnmodifiedSince, nil)
+}
+
+// ListBlobsFlatSegment returns a single segment of blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsFlatSegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c ContainerURL) ListBlobsFlatSegment(ctx context.Context, marker Marker, o ListBlobsSegmentOptions) (*ListBlobsFlatSegmentResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobFlatSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
+}
+
+// ListBlobsHierarchySegment returns a single segment of blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListBlobsHierarchySegment again (passing the
+// previously-returned Marker) to get the next segment.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c ContainerURL) ListBlobsHierarchySegment(ctx context.Context, marker Marker, delimiter string, o ListBlobsSegmentOptions) (*ListBlobsHierarchySegmentResponse, error) {
+	if o.Details.Snapshots {
+		return nil, errors.New("snapshots are not supported in this listing operation")
+	}
+	prefix, include, maxResults := o.pointers()
+	return c.client.ListBlobHierarchySegment(ctx, delimiter, prefix, marker.Val, maxResults, include, nil, nil)
+}
+
+// ListBlobsSegmentOptions defines options available when calling ListBlobsFlatSegment or ListBlobsHierarchySegment.
+type ListBlobsSegmentOptions struct {
+	Details BlobListingDetails // No IncludeType header is produced if ""
+	Prefix  string             // No Prefix header is produced if ""
+
+	// MaxResults sets the maximum desired results you want the service to return. Note, the
+	// service may return fewer results than requested.
+	// MaxResults=0 means no 'MaxResults' header specified.
+	MaxResults int32
+}
+
+func (o *ListBlobsSegmentOptions) pointers() (prefix *string, include []ListBlobsIncludeItemType, maxResults *int32) {
+	if o.Prefix != "" {
+		prefix = &o.Prefix
+	}
+	include = o.Details.slice()
+	if o.MaxResults != 0 {
+		maxResults = &o.MaxResults
+	}
+	return
+}
+
+// BlobListingDetails indicates what additional information the service should return with each blob.
+type BlobListingDetails struct {
+	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, Permissions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool
+}
+
+// slice produces the Include query parameter's values.
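+//
+// Editor's note: a sketch of the segmented-listing loop described above (an
+// illustration; it assumes an existing ContainerURL `containerURL`):
+//
+//	for marker := (azblob.Marker{}); marker.NotDone(); {
+//		resp, err := containerURL.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{})
+//		if err != nil {
+//			break // handle the error
+//		}
+//		marker = resp.NextMarker
+//		for _, blob := range resp.Segment.BlobItems {
+//			_ = blob.Name // process each blob
+//		}
+//	}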
+func (d *BlobListingDetails) slice() []ListBlobsIncludeItemType { + items := []ListBlobsIncludeItemType{} + // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! + if d.Copy { + items = append(items, ListBlobsIncludeItemCopy) + } + if d.Deleted { + items = append(items, ListBlobsIncludeItemDeleted) + } + if d.DeletedWithVersions { + items = append(items, ListBlobsIncludeItemDeletedwithversions) + } + if d.ImmutabilityPolicy { + items = append(items, ListBlobsIncludeItemImmutabilitypolicy) + } + if d.LegalHold { + items = append(items, ListBlobsIncludeItemLegalhold) + } + if d.Metadata { + items = append(items, ListBlobsIncludeItemMetadata) + } + if d.Permissions { + items = append(items, ListBlobsIncludeItemPermissions) + } + if d.Snapshots { + items = append(items, ListBlobsIncludeItemSnapshots) + } + if d.UncommittedBlobs { + items = append(items, ListBlobsIncludeItemUncommittedblobs) + } + if d.Tags { + items = append(items, ListBlobsIncludeItemTags) + } + if d.Versions { + items = append(items, ListBlobsIncludeItemVersions) + } + return items +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go new file mode 100644 index 00000000000..dc57765b416 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_page_blob.go @@ -0,0 +1,276 @@ +package azblob + +import ( + "context" + "fmt" + "io" + "net/url" + "strconv" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // PageBlobPageBytes indicates the number of bytes in a page (512). + PageBlobPageBytes = 512 + + // PageBlobMaxUploadPagesBytes indicates the maximum number of bytes that can be sent in a call to PutPage. + PageBlobMaxUploadPagesBytes = 4 * 1024 * 1024 // 4MB +) + +// PageBlobURL defines a set of operations applicable to page blobs. +type PageBlobURL struct { + BlobURL + pbClient pageBlobClient +} + +// NewPageBlobURL creates a PageBlobURL object using the specified URL and request policy pipeline. +func NewPageBlobURL(url url.URL, p pipeline.Pipeline) PageBlobURL { + blobClient := newBlobClient(url, p) + pbClient := newPageBlobClient(url, p) + return PageBlobURL{BlobURL: BlobURL{blobClient: blobClient}, pbClient: pbClient} +} + +// WithPipeline creates a new PageBlobURL object identical to the source but with the specific request policy pipeline. +func (pb PageBlobURL) WithPipeline(p pipeline.Pipeline) PageBlobURL { + return NewPageBlobURL(pb.blobClient.URL(), p) +} + +// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (pb PageBlobURL) WithSnapshot(snapshot string) PageBlobURL { + p := NewBlobURLParts(pb.URL()) + p.Snapshot = snapshot + return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline()) +} + +// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (pb PageBlobURL) WithVersionID(versionId string) PageBlobURL { + p := NewBlobURLParts(pb.URL()) + p.VersionID = versionId + return NewPageBlobURL(p.URL(), pb.blobClient.Pipeline()) +} + +func (pb PageBlobURL) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { + return pb.blobClient.GetAccountInfo(ctx) +} + +// Create creates a page blob of the specified length. 
Call PutPage to upload data to a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (pb PageBlobURL) Create(ctx context.Context, size int64, sequenceNumber int64, h BlobHTTPHeaders, metadata Metadata, ac BlobAccessConditions, tier PremiumPageBlobAccessTierType, blobTagsMap BlobTagsMap, cpk ClientProvidedKeyOptions, immutability ImmutabilityPolicyOptions) (*PageBlobCreateResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + blobTagsString := SerializeBlobTagsHeader(blobTagsMap) + immutabilityExpiry, immutabilityMode, legalHold := immutability.pointers() + return pb.pbClient.Create(ctx, 0, size, nil, tier, + &h.ContentType, &h.ContentEncoding, &h.ContentLanguage, h.ContentMD5, &h.CacheControl, + metadata, ac.LeaseAccessConditions.pointers(), &h.ContentDisposition, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob tags + &sequenceNumber, nil, + blobTagsString, // Blob tags + // immutability policy + immutabilityExpiry, immutabilityMode, legalHold, + ) +} + +// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb PageBlobURL) UploadPages(ctx context.Context, offset int64, body io.ReadSeeker, ac PageBlobAccessConditions, transactionalMD5 []byte, cpk ClientProvidedKeyOptions) (*PageBlobUploadPagesResponse, error) { + count, err := validateSeekableStreamAt0AndGetCount(body) + if err != nil { + return nil, err + } + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() + return pb.pbClient.UploadPages(ctx, body, count, transactionalMD5, nil, nil, + PageRange{Start: offset, End: offset + count - 1}.pointers(), + ac.LeaseAccessConditions.pointers(), + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) +} + +// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob. +// The sourceOffset specifies the start offset of source data to copy from. +// The destOffset specifies the start offset of data in page blob will be written to. +// The count must be a multiple of 512 bytes. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url. 
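+//
+// Editor's note: page operations are 512-byte aligned; a minimal UploadPages sketch
+// (an illustration; it assumes an existing PageBlobURL `pbURL` created with enough capacity):
+//
+//	page := make([]byte, azblob.PageBlobPageBytes) // offset and size must be multiples of 512
+//	_, err := pbURL.UploadPages(ctx, 0, bytes.NewReader(page),
+//		azblob.PageBlobAccessConditions{}, nil, azblob.ClientProvidedKeyOptions{})
+//	if err != nil {
+//		// handle the error
+//	}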
+func (pb PageBlobURL) UploadPagesFromURL(ctx context.Context, sourceURL url.URL, sourceOffset int64, destOffset int64, count int64, transactionalMD5 []byte, destinationAccessConditions PageBlobAccessConditions, sourceAccessConditions ModifiedAccessConditions, cpk ClientProvidedKeyOptions, sourceAuthorization TokenCredential) (*PageBlobUploadPagesFromURLResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := destinationAccessConditions.ModifiedAccessConditions.pointers() + sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag := sourceAccessConditions.pointers() + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := destinationAccessConditions.SequenceNumberAccessConditions.pointers() + return pb.pbClient.UploadPagesFromURL(ctx, sourceURL.String(), *PageRange{Start: sourceOffset, End: sourceOffset + count - 1}.pointers(), 0, + *PageRange{Start: destOffset, End: destOffset + count - 1}.pointers(), transactionalMD5, nil, nil, + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK-V + cpk.EncryptionScope, // CPK-N + destinationAccessConditions.LeaseAccessConditions.pointers(), + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatchETag, sourceIfNoneMatchETag, nil, tokenCredentialPointers(sourceAuthorization)) +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb PageBlobURL) ClearPages(ctx context.Context, offset int64, count int64, ac PageBlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobClearPagesResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, ifSequenceNumberEqual := ac.SequenceNumberAccessConditions.pointers() + return pb.pbClient.ClearPages(ctx, 0, nil, + PageRange{Start: offset, End: offset + count - 1}.pointers(), + ac.LeaseAccessConditions.pointers(), + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + ifSequenceNumberLessThanOrEqual, ifSequenceNumberLessThan, + ifSequenceNumberEqual, ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil) +} + +// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb PageBlobURL) GetPageRanges(ctx context.Context, offset int64, count int64, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return pb.pbClient.GetPageRanges(ctx, nil, nil, + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) +} + +// GetManagedDiskPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob representing managed disk. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. 
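+//
+// Editor's note: a diff sketch using the plain GetPageRangesDiff variant below (an
+// illustration; `pbURL` is a PageBlobURL and `snap` a snapshot timestamp from CreateSnapshot):
+//
+//	pl, err := pbURL.GetPageRangesDiff(ctx, 0, azblob.CountToEnd, snap, azblob.BlobAccessConditions{})
+//	if err == nil {
+//		_ = pl.PageRange // the page ranges changed since the snapshot
+//	}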
+func (pb PageBlobURL) GetManagedDiskPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot *string, prevSnapshotURL *string, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + + return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, prevSnapshot, + prevSnapshotURL, // Get managed disk diff + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) +} + +// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb PageBlobURL) GetPageRangesDiff(ctx context.Context, offset int64, count int64, prevSnapshot string, ac BlobAccessConditions) (*PageList, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return pb.pbClient.GetPageRangesDiff(ctx, nil, nil, &prevSnapshot, + nil, // Get managed disk diff + httpRange{offset: offset, count: count}.pointers(), + ac.LeaseAccessConditions.pointers(), + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, + nil, // Blob ifTags + nil) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb PageBlobURL) Resize(ctx context.Context, size int64, ac BlobAccessConditions, cpk ClientProvidedKeyOptions) (*PageBlobResizeResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + return pb.pbClient.Resize(ctx, size, nil, ac.LeaseAccessConditions.pointers(), + cpk.EncryptionKey, cpk.EncryptionKeySha256, cpk.EncryptionAlgorithm, // CPK + cpk.EncryptionScope, // CPK-N + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil) +} + +// UpdateSequenceNumber sets the page blob's sequence number. +func (pb PageBlobURL) UpdateSequenceNumber(ctx context.Context, action SequenceNumberActionType, sequenceNumber int64, + ac BlobAccessConditions) (*PageBlobUpdateSequenceNumberResponse, error) { + sn := &sequenceNumber + if action == SequenceNumberActionIncrement { + sn = nil + } + ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch := ac.ModifiedAccessConditions.pointers() + return pb.pbClient.UpdateSequenceNumber(ctx, action, nil, + ac.LeaseAccessConditions.pointers(), ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, + nil, sn, nil) +} + +// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination. +// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and +// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots. 
+func (pb PageBlobURL) StartCopyIncremental(ctx context.Context, source url.URL, snapshot string, ac BlobAccessConditions) (*PageBlobCopyIncrementalResponse, error) { + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag := ac.ModifiedAccessConditions.pointers() + qp := source.Query() + qp.Set("snapshot", snapshot) + source.RawQuery = qp.Encode() + return pb.pbClient.CopyIncremental(ctx, source.String(), nil, + ifModifiedSince, ifUnmodifiedSince, ifMatchETag, ifNoneMatchETag, nil, nil) +} + +func (pr PageRange) pointers() *string { + endOffset := strconv.FormatInt(int64(pr.End), 10) + asString := fmt.Sprintf("bytes=%v-%s", pr.Start, endOffset) + return &asString +} + +type PageBlobAccessConditions struct { + ModifiedAccessConditions + LeaseAccessConditions + SequenceNumberAccessConditions +} + +// SequenceNumberAccessConditions identifies page blob-specific access conditions which you optionally set. +type SequenceNumberAccessConditions struct { + // IfSequenceNumberLessThan ensures that the page blob operation succeeds + // only if the blob's sequence number is less than a value. + // IfSequenceNumberLessThan=0 means no 'IfSequenceNumberLessThan' header specified. + // IfSequenceNumberLessThan>0 means 'IfSequenceNumberLessThan' header specified with its value + // IfSequenceNumberLessThan==-1 means 'IfSequenceNumberLessThan' header specified with a value of 0 + IfSequenceNumberLessThan int64 + + // IfSequenceNumberLessThanOrEqual ensures that the page blob operation succeeds + // only if the blob's sequence number is less than or equal to a value. + // IfSequenceNumberLessThanOrEqual=0 means no 'IfSequenceNumberLessThanOrEqual' header specified. + // IfSequenceNumberLessThanOrEqual>0 means 'IfSequenceNumberLessThanOrEqual' header specified with its value + // IfSequenceNumberLessThanOrEqual=-1 means 'IfSequenceNumberLessThanOrEqual' header specified with a value of 0 + IfSequenceNumberLessThanOrEqual int64 + + // IfSequenceNumberEqual ensures that the page blob operation succeeds + // only if the blob's sequence number is equal to a value. + // IfSequenceNumberEqual=0 means no 'IfSequenceNumberEqual' header specified. + // IfSequenceNumberEqual>0 means 'IfSequenceNumberEqual' header specified with its value + // IfSequenceNumberEqual=-1 means 'IfSequenceNumberEqual' header specified with a value of 0 + IfSequenceNumberEqual int64 +} + +// pointers is for internal infrastructure. It returns the fields as pointers. 
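+//
+// Editor's note: per the sentinel scheme documented above, a caller that wants the
+// sequence-number header sent with the value 0 must pass -1, while 0 omits the header
+// entirely. For example (an illustration):
+//
+//	ac := azblob.PageBlobAccessConditions{
+//		SequenceNumberAccessConditions: azblob.SequenceNumberAccessConditions{
+//			IfSequenceNumberLessThanOrEqual: -1, // sends the header with value 0
+//		},
+//	}
+//	_ = ac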
+func (ac SequenceNumberAccessConditions) pointers() (snltoe *int64, snlt *int64, sne *int64) { + var zero int64 // Defaults to 0 + switch ac.IfSequenceNumberLessThan { + case -1: + snlt = &zero + case 0: + snlt = nil + default: + snlt = &ac.IfSequenceNumberLessThan + } + + switch ac.IfSequenceNumberLessThanOrEqual { + case -1: + snltoe = &zero + case 0: + snltoe = nil + default: + snltoe = &ac.IfSequenceNumberLessThanOrEqual + } + switch ac.IfSequenceNumberEqual { + case -1: + sne = &zero + case 0: + sne = nil + default: + sne = &ac.IfSequenceNumberEqual + } + return +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go new file mode 100644 index 00000000000..ce3ac97dc70 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/url_service.go @@ -0,0 +1,177 @@ +package azblob + +import ( + "context" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// A ServiceURL represents a URL to the Azure Storage Blob service allowing you to manipulate blob containers. +type ServiceURL struct { + client serviceClient +} + +// NewServiceURL creates a ServiceURL object using the specified URL and request policy pipeline. +func NewServiceURL(primaryURL url.URL, p pipeline.Pipeline) ServiceURL { + client := newServiceClient(primaryURL, p) + return ServiceURL{client: client} +} + +//GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object. +//OAuth is required for this call, as well as any role that can delegate access to the storage account. +func (s ServiceURL) GetUserDelegationCredential(ctx context.Context, info KeyInfo, timeout *int32, requestID *string) (UserDelegationCredential, error) { + sc := newServiceClient(s.client.url, s.client.p) + udk, err := sc.GetUserDelegationKey(ctx, info, timeout, requestID) + if err != nil { + return UserDelegationCredential{}, err + } + return NewUserDelegationCredential(strings.Split(s.client.url.Host, ".")[0], *udk), nil +} + +//TODO this was supposed to be generated +//NewKeyInfo creates a new KeyInfo struct with the correct time formatting & conversion +func NewKeyInfo(Start, Expiry time.Time) KeyInfo { + return KeyInfo{ + Start: Start.UTC().Format(SASTimeFormat), + Expiry: Expiry.UTC().Format(SASTimeFormat), + } +} + +func (s ServiceURL) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { + return s.client.GetAccountInfo(ctx) +} + +// URL returns the URL endpoint used by the ServiceURL object. +func (s ServiceURL) URL() url.URL { + return s.client.URL() +} + +// String returns the URL as a string. +func (s ServiceURL) String() string { + u := s.URL() + return u.String() +} + +// WithPipeline creates a new ServiceURL object identical to the source but with the specified request policy pipeline. +func (s ServiceURL) WithPipeline(p pipeline.Pipeline) ServiceURL { + return NewServiceURL(s.URL(), p) +} + +// NewContainerURL creates a new ContainerURL object by concatenating containerName to the end of +// ServiceURL's URL. The new ContainerURL uses the same request policy pipeline as the ServiceURL. 
+// To change the pipeline, create the ContainerURL and then call its WithPipeline method passing in the
+// desired pipeline object. Or, call this package's NewContainerURL instead of calling this object's
+// NewContainerURL method.
+func (s ServiceURL) NewContainerURL(containerName string) ContainerURL {
+	containerURL := appendToURLPath(s.URL(), containerName)
+	return NewContainerURL(containerURL, s.client.Pipeline())
+}
+
+// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required)
+func appendToURLPath(u url.URL, name string) url.URL {
+	// e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f"
+	// When you call url.Parse() this is what you'll get:
+	//     Scheme: "https"
+	//     Opaque: ""
+	//       User: nil
+	//       Host: "ms.com"
+	//       Path: "/a/b/"	This should start with a / and it might or might not have a trailing slash
+	//    RawPath: ""
+	// ForceQuery: false
+	//   RawQuery: "k1=v1&k2=v2"
+	//   Fragment: "f"
+	if len(u.Path) == 0 || u.Path[len(u.Path)-1] != '/' {
+		u.Path += "/" // Append "/" to end before appending name
+	}
+	u.Path += name
+	return u
+}
+
+// ListContainersSegment returns a single segment of containers starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
+// After getting a segment, process it, and then call ListContainersSegment again (passing the
+// previously-returned Marker) to get the next segment. For more information, see
+// https://docs.microsoft.com/rest/api/storageservices/list-containers2.
+func (s ServiceURL) ListContainersSegment(ctx context.Context, marker Marker, o ListContainersSegmentOptions) (*ListContainersSegmentResponse, error) {
+	prefix, include, maxResults := o.pointers()
+	return s.client.ListContainersSegment(ctx, prefix, marker.Val, maxResults, include, nil, nil)
+}
+
+// ListContainersSegmentOptions defines options available when calling ListContainersSegment.
+type ListContainersSegmentOptions struct {
+	Detail     ListContainersDetail // No IncludeType header is produced if ""
+	Prefix     string               // No Prefix header is produced if ""
+	MaxResults int32                // 0 means unspecified
+	// TODO: update swagger to generate this type?
+}
+
+func (o *ListContainersSegmentOptions) pointers() (prefix *string, include []ListContainersIncludeType, maxResults *int32) {
+	if o.Prefix != "" {
+		prefix = &o.Prefix
+	}
+	if o.MaxResults != 0 {
+		maxResults = &o.MaxResults
+	}
+	details := o.Detail.string()
+	if len(details) > 0 {
+		include = []ListContainersIncludeType{ListContainersIncludeType(details)}
+	}
+	return
+}
+
+// ListContainersDetail indicates what additional information the service should return with each container.
+type ListContainersDetail struct {
+	// Tells the service whether to return metadata for each container.
+	Metadata bool
+
+	// Show containers that have been deleted when the soft-delete feature is enabled.
+	// Deleted bool
+}
+
+// string produces the Include query parameter's value.
+func (d *ListContainersDetail) string() string {
+	items := make([]string, 0, 2)
+	// NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails!
+	if d.Metadata {
+		items = append(items, string(ListContainersIncludeMetadata))
+	}
+	// if d.Deleted {
+	//	items = append(items, string(ListContainersIncludeDeleted))
+	// }
+	if len(items) > 0 {
+		return strings.Join(items, ",")
+	}
+	return string(ListContainersIncludeNone)
+}
+
+func (bsu ServiceURL) GetProperties(ctx context.Context) (*StorageServiceProperties, error) {
+	return bsu.client.GetProperties(ctx, nil, nil)
+}
+
+func (bsu ServiceURL) SetProperties(ctx context.Context, properties StorageServiceProperties) (*ServiceSetPropertiesResponse, error) {
+	return bsu.client.SetProperties(ctx, properties, nil, nil)
+}
+
+func (bsu ServiceURL) GetStatistics(ctx context.Context) (*StorageServiceStats, error) {
+	return bsu.client.GetStatistics(ctx, nil, nil)
+}
+
+// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression.
+// The filter expression searches across all containers within a storage account but can be scoped within the expression to a single container.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
+// e.g. "dog='germanshepherd' and penguin='emperorpenguin'"
+// To specify a container, e.g. "@container='containerName' and Name = 'C'"
+func (bsu ServiceURL) FindBlobsByTags(ctx context.Context, timeout *int32, requestID *string, where *string, marker Marker, maxResults *int32) (*FilterBlobSegment, error) {
+	return bsu.client.FilterBlobs(ctx, timeout, requestID, where, marker.Val, maxResults)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
new file mode 100644
index 00000000000..9fcbbc4092c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/user_delegation_credential.go
@@ -0,0 +1,38 @@
+package azblob
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+)
+
+// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's name and a user delegation key from it.
+func NewUserDelegationCredential(accountName string, key UserDelegationKey) UserDelegationCredential {
+	return UserDelegationCredential{
+		accountName: accountName,
+		accountKey:  key,
+	}
+}
+
+type UserDelegationCredential struct {
+	accountName string
+	accountKey  UserDelegationKey
+}
+
+// AccountName returns the Storage account's name.
+func (f UserDelegationCredential) AccountName() string {
+	return f.accountName
+}
+
+// ComputeHMACSHA256 computes a base64-encoded HMAC-SHA256 of the message using the base64-decoded user delegation key.
+func (f UserDelegationCredential) ComputeHMACSHA256(message string) (base64String string) {
+	bytes, _ := base64.StdEncoding.DecodeString(f.accountKey.Value)
+	h := hmac.New(sha256.New, bytes)
+	h.Write([]byte(message))
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+// getUDKParams is a private method returning the parameters needed by NewSASQueryParameters.
+func (f UserDelegationCredential) getUDKParams() *UserDelegationKey {
+	return &f.accountKey
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
new file mode 100644
index 00000000000..d89ccb09c14
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/version.go
@@ -0,0 +1,3 @@
+package azblob
+
+const serviceLibVersion = "0.15"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
new file mode 100644
index 00000000000..a81987d54a3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_anonymous.go
@@ -0,0 +1,55 @@
+package azblob
+
+import (
+	"context"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// Credential represents any credential type; it is used to create a credential policy Factory.
+type Credential interface {
+	pipeline.Factory
+	credentialMarker()
+}
+
+type credentialFunc pipeline.FactoryFunc
+
+func (f credentialFunc) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return f(next, po)
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (credentialFunc) credentialMarker() {}
+
+//////////////////////////////
+
+// NewAnonymousCredential creates an anonymous credential for use with HTTP(S) requests that read public resources
+// or for use with Shared Access Signatures (SAS).
+func NewAnonymousCredential() Credential {
+	return anonymousCredentialFactory
+}
+
+var anonymousCredentialFactory Credential = &anonymousCredentialPolicyFactory{} // Singleton
+
+// anonymousCredentialPolicyFactory is the credential's policy factory.
+type anonymousCredentialPolicyFactory struct {
+}
+
+// New creates a credential policy object.
+func (f *anonymousCredentialPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return &anonymousCredentialPolicy{next: next}
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*anonymousCredentialPolicyFactory) credentialMarker() {}
+
+// anonymousCredentialPolicy is the credential's policy object.
+type anonymousCredentialPolicy struct {
+	next pipeline.Policy
+}
+
+// Do implements the credential's policy interface.
+func (p anonymousCredentialPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+	// For anonymous credentials, this is effectively a no-op
+	return p.next.Do(ctx, request)
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
new file mode 100644
index 00000000000..cc59cbbed5d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_shared_key.go
@@ -0,0 +1,205 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"errors"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
+// storage account's name and either its primary or secondary key.
+func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
+	bytes, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return &SharedKeyCredential{}, err
+	}
+	return &SharedKeyCredential{accountName: accountName, accountKey: bytes}, nil
+}
+
+// SharedKeyCredential contains an account's name and its primary or secondary key.
+// It is immutable, making it shareable and goroutine-safe.
+type SharedKeyCredential struct {
+	// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
+	accountName string
+	accountKey  []byte
+}
+
+// AccountName returns the Storage account's name.
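+//
+// Editor's note: a typical construction sketch using this credential (an
+// illustration only; the account name and key are placeholders):
+//
+//	cred, err := azblob.NewSharedKeyCredential("myaccount", "<base64-account-key>")
+//	if err != nil {
+//		// handle the error
+//	}
+//	p := azblob.NewPipeline(cred, azblob.PipelineOptions{})
+//	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
+//	svcURL := azblob.NewServiceURL(*u, p)
+//	_ = svcURL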
+func (f SharedKeyCredential) AccountName() string { + return f.accountName +} + +func (f SharedKeyCredential) getAccountKey() []byte { + return f.accountKey +} + +// noop function to satisfy StorageAccountCredential interface +func (f SharedKeyCredential) getUDKParams() *UserDelegationKey { + return nil +} + +// New creates a credential policy object. +func (f *SharedKeyCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + // Add a x-ms-date header if it doesn't already exist + if d := request.Header.Get(headerXmsDate); d == "" { + request.Header[headerXmsDate] = []string{time.Now().UTC().Format(http.TimeFormat)} + } + stringToSign, err := f.buildStringToSign(request) + if err != nil { + return nil, err + } + signature := f.ComputeHMACSHA256(stringToSign) + authHeader := strings.Join([]string{"SharedKey ", f.accountName, ":", signature}, "") + request.Header[headerAuthorization] = []string{authHeader} + + response, err := next.Do(ctx, request) + if err != nil && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + po.Log(pipeline.LogError, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n") + } + return response, err + }) +} + +// credentialMarker is a package-internal method that exists just to satisfy the Credential interface. +func (*SharedKeyCredential) credentialMarker() {} + +// Constants ensuring that header names are correctly spelled and consistently cased. +const ( + headerAuthorization = "Authorization" + headerCacheControl = "Cache-Control" + headerContentEncoding = "Content-Encoding" + headerContentDisposition = "Content-Disposition" + headerContentLanguage = "Content-Language" + headerContentLength = "Content-Length" + headerContentMD5 = "Content-MD5" + headerContentType = "Content-Type" + headerDate = "Date" + headerIfMatch = "If-Match" + headerIfModifiedSince = "If-Modified-Since" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerUserAgent = "User-Agent" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" +) + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. 
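+//
+// Editor's note, for example (an illustration; `cred` is from NewSharedKeyCredential):
+//
+//	sig := cred.ComputeHMACSHA256("string-to-sign")
+//	_ = sig // base64-encoded HMAC-SHA256 over the decoded account key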
+func (f SharedKeyCredential) ComputeHMACSHA256(message string) (base64String string) { + h := hmac.New(sha256.New, f.accountKey) + h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func (f *SharedKeyCredential) buildStringToSign(request pipeline.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := request.Header + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := f.buildCanonicalizedResource(request.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + request.Method, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + "", // Empty date because x-ms-date is expected (as per web page above) + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (f *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(f.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", errors.New("parsing query parameters must succeed, otherwise there might be serious problems in the SDK/generated code") + } + + if len(params) > 0 { // There is at least 1 query parameter + paramNames := []string{} // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go new file mode 100644 index 00000000000..19d8ea41881 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_credential_token.go @@ -0,0 +1,146 @@ +package azblob + +import ( + "context" + "errors" + "sync/atomic" + + "runtime" + "sync" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// TokenRefresher represents a callback method that you write; this method is called periodically +// so you can refresh the token credential's value. +type TokenRefresher func(credential TokenCredential) time.Duration + +// TokenCredential represents a token credential (which is also a pipeline.Factory). +type TokenCredential interface { + Credential + Token() string + SetToken(newToken string) +} + +func tokenCredentialPointers(credential TokenCredential) *string { + if credential == nil { + return nil + } + + out := "Bearer " + credential.Token() + return &out +} + +// NewTokenCredential creates a token credential for use with role-based access control (RBAC) access to Azure Storage +// resources. You initialize the TokenCredential with an initial token value. If you pass a non-nil value for +// tokenRefresher, then the function you pass will be called immediately so it can refresh and change the +// TokenCredential's token value by calling SetToken. Your tokenRefresher function must return a time.Duration +// indicating how long the TokenCredential object should wait before calling your tokenRefresher function again. +// If your tokenRefresher callback fails to refresh the token, you can return a duration of 0 to stop your +// TokenCredential object from ever invoking tokenRefresher again. Also, one way to deal with failing to refresh a +// token is to cancel a context.Context object used by requests that have the TokenCredential object in their pipeline. 
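+//
+// Editor's note: a minimal refresher sketch (an illustration only; fetchToken is a
+// hypothetical caller-supplied helper returning a token, its lifetime, and an error):
+//
+//	tc := azblob.NewTokenCredential("initial-token", func(tc azblob.TokenCredential) time.Duration {
+//		newToken, expiresIn, err := fetchToken()
+//		if err != nil {
+//			return 0 // stop refreshing; callers should also cancel in-flight contexts
+//		}
+//		tc.SetToken(newToken)
+//		return expiresIn - time.Minute // refresh slightly before expiry
+//	})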
+func NewTokenCredential(initialToken string, tokenRefresher TokenRefresher) TokenCredential {
+	tc := &tokenCredential{}
+	tc.SetToken(initialToken) // We don't set it above to guarantee atomicity
+	if tokenRefresher == nil {
+		return tc // If no callback specified, return the simple tokenCredential
+	}
+
+	tcwr := &tokenCredentialWithRefresh{token: tc}
+	tcwr.token.startRefresh(tokenRefresher)
+	runtime.SetFinalizer(tcwr, func(deadTC *tokenCredentialWithRefresh) {
+		deadTC.token.stopRefresh()
+		deadTC.token = nil // Sanity (not really required)
+	})
+	return tcwr
+}
+
+// tokenCredentialWithRefresh is a wrapper over a token credential.
+// When this wrapper object gets GC'd, it stops the tokenCredential's timer
+// which allows the tokenCredential object to also be GC'd.
+type tokenCredentialWithRefresh struct {
+	token *tokenCredential
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*tokenCredentialWithRefresh) credentialMarker() {}
+
+// Token returns the current token value
+func (f *tokenCredentialWithRefresh) Token() string { return f.token.Token() }
+
+// SetToken changes the current token value
+func (f *tokenCredentialWithRefresh) SetToken(token string) { f.token.SetToken(token) }
+
+// New satisfies pipeline.Factory's New method creating a pipeline policy object.
+func (f *tokenCredentialWithRefresh) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return f.token.New(next, po)
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// tokenCredential is a pipeline.Factory; it is the credential's policy factory.
+type tokenCredential struct {
+	token atomic.Value
+
+	// The members below are only used if the user specified a tokenRefresher callback function.
+	timer          *time.Timer
+	tokenRefresher TokenRefresher
+	lock           sync.Mutex
+	stopped        bool
+}
+
+// credentialMarker is a package-internal method that exists just to satisfy the Credential interface.
+func (*tokenCredential) credentialMarker() {}
+
+// Token returns the current token value
+func (f *tokenCredential) Token() string { return f.token.Load().(string) }
+
+// SetToken changes the current token value
+func (f *tokenCredential) SetToken(token string) { f.token.Store(token) }
+
+// startRefresh calls refresh which immediately calls tokenRefresher
+// and then starts a timer to call tokenRefresher in the future.
+func (f *tokenCredential) startRefresh(tokenRefresher TokenRefresher) {
+	f.tokenRefresher = tokenRefresher
+	f.stopped = false // In case user calls StartRefresh, StopRefresh, & then StartRefresh again
+	f.refresh()
+}
+
+// refresh calls the user's tokenRefresher so they can refresh the token (by
+// calling SetToken) and then starts another timer (based on the returned duration)
+// in order to refresh the token again in the future.
+func (f *tokenCredential) refresh() {
+	d := f.tokenRefresher(f) // Invoke the user's refresh callback outside of the lock
+	if d > 0 { // If duration is 0 or negative, refresher wants to not be called again
+		f.lock.Lock()
+		if !f.stopped {
+			f.timer = time.AfterFunc(d, f.refresh)
+		}
+		f.lock.Unlock()
+	}
+}
+
+// stopRefresh stops any pending timer and sets stopped field to true to prevent
+// any new timer from starting.
+// NOTE: Stopping the timer allows the GC to destroy the tokenCredential object.
+func (f *tokenCredential) stopRefresh() {
+	f.lock.Lock()
+	f.stopped = true
+	if f.timer != nil {
+		f.timer.Stop()
+	}
+	f.lock.Unlock()
+}
+
+// New satisfies pipeline.Factory's New method creating a pipeline policy object.
+func (f *tokenCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
+	return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+		if request.URL.Scheme != "https" {
+			// HTTPS must be used, otherwise the tokens are at the risk of being exposed
+			return nil, errors.New("token credentials require a URL using the https protocol scheme")
+		}
+		request.Header[headerAuthorization] = []string{"Bearer " + f.Token()}
+		return next.Do(ctx, request)
+	})
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
new file mode 100644
index 00000000000..ba99255c140
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_pipeline.go
@@ -0,0 +1,45 @@
+package azblob
+
+import (
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// PipelineOptions is used to configure a request policy pipeline's retry policy and logging.
+type PipelineOptions struct {
+	// Log configures the pipeline's logging infrastructure indicating what information is logged and where.
+	Log pipeline.LogOptions
+
+	// Retry configures the built-in retry policy behavior.
+	Retry RetryOptions
+
+	// RequestLog configures the built-in request logging policy.
+	RequestLog RequestLogOptions
+
+	// Telemetry configures the built-in telemetry policy behavior.
+	Telemetry TelemetryOptions
+
+	// HTTPSender configures the sender of HTTP requests
+	HTTPSender pipeline.Factory
+}
+
+// NewPipeline creates a Pipeline using the specified credentials and options.
+func NewPipeline(c Credential, o PipelineOptions) pipeline.Pipeline {
+	// Closest to API goes first; closest to the wire goes last
+	f := []pipeline.Factory{
+		NewTelemetryPolicyFactory(o.Telemetry),
+		NewUniqueRequestIDPolicyFactory(),
+		NewRetryPolicyFactory(o.Retry),
+	}
+
+	if _, ok := c.(*anonymousCredentialPolicyFactory); !ok {
+		// For AnonymousCredential, we optimize out the policy factory since it doesn't do anything
+		// NOTE: The credential's policy factory must appear close to the wire so it can sign any
+		// changes made by other factories (like UniqueRequestIDPolicyFactory)
+		f = append(f, c)
+	}
+	f = append(f,
+		NewRequestLogPolicyFactory(o.RequestLog),
+		pipeline.MethodFactoryMarker()) // indicates at what stage in the pipeline the method factory is invoked
+
+	return pipeline.NewPipeline(f, pipeline.Options{HTTPSender: o.HTTPSender, Log: o.Log})
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
new file mode 100644
index 00000000000..ddc83cc787e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_request_log.go
@@ -0,0 +1,194 @@
+package azblob
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// RequestLogOptions configures the request logging policy's behavior.
+type RequestLogOptions struct {
+	// LogWarningIfTryOverThreshold logs a warning if a tried operation takes longer than the specified
+	// duration (-1=no logging; 0=default threshold).
+	LogWarningIfTryOverThreshold time.Duration
+
+	// SyslogDisabled is a flag to check if logging to Syslog/Windows-Event-Logger is enabled or not.
+	// By default we print to Syslog/Windows-Event-Logger.
+	// If SyslogDisabled is not provided explicitly, the default value will be false.
+	SyslogDisabled bool
+}
+
+func (o RequestLogOptions) defaults() RequestLogOptions {
+	if o.LogWarningIfTryOverThreshold == 0 {
+		// It would be good to relate this to https://azure.microsoft.com/en-us/support/legal/sla/storage/v1_2/
+		// But this monitors the time to get the HTTP response; NOT the time to download the response body.
+		o.LogWarningIfTryOverThreshold = 3 * time.Second // Default to 3 seconds
+	}
+	return o
+}
+
+// NewRequestLogPolicyFactory creates a RequestLogPolicyFactory object configured using the specified options.
+func NewRequestLogPolicyFactory(o RequestLogOptions) pipeline.Factory {
+	o = o.defaults() // Force defaults to be calculated
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		// These variables are per-policy; shared by multiple calls to Do
+		var try int32
+		operationStart := time.Now() // If this is the 1st try, record the operation start time
+		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+			try++ // The first try is #1 (not #0)
+
+			// Log the outgoing request as informational
+			if po.ShouldLog(pipeline.LogInfo) {
+				b := &bytes.Buffer{}
+				fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", try)
+				pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), nil, nil)
+				po.Log(pipeline.LogInfo, b.String())
+			}
+
+			// Set the time for this particular retry operation and then Do the operation.
+			tryStart := time.Now()
+			response, err = next.Do(ctx, request) // Make the request
+			tryEnd := time.Now()
+			tryDuration := tryEnd.Sub(tryStart)
+			opDuration := tryEnd.Sub(operationStart)
+
+			logLevel, forceLog := pipeline.LogInfo, false // Default logging information
+
+			// If the response took too long, we'll upgrade to warning.
+			if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold {
+				// Log a warning if the try duration exceeded the specified threshold
+				logLevel, forceLog = pipeline.LogWarning, !o.SyslogDisabled
+			}
+
+			var sc int
+			if err == nil { // We got a valid response from the service
+				sc = response.Response().StatusCode
+			} else { // We got an error, so we should inspect if we got a response
+				if se, ok := err.(StorageError); ok {
+					if r := se.Response(); r != nil {
+						sc = r.StatusCode
+					}
+				}
+			}
+
+			if sc == 0 || ((sc >= 400 && sc <= 499) && sc != http.StatusNotFound && sc != http.StatusConflict &&
+				sc != http.StatusPreconditionFailed && sc != http.StatusRequestedRangeNotSatisfiable) || (sc >= 500 && sc <= 599) {
+				logLevel, forceLog = pipeline.LogError, !o.SyslogDisabled // Promote to Error any 4xx (except those listed above) or any 5xx
+			} else {
+				// For other status codes, we leave the level as is.
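+				// For example, under the classification above a 404 (Not Found) or 412
+				// (Precondition Failed) response stays at LogInfo, since callers commonly
+				// probe for those, while a 403 or any 5xx is promoted to LogError.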
+ } + + if shouldLog := po.ShouldLog(logLevel); forceLog || shouldLog { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + slow := "" + if o.LogWarningIfTryOverThreshold > 0 && tryDuration > o.LogWarningIfTryOverThreshold { + slow = fmt.Sprintf("[SLOW >%v]", o.LogWarningIfTryOverThreshold) + } + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v%s, OpTime=%v) -- ", try, tryDuration, slow, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + if logLevel == pipeline.LogError { + fmt.Fprint(b, "RESPONSE STATUS CODE ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE SUCCESSFULLY RECEIVED\n") + } + } + + pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(request), response.Response(), err) + if logLevel <= pipeline.LogError { + b.Write(stack()) // For errors (or lower levels), we append the stack trace (an expensive operation) + } + msg := b.String() + + if forceLog { + pipeline.ForceLog(logLevel, msg) + } + if shouldLog { + po.Log(logLevel, msg) + } + } + return response, err + } + }) +} + +// RedactSigQueryParam redacts the 'sig' query parameter in URL's raw query to protect secret. +func RedactSigQueryParam(rawQuery string) (bool, string) { + rawQuery = strings.ToLower(rawQuery) // lowercase the string so we can look for ?sig= and &sig= + sigFound := strings.Contains(rawQuery, "?sig=") + if !sigFound { + sigFound = strings.Contains(rawQuery, "&sig=") + if !sigFound { + return sigFound, rawQuery // [?|&]sig= not found; return same rawQuery passed in (no memory allocation) + } + } + // [?|&]sig= found, redact its value + values, _ := url.ParseQuery(rawQuery) + for name := range values { + if strings.EqualFold(name, "sig") { + values[name] = []string{"REDACTED"} + } + } + return sigFound, values.Encode() +} + +func prepareRequestForLogging(request pipeline.Request) *http.Request { + req := request + if sigFound, rawQuery := RedactSigQueryParam(req.URL.RawQuery); sigFound { + // Make copy so we don't destroy the query parameters we actually need to send in the request + req = request.Copy() + req.Request.URL.RawQuery = rawQuery + } + + return prepareRequestForServiceLogging(req) +} + +func stack() []byte { + buf := make([]byte, 1024) + for { + n := runtime.Stack(buf, false) + if n < len(buf) { + return buf[:n] + } + buf = make([]byte, 2*len(buf)) + } +} + +/////////////////////////////////////////////////////////////////////////////////////// +// Redact phase useful for blob and file service only. For other services, +// this method can directly return request.Request. 
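+// For example, a copy-source URL such as
+//   https://account.blob.core.windows.net/src/blob?sv=...&sig=abc123
+// comes out of this redaction with "sig=REDACTED" in its query string.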
+///////////////////////////////////////////////////////////////////////////////////////
+func prepareRequestForServiceLogging(request pipeline.Request) *http.Request {
+	req := request
+	if exist, key := doesHeaderExistCaseInsensitive(req.Header, xMsCopySourceHeader); exist {
+		req = request.Copy()
+		url, err := url.Parse(req.Header.Get(key))
+		if err == nil {
+			if sigFound, rawQuery := RedactSigQueryParam(url.RawQuery); sigFound {
+				url.RawQuery = rawQuery
+				req.Header.Set(xMsCopySourceHeader, url.String())
+			}
+		}
+	}
+	return req.Request
+}
+
+const xMsCopySourceHeader = "x-ms-copy-source"
+
+func doesHeaderExistCaseInsensitive(header http.Header, key string) (bool, string) {
+	for keyInHeader := range header {
+		if strings.EqualFold(keyInHeader, key) {
+			return true, keyInHeader
+		}
+	}
+	return false, ""
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
new file mode 100644
index 00000000000..6286431a836
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_retry.go
@@ -0,0 +1,419 @@
+package azblob
+
+import (
+	"context"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// RetryPolicy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+type RetryPolicy int32
+
+const (
+	// RetryPolicyExponential tells the pipeline to use an exponential back-off retry policy
+	RetryPolicyExponential RetryPolicy = 0
+
+	// RetryPolicyFixed tells the pipeline to use a fixed back-off retry policy
+	RetryPolicyFixed RetryPolicy = 1
+)
+
+// RetryOptions configures the retry policy's behavior.
+type RetryOptions struct {
+	// Policy tells the pipeline what kind of retry policy to use. See the RetryPolicy* constants.
+	// A value of zero means that you accept our default policy.
+	Policy RetryPolicy
+
+	// MaxTries specifies the maximum number of times an operation will be tried before producing an error (0=default).
+	// A value of zero means that you accept our default policy. A value of 1 means 1 try and no retries.
+	MaxTries int32
+
+	// TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
+	// A value of zero means that you accept our default timeout. NOTE: When transferring large amounts
+	// of data, the default TryTimeout will probably not be sufficient. You should override this value
+	// based on the bandwidth available to the host machine and proximity to the Storage service. A good
+	// starting point may be something like (60 seconds per MB of anticipated-payload-size).
+	TryTimeout time.Duration
+
+	// RetryDelay specifies the amount of delay to use before retrying an operation (0=default).
+	// When RetryPolicy is specified as RetryPolicyExponential, the delay increases exponentially
+	// with each retry up to a maximum specified by MaxRetryDelay.
+	// If you specify 0, then you must also specify 0 for MaxRetryDelay.
+	// If you specify RetryDelay, then you must also specify MaxRetryDelay, and MaxRetryDelay should be
+	// equal to or greater than RetryDelay.
+	RetryDelay time.Duration
+
+	// MaxRetryDelay specifies the maximum delay allowed before retrying an operation (0=default).
+	// If you specify 0, then you must also specify 0 for RetryDelay.
+	MaxRetryDelay time.Duration
+
+	// RetryReadsFromSecondaryHost specifies whether the retry policy should retry a read operation against another host.
+	// If RetryReadsFromSecondaryHost is "" (the default) then operations are not retried against another host.
+	// NOTE: Before setting this field, make sure you understand the issues around reading stale & potentially-inconsistent
+	// data at this webpage: https://docs.microsoft.com/en-us/azure/storage/common/storage-designing-ha-apps-with-ragrs
+	RetryReadsFromSecondaryHost string // Comment this out for non-Blob SDKs
+}
+
+func (o RetryOptions) retryReadsFromSecondaryHost() string {
+	return o.RetryReadsFromSecondaryHost // This is for the Blob SDK only
+	//return "" // This is for non-blob SDKs
+}
+
+func (o RetryOptions) defaults() RetryOptions {
+	// We assume the following:
+	// 1. o.Policy should either be RetryPolicyExponential or RetryPolicyFixed
+	// 2. o.MaxTries >= 0
+	// 3. o.TryTimeout, o.RetryDelay, and o.MaxRetryDelay >=0
+	// 4. o.RetryDelay <= o.MaxRetryDelay
+	// 5. Both o.RetryDelay and o.MaxRetryDelay must be 0 or neither can be 0
+
+	IfDefault := func(current *time.Duration, desired time.Duration) {
+		if *current == time.Duration(0) {
+			*current = desired
+		}
+	}
+
+	// Set defaults if unspecified
+	if o.MaxTries == 0 {
+		o.MaxTries = 4
+	}
+	switch o.Policy {
+	case RetryPolicyExponential:
+		IfDefault(&o.TryTimeout, 1*time.Minute)
+		IfDefault(&o.RetryDelay, 4*time.Second)
+		IfDefault(&o.MaxRetryDelay, 120*time.Second)
+
+	case RetryPolicyFixed:
+		IfDefault(&o.TryTimeout, 1*time.Minute)
+		IfDefault(&o.RetryDelay, 30*time.Second)
+		IfDefault(&o.MaxRetryDelay, 120*time.Second)
+	}
+	return o
+}
+
+func (o RetryOptions) calcDelay(try int32) time.Duration { // try is >=1; never 0
+	pow := func(number int64, exponent int32) int64 { // pow is a nested helper function
+		var result int64 = 1
+		for n := int32(0); n < exponent; n++ {
+			result *= number
+		}
+		return result
+	}
+
+	delay := time.Duration(0)
+	switch o.Policy {
+	case RetryPolicyExponential:
+		delay = time.Duration(pow(2, try-1)-1) * o.RetryDelay
+
+	case RetryPolicyFixed:
+		if try > 1 { // Any try after the 1st uses the fixed delay
+			delay = o.RetryDelay
+		}
+	}
+
+	// Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3)
+	// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
+	delay = time.Duration(float32(delay) * (rand.Float32()/2 + 0.8)) // NOTE: We want math/rand; not crypto/rand
+	if delay > o.MaxRetryDelay {
+		delay = o.MaxRetryDelay
+	}
+	return delay
+}
+
+// NewRetryPolicyFactory creates a RetryPolicyFactory object configured using the specified options.
+func NewRetryPolicyFactory(o RetryOptions) pipeline.Factory {
+	o = o.defaults() // Force defaults to be calculated
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		return func(ctx context.Context, request pipeline.Request) (response pipeline.Response, err error) {
+			// Before each try, we'll select either the primary or secondary URL.
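+			// Worked example with the exponential defaults above (RetryDelay=4s,
+			// MaxRetryDelay=120s): calcDelay yields 0s for try 1, 4s for try 2, 12s for
+			// try 3, and 28s for try 4, each then scaled by the [0.8, 1.3) jitter factor.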
+			primaryTry := int32(0) // This indicates how many tries we've attempted against the primary DC
+
+			// We only consider retrying against a secondary if we have a read request (GET/HEAD) AND this policy has a Secondary URL it can use
+			considerSecondary := (request.Method == http.MethodGet || request.Method == http.MethodHead) && o.retryReadsFromSecondaryHost() != ""
+
+			// Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.3)
+			// When to retry: connection failure or temporary/timeout. NOTE: StorageError considers HTTP 500/503 as temporary & is therefore retryable
+			// If using a secondary:
+			//    Even tries go against primary; odd tries go against the secondary
+			//    For a primary, wait ((2 ^ primaryTries - 1) * delay * random(0.8, 1.3))
+			//    If secondary gets a 404, don't fail, retry but future retries are only against the primary
+			//    When retrying against a secondary, ignore the retry count and wait (1 second * random(0.8, 1.3))
+			for try := int32(1); try <= o.MaxTries; try++ {
+				logf("\n=====> Try=%d\n", try)
+
+				// Determine which endpoint to try. It's primary if there is no secondary or if it is an odd # attempt.
+				tryingPrimary := !considerSecondary || (try%2 == 1)
+				// Select the correct host and delay
+				if tryingPrimary {
+					primaryTry++
+					delay := o.calcDelay(primaryTry)
+					logf("Primary try=%d, Delay=%v\n", primaryTry, delay)
+					time.Sleep(delay) // The 1st try returns 0 delay
+				} else {
+					// For casts and rounding - be careful, as per https://github.com/golang/go/issues/20757
+					delay := time.Duration(float32(time.Second) * (rand.Float32()/2 + 0.8))
+					logf("Secondary try=%d, Delay=%v\n", try-primaryTry, delay)
+					time.Sleep(delay) // Delay with some jitter before trying secondary
+				}
+
+				// Clone the original request to ensure that each try starts with the original (unmutated) request.
+				requestCopy := request.Copy()
+
+				// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
+				// the stream may not be at offset 0 when we first get it and we want the same behavior for the
+				// 1st try as for additional tries.
+				err = requestCopy.RewindBody()
+				if err != nil {
+					return nil, errors.New("we must be able to seek on the Body Stream, otherwise retries would cause data corruption")
+				}
+
+				if !tryingPrimary {
+					requestCopy.URL.Host = o.retryReadsFromSecondaryHost()
+					requestCopy.Host = o.retryReadsFromSecondaryHost()
+				}
+
+				// Set the server-side timeout query parameter "timeout=[seconds]"
+				timeout := o.TryTimeout // Max time per try
+				if deadline, ok := ctx.Deadline(); ok { // If user's ctx has a deadline, make the timeout the smaller of the two
+					t := deadline.Sub(time.Now()) // Duration from now until user's ctx reaches its deadline
+					logf("MaxTryTimeout=%d secs, TimeTilDeadline=%d sec\n", int32(timeout.Seconds()), int32(t.Seconds()))
+					if t < timeout {
+						timeout = t
+					}
+					if timeout < 0 {
+						timeout = 0 // If timeout ever goes negative, set it to zero; this can happen while debugging
+					}
+					logf("TryTimeout adjusted to=%d sec\n", int32(timeout.Seconds()))
+				}
+				q := requestCopy.Request.URL.Query()
+				q.Set("timeout", strconv.Itoa(int(timeout.Seconds()+1))) // Add 1 to "round up"
+				requestCopy.Request.URL.RawQuery = q.Encode()
+				logf("Url=%s\n", requestCopy.Request.URL.String())
+
+				// Set the time for this particular retry operation and then Do the operation.
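+				// The per-try context created below bounds a single attempt; the caller's
+				// ctx still bounds the overall operation. For example, with TryTimeout=1m
+				// and a caller deadline 30s away, the smaller 30s value computed above is
+				// what gets applied to this try.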
+				tryCtx, tryCancel := context.WithTimeout(ctx, timeout)
+				//requestCopy.Body = &deadlineExceededReadCloser{r: requestCopy.Request.Body}
+				response, err = next.Do(tryCtx, requestCopy) // Make the request
+				/*err = improveDeadlineExceeded(err)
+				if err == nil {
+					response.Response().Body = &deadlineExceededReadCloser{r: response.Response().Body}
+				}*/
+				logf("Err=%v, response=%v\n", err, response)
+
+				action := "" // This MUST get changed within the switch code below
+				switch {
+				case ctx.Err() != nil:
+					action = "NoRetry: Op timeout"
+				case !tryingPrimary && response != nil && response.Response() != nil && response.Response().StatusCode == http.StatusNotFound:
+					// If attempt was against the secondary & it returned a StatusNotFound (404), then
+					// the resource was not found. This may be due to replication delay. So, in this
+					// case, we'll never try the secondary again for this operation.
+					considerSecondary = false
+					action = "Retry: Secondary URL returned 404"
+				case err != nil:
+					// NOTE: Protocol Responder returns non-nil if REST API returns invalid status code for the invoked operation.
+					// Use ServiceCode to verify if the error is related to the storage service side;
+					// ServiceCode is set only when an error related to the storage service happened.
+					if stErr, ok := err.(StorageError); ok {
+						if stErr.Temporary() {
+							action = "Retry: StorageError with error service code and Temporary()"
+						} else if stErr.Response() != nil && isSuccessStatusCode(stErr.Response()) { // TODO: This is a temporary workaround; remove it after the protocol layer fixes the issue that net.Error is wrapped as StorageError
+							action = "Retry: StorageError with success status code"
+						} else {
+							action = "NoRetry: StorageError not Temporary() and without retriable status code"
+						}
+					} else if netErr, ok := err.(net.Error); ok {
+						// Use a non-retriable net.Error list rather than a retriable list.
+						// There are errors without a Temporary() implementation that still need
+						// to be retried, like 'connection reset by peer' or 'transport connection broken'.
+						// So the SDK retries in most cases, unless the error should definitely not be retried.
+						if !isNotRetriable(netErr) {
+							action = "Retry: net.Error and not in the non-retriable list"
+						} else {
+							action = "NoRetry: net.Error and in the non-retriable list"
+						}
+					} else if err == io.ErrUnexpectedEOF {
+						action = "Retry: unexpected EOF"
+					} else {
+						action = "NoRetry: unrecognized error"
+					}
+				default:
+					action = "NoRetry: successful HTTP request" // no error
+				}
+
+				logf("Action=%s\n", action)
+				// fmt.Println(action + "\n") // This is where we could log the retry operation; action is why we're retrying
+				if action[0] != 'R' { // Retry only if action starts with 'R'
+					if err != nil {
+						tryCancel() // If we're returning an error, cancel this current/last per-retry timeout context
+					} else {
+						// We wrap the last per-try context in a body and overwrite the Response's Body field with our wrapper.
+						// So, when the user closes the Body, then our per-try context gets closed too.
+						// Another option is for the last Policy to do this wrapping for a per-retry context (not for the user's context)
+						if response == nil || response.Response() == nil {
+							// We treat response or response.Response() being nil as an invalid state,
+							// as for the client, the response should not be nil if the request was sent and the operation executed successfully.
+							// Another option is to execute the cancel function when response or response.Response() is nil,
+							// as in this case the current per-try context has nothing left to do.
+							return nil, errors.New("invalid state, response should not be nil when the operation is executed successfully")
+						}
+						if response.Response().Body == http.NoBody {
+							// If the response is empty the caller isn't obligated to call Close
+							tryCancel()
+						} else {
+							response.Response().Body = &contextCancelReadCloser{cf: tryCancel, body: response.Response().Body}
+						}
+					}
+					break // Don't retry
+				}
+				if response != nil && response.Response() != nil && response.Response().Body != nil {
+					// If we're going to retry and we got a previous response, then flush its body to avoid leaking its TCP connection
+					body := response.Response().Body
+					io.Copy(ioutil.Discard, body)
+					body.Close()
+				}
+				// If retrying, cancel the current per-try timeout context
+				tryCancel()
+			}
+			return response, err // Not retryable or too many retries; return the last response/error
+		}
+	})
+}
+
+// contextCancelReadCloser helps to invoke context's cancelFunc properly when the ReadCloser is closed.
+type contextCancelReadCloser struct {
+	cf   context.CancelFunc
+	body io.ReadCloser
+}
+
+func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
+	return rc.body.Read(p)
+}
+
+func (rc *contextCancelReadCloser) Close() error {
+	err := rc.body.Close()
+	if rc.cf != nil {
+		rc.cf()
+	}
+	return err
+}
+
+// isNotRetriable checks if the provided net.Error isn't retriable.
+func isNotRetriable(errToParse net.Error) bool {
+	// No error, so this is NOT retriable.
+	if errToParse == nil {
+		return true
+	}
+
+	// The error is either temporary or a timeout so it IS retriable (not not retriable).
+	if errToParse.Temporary() || errToParse.Timeout() {
+		return false
+	}
+
+	genericErr := error(errToParse)
+
+	// From here on, all the errors are neither Temporary() nor Timeout().
+	switch err := errToParse.(type) {
+	case *net.OpError:
+		// The net.Error is also a net.OpError but the inner error is nil, so this is not retriable.
+		if err.Err == nil {
+			return true
+		}
+		genericErr = err.Err
+	}
+
+	switch genericErr.(type) {
+	case *net.AddrError, net.UnknownNetworkError, *net.DNSError, net.InvalidAddrError, *net.ParseError, *net.DNSConfigError:
+		// If the error is one of the ones listed, then it is NOT retriable.
+		return true
+	}
+
+	// If it's an invalid header field name/value error thrown by the http module, then it is NOT retriable.
+	// This could happen when metadata's key or value is invalid. (RoundTrip in transport.go)
+	if strings.Contains(genericErr.Error(), "invalid header field") {
+		return true
+	}
+
+	// Assume the error is retriable.
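+	// (For example, a plain "connection reset by peer" *net.OpError reaches this
+	// point and is therefore retried by the policy above.)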
+ return false +} + +var successStatusCodes = []int{http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent, http.StatusPartialContent} + +func isSuccessStatusCode(resp *http.Response) bool { + if resp == nil { + return false + } + for _, i := range successStatusCodes { + if i == resp.StatusCode { + return true + } + } + return false +} + +// According to https://github.com/golang/go/wiki/CompilerOptimizations, the compiler will inline this method and hopefully optimize all calls to it away +var logf = func(format string, a ...interface{}) {} + +// Use this version to see the retry method's code path (import "fmt") +//var logf = fmt.Printf + +/* +type deadlineExceededReadCloser struct { + r io.ReadCloser +} + +func (r *deadlineExceededReadCloser) Read(p []byte) (int, error) { + n, err := 0, io.EOF + if r.r != nil { + n, err = r.r.Read(p) + } + return n, improveDeadlineExceeded(err) +} +func (r *deadlineExceededReadCloser) Seek(offset int64, whence int) (int64, error) { + // For an HTTP request, the ReadCloser MUST also implement seek + // For an HTTP response, Seek MUST not be called (or this will panic) + o, err := r.r.(io.Seeker).Seek(offset, whence) + return o, improveDeadlineExceeded(err) +} +func (r *deadlineExceededReadCloser) Close() error { + if c, ok := r.r.(io.Closer); ok { + c.Close() + } + return nil +} + +// timeoutError is the internal struct that implements our richer timeout error. +type deadlineExceeded struct { + responseError +} + +var _ net.Error = (*deadlineExceeded)(nil) // Ensure deadlineExceeded implements the net.Error interface at compile time + +// improveDeadlineExceeded creates a timeoutError object that implements the error interface IF cause is a context.DeadlineExceeded error. +func improveDeadlineExceeded(cause error) error { + // If cause is not DeadlineExceeded, return the same error passed in. + if cause != context.DeadlineExceeded { + return cause + } + // Else, convert DeadlineExceeded to our timeoutError which gives a richer string message + return &deadlineExceeded{ + responseError: responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + }, + } +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *deadlineExceeded) Error() string { + return e.ErrorNode.Error("context deadline exceeded; when creating a pipeline, consider increasing RetryOptions' TryTimeout field") +} +*/ diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go new file mode 100644 index 00000000000..608e1051ca0 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_telemetry.go @@ -0,0 +1,51 @@ +package azblob + +import ( + "bytes" + "context" + "fmt" + "os" + "runtime" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // Value is a string prepended to each request's User-Agent and sent to the service. + // The service records the user-agent in logs for diagnostics and tracking of client requests. + Value string +} + +// NewTelemetryPolicyFactory creates a factory that can create telemetry policy objects +// which add telemetry information to outgoing HTTP requests. 
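+//
+// As an illustration (the value is chosen arbitrarily), TelemetryOptions{Value: "myapp/1.0"}
+// produces a User-Agent like "myapp/1.0 Azure-Storage/<lib version> (<go version>; <OS>)".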
+func NewTelemetryPolicyFactory(o TelemetryOptions) pipeline.Factory {
+	b := &bytes.Buffer{}
+	b.WriteString(o.Value)
+	if b.Len() > 0 {
+		b.WriteRune(' ')
+	}
+	fmt.Fprintf(b, "Azure-Storage/%s %s", serviceLibVersion, platformInfo)
+	telemetryValue := b.String()
+
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+			request.Header.Set("User-Agent", telemetryValue)
+			return next.Do(ctx, request)
+		}
+	})
+}
+
+// NOTE: the ONLY function that should write to this variable is this func
+var platformInfo = func() string {
+	// Examples: Azure-Storage/version (runtime; os type and version)
+	//           Azure-Storage/1.4.0 (NODE-VERSION v4.5.0; Windows_NT 10.0.14393)
+	operatingSystem := runtime.GOOS // Default OS string
+	switch operatingSystem {
+	case "windows":
+		operatingSystem = os.Getenv("OS") // Get more specific OS information
+	case "linux": // accept default OS info
+	case "freebsd": // accept default OS info
+	}
+	return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
+}()
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
new file mode 100644
index 00000000000..1f7817d2df6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_policy_unique_request_id.go
@@ -0,0 +1,36 @@
+package azblob
+
+import (
+	"context"
+	"errors"
+
+	"github.com/Azure/azure-pipeline-go/pipeline"
+)
+
+// NewUniqueRequestIDPolicyFactory creates a UniqueRequestIDPolicyFactory object
+// that sets the request's x-ms-client-request-id header if it doesn't already exist.
+func NewUniqueRequestIDPolicyFactory() pipeline.Factory {
+	return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc {
+		// This is Policy's Do method:
+		return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
+			id := request.Header.Get(xMsClientRequestID)
+			if id == "" { // Add a unique request ID if the caller didn't specify one already
+				id = newUUID().String()
+				request.Header.Set(xMsClientRequestID, id)
+			}
+
+			resp, err := next.Do(ctx, request)
+
+			if err == nil && resp != nil {
+				crId := resp.Response().Header.Get(xMsClientRequestID)
+				if crId != "" && crId != id {
+					err = errors.New("client Request ID from request and response does not match")
+				}
+			}
+
+			return resp, err
+		}
+	})
+}
+
+const xMsClientRequestID = "x-ms-client-request-id"
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
new file mode 100644
index 00000000000..ad38f597ed2
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_retry_reader.go
@@ -0,0 +1,186 @@
+package azblob
+
+import (
+	"context"
+	"io"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+const CountToEnd = 0
+
+// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
+type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error)
+
+// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
+// that should be used to make an HTTP GET request.
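+// For example, Offset=512 with Count=1024 describes the byte range 512-1535
+// (a Range header of "bytes=512-1535") for the next GET.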
+type HTTPGetterInfo struct {
+	// Offset specifies the start offset that should be used when
+	// creating the HTTP GET request's Range header
+	Offset int64
+
+	// Count specifies the count of bytes that should be used to calculate
+	// the end offset when creating the HTTP GET request's Range header
+	Count int64
+
+	// ETag specifies the resource's etag that should be used when creating
+	// the HTTP GET request's If-Match header
+	ETag ETag
+}
+
+// FailedReadNotifier is a function type that represents the notification function called when a read fails
+type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool)
+
+// RetryReaderOptions contains properties which can help to decide when to do retry.
+type RetryReaderOptions struct {
+	// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made
+	// while reading from a RetryReader. A value of zero means that no additional HTTP
+	// GET requests will be made.
+	MaxRetryRequests   int
+	doInjectError      bool
+	doInjectErrorRound int
+	injectedError      error
+
+	// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging.
+	NotifyFailedRead FailedReadNotifier
+
+	// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
+	// retryReader has the following special behaviour: closing the response body before it is all read is treated as a
+	// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the
+	// read is too slow, the caller may want to force a retry in the hope that the retry will be quicker). If
+	// TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead
+	// treated as a fatal (non-retryable) error.
+	// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
+	// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
+	// which will be retried.
+	TreatEarlyCloseAsError bool
+
+	ClientProvidedKeyOptions ClientProvidedKeyOptions
+}
+
+// retryReader implements io.ReadCloser methods.
+// retryReader tries to read from the response, and if a retriable network error is
+// returned while reading, it retries (per the retry reader options) by executing the
+// user-defined getter to obtain a new response, and continues the overall reading
+// process from the new response.
+type retryReader struct {
+	ctx             context.Context
+	info            HTTPGetterInfo
+	countWasBounded bool
+	o               RetryReaderOptions
+	getter          HTTPGetter
+
+	// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
+	responseMu *sync.Mutex
+	response   *http.Response
+}
+
+// NewRetryReader creates a retry reader.
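+//
+// A hedged usage sketch (doRangeGet is a hypothetical helper that re-issues the GET
+// honoring i.Offset, i.Count, and i.ETag):
+//
+//	rr := NewRetryReader(ctx, initialResp,
+//		HTTPGetterInfo{Offset: 0, Count: CountToEnd, ETag: etag},
+//		RetryReaderOptions{MaxRetryRequests: 3},
+//		func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) {
+//			return doRangeGet(ctx, i)
+//		})
+//	defer rr.Close()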
+func NewRetryReader(ctx context.Context, initialResponse *http.Response,
+	info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser {
+	return &retryReader{
+		ctx:             ctx,
+		getter:          getter,
+		info:            info,
+		countWasBounded: info.Count != CountToEnd,
+		response:        initialResponse,
+		responseMu:      &sync.Mutex{},
+		o:               o}
+}
+
+func (s *retryReader) setResponse(r *http.Response) {
+	s.responseMu.Lock()
+	defer s.responseMu.Unlock()
+	s.response = r
+}
+
+func (s *retryReader) Read(p []byte) (n int, err error) {
+	for try := 0; ; try++ {
+		// fmt.Println(try) // Uncomment for debugging.
+		if s.countWasBounded && s.info.Count == CountToEnd {
+			// User specified an original count and the remaining bytes are 0, return 0, EOF
+			return 0, io.EOF
+		}
+
+		s.responseMu.Lock()
+		resp := s.response
+		s.responseMu.Unlock()
+		if resp == nil { // We don't have a response stream to read from, try to get one.
+			newResponse, err := s.getter(s.ctx, s.info)
+			if err != nil {
+				return 0, err
+			}
+			// Successful GET; this is the network stream we'll read from.
+			s.setResponse(newResponse)
+			resp = newResponse
+		}
+		n, err := resp.Body.Read(p) // Read from the stream (this will return a non-nil err if Close is called, from another goroutine, while it is running)
+
+		// Injection mechanism for testing.
+		if s.o.doInjectError && try == s.o.doInjectErrorRound {
+			if s.o.injectedError != nil {
+				err = s.o.injectedError
+			} else {
+				err = &net.DNSError{IsTemporary: true}
+			}
+		}
+
+		// We either successfully read data or hit EOF.
+		if err == nil || err == io.EOF {
+			s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
+			if s.info.Count != CountToEnd {
+				s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
+			}
+			return n, err // Return the result to the caller
+		}
+		s.Close()          // Error, close stream
+		s.setResponse(nil) // Our stream is no longer good
+
+		// Check the retry count and error code, and decide whether to retry.
+		retriesExhausted := try >= s.o.MaxRetryRequests
+		_, isNetError := err.(net.Error)
+		isUnexpectedEOF := err == io.ErrUnexpectedEOF
+		willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
+
+		// Notify, for logging purposes, of any failures
+		if s.o.NotifyFailedRead != nil {
+			failureCount := try + 1 // because try is zero-based
+			s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry)
+		}
+
+		if willRetry {
+			continue
+			// Loop around and try to get and read from new stream.
+		}
+		return n, err // Not retryable, or retries exhausted, so just return
+	}
+}
+
+// By default, we allow an early Close, from another concurrent goroutine, to be used to force a retry.
+// Is this safe, to close early from another goroutine? Early close ultimately ends up calling
+// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors"
+// which is exactly the behaviour we want.
+// NOTE: if the caller has forced an early Close from a separate goroutine (separate from the Read)
+// then there are two different types of error that may happen - either the one we check for here,
+// or a net.Error (due to closure of the connection). Which one happens depends on timing. We only need this routine
+// to check for one, since the other is a net.Error, which our main Read retry loop is already handling.
+func (s *retryReader) wasRetryableEarlyClose(err error) bool { + if s.o.TreatEarlyCloseAsError { + return false // user wants all early closes to be errors, and so not retryable + } + // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text + return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) +} + +const ReadOnClosedBodyMessage = "read on closed response body" + +func (s *retryReader) Close() error { + s.responseMu.Lock() + defer s.responseMu.Unlock() + if s.response != nil && s.response.Body != nil { + return s.response.Body.Close() + } + return nil +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go new file mode 100644 index 00000000000..6b84d95e3ed --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_account.go @@ -0,0 +1,244 @@ +package azblob + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" +) + +// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSASSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to SASVersion + Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() + IPRange IPRange `param:"sip"` + Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() + ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() +} + +// NewSASQueryParameters uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. 
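+//
+// A hedged usage sketch (sharedKeyCredential construction elided):
+//
+//	sasQueryParams, err := AccountSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
+//		Permissions:   AccountSASPermissions{Read: true, List: true}.String(),
+//		Services:      AccountSASServices{Blob: true}.String(),
+//		ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
+//	}.NewSASQueryParameters(sharedKeyCredential)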
+func (v AccountSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
+	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
+	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
+		return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Services, or ResourceTypes")
+	}
+	if v.Version == "" {
+		v.Version = SASVersion
+	}
+	perms := &AccountSASPermissions{}
+	if err := perms.Parse(v.Permissions); err != nil {
+		return SASQueryParameters{}, err
+	}
+	v.Permissions = perms.String()
+
+	startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{})
+
+	stringToSign := strings.Join([]string{
+		sharedKeyCredential.AccountName(),
+		v.Permissions,
+		v.Services,
+		v.ResourceTypes,
+		startTime,
+		expiryTime,
+		v.IPRange.String(),
+		string(v.Protocol),
+		v.Version,
+		""}, // That's right, the account SAS requires a terminating extra newline
+		"\n")
+
+	signature := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
+	p := SASQueryParameters{
+		// Common SAS parameters
+		version:     v.Version,
+		protocol:    v.Protocol,
+		startTime:   v.StartTime,
+		expiryTime:  v.ExpiryTime,
+		permissions: v.Permissions,
+		ipRange:     v.IPRange,
+
+		// Account-specific SAS parameters
+		services:      v.Services,
+		resourceTypes: v.ResourceTypes,
+
+		// Calculated SAS signature
+		signature: signature,
+	}
+
+	return p, nil
+}
+
+// The AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
+type AccountSASPermissions struct {
+	Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags, PermanentDelete, Immutability bool
+}
+
+// String produces the SAS permissions string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Permissions field.
+func (p AccountSASPermissions) String() string {
+	var buffer bytes.Buffer
+	if p.Read {
+		buffer.WriteRune('r')
+	}
+	if p.Write {
+		buffer.WriteRune('w')
+	}
+	if p.Delete {
+		buffer.WriteRune('d')
+	}
+	if p.DeletePreviousVersion {
+		buffer.WriteRune('x')
+	}
+	if p.List {
+		buffer.WriteRune('l')
+	}
+	if p.Add {
+		buffer.WriteRune('a')
+	}
+	if p.Create {
+		buffer.WriteRune('c')
+	}
+	if p.Update {
+		buffer.WriteRune('u')
+	}
+	if p.Process {
+		buffer.WriteRune('p')
+	}
+	if p.Tag {
+		buffer.WriteRune('t')
+	}
+	if p.FilterByTags {
+		buffer.WriteRune('f')
+	}
+	if p.PermanentDelete {
+		buffer.WriteRune('y')
+	}
+	if p.Immutability {
+		buffer.WriteRune('i')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASPermissions' fields from a string.
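+// For example, Parse("rwl") sets Read, Write, and List to true and leaves every
+// other permission false.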
+func (p *AccountSASPermissions) Parse(s string) error {
+	*p = AccountSASPermissions{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'r':
+			p.Read = true
+		case 'w':
+			p.Write = true
+		case 'd':
+			p.Delete = true
+		case 'l':
+			p.List = true
+		case 'a':
+			p.Add = true
+		case 'c':
+			p.Create = true
+		case 'u':
+			p.Update = true
+		case 'p':
+			p.Process = true
+		case 'x':
+			p.DeletePreviousVersion = true // matches the 'x' written by String() above
+		case 't':
+			p.Tag = true
+		case 'f':
+			p.FilterByTags = true
+		case 'y':
+			p.PermanentDelete = true
+		case 'i':
+			p.Immutability = true
+		default:
+			return fmt.Errorf("invalid permission character: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// The AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
+type AccountSASServices struct {
+	Blob, Queue, File bool
+}
+
+// String produces the SAS services string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's Services field.
+func (s AccountSASServices) String() string {
+	var buffer bytes.Buffer
+	if s.Blob {
+		buffer.WriteRune('b')
+	}
+	if s.Queue {
+		buffer.WriteRune('q')
+	}
+	if s.File {
+		buffer.WriteRune('f')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASServices' fields from a string.
+func (a *AccountSASServices) Parse(s string) error {
+	*a = AccountSASServices{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 'b':
+			a.Blob = true
+		case 'q':
+			a.Queue = true
+		case 'f':
+			a.File = true
+		default:
+			return fmt.Errorf("invalid service character: '%v'", r)
+		}
+	}
+	return nil
+}
+
+// The AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
+type AccountSASResourceTypes struct {
+	Service, Container, Object bool
+}
+
+// String produces the SAS resource types string for an Azure Storage account.
+// Call this method to set AccountSASSignatureValues's ResourceTypes field.
+func (rt AccountSASResourceTypes) String() string {
+	var buffer bytes.Buffer
+	if rt.Service {
+		buffer.WriteRune('s')
+	}
+	if rt.Container {
+		buffer.WriteRune('c')
+	}
+	if rt.Object {
+		buffer.WriteRune('o')
+	}
+	return buffer.String()
+}
+
+// Parse initializes the AccountSASResourceTypes' fields from a string.
+func (rt *AccountSASResourceTypes) Parse(s string) error {
+	*rt = AccountSASResourceTypes{} // Clear out the flags
+	for _, r := range s {
+		switch r {
+		case 's':
+			rt.Service = true
+		case 'c':
+			rt.Container = true
+		case 'o':
+			rt.Object = true
+		default:
+			return fmt.Errorf("invalid resource type: '%v'", r)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
new file mode 100644
index 00000000000..bef67624caf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_sas_query_params.go
@@ -0,0 +1,393 @@
+package azblob
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// SASVersion indicates the SAS version.
+const SASVersion = ServiceVersion
+
+type SASProtocol string
+
+const (
+	// SASProtocolHTTPS can be specified for a SAS protocol
+	SASProtocolHTTPS SASProtocol = "https"
+
+	// SASProtocolHTTPSandHTTP can be specified for a SAS protocol
+	SASProtocolHTTPSandHTTP SASProtocol = "https,http"
+)
+
+// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a
+// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero().
+func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatSASTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatSASTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
+var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+
+// formatSASTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatSASTimeWithDefaultFormat(t *time.Time) string {
+	return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatSASTime formats time with the given format, using ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" by default.
+func formatSASTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseSASTimeString tries to parse a SAS time string against each of the supported formats.
+func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range SASTimeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// A SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling newSASQueryParameters(). You add the components
+// to a query parameter map by calling addToValues().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+//
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type SASQueryParameters struct {
+	// All members are immutable or values so copies of this struct are goroutine-safe.
+ version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol SASProtocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOid string `param:"skoid"` + signedTid string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + preauthorizedAgentObjectId string `param:"saoid"` + agentObjectId string `param:"suoid"` + correlationId string `param:"scid"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +func (p *SASQueryParameters) PreauthorizedAgentObjectId() string { + return p.preauthorizedAgentObjectId +} + +func (p *SASQueryParameters) AgentObjectId() string { + return p.agentObjectId +} + +func (p *SASQueryParameters) SignedCorrelationId() string { + return p.correlationId +} + +func (p *SASQueryParameters) SignedTid() string { + return p.signedTid +} + +func (p *SASQueryParameters) SignedStart() time.Time { + return p.signedStart +} + +func (p *SASQueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +func (p *SASQueryParameters) SignedService() string { + return p.signedService +} + +func (p *SASQueryParameters) SignedVersion() string { + return p.signedVersion +} + +func (p *SASQueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +func (p *SASQueryParameters) Version() string { + return p.version +} + +func (p *SASQueryParameters) Services() string { + return p.services +} +func (p *SASQueryParameters) ResourceTypes() string { + return p.resourceTypes +} +func (p *SASQueryParameters) Protocol() SASProtocol { + return p.protocol +} +func (p *SASQueryParameters) StartTime() time.Time { + return p.startTime +} +func (p *SASQueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +func (p *SASQueryParameters) IPRange() IPRange { + return p.ipRange +} + +func (p *SASQueryParameters) Identifier() string { + return p.identifier +} + +func (p *SASQueryParameters) Resource() string { + return p.resource +} +func (p *SASQueryParameters) Permissions() string { + return p.permissions +} + +func (p *SASQueryParameters) Signature() string { + return p.signature +} + +func (p *SASQueryParameters) CacheControl() string { + return p.cacheControl +} + +func (p *SASQueryParameters) ContentDisposition() string { + return p.contentDisposition +} + +func (p *SASQueryParameters) ContentEncoding() string { + return p.contentEncoding +} + +func (p *SASQueryParameters) ContentLanguage() string { + return p.contentLanguage +} + +func (p *SASQueryParameters) ContentType() string { + return p.contentType +} + +func (p *SASQueryParameters) SignedDirectoryDepth() string { + return p.signedDirectoryDepth +} + +// IPRange represents a SAS IP range's start IP and (optionally) end IP. +type IPRange struct { + Start net.IP // Not specified if length = 0 + End net.IP // Not specified if length = 0 +} + +// String returns a string representation of an IPRange. 
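+// For example, {Start: net.ParseIP("168.1.5.60"), End: net.ParseIP("168.1.5.70")}
+// renders as "168.1.5.60-168.1.5.70"; with no End it is just "168.1.5.60".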
+func (ipr *IPRange) String() string {
+	if len(ipr.Start) == 0 {
+		return ""
+	}
+	start := ipr.Start.String()
+	if len(ipr.End) == 0 {
+		return start
+	}
+	return start + "-" + ipr.End.String()
+}
+
+// newSASQueryParameters creates and initializes a SASQueryParameters object based on the
+// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
+// all SAS-related query parameters are removed from the passed-in map. If
+// deleteSASParametersFromValues is false, the passed-in map is unaltered.
+func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters {
+	p := SASQueryParameters{}
+	for k, v := range values {
+		val := v[0]
+		isSASKey := true
+		switch strings.ToLower(k) {
+		case "sv":
+			p.version = val
+		case "ss":
+			p.services = val
+		case "srt":
+			p.resourceTypes = val
+		case "spr":
+			p.protocol = SASProtocol(val)
+		case "snapshot":
+			p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val)
+		case "st":
+			p.startTime, p.stTimeFormat, _ = parseSASTimeString(val)
+		case "se":
+			p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val)
+		case "sip":
+			dashIndex := strings.Index(val, "-")
+			if dashIndex == -1 {
+				p.ipRange.Start = net.ParseIP(val)
+			} else {
+				p.ipRange.Start = net.ParseIP(val[:dashIndex])
+				p.ipRange.End = net.ParseIP(val[dashIndex+1:])
+			}
+		case "si":
+			p.identifier = val
+		case "sr":
+			p.resource = val
+		case "sp":
+			p.permissions = val
+		case "sig":
+			p.signature = val
+		case "rscc":
+			p.cacheControl = val
+		case "rscd":
+			p.contentDisposition = val
+		case "rsce":
+			p.contentEncoding = val
+		case "rscl":
+			p.contentLanguage = val
+		case "rsct":
+			p.contentType = val
+		case "skoid":
+			p.signedOid = val
+		case "sktid":
+			p.signedTid = val
+		case "skt":
+			p.signedStart, _ = time.Parse(SASTimeFormat, val)
+		case "ske":
+			p.signedExpiry, _ = time.Parse(SASTimeFormat, val)
+		case "sks":
+			p.signedService = val
+		case "skv":
+			p.signedVersion = val
+		case "sdd":
+			p.signedDirectoryDepth = val
+		case "saoid":
+			p.preauthorizedAgentObjectId = val
+		case "suoid":
+			p.agentObjectId = val
+		case "scid":
+			p.correlationId = val
+		default:
+			isSASKey = false // We didn't recognize the query parameter
+		}
+		if isSASKey && deleteSASParametersFromValues {
+			delete(values, k)
+		}
+	}
+	return p
+}
+
+// addToValues adds the SAS components to the specified query parameters map.
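+// For example, parsing a SAS with newSASQueryParameters and then calling
+// addToValues(url.Values{}) reproduces the recognized pairs (sv, ss, srt, sp, sig, ...);
+// unrecognized query parameters are not carried over.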
+func (p *SASQueryParameters) addToValues(v url.Values) url.Values { + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOid != "" { + v.Add("skoid", p.signedOid) + v.Add("sktid", p.signedTid) + v.Add("skt", p.signedStart.Format(SASTimeFormat)) + v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", p.signedDirectoryDepth) + } + if p.preauthorizedAgentObjectId != "" { + v.Add("saoid", p.preauthorizedAgentObjectId) + } + if p.agentObjectId != "" { + v.Add("suoid", p.agentObjectId) + } + if p.correlationId != "" { + v.Add("scid", p.correlationId) + } + return v +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *SASQueryParameters) Encode() string { + v := url.Values{} + p.addToValues(v) + return v.Encode() +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go new file mode 100644 index 00000000000..d09ddcffcc6 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_service_codes_common.go @@ -0,0 +1,134 @@ +package azblob + +// https://docs.microsoft.com/en-us/rest/api/storageservices/common-rest-api-error-codes + +const ( + // ServiceCodeNone is the default value. It indicates that the error was related to the service or that the service didn't return a code. + ServiceCodeNone ServiceCodeType = "" + + // ServiceCodeAccountAlreadyExists means the specified account already exists. + ServiceCodeAccountAlreadyExists ServiceCodeType = "AccountAlreadyExists" + + // ServiceCodeAccountBeingCreated means the specified account is in the process of being created (403). + ServiceCodeAccountBeingCreated ServiceCodeType = "AccountBeingCreated" + + // ServiceCodeAccountIsDisabled means the specified account is disabled (403). + ServiceCodeAccountIsDisabled ServiceCodeType = "AccountIsDisabled" + + // ServiceCodeAuthenticationFailed means the server failed to authenticate the request. Make sure the value of the Authorization header is formed correctly including the signature (403). + ServiceCodeAuthenticationFailed ServiceCodeType = "AuthenticationFailed" + + // ServiceCodeConditionHeadersNotSupported means the condition headers are not supported (400). 
+ ServiceCodeConditionHeadersNotSupported ServiceCodeType = "ConditionHeadersNotSupported" + + // ServiceCodeConditionNotMet means the condition specified in the conditional header(s) was not met for a read/write operation (304/412). + ServiceCodeConditionNotMet ServiceCodeType = "ConditionNotMet" + + // ServiceCodeEmptyMetadataKey means the key for one of the metadata key-value pairs is empty (400). + ServiceCodeEmptyMetadataKey ServiceCodeType = "EmptyMetadataKey" + + // ServiceCodeInsufficientAccountPermissions means read operations are currently disabled or Write operations are not allowed or The account being accessed does not have sufficient permissions to execute this operation (403). + ServiceCodeInsufficientAccountPermissions ServiceCodeType = "InsufficientAccountPermissions" + + // ServiceCodeInternalError means the server encountered an internal error. Please retry the request (500). + ServiceCodeInternalError ServiceCodeType = "InternalError" + + // ServiceCodeInvalidAuthenticationInfo means the authentication information was not provided in the correct format. Verify the value of Authorization header (400). + ServiceCodeInvalidAuthenticationInfo ServiceCodeType = "InvalidAuthenticationInfo" + + // ServiceCodeInvalidHeaderValue means the value provided for one of the HTTP headers was not in the correct format (400). + ServiceCodeInvalidHeaderValue ServiceCodeType = "InvalidHeaderValue" + + // ServiceCodeInvalidHTTPVerb means the HTTP verb specified was not recognized by the server (400). + ServiceCodeInvalidHTTPVerb ServiceCodeType = "InvalidHttpVerb" + + // ServiceCodeInvalidInput means one of the request inputs is not valid (400). + ServiceCodeInvalidInput ServiceCodeType = "InvalidInput" + + // ServiceCodeInvalidMd5 means the MD5 value specified in the request is invalid. The MD5 value must be 128 bits and Base64-encoded (400). + ServiceCodeInvalidMd5 ServiceCodeType = "InvalidMd5" + + // ServiceCodeInvalidMetadata means the specified metadata is invalid. It includes characters that are not permitted (400). + ServiceCodeInvalidMetadata ServiceCodeType = "InvalidMetadata" + + // ServiceCodeInvalidQueryParameterValue means an invalid value was specified for one of the query parameters in the request URI (400). + ServiceCodeInvalidQueryParameterValue ServiceCodeType = "InvalidQueryParameterValue" + + // ServiceCodeInvalidRange means the range specified is invalid for the current size of the resource (416). + ServiceCodeInvalidRange ServiceCodeType = "InvalidRange" + + // ServiceCodeInvalidResourceName means the specified resource name contains invalid characters (400). + ServiceCodeInvalidResourceName ServiceCodeType = "InvalidResourceName" + + // ServiceCodeInvalidURI means the requested URI does not represent any resource on the server (400). + ServiceCodeInvalidURI ServiceCodeType = "InvalidUri" + + // ServiceCodeInvalidXMLDocument means the specified XML is not syntactically valid (400). + ServiceCodeInvalidXMLDocument ServiceCodeType = "InvalidXmlDocument" + + // ServiceCodeInvalidXMLNodeValue means the value provided for one of the XML nodes in the request body was not in the correct format (400). + ServiceCodeInvalidXMLNodeValue ServiceCodeType = "InvalidXmlNodeValue" + + // ServiceCodeMd5Mismatch means the MD5 value specified in the request did not match the MD5 value calculated by the server (400). 
+ ServiceCodeMd5Mismatch ServiceCodeType = "Md5Mismatch" + + // ServiceCodeMetadataTooLarge means the size of the specified metadata exceeds the maximum size permitted (400). + ServiceCodeMetadataTooLarge ServiceCodeType = "MetadataTooLarge" + + // ServiceCodeMissingContentLengthHeader means the Content-Length header was not specified (411). + ServiceCodeMissingContentLengthHeader ServiceCodeType = "MissingContentLengthHeader" + + // ServiceCodeMissingRequiredQueryParameter means a required query parameter was not specified for this request (400). + ServiceCodeMissingRequiredQueryParameter ServiceCodeType = "MissingRequiredQueryParameter" + + // ServiceCodeMissingRequiredHeader means a required HTTP header was not specified (400). + ServiceCodeMissingRequiredHeader ServiceCodeType = "MissingRequiredHeader" + + // ServiceCodeMissingRequiredXMLNode means a required XML node was not specified in the request body (400). + ServiceCodeMissingRequiredXMLNode ServiceCodeType = "MissingRequiredXmlNode" + + // ServiceCodeMultipleConditionHeadersNotSupported means multiple condition headers are not supported (400). + ServiceCodeMultipleConditionHeadersNotSupported ServiceCodeType = "MultipleConditionHeadersNotSupported" + + // ServiceCodeOperationTimedOut means the operation could not be completed within the permitted time (500). + ServiceCodeOperationTimedOut ServiceCodeType = "OperationTimedOut" + + // ServiceCodeOutOfRangeInput means one of the request inputs is out of range (400). + ServiceCodeOutOfRangeInput ServiceCodeType = "OutOfRangeInput" + + // ServiceCodeOutOfRangeQueryParameterValue means a query parameter specified in the request URI is outside the permissible range (400). + ServiceCodeOutOfRangeQueryParameterValue ServiceCodeType = "OutOfRangeQueryParameterValue" + + // ServiceCodeRequestBodyTooLarge means the size of the request body exceeds the maximum size permitted (413). + ServiceCodeRequestBodyTooLarge ServiceCodeType = "RequestBodyTooLarge" + + // ServiceCodeResourceTypeMismatch means the specified resource type does not match the type of the existing resource (409). + ServiceCodeResourceTypeMismatch ServiceCodeType = "ResourceTypeMismatch" + + // ServiceCodeRequestURLFailedToParse means the url in the request could not be parsed (400). + ServiceCodeRequestURLFailedToParse ServiceCodeType = "RequestUrlFailedToParse" + + // ServiceCodeResourceAlreadyExists means the specified resource already exists (409). + ServiceCodeResourceAlreadyExists ServiceCodeType = "ResourceAlreadyExists" + + // ServiceCodeResourceNotFound means the specified resource does not exist (404). + ServiceCodeResourceNotFound ServiceCodeType = "ResourceNotFound" + + // ServiceCodeNoAuthenticationInformation means the specified authentication for the resource does not exist (401). + ServiceCodeNoAuthenticationInformation ServiceCodeType = "NoAuthenticationInformation" + + // ServiceCodeServerBusy means the server is currently unable to receive requests. Please retry your request or Ingress/egress is over the account limit or operations per second is over the account limit (503). + ServiceCodeServerBusy ServiceCodeType = "ServerBusy" + + // ServiceCodeUnsupportedHeader means one of the HTTP headers specified in the request is not supported (400). + ServiceCodeUnsupportedHeader ServiceCodeType = "UnsupportedHeader" + + // ServiceCodeUnsupportedXMLNode means one of the XML nodes specified in the request body is not supported (400). 
+ ServiceCodeUnsupportedXMLNode ServiceCodeType = "UnsupportedXmlNode" + + // ServiceCodeUnsupportedQueryParameter means one of the query parameters specified in the request URI is not supported (400). + ServiceCodeUnsupportedQueryParameter ServiceCodeType = "UnsupportedQueryParameter" + + // ServiceCodeUnsupportedHTTPVerb means the resource doesn't support the specified HTTP verb (405). + ServiceCodeUnsupportedHTTPVerb ServiceCodeType = "UnsupportedHttpVerb" +) diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go new file mode 100644 index 00000000000..a3cbd9817bf --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_storage_error.go @@ -0,0 +1,111 @@ +package azblob + +import ( + "bytes" + "encoding/xml" + "fmt" + "net/http" + "sort" + + "github.com/Azure/azure-pipeline-go/pipeline" +) + +func init() { + // wire up our custom error handling constructor + responseErrorFactory = newStorageError +} + +// ServiceCodeType is a string identifying a storage service error. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2 +type ServiceCodeType string + +// StorageError identifies a responder-generated network or response parsing error. +type StorageError interface { + // ResponseError implements error's Error(), net.Error's Temporary() and Timeout() methods & Response(). + ResponseError + + // ServiceCode returns a service error code. Your code can use this to make error recovery decisions. + ServiceCode() ServiceCodeType +} + +// storageError is the internal struct that implements the public StorageError interface. +type storageError struct { + responseError + serviceCode ServiceCodeType + details map[string]string +} + +// newStorageError creates an error object that implements the error interface. +func newStorageError(cause error, response *http.Response, description string) error { + return &storageError{ + responseError: responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + response: response, + description: description, + }, + serviceCode: ServiceCodeType(response.Header.Get("x-ms-error-code")), + } +} + +// ServiceCode returns service-error information. The caller may examine these values but should not modify any of them. +func (e *storageError) ServiceCode() ServiceCodeType { + return e.serviceCode +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *storageError) Error() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "===== RESPONSE ERROR (ServiceCode=%s) =====\n", e.serviceCode) + fmt.Fprintf(b, "Description=%s, Details: ", e.description) + if len(e.details) == 0 { + b.WriteString("(none)\n") + } else { + b.WriteRune('\n') + keys := make([]string, 0, len(e.details)) + // Alphabetize the details + for k := range e.details { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) + } + } + req := pipeline.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request + pipeline.WriteRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) + return e.ErrorNode.Error(b.String()) +} + +// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). 
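+//
+// Callers typically combine Temporary with ServiceCode when deciding how to react
+// to a failure. A minimal, hedged sketch (doSomething stands in for any operation
+// in this package that returns an error):
+//
+//	if err := doSomething(); err != nil {
+//		if stgErr, ok := err.(StorageError); ok {
+//			switch stgErr.ServiceCode() {
+//			case ServiceCodeResourceNotFound:
+//				// create the resource, then retry
+//			case ServiceCodeServerBusy:
+//				// back off and retry; stgErr.Temporary() also reports true for 503
+//			}
+//		}
+//	}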
+func (e *storageError) Temporary() bool {
+	if e.response != nil {
+		if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) {
+			return true
+		}
+	}
+	return e.ErrorNode.Temporary()
+}
+
+// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors.
+func (e *storageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+	tokName := ""
+	var t xml.Token
+	for t, err = d.Token(); err == nil; t, err = d.Token() {
+		switch tt := t.(type) {
+		case xml.StartElement:
+			tokName = tt.Name.Local
+			break
+		case xml.CharData:
+			switch tokName {
+			case "Message":
+				e.description = string(tt)
+			default:
+				if e.details == nil {
+					e.details = map[string]string{}
+				}
+				e.details[tokName] = string(tt)
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
new file mode 100644
index 00000000000..d7b2507e43f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_util_validate.go
@@ -0,0 +1,64 @@
+package azblob
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+)
+
+// httpRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value httpRange indicates the entire resource. An httpRange
+// with a non-zero offset and a zero count indicates the range from the offset to the resource's end.
+type httpRange struct {
+	offset int64
+	count  int64
+}
+
+func (r httpRange) pointers() *string {
+	if r.offset == 0 && r.count == CountToEnd { // Do common case first for performance
+		return nil // No specified range
+	}
+	endOffset := "" // if count == CountToEnd (0)
+	if r.count > 0 {
+		endOffset = strconv.FormatInt((r.offset+r.count)-1, 10)
+	}
+	dataRange := fmt.Sprintf("bytes=%v-%s", r.offset, endOffset)
+	return &dataRange
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
+	if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+		return 0, nil
+	}
+
+	err := validateSeekableStreamAt0(body)
+	if err != nil {
+		return 0, err
+	}
+
+	count, err := body.Seek(0, io.SeekEnd)
+	if err != nil {
+		return 0, errors.New("body stream must be seekable")
+	}
+
+	body.Seek(0, io.SeekStart)
+	return count, nil
+}
+
+// validateSeekableStreamAt0 returns an error if body is not a seekable stream positioned at 0.
+func validateSeekableStreamAt0(body io.ReadSeeker) error {
+	if body == nil { // nil bodies are "logically" seekable to 0
+		return nil
+	}
+	if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+		// Help detect programmer error
+		if err != nil {
+			return errors.New("body stream must be seekable")
+		}
+		return errors.New("body stream must be set to position 0")
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go
new file mode 100644
index 00000000000..66799f9cb65
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zc_uuid.go
@@ -0,0 +1,77 @@
+package azblob
+
+import (
+	"crypto/rand"
+	"fmt"
+	"strconv"
+)
+
+// The UUID reserved variants.
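+// These bytes are OR-ed into byte 8 of a generated UUID to select its variant;
+// newUUID below uses reservedRFC4122. As a hedged sketch of the round trip these
+// helpers support (one consumer is the unique-request-ID pipeline policy, which
+// stamps a UUID into the x-ms-client-request-id header):
+//
+//	id := newUUID()   // random, version 4
+//	s := id.String()  // "xxxxxxxx-xxxx-4xxx-xxxx-xxxxxxxxxxxx" form
+//	u := parseUUID(s) // recovers the same 16 bytes
+//	_ = u.bytes()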
+const (
+	reservedNCS       byte = 0x80
+	reservedRFC4122   byte = 0x40
+	reservedMicrosoft byte = 0x20
+	reservedFuture    byte = 0x00
+)
+
+// A UUID representation compliant with the RFC 4122 specification.
+type uuid [16]byte
+
+// newUUID returns a new uuid generated with the RFC 4122 version-4 algorithm.
+func newUUID() (u uuid) {
+	u = uuid{}
+	// Set all bits to randomly (or pseudo-randomly) chosen values.
+	rand.Read(u[:])
+	u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
+
+	var version byte = 4
+	u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
+	return
+}
+
+// String returns an unparsed version of the generated UUID sequence.
+func (u uuid) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
+
+// parseUUID parses a string formatted as "03020100-0504-0706-0809-0a0b0c0d0e0f"
+// or "{03020100-0504-0706-0809-0a0b0c0d0e0f}" into a UUID.
+func parseUUID(uuidStr string) uuid {
+	char := func(hexString string) byte {
+		i, _ := strconv.ParseUint(hexString, 16, 8)
+		return byte(i)
+	}
+	if uuidStr[0] == '{' {
+		uuidStr = uuidStr[1:] // Skip over the '{'
+	}
+	// 03020100 - 05 04 - 07 06 - 08 09 - 0a 0b 0c 0d 0e 0f
+	//  1 11 1 11 11 1 12 22 2 22 22 22 33 33 33
+	// 01234567 8 90 12 3 45 67 8 90 12 3 45 67 89 01 23 45
+	uuidVal := uuid{
+		char(uuidStr[0:2]),
+		char(uuidStr[2:4]),
+		char(uuidStr[4:6]),
+		char(uuidStr[6:8]),
+
+		char(uuidStr[9:11]),
+		char(uuidStr[11:13]),
+
+		char(uuidStr[14:16]),
+		char(uuidStr[16:18]),
+
+		char(uuidStr[19:21]),
+		char(uuidStr[21:23]),
+
+		char(uuidStr[24:26]),
+		char(uuidStr[26:28]),
+		char(uuidStr[28:30]),
+		char(uuidStr[30:32]),
+		char(uuidStr[32:34]),
+		char(uuidStr[34:36]),
+	}
+	return uuidVal
+}
+
+func (u uuid) bytes() []byte {
+	return u[:]
+}
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go
new file mode 100644
index 00000000000..6b3779c0e98
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zt_doc.go
@@ -0,0 +1,89 @@
+// Copyright 2017 Microsoft Corporation. All rights reserved.
+// Use of this source code is governed by an MIT
+// license that can be found in the LICENSE file.
+
+/*
+Package azblob allows you to manipulate Azure Storage containers and blob objects.
+
+URL Types
+
+The most common types you'll work with are the XxxURL types. The methods of these types make requests
+against the Azure Storage Service.
+
+ - ServiceURL's methods perform operations on a storage account.
+ - ContainerURL's methods perform operations on an account's container.
+ - BlockBlobURL's methods perform operations on a container's block blob.
+ - AppendBlobURL's methods perform operations on a container's append blob.
+ - PageBlobURL's methods perform operations on a container's page blob.
+ - BlobURL's methods perform operations on a container's blob regardless of the blob's type.
+
+Internally, each XxxURL object contains a URL and a request pipeline. The URL indicates the endpoint where each HTTP
+request is sent and the pipeline indicates how the outgoing HTTP request and incoming HTTP response are processed.
+The pipeline specifies things like retry policies, logging, deserialization of HTTP response payloads, and more.
+
+Pipelines are threadsafe and may be shared by multiple XxxURL objects. When you create a ServiceURL, you pass
+an initial pipeline.
+When you call ServiceURL's NewContainerURL method, the new ContainerURL object has its own
+URL but it shares the same pipeline as the parent ServiceURL object.
+
+To work with a blob, call one of ContainerURL's 4 NewXxxBlobURL methods depending on how you want to treat the blob.
+To treat the blob as a block blob, append blob, or page blob, call NewBlockBlobURL, NewAppendBlobURL, or NewPageBlobURL
+respectively. These three types are all identical except for the methods they expose; each type exposes the methods
+relevant to the type of blob represented. If you're not sure how you want to treat a blob, you can call NewBlobURL;
+this returns an object whose methods are relevant to any kind of blob. When you call ContainerURL's NewXxxBlobURL,
+the new XxxBlobURL object has its own URL but it shares the same pipeline as the parent ContainerURL object. You
+can easily switch between blob types (method sets) by calling a ToXxxBlobURL method.
+
+If you'd like to use a different pipeline with a ServiceURL, ContainerURL, or XxxBlobURL object, then call the XxxURL
+object's WithPipeline method passing in the desired pipeline. The WithPipeline methods create a new XxxURL object
+with the same URL as the original but with the specified pipeline.
+
+Note that XxxURL objects use little memory, are goroutine-safe, and many objects can share the same pipeline, so
+XxxURL objects share a lot of system resources, making them very efficient.
+
+All of XxxURL's methods that make HTTP requests return rich error handling information so you can discern network failures,
+transient failures, timeout failures, service failures, etc. See the StorageError interface for more information and an
+example of how to deal with errors.
+
+URL and Shared Access Signature Manipulation
+
+The library includes a BlobURLParts type for deconstructing and reconstructing URLs, and you can use the following types
+for generating and parsing Shared Access Signatures (SAS):
+ - Use the AccountSASSignatureValues type to create a SAS for a storage account.
+ - Use the BlobSASSignatureValues type to create a SAS for a container or blob.
+ - Use the SASQueryParameters type to turn signature values into query parameters or to parse query parameters.
+
+To generate a SAS, you must use the SharedKeyCredential type.
+
+Credentials
+
+When creating a request pipeline, you must specify one of this package's credential types.
+ - Call the NewAnonymousCredential function for requests that contain a Shared Access Signature (SAS).
+ - Call the NewSharedKeyCredential function (with an account name & key) to access any account resources. You must also use this
+   to generate Shared Access Signatures.
+
+HTTP Request Policy Factories
+
+This package defines several request policy factories for use with the pipeline package.
+Most applications will not use these factories directly; instead, the NewPipeline
+function creates these factories, initializes them (via the PipelineOptions type)
+and returns a pipeline object for use by the XxxURL objects.
+
+However, for advanced scenarios, developers can access these policy factories directly
+and even create their own and then construct their own pipeline in order to affect HTTP
+requests and responses performed by the XxxURL objects. For example, developers can
+introduce their own logging, random failures, request recording & playback for fast
+testing, HTTP request pacing, alternate retry mechanisms, metering, metrics, etc. The
+possibilities are endless!
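+
+As a minimal, hedged sketch of how these pieces fit together (the account name,
+key, container, and blob names below are placeholders, not working credentials):
+
+	credential, err := NewSharedKeyCredential("myaccount", "<base64-account-key>")
+	if err != nil {
+		// handle the malformed-key error
+	}
+	p := NewPipeline(credential, PipelineOptions{})
+	u, _ := url.Parse("https://myaccount.blob.core.windows.net")
+	serviceURL := NewServiceURL(*u, p)
+	containerURL := serviceURL.NewContainerURL("mycontainer")
+	blobURL := containerURL.NewBlockBlobURL("myblob.txt")
+	_ = blobURL // Upload, Download, etc. are invoked on blobURL
+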
+
+Below are the request pipeline policy factory functions that are provided with this
+package:
+ - NewRetryPolicyFactory Enables rich retry semantics for failed HTTP requests.
+ - NewRequestLogPolicyFactory Enables rich logging support for HTTP requests/responses & failures.
+ - NewTelemetryPolicyFactory Enables simple modification of the HTTP request's User-Agent header so each request reports the SDK version & language/runtime making the requests.
+ - NewUniqueRequestIDPolicyFactory Adds an x-ms-client-request-id header with a unique UUID value to an HTTP request to help with diagnosing failures.
+
+Also, note that all the NewXxxCredential functions return request policy factory objects which get injected into the pipeline.
+*/
+package azblob
+
+// TokenCredential Use this to access resources using Role-Based Access Control (RBAC).
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
new file mode 100644
index 00000000000..9a0144bf5f4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_append_blob.go
@@ -0,0 +1,532 @@
+package azblob
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"encoding/base64"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+)
+
+// appendBlobClient is the client for the AppendBlob methods of the Azblob service.
+type appendBlobClient struct {
+	managementClient
+}
+
+// newAppendBlobClient creates an instance of the appendBlobClient client.
+func newAppendBlobClient(url url.URL, p pipeline.Pipeline) appendBlobClient {
+	return appendBlobClient{newManagementClient(url, p)}
+}
+
+// AppendBlock the Append Block operation commits a new block of data to the end of an existing append blob. The Append
+// Block operation is permitted only if the blob was created with x-ms-blob-type set to AppendBlob. Append Block is
+// supported only on version 2015-02-21 version or later.
+//
+// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an
+// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more
+// information, see Setting
+// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to
+// be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be
+// validated by the service. leaseID is if specified, the operation only succeeds if the resource's lease is active and
+// matches this ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If
+// the Append Block operation would cause the blob to exceed that limit or if the blob size is already greater than the
+// value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
+// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A
+// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this
+// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 -
+// Precondition Failed).
encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in +// the request. If not specified, encryption is performed with the root account encryption key. For more information, +// see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided +// encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm +// used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the +// x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the +// name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client appendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*AppendBlobAppendBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.appendBlockPreparer(body, contentLength, timeout, transactionalContentMD5, transactionalContentCrc64, leaseID, maxSize, appendPosition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobAppendBlockResponse), err +} + +// appendBlockPreparer prepares the AppendBlock request. 
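+//
+// Roughly, the prepared request has the following shape (a sketch assembled from
+// the preparer below, not captured wire output; each optional header appears only
+// when its argument is non-nil):
+//
+//	PUT <blob URL>?comp=appendblock&timeout=<n>
+//	Content-Length: <contentLength>
+//	x-ms-blob-condition-maxsize: <maxSize>
+//	x-ms-blob-condition-appendpos: <appendPosition>
+//	x-ms-version: <ServiceVersion>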
+func (client appendBlobClient) appendBlockPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, leaseID *string, maxSize *int64, appendPosition *int64, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "appendblock") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if maxSize != nil { + req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// appendBlockResponder handles the response to the AppendBlock request. +func (client appendBlobClient) appendBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobAppendBlockResponse{rawResponse: resp.Response()}, err +} + +// AppendBlockFromURL the Append Block operation commits a new block of data to the end of an existing append blob +// where the contents are read from a source url. The Append Block operation is permitted only if the blob was created +// with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// +// sourceURL is specify a URL to the copy source. contentLength is the length of the request. sourceRange is bytes of +// source data in the specified range. 
sourceContentMD5 is specify the md5 calculated for the range of bytes that must +// be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the range of bytes that must be +// read from the copy source. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. encryptionKey is optional. Specifies the encryption key to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the root account encryption key. For more +// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this +// ID. maxSize is optional conditional header. The max length in bytes permitted for the append blob. If the Append +// Block operation would cause the blob to exceed that limit or if the blob size is already greater than the value +// specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - +// Precondition Failed). appendPosition is optional conditional header, used only for the Append Block operation. A +// number indicating the byte offset to compare. Append Block will succeed only if the append position is equal to this +// number. If it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - +// Precondition Failed). ifModifiedSince is specify this header value to operate only on a blob if it has been modified +// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has +// not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a +// matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is +// specify a SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is +// specify this header value to operate only on a blob if it has been modified since the specified date/time. +// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. +// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. requestID is provides +// a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. copySourceAuthorization is only Bearer type is supported. Credentials should be a +// valid OAuth access token to copy source. 
+func (client appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*AppendBlobAppendBlockFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.appendBlockFromURLPreparer(sourceURL, contentLength, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, transactionalContentMD5, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, maxSize, appendPosition, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.appendBlockFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobAppendBlockFromURLResponse), err +} + +// appendBlockFromURLPreparer prepares the AppendBlockFromURL request. 
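+//
+// The prepared request mirrors AppendBlock's but carries no request body; the
+// service pulls the data from the source URL instead. A sketch of its shape,
+// derived from the preparer below:
+//
+//	PUT <blob URL>?comp=appendblock&timeout=<n>
+//	x-ms-copy-source: <sourceURL>
+//	x-ms-source-range: <sourceRange>                    (only when provided)
+//	x-ms-copy-source-authorization: <Bearer token>      (only when provided)
+//	x-ms-version: <ServiceVersion>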
+func (client appendBlobClient) appendBlockFromURLPreparer(sourceURL string, contentLength int64, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, transactionalContentMD5 []byte, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, maxSize *int64, appendPosition *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "appendblock") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-copy-source", sourceURL) + if sourceRange != nil { + req.Header.Set("x-ms-source-range", *sourceRange) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if maxSize != nil { + req.Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*maxSize, 10)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if copySourceAuthorization != nil { + 
req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + return req, nil +} + +// appendBlockFromURLResponder handles the response to the AppendBlockFromURL request. +func (client appendBlobClient) appendBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobAppendBlockFromURLResponse{rawResponse: resp.Response()}, err +} + +// Create the Create Append Blob operation creates a new append blob. +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. blobContentType is optional. Sets the blob's content type. If specified, +// this property is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the +// blob's content encoding. If specified, this property is stored with the blob and returned with a read request. +// blobContentLanguage is optional. Set the blob's content language. If specified, this property is stored with the +// blob and returned with a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this +// hash is not validated, as the hashes for the individual blocks were validated when each was uploaded. +// blobCacheControl is optional. Sets the blob's cache control. If specified, this property is stored with the blob and +// returned with a read request. metadata is optional. Specifies a user-defined name-value pair associated with the +// blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to the +// destination blob. If one or more name-value pairs are specified, the destination blob is created with the specified +// metadata, and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, +// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and +// Metadata for more information. leaseID is if specified, the operation only succeeds if the resource's lease is +// active and matches this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. +// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. ifModifiedSince +// is specify this header value to operate only on a blob if it has been modified since the specified date/time. 
+// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is +// specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on +// blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, opaque value +// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// blobTagsString is optional. Used to set blob tags in various blob operations. immutabilityPolicyExpiry is specifies +// the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the +// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. +func (client appendBlobClient) Create(ctx context.Context, contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*AppendBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, timeout, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobCreateResponse), err +} + +// createPreparer prepares the Create request. 
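+//
+// The Create request sets no comp query parameter; the append blob type is
+// selected by a header instead. A sketch of its shape, derived from the preparer
+// below (metadata expands to one x-ms-meta-<key> header per entry):
+//
+//	PUT <blob URL>?timeout=<n>
+//	Content-Length: <contentLength>
+//	x-ms-blob-type: AppendBlob
+//	x-ms-meta-<key>: <value>
+//	x-ms-version: <ServiceVersion>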
+func (client appendBlobClient) createPreparer(contentLength int64, timeout *int32, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + req.Header.Set("x-ms-blob-type", "AppendBlob") + return req, nil +} + +// createResponder handles the response to the Create request. 
+func (client appendBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobCreateResponse{rawResponse: resp.Response()}, err +} + +// Seal the Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 +// version or later. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if +// specified, the operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is +// specify this header value to operate only on a blob if it has been modified since the specified date/time. +// ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is +// specify an ETag value to operate only on blobs without a matching value. appendPosition is optional conditional +// header, used only for the Append Block operation. A number indicating the byte offset to compare. Append Block will +// succeed only if the append position is equal to this number. If it is not, the request will fail with the +// AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). +func (client appendBlobClient) Seal(ctx context.Context, timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (*AppendBlobSealResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.sealPreparer(timeout, requestID, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, appendPosition) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.sealResponder}, req) + if err != nil { + return nil, err + } + return resp.(*AppendBlobSealResponse), err +} + +// sealPreparer prepares the Seal request. 
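+//
+// A sketch of the prepared request's shape, derived from the preparer below (the
+// lease and conditional headers appear only when their arguments are non-nil):
+//
+//	PUT <blob URL>?comp=seal&timeout=<n>
+//	x-ms-lease-id: <leaseID>
+//	x-ms-blob-condition-appendpos: <appendPosition>
+//	x-ms-version: <ServiceVersion>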
+func (client appendBlobClient) sealPreparer(timeout *int32, requestID *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, appendPosition *int64) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "seal") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if appendPosition != nil { + req.Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPosition, 10)) + } + return req, nil +} + +// sealResponder handles the response to the Seal request. +func (client appendBlobClient) sealResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &AppendBlobSealResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go new file mode 100644 index 00000000000..6e3b9a207a4 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_blob.go @@ -0,0 +1,2063 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// blobClient is the client for the Blob methods of the Azblob service. +type blobClient struct { + managementClient +} + +// newBlobClient creates an instance of the blobClient client. +func newBlobClient(url url.URL, p pipeline.Pipeline) blobClient { + return blobClient{newManagementClient(url, p)} +} + +// AbortCopyFromURL the Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a +// destination blob with zero length and full metadata. +// +// copyID is the copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. timeout is +// the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
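+//
+// A minimal, hedged usage sketch (copyID comes from the x-ms-copy-id header of an
+// earlier Copy Blob response; the nil arguments leave timeout, lease ID, and
+// request ID unset):
+//
+//	resp, err := client.AbortCopyFromURL(ctx, copyID, nil, nil, nil)
+//	if err != nil {
+//		// a pending copy may have already completed; inspect the StorageError
+//	}
+//	_ = resp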
+func (client blobClient) AbortCopyFromURL(ctx context.Context, copyID string, timeout *int32, leaseID *string, requestID *string) (*BlobAbortCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.abortCopyFromURLPreparer(copyID, timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.abortCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAbortCopyFromURLResponse), err +} + +// abortCopyFromURLPreparer prepares the AbortCopyFromURL request. +func (client blobClient) abortCopyFromURLPreparer(copyID string, timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("copyid", copyID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "copy") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-copy-action", "abort") + return req, nil +} + +// abortCopyFromURLResponder handles the response to the AbortCopyFromURL request. +func (client blobClient) abortCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusNoContent) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAbortCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// AcquireLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. proposedLeaseID is proposed lease ID, in a GUID string format. The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
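+//
+// A minimal, hedged usage sketch (a 15-second lease with a caller-proposed GUID;
+// a duration of -1 would request a lease that never expires):
+//
+//	duration := int32(15)
+//	proposed := newUUID().String()
+//	resp, err := client.AcquireLease(ctx, nil, &duration, &proposed, nil, nil, nil, nil, nil, nil)
+//	if err == nil {
+//		_ = resp // the response's lease-ID header accessor (assumed here) feeds later lease-bound calls
+//	}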
+func (client blobClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client blobClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. +func (client blobClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. 
A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is +// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. +func (client blobClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. +func (client blobClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. 
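+//
+// Editor's sketch (not generated code): driving the BreakLease wrapper above to
+// break a lease immediately via a zero break period; no conditional headers set.
+//
+//    breakPeriod := int32(0)
+//    resp, err := client.BreakLease(ctx, nil, &breakPeriod, nil, nil, nil, nil, nil, nil)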
+func (client blobClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string +// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See +// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. 
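+//
+// Editor's sketch (not generated code): rotating the lease ID on a leased blob via
+// the ChangeLease wrapper above; currentID and newID are hypothetical GUID strings.
+//
+//    resp, err := client.ChangeLease(ctx, currentID, newID, nil, nil, nil, nil, nil, nil, nil)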
+func (client blobClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. +func (client blobClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// CopyFromURL the Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a +// response until the copy is complete. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. +// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not +// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a +// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. 
+// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be read from the copy +// source. blobTagsString is optional. Used to set blob tags in various blob operations. immutabilityPolicyExpiry is +// specifies the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the +// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. +// copySourceAuthorization is only Bearer type is supported. Credentials should be a valid OAuth access token to copy +// source. +func (client blobClient) CopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool, copySourceAuthorization *string) (*BlobCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.copyFromURLPreparer(copySource, timeout, metadata, tier, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, sourceContentMD5, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobCopyFromURLResponse), err +} + +// copyFromURLPreparer prepares the CopyFromURL request. 
+func (client blobClient) copyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-copy-source", copySource) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + req.Header.Set("x-ms-requires-sync", "true") + return req, nil +} + +// copyFromURLResponder handles the response to the CopyFromURL request. 
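+//
+// Editor's sketch (not generated code): a synchronous server-side copy via the
+// CopyFromURL wrapper above, with every optional condition unset; srcURL is a
+// hypothetical, SAS-authenticated source URL.
+//
+//    resp, err := client.CopyFromURL(ctx, srcURL, nil, nil, AccessTierNone,
+//        nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+//        BlobImmutabilityPolicyModeNone, nil, nil)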
+func (client blobClient) copyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// CreateSnapshot the Create Snapshot operation creates a read-only snapshot of a blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. encryptionKey is optional. Specifies the encryption key to use +// to encrypt the data provided in the request. If not specified, encryption is performed with the root account +// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the +// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. +// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is +// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version +// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the +// request. If not specified, encryption is performed with the default account encryption scope. For more information, +// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. 
+func (client blobClient) CreateSnapshot(ctx context.Context, timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (*BlobCreateSnapshotResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createSnapshotPreparer(timeout, metadata, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createSnapshotResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobCreateSnapshotResponse), err +} + +// createSnapshotPreparer prepares the CreateSnapshot request. +func (client blobClient) createSnapshotPreparer(timeout *int32, metadata map[string]string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "snapshot") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// createSnapshotResponder handles the response to the CreateSnapshot request. 
+func (client blobClient) createSnapshotResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobCreateSnapshotResponse{rawResponse: resp.Response()}, err +} + +// Delete if the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently +// removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob is +// deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service retains the blob +// or snapshot for the number of days specified by the DeleteRetentionPolicy section of [Storage service properties] +// (Set-Blob-Service-Properties.md). After the specified number of days has passed, the blob's data is permanently +// removed from the storage account. Note that you continue to be charged for the soft-deleted blob's storage until it +// is permanently removed. Use the List Blobs API and specify the "include=deleted" query parameter to discover which +// blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. +// All other operations on a soft-deleted blob or snapshot causes the service to return an HTTP status code of 404 +// (ResourceNotFound). +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. deleteSnapshots is required if the blob has associated snapshots. Specify one +// of the following two options: include: Delete the base blob and all of its snapshots. only: Delete only the blob's +// snapshots and not the blob itself ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is +// provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. blobDeleteType is optional. Only possible value is 'permanent', which +// specifies to permanently delete a blob if blob soft delete is enabled. 
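+//
+// Editor's sketch (not generated code): a soft delete of the base blob together
+// with all of its snapshots; DeleteSnapshotsOptionInclude corresponds to the
+// service's "include" option described above, and no conditional headers are set.
+//
+//    resp, err := client.Delete(ctx, nil, nil, nil, nil, DeleteSnapshotsOptionInclude,
+//        nil, nil, nil, nil, nil, nil, BlobDeleteNone)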
+func (client blobClient) Delete(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobDeleteType BlobDeleteType) (*BlobDeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deletePreparer(snapshot, versionID, timeout, leaseID, deleteSnapshots, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobDeleteType) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobDeleteResponse), err +} + +// deletePreparer prepares the Delete request. +func (client blobClient) deletePreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, deleteSnapshots DeleteSnapshotsOptionType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobDeleteType BlobDeleteType) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if blobDeleteType != BlobDeleteNone { + params.Set("deletetype", string(blobDeleteType)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if deleteSnapshots != DeleteSnapshotsOptionNone { + req.Header.Set("x-ms-delete-snapshots", string(deleteSnapshots)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. +func (client blobClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobDeleteResponse{rawResponse: resp.Response()}, err +} + +// DeleteImmutabilityPolicy the Delete Immutability Policy operation deletes the immutability policy on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. 
requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) DeleteImmutabilityPolicy(ctx context.Context, timeout *int32, requestID *string) (*BlobDeleteImmutabilityPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deleteImmutabilityPolicyPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteImmutabilityPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobDeleteImmutabilityPolicyResponse), err +} + +// deleteImmutabilityPolicyPreparer prepares the DeleteImmutabilityPolicy request. +func (client blobClient) deleteImmutabilityPolicyPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "immutabilityPolicies") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteImmutabilityPolicyResponder handles the response to the DeleteImmutabilityPolicy request. +func (client blobClient) deleteImmutabilityPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobDeleteImmutabilityPolicyResponse{rawResponse: resp.Response()}, err +} + +// Download the Download operation reads or downloads a blob from the system, including its metadata and properties. +// You can also call Download to read a snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// rangeGetContentMD5 is when set to true and specified together with the Range, the service returns the MD5 hash for +// the range, as long as the range is less than or equal to 4 MB in size. rangeGetContentCRC64 is when set to true and +// specified together with the Range, the service returns the CRC64 hash for the range, as long as the range is less +// than or equal to 4 MB in size. encryptionKey is optional. Specifies the encryption key to use to encrypt the data +// provided in the request. 
If not specified, encryption is performed with the root account encryption key. For more +// information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) Download(ctx context.Context, snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*downloadResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.downloadPreparer(snapshot, versionID, timeout, rangeParameter, leaseID, rangeGetContentMD5, rangeGetContentCRC64, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.downloadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*downloadResponse), err +} + +// downloadPreparer prepares the Download request. 
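+//
+// Editor's sketch (not generated code): reading the first KiB of a blob via the
+// Download wrapper above and asking the service for a per-range MD5; the x-ms-range
+// value uses the HTTP bytes=start-end form.
+//
+//    rng := "bytes=0-1023"
+//    withMD5 := true
+//    resp, err := client.Download(ctx, nil, nil, nil, &rng, nil, &withMD5, nil,
+//        nil, nil, EncryptionAlgorithmNone, nil, nil, nil, nil, nil, nil)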
+func (client blobClient) downloadPreparer(snapshot *string, versionID *string, timeout *int32, rangeParameter *string, leaseID *string, rangeGetContentMD5 *bool, rangeGetContentCRC64 *bool, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if rangeGetContentMD5 != nil { + req.Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*rangeGetContentMD5)) + } + if rangeGetContentCRC64 != nil { + req.Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*rangeGetContentCRC64)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// downloadResponder handles the response to the Download request. +func (client blobClient) downloadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) + if resp == nil { + return nil, err + } + return &downloadResponse{rawResponse: resp.Response()}, err +} + +// GetAccountInfo returns the sku name and account kind +func (client blobClient) GetAccountInfo(ctx context.Context) (*BlobGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. 
+func (client blobClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client blobClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties the Get Properties operation returns all user-defined metadata, standard HTTP properties, and system +// properties for the blob. It does not return the content of the blob. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
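+//
+// Editor's sketch (not generated code): a conditional HEAD that only succeeds while
+// the blob still carries a previously observed ETag; prior is a hypothetical earlier
+// BlobGetPropertiesResponse, and its ETag() accessor is an assumption.
+//
+//    etag := prior.ETag()
+//    resp, err := client.GetProperties(ctx, nil, nil, nil, nil, nil, nil,
+//        EncryptionAlgorithmNone, nil, nil, &etag, nil, nil, nil)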
+func (client blobClient) GetProperties(ctx context.Context, snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(snapshot, versionID, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client blobClient) getPropertiesPreparer(snapshot *string, versionID *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("HEAD", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. 
+func (client blobClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobGetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// GetTags the Get Tags operation enables users to get the tags associated with a blob. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. snapshot is the +// snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more +// information on working with blob snapshots, see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. +func (client blobClient) GetTags(ctx context.Context, timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string, leaseID *string) (*BlobTags, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getTagsPreparer(timeout, requestID, snapshot, versionID, ifTags, leaseID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getTagsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobTags), err +} + +// getTagsPreparer prepares the GetTags request. +func (client blobClient) getTagsPreparer(timeout *int32, requestID *string, snapshot *string, versionID *string, ifTags *string, leaseID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + params.Set("comp", "tags") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + return req, nil +} + +// getTagsResponder handles the response to the GetTags request. 
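+//
+// Editor's sketch (not generated code): fetching the current tag set of the base
+// blob via the GetTags wrapper above; the responder below unmarshals the XML body
+// into BlobTags.
+//
+//    tags, err := client.GetTags(ctx, nil, nil, nil, nil, nil, nil)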
+func (client blobClient) getTagsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &BlobTags{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// Query the Query operation enables users to select/project on blob data by providing simple query expressions. +// +// // queryPreparer prepares the Query request. +// func (client blobClient) queryPreparer(snapshot *string, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *string, ifNoneMatch *string, ifTags *string, requestID *string) (pipeline.Request, error) { +// req, err := pipeline.NewRequest("POST", client.url, nil) +// if err != nil { +// return req, pipeline.NewError(err, "failed to create request") +// } +// params := req.URL.Query() +// if snapshot != nil && len(*snapshot) > 0 { +// params.Set("snapshot", *snapshot) +// } +// if timeout != nil { +// params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) +// } +// params.Set("comp", "query") +// req.URL.RawQuery = params.Encode() +// if leaseID != nil { +// req.Header.Set("x-ms-lease-id", *leaseID) +// } +// if encryptionKey != nil { +// req.Header.Set("x-ms-encryption-key", *encryptionKey) +// } +// if encryptionKeySha256 != nil { +// req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) +// } +// if encryptionAlgorithm != EncryptionAlgorithmNone { +// req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) +// } +// if ifModifiedSince != nil { +// req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifUnmodifiedSince != nil { +// req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) +// } +// if ifMatch != nil { +// req.Header.Set("If-Match", *ifMatch) +// } +// if ifNoneMatch != nil { +// req.Header.Set("If-None-Match", *ifNoneMatch) +// } +// if ifTags != nil { +// req.Header.Set("x-ms-if-tags", *ifTags) +// } +// req.Header.Set("x-ms-version", ServiceVersion) +// if requestID != nil { +// req.Header.Set("x-ms-client-request-id", *requestID) +// } +// b, err := xml.Marshal(queryRequest) +// if err != nil { +// return req, pipeline.NewError(err, "failed to marshal request body") +// } +// req.Header.Set("Content-Type", "application/xml") +// err = req.SetBody(bytes.NewReader(b)) +// if err != nil { +// return req, pipeline.NewError(err, "failed to set request body") +// } +// return req, nil +// } +// +// // queryResponder handles the response to the Query request. +// func (client blobClient) queryResponder(resp pipeline.Response) (pipeline.Response, error) { +// err := validateResponse(resp, http.StatusOK, http.StatusPartialContent) +// if resp == nil { +// return nil, err +// } +// return &QueryResponse{rawResponse: resp.Response()}, err +// } + +// ReleaseLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. 
timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client blobClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. 
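+//
+// Editor's sketch (not generated code): releasing a held lease via the ReleaseLease
+// wrapper above so another client can acquire it at once; leaseID is the GUID
+// returned by AcquireLease.
+//
+//    resp, err := client.ReleaseLease(ctx, leaseID, nil, nil, nil, nil, nil, nil)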
+func (client blobClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete +// operations +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client blobClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. 
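+// Aside from the x-ms-lease-action header ("renew" rather than "release"),
+// it is identical to releaseLeasePreparer above.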
+func (client blobClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. +func (client blobClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// SetExpiry sets the time a blob will expire and be deleted. +// +// expiryOptions is required. Indicates mode of the expiry time timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. expiresOn is the +// time to set the blob to expiry +func (client blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (*BlobSetExpiryResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setExpiryPreparer(expiryOptions, timeout, requestID, expiresOn) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setExpiryResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetExpiryResponse), err +} + +// setExpiryPreparer prepares the SetExpiry request. 
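+// A minimal usage sketch for SetExpiry above, assuming the
+// BlobExpiryOptionsType constants defined elsewhere in this package (for
+// RelativeToNow the service expects the expiry as milliseconds from now):
+//
+//   expiresOn := "30000" // 30 seconds from now
+//   _, err := client.SetExpiry(ctx, BlobExpiryOptionsRelativeToNow, nil, nil, &expiresOn)
+//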
+func (client blobClient) setExpiryPreparer(expiryOptions BlobExpiryOptionsType, timeout *int32, requestID *string, expiresOn *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "expiry") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-expiry-option", string(expiryOptions)) + if expiresOn != nil { + req.Header.Set("x-ms-expiry-time", *expiresOn) + } + return req, nil +} + +// setExpiryResponder handles the response to the SetExpiry request. +func (client blobClient) setExpiryResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetExpiryResponse{rawResponse: resp.Response()}, err +} + +// SetHTTPHeaders the Set HTTP Headers operation sets system properties on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. blobContentMD5 +// is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual +// blocks were validated when each was uploaded. blobContentEncoding is optional. Sets the blob's content encoding. If +// specified, this property is stored with the blob and returned with a read request. blobContentLanguage is optional. +// Set the blob's content language. If specified, this property is stored with the blob and returned with a read +// request. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. blobContentDisposition is optional. Sets +// the blob's Content-Disposition header. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
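+//
+// A minimal usage sketch, assuming only the Content-Type property is being
+// set; per the Set Blob Properties semantics, header properties left nil are
+// cleared rather than preserved:
+//
+//   contentType := "application/json"
+//   _, err := client.SetHTTPHeaders(ctx, nil, nil, &contentType, nil, nil, nil, nil,
+//       nil, nil, nil, nil, nil, nil, nil)
+//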
+func (client blobClient) SetHTTPHeaders(ctx context.Context, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobContentDisposition *string, requestID *string) (*BlobSetHTTPHeadersResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setHTTPHeadersPreparer(timeout, blobCacheControl, blobContentType, blobContentMD5, blobContentEncoding, blobContentLanguage, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobContentDisposition, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setHTTPHeadersResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetHTTPHeadersResponse), err +} + +// setHTTPHeadersPreparer prepares the SetHTTPHeaders request. +func (client blobClient) setHTTPHeadersPreparer(timeout *int32, blobCacheControl *string, blobContentType *string, blobContentMD5 []byte, blobContentEncoding *string, blobContentLanguage *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobContentDisposition *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setHTTPHeadersResponder handles the response to the SetHTTPHeaders request. 
+func (client blobClient) setHTTPHeadersResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetHTTPHeadersResponse{rawResponse: resp.Response()}, err +} + +// SetImmutabilityPolicy the Set Immutability Policy operation sets the immutability policy on the blob +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. ifUnmodifiedSince +// is specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. +// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. +func (client blobClient) SetImmutabilityPolicy(ctx context.Context, timeout *int32, requestID *string, ifUnmodifiedSince *time.Time, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType) (*BlobSetImmutabilityPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setImmutabilityPolicyPreparer(timeout, requestID, ifUnmodifiedSince, immutabilityPolicyExpiry, immutabilityPolicyMode) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setImmutabilityPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetImmutabilityPolicyResponse), err +} + +// setImmutabilityPolicyPreparer prepares the SetImmutabilityPolicy request. +func (client blobClient) setImmutabilityPolicyPreparer(timeout *int32, requestID *string, ifUnmodifiedSince *time.Time, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "immutabilityPolicies") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + return req, nil +} + +// setImmutabilityPolicyResponder handles the response to the SetImmutabilityPolicy request. 
+func (client blobClient) setImmutabilityPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetImmutabilityPolicyResponse{rawResponse: resp.Response()}, err +} + +// SetLegalHold the Set Legal Hold operation sets a legal hold on the blob. +// +// legalHold is specified if a legal hold should be set on the blob. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) SetLegalHold(ctx context.Context, legalHold bool, timeout *int32, requestID *string) (*BlobSetLegalHoldResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setLegalHoldPreparer(legalHold, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setLegalHoldResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetLegalHoldResponse), err +} + +// setLegalHoldPreparer prepares the SetLegalHold request. +func (client blobClient) setLegalHoldPreparer(legalHold bool, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "legalhold") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold)) + return req, nil +} + +// setLegalHoldResponder handles the response to the SetLegalHold request. +func (client blobClient) setLegalHoldResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetLegalHoldResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata the Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more +// name-value pairs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. 
See Naming and Referencing +// Containers, Blobs, and Metadata for more information. leaseID is if specified, the operation only succeeds if the +// resource's lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to +// encrypt the data provided in the request. If not specified, encryption is performed with the root account encryption +// key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 +// hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. +// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is +// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version +// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the +// request. If not specified, encryption is performed with the default account encryption scope. For more information, +// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. +func (client blobClient) SetMetadata(ctx context.Context, timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*BlobSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, metadata, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. 
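+// Each metadata key is emitted as an x-ms-meta-<name> request header below,
+// so keys must be usable as HTTP header names.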
+func (client blobClient) setMetadataPreparer(timeout *int32, metadata map[string]string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. +func (client blobClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetMetadataResponse{rawResponse: resp.Response()}, err +} + +// SetTags the Set Tags operation enables users to set tags on a blob. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. versionID is the version id parameter is an opaque DateTime value that, +// when present, specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. +// transactionalContentMD5 is specify the transactional md5 for the body, to be validated by the service. +// transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated by the service. requestID +// is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when +// storage analytics logging is enabled. ifTags is specify a SQL where clause on blob tags to operate only on blobs +// with a matching value. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. 
tags is blob tags +func (client blobClient) SetTags(ctx context.Context, timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, leaseID *string, tags *BlobTags) (*BlobSetTagsResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setTagsPreparer(timeout, versionID, transactionalContentMD5, transactionalContentCrc64, requestID, ifTags, leaseID, tags) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTagsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetTagsResponse), err +} + +// setTagsPreparer prepares the SetTags request. +func (client blobClient) setTagsPreparer(timeout *int32, versionID *string, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, requestID *string, ifTags *string, leaseID *string, tags *BlobTags) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + params.Set("comp", "tags") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + b, err := xml.Marshal(tags) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setTagsResponder handles the response to the SetTags request. +func (client blobClient) setTagsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusNoContent) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetTagsResponse{rawResponse: resp.Response()}, err +} + +// SetTier the Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A premium page blob's tier +// determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// +// tier is indicates the tier to be set on the blob. snapshot is the snapshot parameter is an opaque DateTime value +// that, when present, specifies the blob snapshot to retrieve. 
For more information on working with blob snapshots, +// see Creating +// a Snapshot of a Blob. versionID is the version id parameter is an opaque DateTime value that, when present, +// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rehydratePriority is optional: Indicates the priority with which to +// rehydrate an archived blob. requestID is provides a client-generated, opaque value with a 1 KB character limit that +// is recorded in the analytics logs when storage analytics logging is enabled. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. ifTags is specify a SQL where clause on blob +// tags to operate only on blobs with a matching value. +func (client blobClient) SetTier(ctx context.Context, tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string, ifTags *string) (*BlobSetTierResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setTierPreparer(tier, snapshot, versionID, timeout, rehydratePriority, requestID, leaseID, ifTags) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setTierResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobSetTierResponse), err +} + +// setTierPreparer prepares the SetTier request. +func (client blobClient) setTierPreparer(tier AccessTierType, snapshot *string, versionID *string, timeout *int32, rehydratePriority RehydratePriorityType, requestID *string, leaseID *string, ifTags *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if versionID != nil && len(*versionID) > 0 { + params.Set("versionid", *versionID) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "tier") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-access-tier", string(tier)) + if rehydratePriority != RehydratePriorityNone { + req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + return req, nil +} + +// setTierResponder handles the response to the SetTier request. 
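+// The service replies 200 when the new tier takes effect immediately and 202
+// when a rehydration from the archive tier is pending, hence both statuses
+// are treated as success below.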
+func (client blobClient) setTierResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobSetTierResponse{rawResponse: resp.Response()}, err +} + +// StartCopyFromURL the Start Copy From URL operation copies a blob or an internet resource to a new blob. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. tier is optional. Indicates the tier to be set on the blob. +// rehydratePriority is optional: Indicates the priority with which to rehydrate an archived blob. +// sourceIfModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not +// been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a +// matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// sourceIfTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. leaseID is if specified, the operation +// only succeeds if the resource's lease is active and matches this ID. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. blobTagsString is optional. Used to set blob tags in various blob operations. sealBlob is overrides the +// sealed state of the destination blob. Service version 2019-12-12 and newer. immutabilityPolicyExpiry is specifies +// the date time when the blobs immutability policy is set to expire. immutabilityPolicyMode is specifies the +// immutability policy mode to set on the blob. legalHold is specified if a legal hold should be set on the blob. 
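+//
+// A minimal usage sketch, copying from a public source URL with every
+// optional condition left at its "not set" value (AccessTierNone,
+// RehydratePriorityNone and BlobImmutabilityPolicyModeNone, as used by the
+// preparer below):
+//
+//   src := "https://account.blob.core.windows.net/container/source-blob"
+//   _, err := client.StartCopyFromURL(ctx, src, nil, nil, AccessTierNone, RehydratePriorityNone,
+//       nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
+//       nil, BlobImmutabilityPolicyModeNone, nil)
+//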
+func (client blobClient) StartCopyFromURL(ctx context.Context, copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlobStartCopyFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.startCopyFromURLPreparer(copySource, timeout, metadata, tier, rehydratePriority, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, leaseID, requestID, blobTagsString, sealBlob, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.startCopyFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobStartCopyFromURLResponse), err +} + +// startCopyFromURLPreparer prepares the StartCopyFromURL request. +func (client blobClient) startCopyFromURLPreparer(copySource string, timeout *int32, metadata map[string]string, tier AccessTierType, rehydratePriority RehydratePriorityType, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, leaseID *string, requestID *string, blobTagsString *string, sealBlob *bool, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if rehydratePriority != RehydratePriorityNone { + req.Header.Set("x-ms-rehydrate-priority", string(rehydratePriority)) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if sourceIfTags != nil { + req.Header.Set("x-ms-source-if-tags", *sourceIfTags) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", 
(*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-copy-source", copySource) + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if sealBlob != nil { + req.Header.Set("x-ms-seal-blob", strconv.FormatBool(*sealBlob)) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + return req, nil +} + +// startCopyFromURLResponder handles the response to the StartCopyFromURL request. +func (client blobClient) startCopyFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobStartCopyFromURLResponse{rawResponse: resp.Response()}, err +} + +// Undelete undelete a blob that was previously soft deleted +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client blobClient) Undelete(ctx context.Context, timeout *int32, requestID *string) (*BlobUndeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.undeletePreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.undeleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlobUndeleteResponse), err +} + +// undeletePreparer prepares the Undelete request. +func (client blobClient) undeletePreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "undelete") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// undeleteResponder handles the response to the Undelete request. 
+func (client blobClient) undeleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlobUndeleteResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go new file mode 100644 index 00000000000..31067ed64e5 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_block_blob.go @@ -0,0 +1,848 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// blockBlobClient is the client for the BlockBlob methods of the Azblob service. +type blockBlobClient struct { + managementClient +} + +// newBlockBlobClient creates an instance of the blockBlobClient client. +func newBlockBlobClient(url url.URL, p pipeline.Pipeline) blockBlobClient { + return blockBlobClient{newManagementClient(url, p)} +} + +// CommitBlockList the Commit Block List operation writes a blob by specifying the list of block IDs that make up the +// blob. In order to be written as part of a blob, a block must have been successfully written to the server in a prior +// Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that have changed, +// then committing the new and existing blocks together. You can do this by specifying whether to commit a block from +// the committed block list or from the uncommitted block list, or to commit the most recently uploaded version of the +// block, whichever list it may belong to. +// +// blocks is blob Blocks. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. blobCacheControl is optional. Sets the blob's cache control. If specified, +// this property is stored with the blob and returned with a read request. blobContentType is optional. Sets the blob's +// content type. If specified, this property is stored with the blob and returned with a read request. +// blobContentEncoding is optional. Sets the blob's content encoding. If specified, this property is stored with the +// blob and returned with a read request. blobContentLanguage is optional. Set the blob's content language. If +// specified, this property is stored with the blob and returned with a read request. blobContentMD5 is optional. An +// MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks were +// validated when each was uploaded. transactionalContentMD5 is specify the transactional md5 for the body, to be +// validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to be validated +// by the service. metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no +// name-value pairs are specified, the operation will copy the metadata from the source blob or file to the destination +// blob. 
If one or more name-value pairs are specified, the destination blob is created with the specified metadata, +// and metadata is not copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names +// must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for +// more information. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches +// this ID. blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. +// Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the root account encryption key. For more information, see Encryption at Rest for Azure Storage +// Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the +// x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key +// hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is +// provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to +// use to encrypt the data provided in the request. If not specified, encryption is performed with the default account +// encryption scope. For more information, see Encryption at Rest for Azure Storage Services. tier is optional. +// Indicates the tier to be set on the blob. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob +// operations. immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. +// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a +// legal hold should be set on the blob. 
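+//
+// A minimal usage sketch, committing two previously staged blocks; the block
+// IDs must be the same base64-encoded strings used in the corresponding Put
+// Block calls, and the "not set" sentinels mirror those checked by the
+// preparer below:
+//
+//   blocks := BlockLookupList{Latest: []string{"QUFBQQ==", "QUFBQg=="}}
+//   _, err := client.CommitBlockList(ctx, blocks, nil, nil, nil, nil, nil, nil, nil, nil,
+//       nil, nil, nil, nil, nil, EncryptionAlgorithmNone, nil, AccessTierNone,
+//       nil, nil, nil, nil, nil, nil, nil, nil, BlobImmutabilityPolicyModeNone, nil)
+//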
+func (client blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlockBlobCommitBlockListResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.commitBlockListPreparer(blocks, timeout, blobCacheControl, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, transactionalContentMD5, transactionalContentCrc64, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.commitBlockListResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobCommitBlockListResponse), err +} + +// commitBlockListPreparer prepares the CommitBlockList request. 
+func (client blockBlobClient) commitBlockListPreparer(blocks BlockLookupList, timeout *int32, blobCacheControl *string, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "blocklist") + req.URL.RawQuery = params.Encode() + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", 
(*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + b, err := xml.Marshal(blocks) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// commitBlockListResponder handles the response to the CommitBlockList request. +func (client blockBlobClient) commitBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobCommitBlockListResponse{rawResponse: resp.Response()}, err +} + +// GetBlockList the Get Block List operation retrieves the list of blocks that have been uploaded as part of a block +// blob +// +// listType is specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists +// together. snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob +// snapshot to retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifTags is specify a SQL where clause on blob tags to operate only on blobs with +// a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is +// recorded in the analytics logs when storage analytics logging is enabled. +func (client blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (*BlockList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getBlockListPreparer(listType, snapshot, timeout, leaseID, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getBlockListResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockList), err +} + +// getBlockListPreparer prepares the GetBlockList request. 
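+// The listType argument becomes the blocklisttype query parameter; the
+// BlockListType constants defined elsewhere in this package select the
+// committed list, the uncommitted list, or both.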
+func (client blockBlobClient) getBlockListPreparer(listType BlockListType, snapshot *string, timeout *int32, leaseID *string, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + params.Set("blocklisttype", string(listType)) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "blocklist") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getBlockListResponder handles the response to the GetBlockList request. +func (client blockBlobClient) getBlockListResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &BlockList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// PutBlobFromURL the Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not supported with +// Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put Block from URL API in conjunction with +// Put Block List. +// +// contentLength is the length of the request. copySource is specifies the name of the source page blob snapshot. This +// value is a URL of up to 2 KB in length that specifies a page blob snapshot. The value should be URL-encoded as it +// would appear in a request URI. The source blob must either be public or must be authenticated via a shared access +// signature. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property +// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content +// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage +// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with +// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, +// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets +// the blob's cache control. 
If specified, this property is stored with the blob and returned with a read request. +// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are +// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more +// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not +// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the +// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. +// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies +// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed +// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. +// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key +// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the +// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is +// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the default account encryption scope. For +// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set +// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since +// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. sourceIfModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. +// sourceIfUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified since the +// specified date/time. sourceIfMatch is specify an ETag value to operate only on blobs with a matching value. +// sourceIfNoneMatch is specify an ETag value to operate only on blobs without a matching value. sourceIfTags is +// specify a SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. sourceContentMD5 is specify the md5 calculated for the range of bytes that must be +// read from the copy source. blobTagsString is optional. Used to set blob tags in various blob operations. +// copySourceBlobProperties is optional, default is true. Indicates if properties from the source blob should be +// copied. copySourceAuthorization is only Bearer type is supported. 
Credentials should be a valid OAuth access token +// to copy source. +func (client blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, copySourceBlobProperties *bool, copySourceAuthorization *string) (*BlockBlobPutBlobFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.putBlobFromURLPreparer(contentLength, copySource, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, sourceIfTags, requestID, sourceContentMD5, blobTagsString, copySourceBlobProperties, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.putBlobFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobPutBlobFromURLResponse), err +} + +// putBlobFromURLPreparer prepares the PutBlobFromURL request. 
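As a sketch of the call shape (not vendored code): the generated method takes one positional argument per optional header, so an unconditional copy passes nil or the None enum value for everything except the source URL. contentLength is 0 because the request itself carries no body; the service reads the content from srcURL instead:

func examplePutBlobFromURL(ctx context.Context, client blockBlobClient, srcURL string) error {
	_, err := client.PutBlobFromURL(ctx, 0, srcURL,
		nil, nil, // timeout, transactionalContentMD5
		nil, nil, nil, nil, nil, // blob content-type/encoding/language/MD5/cache-control
		nil, nil, nil, // metadata, leaseID, blobContentDisposition
		nil, nil, EncryptionAlgorithmNone, nil, // customer-provided key and scope
		AccessTierNone,
		nil, nil, nil, nil, nil, // destination if* conditions
		nil, nil, nil, nil, nil, // source if* conditions
		nil, nil, nil, // requestID, sourceContentMD5, blobTagsString
		nil, nil) // copySourceBlobProperties, copySourceAuthorization
	return err
}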
+func (client blockBlobClient) putBlobFromURLPreparer(contentLength int64, copySource string, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, sourceIfTags *string, requestID *string, sourceContentMD5 []byte, blobTagsString *string, copySourceBlobProperties *bool, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + 
req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + if sourceIfTags != nil { + req.Header.Set("x-ms-source-if-tags", *sourceIfTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + req.Header.Set("x-ms-copy-source", copySource) + if copySourceBlobProperties != nil { + req.Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*copySourceBlobProperties)) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + req.Header.Set("x-ms-blob-type", "BlockBlob") + return req, nil +} + +// putBlobFromURLResponder handles the response to the PutBlobFromURL request. +func (client blockBlobClient) putBlobFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobPutBlobFromURLResponse{rawResponse: resp.Response()}, err +} + +// StageBlock the Stage Block operation creates a new block to be committed as part of a blob +// +// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or +// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the +// same size for each block. contentLength is the length of the request. body is initial data body will be closed upon +// successful return. Callers should ensure closure when receiving an error.transactionalContentMD5 is specify the +// transactional md5 for the body, to be validated by the service. transactionalContentCrc64 is specify the +// transactional crc64 for the body, to be validated by the service. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
+func (client blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (*BlockBlobStageBlockResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.stageBlockPreparer(blockID, contentLength, body, transactionalContentMD5, transactionalContentCrc64, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobStageBlockResponse), err +} + +// stageBlockPreparer prepares the StageBlock request. +func (client blockBlobClient) stageBlockPreparer(blockID string, contentLength int64, body io.ReadSeeker, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("blockid", blockID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "block") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// stageBlockResponder handles the response to the StageBlock request. 
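The doc comment above requires all block IDs in one blob to be Base64 strings of identical encoded length. A common way to meet that (a sketch, not vendored code; assumes encoding/binary alongside the file's existing imports) is to Base64-encode a fixed-width counter:

func exampleStageBlock(ctx context.Context, client blockBlobClient, index uint32, chunk []byte) error {
	// A fixed 4-byte ID encodes to the same Base64 length for every block.
	raw := make([]byte, 4)
	binary.BigEndian.PutUint32(raw, index)
	blockID := base64.StdEncoding.EncodeToString(raw)

	_, err := client.StageBlock(ctx, blockID, int64(len(chunk)), bytes.NewReader(chunk),
		nil, nil, // transactional MD5 / CRC64
		nil, nil, // timeout, leaseID
		nil, nil, EncryptionAlgorithmNone, nil, // customer-provided key options
		nil) // requestID
	return err
}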
+func (client blockBlobClient) stageBlockResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobStageBlockResponse{rawResponse: resp.Response()}, err +} + +// StageBlockFromURL the Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// +// blockID is a valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or +// equal to 64 bytes in size. For a given blob, the length of the value specified for the blockid parameter must be the +// same size for each block. contentLength is the length of the request. sourceURL is specify a URL to the copy source. +// sourceRange is bytes of source data in the specified range. sourceContentMD5 is specify the md5 calculated for the +// range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated for the +// range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in seconds. For +// more information, see Setting +// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt +// the data provided in the request. If not specified, encryption is performed with the root account encryption key. +// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of +// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is +// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be +// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. +// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, +// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for +// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. sourceIfModifiedSince is specify this header value to operate only on a blob if it has been +// modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag value to operate +// only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded +// in the analytics logs when storage analytics logging is enabled. copySourceAuthorization is only Bearer type is +// supported. Credentials should be a valid OAuth access token to copy source. 
+func (client blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*BlockBlobStageBlockFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.stageBlockFromURLPreparer(blockID, contentLength, sourceURL, sourceRange, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.stageBlockFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobStageBlockFromURLResponse), err +} + +// stageBlockFromURLPreparer prepares the StageBlockFromURL request. +func (client blockBlobClient) stageBlockFromURLPreparer(blockID string, contentLength int64, sourceURL string, sourceRange *string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("blockid", blockID) + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "block") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("x-ms-copy-source", sourceURL) + if sourceRange != nil { + req.Header.Set("x-ms-source-range", *sourceRange) + } + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + 
req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + return req, nil +} + +// stageBlockFromURLResponder handles the response to the StageBlockFromURL request. +func (client blockBlobClient) stageBlockFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobStageBlockFromURLResponse{rawResponse: resp.Response()}, err +} + +// Upload the Upload Block Blob operation updates the content of an existing block blob. Updating an existing block +// blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; the content of +// the existing blob is overwritten with the content of the new blob. To perform a partial update of the content of a +// block blob, use the Put Block List operation. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. transactionalContentMD5 is specify the transactional md5 for the body, to +// be validated by the service. blobContentType is optional. Sets the blob's content type. If specified, this property +// is stored with the blob and returned with a read request. blobContentEncoding is optional. Sets the blob's content +// encoding. If specified, this property is stored with the blob and returned with a read request. blobContentLanguage +// is optional. Set the blob's content language. If specified, this property is stored with the blob and returned with +// a read request. blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, +// as the hashes for the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets +// the blob's cache control. If specified, this property is stored with the blob and returned with a read request. +// metadata is optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are +// specified, the operation will copy the metadata from the source blob or file to the destination blob. If one or more +// name-value pairs are specified, the destination blob is created with the specified metadata, and metadata is not +// copied from the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the +// naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and Metadata for more information. +// leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// blobContentDisposition is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. 
Specifies +// the encryption key to use to encrypt the data provided in the request. If not specified, encryption is performed +// with the root account encryption key. For more information, see Encryption at Rest for Azure Storage Services. +// encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key +// header is provided. encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the +// only accepted value is "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is +// optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data +// provided in the request. If not specified, encryption is performed with the default account encryption scope. For +// more information, see Encryption at Rest for Azure Storage Services. tier is optional. Indicates the tier to be set +// on the blob. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since +// the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations. +// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. +// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a +// legal hold should be set on the blob. 
+func (client blockBlobClient) Upload(ctx context.Context, body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*BlockBlobUploadResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPreparer(body, contentLength, timeout, transactionalContentMD5, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, tier, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadResponder}, req) + if err != nil { + return nil, err + } + return resp.(*BlockBlobUploadResponse), err +} + +// uploadPreparer prepares the Upload request. 
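A sketch of a single-shot upload through the generated method above (not vendored code; the zero values nil, EncryptionAlgorithmNone, AccessTierNone and BlobImmutabilityPolicyModeNone make the preparer below skip the corresponding headers):

func exampleUpload(ctx context.Context, client blockBlobClient, data []byte) error {
	contentType := "application/octet-stream"
	_, err := client.Upload(ctx, bytes.NewReader(data), int64(len(data)),
		nil, nil, // timeout, transactionalContentMD5
		&contentType, // blobContentType
		nil, nil, nil, nil, // content encoding/language/MD5/cache-control
		nil, nil, nil, // metadata, leaseID, blobContentDisposition
		nil, nil, EncryptionAlgorithmNone, nil, // customer-provided key and scope
		AccessTierNone,
		nil, nil, nil, nil, nil, // if* access conditions
		nil, nil, // requestID, blobTagsString
		nil, BlobImmutabilityPolicyModeNone, nil) // immutability policy, legal hold
	return err
}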
+func (client blockBlobClient) uploadPreparer(body io.ReadSeeker, contentLength int64, timeout *int32, transactionalContentMD5 []byte, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, tier AccessTierType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if tier != AccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) + } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + 
req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + req.Header.Set("x-ms-blob-type", "BlockBlob") + return req, nil +} + +// uploadResponder handles the response to the Upload request. +func (client blockBlobClient) uploadResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &BlockBlobUploadResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go new file mode 100644 index 00000000000..0db347e3459 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_client.go @@ -0,0 +1,38 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/azure-pipeline-go/pipeline" + "net/url" +) + +const ( + // ServiceVersion specifies the version of the operations used in this package. + ServiceVersion = "2020-10-02" +) + +// managementClient is the base client for Azblob. +type managementClient struct { + url url.URL + p pipeline.Pipeline +} + +// newManagementClient creates an instance of the managementClient client. +func newManagementClient(url url.URL, p pipeline.Pipeline) managementClient { + return managementClient{ + url: url, + p: p, + } +} + +// URL returns a copy of the URL for this client. +func (mc managementClient) URL() url.URL { + return mc.url +} + +// Pipeline returns the pipeline for this client. +func (mc managementClient) Pipeline() pipeline.Pipeline { + return mc.p +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go new file mode 100644 index 00000000000..2e2f176e548 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_container.go @@ -0,0 +1,1232 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// containerClient is the client for the Container methods of the Azblob service. +type containerClient struct { + managementClient +} + +// newContainerClient creates an instance of the containerClient client. +func newContainerClient(url url.URL, p pipeline.Pipeline) containerClient { + return containerClient{newManagementClient(url, p)} +} + +// AcquireLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. duration is specifies the duration of the lease, in seconds, or negative +// one (-1) for a lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease duration +// cannot be changed using renew or change. 
proposedLeaseID is proposed lease ID, in a GUID string format. The Blob +// service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid Constructor +// (String) for a list of valid GUID string formats. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client containerClient) AcquireLease(ctx context.Context, timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerAcquireLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.acquireLeasePreparer(timeout, duration, proposedLeaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.acquireLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerAcquireLeaseResponse), err +} + +// acquireLeasePreparer prepares the AcquireLease request. +func (client containerClient) acquireLeasePreparer(timeout *int32, duration *int32, proposedLeaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if duration != nil { + req.Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*duration), 10)) + } + if proposedLeaseID != nil { + req.Header.Set("x-ms-proposed-lease-id", *proposedLeaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "acquire") + return req, nil +} + +// acquireLeaseResponder handles the response to the AcquireLease request. +func (client containerClient) acquireLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerAcquireLeaseResponse{rawResponse: resp.Response()}, err +} + +// BreakLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// timeout is the timeout parameter is expressed in seconds. 
For more information, see Setting +// Timeouts for Blob Service Operations. breakPeriod is for a break operation, proposed duration the lease should +// continue before it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter than the +// time remaining on the lease. If longer, the time remaining on the lease is used. A new lease will not be available +// before the break period has expired, but the lease may be held for longer than the break period. If this header does +// not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, and an +// infinite lease breaks immediately. ifModifiedSince is specify this header value to operate only on a blob if it has +// been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) BreakLease(ctx context.Context, timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerBreakLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.breakLeasePreparer(timeout, breakPeriod, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.breakLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerBreakLeaseResponse), err +} + +// breakLeasePreparer prepares the BreakLease request. +func (client containerClient) breakLeasePreparer(timeout *int32, breakPeriod *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if breakPeriod != nil { + req.Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*breakPeriod), 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "break") + return req, nil +} + +// breakLeaseResponder handles the response to the BreakLease request. 
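A sketch of the lease lifecycle on a container using the two generated methods above (not vendored code):

func exampleContainerLease(ctx context.Context, client containerClient) error {
	// Acquire a 30-second lease; valid durations are 15 to 60 seconds, or
	// -1 for a lease that never expires, per the doc comments above.
	duration := int32(30)
	if _, err := client.AcquireLease(ctx, nil, &duration, nil, nil, nil, nil); err != nil {
		return err
	}
	// A break period of 0 ends the lease immediately rather than letting
	// the remaining lease time run out.
	breakPeriod := int32(0)
	_, err := client.BreakLease(ctx, nil, &breakPeriod, nil, nil, nil)
	return err
}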
+func (client containerClient) breakLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerBreakLeaseResponse{rawResponse: resp.Response()}, err +} + +// ChangeLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. proposedLeaseID is proposed lease ID, in a GUID string +// format. The Blob service returns 400 (Invalid request) if the proposed lease ID is not in the correct format. See +// Guid Constructor (String) for a list of valid GUID string formats. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. +func (client containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerChangeLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.changeLeasePreparer(leaseID, proposedLeaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.changeLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerChangeLeaseResponse), err +} + +// changeLeasePreparer prepares the ChangeLease request. +func (client containerClient) changeLeasePreparer(leaseID string, proposedLeaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + req.Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "change") + return req, nil +} + +// changeLeaseResponder handles the response to the ChangeLease request. 
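A sketch of rotating a lease ID with ChangeLease (not vendored code; the GUID here is an arbitrary illustrative value):

func exampleChangeLease(ctx context.Context, client containerClient, currentLeaseID string) error {
	// The proposed ID must be a well-formed GUID string; otherwise the
	// service returns 400 (Invalid request), as the doc comment notes.
	proposed := "11111111-2222-3333-4444-555555555555"
	_, err := client.ChangeLease(ctx, currentLeaseID, proposed, nil, nil, nil, nil)
	return err
}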
+func (client containerClient) changeLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerChangeLeaseResponse{rawResponse: resp.Response()}, err +} + +// Create creates a new container under the specified account. If the container with the same name already exists, the +// operation fails +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. metadata is optional. Specifies a user-defined name-value pair associated +// with the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or +// file to the destination blob. If one or more name-value pairs are specified, the destination blob is created with +// the specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. access is specifies whether data in the container may be +// accessed publicly and the level of access requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// defaultEncryptionScope is optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on +// the container and use for all future writes. preventEncryptionScopeOverride is optional. Version 2019-07-07 and +// newer. If true, prevents any request from specifying a different encryption scope than the scope set on the +// container. +func (client containerClient) Create(ctx context.Context, timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (*ContainerCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(timeout, metadata, access, requestID, defaultEncryptionScope, preventEncryptionScopeOverride) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerCreateResponse), err +} + +// createPreparer prepares the Create request. 
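A sketch of creating a private container with one metadata pair (not vendored code; PublicAccessNone suppresses the x-ms-blob-public-access header, as the preparer below shows):

func exampleCreateContainer(ctx context.Context, client containerClient) error {
	// Each map entry becomes an x-ms-meta-<key> header on the request.
	metadata := map[string]string{"team": "storage"}
	_, err := client.Create(ctx, nil, metadata, PublicAccessNone, nil, nil, nil)
	return err
}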
+func (client containerClient) createPreparer(timeout *int32, metadata map[string]string, access PublicAccessType, requestID *string, defaultEncryptionScope *string, preventEncryptionScopeOverride *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if defaultEncryptionScope != nil { + req.Header.Set("x-ms-default-encryption-scope", *defaultEncryptionScope) + } + if preventEncryptionScopeOverride != nil { + req.Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*preventEncryptionScopeOverride)) + } + return req, nil +} + +// createResponder handles the response to the Create request. +func (client containerClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerCreateResponse{rawResponse: resp.Response()}, err +} + +// Delete operation marks the specified container for deletion. The container and any blobs contained within it are +// later deleted during garbage collection +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) Delete(ctx context.Context, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerDeleteResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.deletePreparer(timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.deleteResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerDeleteResponse), err +} + +// deletePreparer prepares the Delete request. 
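A sketch of deleting a leased container (not vendored code): the delete only succeeds when the supplied lease ID matches the active lease, per the doc comment above:

func exampleDeleteContainer(ctx context.Context, client containerClient, leaseID string) error {
	_, err := client.Delete(ctx, nil, &leaseID, nil, nil, nil)
	return err
}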
+func (client containerClient) deletePreparer(timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("DELETE", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// deleteResponder handles the response to the Delete request. +func (client containerClient) deleteResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerDeleteResponse{rawResponse: resp.Response()}, err +} + +// GetAccessPolicy gets the permissions for the specified container. The permissions indicate whether container data +// may be accessed publicly. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) GetAccessPolicy(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*SignedIdentifiers, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getAccessPolicyPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SignedIdentifiers), err +} + +// getAccessPolicyPreparer prepares the GetAccessPolicy request. 
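A sketch of reading the container's stored access policies (not vendored code); the XML body is unmarshaled into SignedIdentifiers by getAccessPolicyResponder below:

func exampleGetAccessPolicy(ctx context.Context, client containerClient) (*SignedIdentifiers, error) {
	// timeout, leaseID and requestID are optional and left nil here.
	return client.GetAccessPolicy(ctx, nil, nil, nil)
}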
+func (client containerClient) getAccessPolicyPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getAccessPolicyResponder handles the response to the GetAccessPolicy request. +func (client containerClient) getAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &SignedIdentifiers{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetAccountInfo returns the sku name and account kind +func (client containerClient) GetAccountInfo(ctx context.Context) (*ContainerGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. +func (client containerClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client containerClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. requestID is provides a client-generated, opaque value with a 1 KB character +// limit that is recorded in the analytics logs when storage analytics logging is enabled. 
+func (client containerClient) GetProperties(ctx context.Context, timeout *int32, leaseID *string, requestID *string) (*ContainerGetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, leaseID, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerGetPropertiesResponse), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client containerClient) getPropertiesPreparer(timeout *int32, leaseID *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. +func (client containerClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerGetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// ListBlobFlatSegment [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a +// string value that identifies the portion of the list of containers to be returned with the next listing operation. +// The operation returns the NextMarker value within the response body if the listing operation did not return all +// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the +// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the +// client. maxresults is specifies the maximum number of containers to return. If the request does not specify +// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the +// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the +// remainder of the results. For this reason, it is possible that the service will return fewer results than specified +// by maxresults, or than the default of 5000. include is include this parameter to specify one or more datasets to +// include in the response. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
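// Illustrative sketch (editor's annotation, not part of the vendored file): paging
// through a flat blob listing. It assumes ListBlobsFlatSegmentResponse exposes a
// NextMarker field of type Marker (Marker and its NotDone method are declared in
// zz_generated_models.go below); that is how the marker/NextMarker contract described
// above is surfaced.
//
//	marker := Marker{} // zero value: NotDone() is true, so the first page is fetched
//	maxresults := int32(1000)
//	for marker.NotDone() {
//		segment, err := c.ListBlobFlatSegment(ctx, nil, marker.Val, &maxresults,
//			[]ListBlobsIncludeItemType{ListBlobsIncludeItemMetadata}, nil, nil)
//		if err != nil {
//			return err
//		}
//		// ... consume segment ...
//		marker = segment.NextMarker // assumption: field name per the generated models
//	}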
+func (client containerClient) ListBlobFlatSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsFlatSegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listBlobFlatSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobFlatSegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListBlobsFlatSegmentResponse), err +} + +// listBlobFlatSegmentPreparer prepares the ListBlobFlatSegment request. +func (client containerClient) listBlobFlatSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listBlobFlatSegmentResponder handles the response to the ListBlobFlatSegment request. +func (client containerClient) listBlobFlatSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListBlobsFlatSegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ListBlobHierarchySegment [Update] The List Blobs operation returns a list of the blobs under the specified container +// +// delimiter is when the request includes this parameter, the operation returns a BlobPrefix element in the response +// body that acts as a placeholder for all blobs whose names begin with the same substring up to the appearance of the +// delimiter character. The delimiter may be a single character or a string. prefix is filters the results to return +// only containers whose name begins with the specified prefix. 
marker is a string value that identifies the portion of +// the list of containers to be returned with the next listing operation. The operation returns the NextMarker value +// within the response body if the listing operation did not return all containers remaining to be listed with the +// current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request +// the next page of list items. The marker value is opaque to the client. maxresults is specifies the maximum number of +// containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server +// will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will +// return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the +// service will return fewer results than specified by maxresults, or than the default of 5000. include is include this +// parameter to specify one or more datasets to include in the response. timeout is the timeout parameter is expressed +// in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) ListBlobHierarchySegment(ctx context.Context, delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (*ListBlobsHierarchySegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listBlobHierarchySegmentPreparer(delimiter, prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listBlobHierarchySegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListBlobsHierarchySegmentResponse), err +} + +// listBlobHierarchySegmentPreparer prepares the ListBlobHierarchySegment request. 
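// Illustrative sketch (editor's annotation, not part of the vendored file): listing a
// single "directory level" by passing "/" as the delimiter, so blobs sharing a prefix
// up to the next "/" collapse into BlobPrefix placeholders as described above. The
// prefix value is hypothetical.
//
//	prefix := "logs/2024/"
//	segment, err := c.ListBlobHierarchySegment(ctx, "/", &prefix, nil, nil, nil, nil, nil)
//	if err != nil {
//		return err
//	}
//	// segment holds the blobs directly under logs/2024/ plus BlobPrefix entries for
//	// the "subdirectories" beneath it.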
+func (client containerClient) listBlobHierarchySegmentPreparer(delimiter string, prefix *string, marker *string, maxresults *int32, include []ListBlobsIncludeItemType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + params.Set("delimiter", delimiter) + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listBlobHierarchySegmentResponder handles the response to the ListBlobHierarchySegment request. +func (client containerClient) listBlobHierarchySegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListBlobsHierarchySegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ReleaseLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be +// 15 to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
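// Illustrative sketch (editor's annotation, not part of the vendored file): releasing
// a lease previously acquired on the container; the lease ID is a hypothetical
// placeholder. All optional parameters are left nil.
//
//	_, err := c.ReleaseLease(ctx, "00000000-0000-0000-0000-000000000000", nil, nil, nil, nil)
//	if err != nil {
//		return err
//	}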
+func (client containerClient) ReleaseLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerReleaseLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.releaseLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.releaseLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerReleaseLeaseResponse), err +} + +// releaseLeasePreparer prepares the ReleaseLease request. +func (client containerClient) releaseLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "release") + return req, nil +} + +// releaseLeaseResponder handles the response to the ReleaseLease request. +func (client containerClient) releaseLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerReleaseLeaseResponse{rawResponse: resp.Response()}, err +} + +// Rename renames an existing container. +// +// sourceContainerName is required. Specifies the name of the container to rename. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. sourceLeaseID is a +// lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. 
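// Illustrative sketch (editor's annotation, not part of the vendored file): renaming a
// container. Note the shape visible in renamePreparer below: client.url addresses the
// renamed container, while the existing container travels in the
// x-ms-source-container-name header.
//
//	_, err := c.Rename(ctx, "old-container-name", nil, nil, nil)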
+func (client containerClient) Rename(ctx context.Context, sourceContainerName string, timeout *int32, requestID *string, sourceLeaseID *string) (*ContainerRenameResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renamePreparer(sourceContainerName, timeout, requestID, sourceLeaseID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renameResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRenameResponse), err +} + +// renamePreparer prepares the Rename request. +func (client containerClient) renamePreparer(sourceContainerName string, timeout *int32, requestID *string, sourceLeaseID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "rename") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-source-container-name", sourceContainerName) + if sourceLeaseID != nil { + req.Header.Set("x-ms-source-lease-id", *sourceLeaseID) + } + return req, nil +} + +// renameResponder handles the response to the Rename request. +func (client containerClient) renameResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRenameResponse{rawResponse: resp.Response()}, err +} + +// RenewLease [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// +// leaseID is specifies the current lease ID on the resource. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. 
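// Illustrative sketch (editor's annotation, not part of the vendored file): renewing an
// active lease; the call shape mirrors ReleaseLease above, except that
// renewLeasePreparer below sets x-ms-lease-action to "renew" rather than "release".
//
//	_, err := c.RenewLease(ctx, "00000000-0000-0000-0000-000000000000", nil, nil, nil, nil)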
+func (client containerClient) RenewLease(ctx context.Context, leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerRenewLeaseResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.renewLeasePreparer(leaseID, timeout, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.renewLeaseResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRenewLeaseResponse), err +} + +// renewLeasePreparer prepares the RenewLease request. +func (client containerClient) renewLeasePreparer(leaseID string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "lease") + params.Set("restype", "container") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-lease-id", leaseID) + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-lease-action", "renew") + return req, nil +} + +// renewLeaseResponder handles the response to the RenewLease request. +func (client containerClient) renewLeaseResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRenewLeaseResponse{rawResponse: resp.Response()}, err +} + +// Restore restores a previously-deleted container. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +// deletedContainerName is optional. Version 2019-12-12 and later. Specifies the name of the deleted container to +// restore. deletedContainerVersion is optional. Version 2019-12-12 and later. Specifies the version of the deleted +// container to restore. 
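// Illustrative sketch (editor's annotation, not part of the vendored file): restoring a
// soft-deleted container. Both values are hypothetical; the version string is the token
// the service reported for the deleted container, sent via the
// x-ms-deleted-container-version header set in restorePreparer below.
//
//	name := "my-deleted-container"
//	version := "01D60F8BB59A4652" // hypothetical version token
//	_, err := c.Restore(ctx, nil, nil, &name, &version)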
+func (client containerClient) Restore(ctx context.Context, timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (*ContainerRestoreResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.restorePreparer(timeout, requestID, deletedContainerName, deletedContainerVersion) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.restoreResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerRestoreResponse), err +} + +// restorePreparer prepares the Restore request. +func (client containerClient) restorePreparer(timeout *int32, requestID *string, deletedContainerName *string, deletedContainerVersion *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "undelete") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if deletedContainerName != nil { + req.Header.Set("x-ms-deleted-container-name", *deletedContainerName) + } + if deletedContainerVersion != nil { + req.Header.Set("x-ms-deleted-container-version", *deletedContainerVersion) + } + return req, nil +} + +// restoreResponder handles the response to the Restore request. +func (client containerClient) restoreResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerRestoreResponse{rawResponse: resp.Response()}, err +} + +// SetAccessPolicy sets the permissions for the specified container. The permissions indicate whether blobs in a +// container may be accessed publicly. +// +// containerACL is the acls for the container timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. access is specifies whether data in the container may be accessed publicly and +// the level of access ifModifiedSince is specify this header value to operate only on a blob if it has been modified +// since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has +// not been modified since the specified date/time. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. 
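// Illustrative sketch (editor's annotation, not part of the vendored file): enabling
// anonymous public read access for blobs only, with no stored access policies. Passing
// a nil ACL still serializes an empty SignedIdentifiers document as the request body,
// as setAccessPolicyPreparer below shows.
//
//	_, err := c.SetAccessPolicy(ctx, nil, nil, nil, PublicAccessBlob, nil, nil, nil)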
+func (client containerClient) SetAccessPolicy(ctx context.Context, containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (*ContainerSetAccessPolicyResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setAccessPolicyPreparer(containerACL, timeout, leaseID, access, ifModifiedSince, ifUnmodifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setAccessPolicyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetAccessPolicyResponse), err +} + +// setAccessPolicyPreparer prepares the SetAccessPolicy request. +func (client containerClient) setAccessPolicyPreparer(containerACL []SignedIdentifier, timeout *int32, leaseID *string, access PublicAccessType, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "acl") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if access != PublicAccessNone { + req.Header.Set("x-ms-blob-public-access", string(access)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(SignedIdentifiers{Items: containerACL}) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setAccessPolicyResponder handles the response to the SetAccessPolicy request. +func (client containerClient) setAccessPolicyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetAccessPolicyResponse{rawResponse: resp.Response()}, err +} + +// SetMetadata operation sets one or more user-defined name-value pairs for the specified container. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. metadata is optional. Specifies a user-defined name-value pair associated with +// the blob. If no name-value pairs are specified, the operation will copy the metadata from the source blob or file to +// the destination blob. 
If one or more name-value pairs are specified, the destination blob is created with the +// specified metadata, and metadata is not copied from the source blob or file. Note that beginning with version +// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing +// Containers, Blobs, and Metadata for more information. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. requestID is provides a client-generated, opaque +// value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) SetMetadata(ctx context.Context, timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (*ContainerSetMetadataResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setMetadataPreparer(timeout, leaseID, metadata, ifModifiedSince, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setMetadataResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ContainerSetMetadataResponse), err +} + +// setMetadataPreparer prepares the SetMetadata request. +func (client containerClient) setMetadataPreparer(timeout *int32, leaseID *string, metadata map[string]string, ifModifiedSince *time.Time, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "metadata") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// setMetadataResponder handles the response to the SetMetadata request. +func (client containerClient) setMetadataResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ContainerSetMetadataResponse{rawResponse: resp.Response()}, err +} + +// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be +// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. 
requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client containerClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SubmitBatchResponse), err +} + +// submitBatchPreparer prepares the SubmitBatch request. +func (client containerClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("POST", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "container") + params.Set("comp", "batch") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("Content-Type", multipartContentType) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// submitBatchResponder handles the response to the SubmitBatch request. +func (client containerClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + return &SubmitBatchResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go new file mode 100644 index 00000000000..ec872a3225a --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_models.go @@ -0,0 +1,7302 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "encoding/base64" + "encoding/xml" + "errors" + "io" + "net/http" + "reflect" + "strconv" + "strings" + "time" + "unsafe" +) + +// ETag is an entity tag. +type ETag string + +const ( + // ETagNone represents an empty entity tag. + ETagNone ETag = "" + + // ETagAny matches any entity tag. + ETagAny ETag = "*" +) + +// Metadata contains metadata key/value pairs. +type Metadata map[string]string + +const mdPrefix = "x-ms-meta-" + +const mdPrefixLen = len(mdPrefix) + +// UnmarshalXML implements the xml.Unmarshaler interface for Metadata. 
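// Illustrative sketch (editor's annotation, not part of the vendored file): how the
// x-ms-meta- convention plays out on the wire. Keys are sent by prefixing them with
// mdPrefix (see setMetadataPreparer in the container client) and can be recovered from
// response headers by stripping the prefix again; the helper below is hypothetical and
// not part of this file.
//
//	func metadataFromHeaders(h http.Header) Metadata {
//		md := Metadata{}
//		for k, v := range h {
//			// canonical header keys look like "X-Ms-Meta-Name", so compare case-insensitively
//			if len(k) > mdPrefixLen && strings.EqualFold(k[:mdPrefixLen], mdPrefix) && len(v) > 0 {
//				md[strings.ToLower(k[mdPrefixLen:])] = v[0]
//			}
//		}
//		return md
//	}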
+func (md *Metadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if *md == nil { + *md = Metadata{} + } + (*md)[tokName] = string(tt) + break + } + } + return nil +} + +// Marker represents an opaque value used in paged responses. +type Marker struct { + Val *string +} + +// NotDone returns true if the list enumeration should be started or is not yet complete. Specifically, NotDone returns true +// for a just-initialized (zero value) Marker indicating that you should make an initial request to get a result portion from +// the service. NotDone also returns true whenever the service returns an interim result portion. NotDone returns false only +// after the service has returned the final result portion. +func (m Marker) NotDone() bool { + return m.Val == nil || *m.Val != "" +} + +// UnmarshalXML implements the xml.Unmarshaler interface for Marker. +func (m *Marker) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var out string + err := d.DecodeElement(&out, &start) + m.Val = &out + return err +} + +// concatenates a slice of const values with the specified separator between each item +func joinConst(s interface{}, sep string) string { + v := reflect.ValueOf(s) + if v.Kind() != reflect.Slice && v.Kind() != reflect.Array { + panic("s wasn't a slice or array") + } + ss := make([]string, 0, v.Len()) + for i := 0; i < v.Len(); i++ { + ss = append(ss, v.Index(i).String()) + } + return strings.Join(ss, sep) +} + +func validateError(err error) { + if err != nil { + panic(err) + } +} + +// AccessTierType enumerates the values for access tier type. +type AccessTierType string + +const ( + // AccessTierArchive ... + AccessTierArchive AccessTierType = "Archive" + // AccessTierCool ... + AccessTierCool AccessTierType = "Cool" + // AccessTierHot ... + AccessTierHot AccessTierType = "Hot" + // AccessTierNone represents an empty AccessTierType. + AccessTierNone AccessTierType = "" + // AccessTierP10 ... + AccessTierP10 AccessTierType = "P10" + // AccessTierP15 ... + AccessTierP15 AccessTierType = "P15" + // AccessTierP20 ... + AccessTierP20 AccessTierType = "P20" + // AccessTierP30 ... + AccessTierP30 AccessTierType = "P30" + // AccessTierP4 ... + AccessTierP4 AccessTierType = "P4" + // AccessTierP40 ... + AccessTierP40 AccessTierType = "P40" + // AccessTierP50 ... + AccessTierP50 AccessTierType = "P50" + // AccessTierP6 ... + AccessTierP6 AccessTierType = "P6" + // AccessTierP60 ... + AccessTierP60 AccessTierType = "P60" + // AccessTierP70 ... + AccessTierP70 AccessTierType = "P70" + // AccessTierP80 ... + AccessTierP80 AccessTierType = "P80" +) + +// PossibleAccessTierTypeValues returns an array of possible values for the AccessTierType const type. +func PossibleAccessTierTypeValues() []AccessTierType { + return []AccessTierType{AccessTierArchive, AccessTierCool, AccessTierHot, AccessTierNone, AccessTierP10, AccessTierP15, AccessTierP20, AccessTierP30, AccessTierP4, AccessTierP40, AccessTierP50, AccessTierP6, AccessTierP60, AccessTierP70, AccessTierP80} +} + +// AccountKindType enumerates the values for account kind type. +type AccountKindType string + +const ( + // AccountKindBlobStorage ... + AccountKindBlobStorage AccountKindType = "BlobStorage" + // AccountKindBlockBlobStorage ... 
+ AccountKindBlockBlobStorage AccountKindType = "BlockBlobStorage" + // AccountKindFileStorage ... + AccountKindFileStorage AccountKindType = "FileStorage" + // AccountKindNone represents an empty AccountKindType. + AccountKindNone AccountKindType = "" + // AccountKindStorage ... + AccountKindStorage AccountKindType = "Storage" + // AccountKindStorageV2 ... + AccountKindStorageV2 AccountKindType = "StorageV2" +) + +// PossibleAccountKindTypeValues returns an array of possible values for the AccountKindType const type. +func PossibleAccountKindTypeValues() []AccountKindType { + return []AccountKindType{AccountKindBlobStorage, AccountKindBlockBlobStorage, AccountKindFileStorage, AccountKindNone, AccountKindStorage, AccountKindStorageV2} +} + +// ArchiveStatusType enumerates the values for archive status type. +type ArchiveStatusType string + +const ( + // ArchiveStatusNone represents an empty ArchiveStatusType. + ArchiveStatusNone ArchiveStatusType = "" + // ArchiveStatusRehydratePendingToCool ... + ArchiveStatusRehydratePendingToCool ArchiveStatusType = "rehydrate-pending-to-cool" + // ArchiveStatusRehydratePendingToHot ... + ArchiveStatusRehydratePendingToHot ArchiveStatusType = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusTypeValues returns an array of possible values for the ArchiveStatusType const type. +func PossibleArchiveStatusTypeValues() []ArchiveStatusType { + return []ArchiveStatusType{ArchiveStatusNone, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot} +} + +// BlobDeleteType enumerates the values for blob delete type. +type BlobDeleteType string + +const ( + // BlobDeleteNone represents an empty BlobDeleteType. + BlobDeleteNone BlobDeleteType = "" + // BlobDeletePermanent ... + BlobDeletePermanent BlobDeleteType = "Permanent" +) + +// PossibleBlobDeleteTypeValues returns an array of possible values for the BlobDeleteType const type. +func PossibleBlobDeleteTypeValues() []BlobDeleteType { + return []BlobDeleteType{BlobDeleteNone, BlobDeletePermanent} +} + +// BlobExpiryOptionsType enumerates the values for blob expiry options type. +type BlobExpiryOptionsType string + +const ( + // BlobExpiryOptionsAbsolute ... + BlobExpiryOptionsAbsolute BlobExpiryOptionsType = "Absolute" + // BlobExpiryOptionsNeverExpire ... + BlobExpiryOptionsNeverExpire BlobExpiryOptionsType = "NeverExpire" + // BlobExpiryOptionsNone represents an empty BlobExpiryOptionsType. + BlobExpiryOptionsNone BlobExpiryOptionsType = "" + // BlobExpiryOptionsRelativeToCreation ... + BlobExpiryOptionsRelativeToCreation BlobExpiryOptionsType = "RelativeToCreation" + // BlobExpiryOptionsRelativeToNow ... + BlobExpiryOptionsRelativeToNow BlobExpiryOptionsType = "RelativeToNow" +) + +// PossibleBlobExpiryOptionsTypeValues returns an array of possible values for the BlobExpiryOptionsType const type. +func PossibleBlobExpiryOptionsTypeValues() []BlobExpiryOptionsType { + return []BlobExpiryOptionsType{BlobExpiryOptionsAbsolute, BlobExpiryOptionsNeverExpire, BlobExpiryOptionsNone, BlobExpiryOptionsRelativeToCreation, BlobExpiryOptionsRelativeToNow} +} + +// BlobImmutabilityPolicyModeType enumerates the values for blob immutability policy mode type. +type BlobImmutabilityPolicyModeType string + +const ( + // BlobImmutabilityPolicyModeLocked ... + BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyModeType = "locked" + // BlobImmutabilityPolicyModeMutable ... 
+ BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyModeType = "mutable" + // BlobImmutabilityPolicyModeNone represents an empty BlobImmutabilityPolicyModeType. + BlobImmutabilityPolicyModeNone BlobImmutabilityPolicyModeType = "" + // BlobImmutabilityPolicyModeUnlocked ... + BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyModeType = "unlocked" +) + +// PossibleBlobImmutabilityPolicyModeTypeValues returns an array of possible values for the BlobImmutabilityPolicyModeType const type. +func PossibleBlobImmutabilityPolicyModeTypeValues() []BlobImmutabilityPolicyModeType { + return []BlobImmutabilityPolicyModeType{BlobImmutabilityPolicyModeLocked, BlobImmutabilityPolicyModeMutable, BlobImmutabilityPolicyModeNone, BlobImmutabilityPolicyModeUnlocked} +} + +// BlobType enumerates the values for blob type. +type BlobType string + +const ( + // BlobAppendBlob ... + BlobAppendBlob BlobType = "AppendBlob" + // BlobBlockBlob ... + BlobBlockBlob BlobType = "BlockBlob" + // BlobNone represents an empty BlobType. + BlobNone BlobType = "" + // BlobPageBlob ... + BlobPageBlob BlobType = "PageBlob" +) + +// PossibleBlobTypeValues returns an array of possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{BlobAppendBlob, BlobBlockBlob, BlobNone, BlobPageBlob} +} + +// BlockListType enumerates the values for block list type. +type BlockListType string + +const ( + // BlockListAll ... + BlockListAll BlockListType = "all" + // BlockListCommitted ... + BlockListCommitted BlockListType = "committed" + // BlockListNone represents an empty BlockListType. + BlockListNone BlockListType = "" + // BlockListUncommitted ... + BlockListUncommitted BlockListType = "uncommitted" +) + +// PossibleBlockListTypeValues returns an array of possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{BlockListAll, BlockListCommitted, BlockListNone, BlockListUncommitted} +} + +// CopyStatusType enumerates the values for copy status type. +type CopyStatusType string + +const ( + // CopyStatusAborted ... + CopyStatusAborted CopyStatusType = "aborted" + // CopyStatusFailed ... + CopyStatusFailed CopyStatusType = "failed" + // CopyStatusNone represents an empty CopyStatusType. + CopyStatusNone CopyStatusType = "" + // CopyStatusPending ... + CopyStatusPending CopyStatusType = "pending" + // CopyStatusSuccess ... + CopyStatusSuccess CopyStatusType = "success" +) + +// PossibleCopyStatusTypeValues returns an array of possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{CopyStatusAborted, CopyStatusFailed, CopyStatusNone, CopyStatusPending, CopyStatusSuccess} +} + +// DeleteSnapshotsOptionType enumerates the values for delete snapshots option type. +type DeleteSnapshotsOptionType string + +const ( + // DeleteSnapshotsOptionInclude ... + DeleteSnapshotsOptionInclude DeleteSnapshotsOptionType = "include" + // DeleteSnapshotsOptionNone represents an empty DeleteSnapshotsOptionType. + DeleteSnapshotsOptionNone DeleteSnapshotsOptionType = "" + // DeleteSnapshotsOptionOnly ... + DeleteSnapshotsOptionOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns an array of possible values for the DeleteSnapshotsOptionType const type. 
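// Illustrative sketch (editor's annotation, not part of the vendored file): the
// Possible*Values helpers make membership checks against user-supplied input
// straightforward; a hypothetical validator for delete-snapshots options:
//
//	func isValidDeleteSnapshotsOption(v DeleteSnapshotsOptionType) bool {
//		for _, allowed := range PossibleDeleteSnapshotsOptionTypeValues() {
//			if v == allowed {
//				return true
//			}
//		}
//		return false
//	}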
+func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{DeleteSnapshotsOptionInclude, DeleteSnapshotsOptionNone, DeleteSnapshotsOptionOnly} +} + +// EncryptionAlgorithmType enumerates the values for encryption algorithm type. +type EncryptionAlgorithmType string + +const ( + // EncryptionAlgorithmAES256 ... + EncryptionAlgorithmAES256 EncryptionAlgorithmType = "AES256" + // EncryptionAlgorithmNone represents an empty EncryptionAlgorithmType. + EncryptionAlgorithmNone EncryptionAlgorithmType = "" +) + +// PossibleEncryptionAlgorithmTypeValues returns an array of possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{EncryptionAlgorithmAES256, EncryptionAlgorithmNone} +} + +// GeoReplicationStatusType enumerates the values for geo replication status type. +type GeoReplicationStatusType string + +const ( + // GeoReplicationStatusBootstrap ... + GeoReplicationStatusBootstrap GeoReplicationStatusType = "bootstrap" + // GeoReplicationStatusLive ... + GeoReplicationStatusLive GeoReplicationStatusType = "live" + // GeoReplicationStatusNone represents an empty GeoReplicationStatusType. + GeoReplicationStatusNone GeoReplicationStatusType = "" + // GeoReplicationStatusUnavailable ... + GeoReplicationStatusUnavailable GeoReplicationStatusType = "unavailable" +) + +// PossibleGeoReplicationStatusTypeValues returns an array of possible values for the GeoReplicationStatusType const type. +func PossibleGeoReplicationStatusTypeValues() []GeoReplicationStatusType { + return []GeoReplicationStatusType{GeoReplicationStatusBootstrap, GeoReplicationStatusLive, GeoReplicationStatusNone, GeoReplicationStatusUnavailable} +} + +// LeaseDurationType enumerates the values for lease duration type. +type LeaseDurationType string + +const ( + // LeaseDurationFixed ... + LeaseDurationFixed LeaseDurationType = "fixed" + // LeaseDurationInfinite ... + LeaseDurationInfinite LeaseDurationType = "infinite" + // LeaseDurationNone represents an empty LeaseDurationType. + LeaseDurationNone LeaseDurationType = "" +) + +// PossibleLeaseDurationTypeValues returns an array of possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{LeaseDurationFixed, LeaseDurationInfinite, LeaseDurationNone} +} + +// LeaseStateType enumerates the values for lease state type. +type LeaseStateType string + +const ( + // LeaseStateAvailable ... + LeaseStateAvailable LeaseStateType = "available" + // LeaseStateBreaking ... + LeaseStateBreaking LeaseStateType = "breaking" + // LeaseStateBroken ... + LeaseStateBroken LeaseStateType = "broken" + // LeaseStateExpired ... + LeaseStateExpired LeaseStateType = "expired" + // LeaseStateLeased ... + LeaseStateLeased LeaseStateType = "leased" + // LeaseStateNone represents an empty LeaseStateType. + LeaseStateNone LeaseStateType = "" +) + +// PossibleLeaseStateTypeValues returns an array of possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{LeaseStateAvailable, LeaseStateBreaking, LeaseStateBroken, LeaseStateExpired, LeaseStateLeased, LeaseStateNone} +} + +// LeaseStatusType enumerates the values for lease status type. +type LeaseStatusType string + +const ( + // LeaseStatusLocked ... 
+ LeaseStatusLocked LeaseStatusType = "locked" + // LeaseStatusNone represents an empty LeaseStatusType. + LeaseStatusNone LeaseStatusType = "" + // LeaseStatusUnlocked ... + LeaseStatusUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns an array of possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{LeaseStatusLocked, LeaseStatusNone, LeaseStatusUnlocked} +} + +// ListBlobsIncludeItemType enumerates the values for list blobs include item type. +type ListBlobsIncludeItemType string + +const ( + // ListBlobsIncludeItemCopy ... + ListBlobsIncludeItemCopy ListBlobsIncludeItemType = "copy" + // ListBlobsIncludeItemDeleted ... + ListBlobsIncludeItemDeleted ListBlobsIncludeItemType = "deleted" + // ListBlobsIncludeItemDeletedwithversions ... + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItemType = "deletedwithversions" + // ListBlobsIncludeItemImmutabilitypolicy ... + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItemType = "immutabilitypolicy" + // ListBlobsIncludeItemLegalhold ... + ListBlobsIncludeItemLegalhold ListBlobsIncludeItemType = "legalhold" + // ListBlobsIncludeItemMetadata ... + ListBlobsIncludeItemMetadata ListBlobsIncludeItemType = "metadata" + // ListBlobsIncludeItemNone represents an empty ListBlobsIncludeItemType. + ListBlobsIncludeItemNone ListBlobsIncludeItemType = "" + // ListBlobsIncludeItemPermissions ... + ListBlobsIncludeItemPermissions ListBlobsIncludeItemType = "permissions" + // ListBlobsIncludeItemSnapshots ... + ListBlobsIncludeItemSnapshots ListBlobsIncludeItemType = "snapshots" + // ListBlobsIncludeItemTags ... + ListBlobsIncludeItemTags ListBlobsIncludeItemType = "tags" + // ListBlobsIncludeItemUncommittedblobs ... + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItemType = "uncommittedblobs" + // ListBlobsIncludeItemVersions ... + ListBlobsIncludeItemVersions ListBlobsIncludeItemType = "versions" +) + +// PossibleListBlobsIncludeItemTypeValues returns an array of possible values for the ListBlobsIncludeItemType const type. +func PossibleListBlobsIncludeItemTypeValues() []ListBlobsIncludeItemType { + return []ListBlobsIncludeItemType{ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, ListBlobsIncludeItemDeletedwithversions, ListBlobsIncludeItemImmutabilitypolicy, ListBlobsIncludeItemLegalhold, ListBlobsIncludeItemMetadata, ListBlobsIncludeItemNone, ListBlobsIncludeItemPermissions, ListBlobsIncludeItemSnapshots, ListBlobsIncludeItemTags, ListBlobsIncludeItemUncommittedblobs, ListBlobsIncludeItemVersions} +} + +// ListContainersIncludeType enumerates the values for list containers include type. +type ListContainersIncludeType string + +const ( + // ListContainersIncludeDeleted ... + ListContainersIncludeDeleted ListContainersIncludeType = "deleted" + // ListContainersIncludeMetadata ... + ListContainersIncludeMetadata ListContainersIncludeType = "metadata" + // ListContainersIncludeNone represents an empty ListContainersIncludeType. + ListContainersIncludeNone ListContainersIncludeType = "" + // ListContainersIncludeSystem ... + ListContainersIncludeSystem ListContainersIncludeType = "system" +) + +// PossibleListContainersIncludeTypeValues returns an array of possible values for the ListContainersIncludeType const type. 
+func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ListContainersIncludeDeleted, ListContainersIncludeMetadata, ListContainersIncludeNone, ListContainersIncludeSystem} +} + +// PremiumPageBlobAccessTierType enumerates the values for premium page blob access tier type. +type PremiumPageBlobAccessTierType string + +const ( + // PremiumPageBlobAccessTierNone represents an empty PremiumPageBlobAccessTierType. + PremiumPageBlobAccessTierNone PremiumPageBlobAccessTierType = "" + // PremiumPageBlobAccessTierP10 ... + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTierType = "P10" + // PremiumPageBlobAccessTierP15 ... + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTierType = "P15" + // PremiumPageBlobAccessTierP20 ... + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTierType = "P20" + // PremiumPageBlobAccessTierP30 ... + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTierType = "P30" + // PremiumPageBlobAccessTierP4 ... + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTierType = "P4" + // PremiumPageBlobAccessTierP40 ... + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTierType = "P40" + // PremiumPageBlobAccessTierP50 ... + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTierType = "P50" + // PremiumPageBlobAccessTierP6 ... + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTierType = "P6" + // PremiumPageBlobAccessTierP60 ... + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTierType = "P60" + // PremiumPageBlobAccessTierP70 ... + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTierType = "P70" + // PremiumPageBlobAccessTierP80 ... + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTierType = "P80" +) + +// PossiblePremiumPageBlobAccessTierTypeValues returns an array of possible values for the PremiumPageBlobAccessTierType const type. +func PossiblePremiumPageBlobAccessTierTypeValues() []PremiumPageBlobAccessTierType { + return []PremiumPageBlobAccessTierType{PremiumPageBlobAccessTierNone, PremiumPageBlobAccessTierP10, PremiumPageBlobAccessTierP15, PremiumPageBlobAccessTierP20, PremiumPageBlobAccessTierP30, PremiumPageBlobAccessTierP4, PremiumPageBlobAccessTierP40, PremiumPageBlobAccessTierP50, PremiumPageBlobAccessTierP6, PremiumPageBlobAccessTierP60, PremiumPageBlobAccessTierP70, PremiumPageBlobAccessTierP80} +} + +// PublicAccessType enumerates the values for public access type. +type PublicAccessType string + +const ( + // PublicAccessBlob ... + PublicAccessBlob PublicAccessType = "blob" + // PublicAccessContainer ... + PublicAccessContainer PublicAccessType = "container" + // PublicAccessNone represents an empty PublicAccessType. + PublicAccessNone PublicAccessType = "" +) + +// PossiblePublicAccessTypeValues returns an array of possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{PublicAccessBlob, PublicAccessContainer, PublicAccessNone} +} + +// QueryFormatType enumerates the values for query format type. +type QueryFormatType string + +const ( + // QueryFormatArrow ... + QueryFormatArrow QueryFormatType = "arrow" + // QueryFormatDelimited ... + QueryFormatDelimited QueryFormatType = "delimited" + // QueryFormatJSON ... + QueryFormatJSON QueryFormatType = "json" + // QueryFormatNone represents an empty QueryFormatType. + QueryFormatNone QueryFormatType = "" + // QueryFormatParquet ... 
+ QueryFormatParquet QueryFormatType = "parquet" +) + +// PossibleQueryFormatTypeValues returns an array of possible values for the QueryFormatType const type. +func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{QueryFormatArrow, QueryFormatDelimited, QueryFormatJSON, QueryFormatNone, QueryFormatParquet} +} + +// RehydratePriorityType enumerates the values for rehydrate priority type. +type RehydratePriorityType string + +const ( + // RehydratePriorityHigh ... + RehydratePriorityHigh RehydratePriorityType = "High" + // RehydratePriorityNone represents an empty RehydratePriorityType. + RehydratePriorityNone RehydratePriorityType = "" + // RehydratePriorityStandard ... + RehydratePriorityStandard RehydratePriorityType = "Standard" +) + +// PossibleRehydratePriorityTypeValues returns an array of possible values for the RehydratePriorityType const type. +func PossibleRehydratePriorityTypeValues() []RehydratePriorityType { + return []RehydratePriorityType{RehydratePriorityHigh, RehydratePriorityNone, RehydratePriorityStandard} +} + +// SequenceNumberActionType enumerates the values for sequence number action type. +type SequenceNumberActionType string + +const ( + // SequenceNumberActionIncrement ... + SequenceNumberActionIncrement SequenceNumberActionType = "increment" + // SequenceNumberActionMax ... + SequenceNumberActionMax SequenceNumberActionType = "max" + // SequenceNumberActionNone represents an empty SequenceNumberActionType. + SequenceNumberActionNone SequenceNumberActionType = "" + // SequenceNumberActionUpdate ... + SequenceNumberActionUpdate SequenceNumberActionType = "update" +) + +// PossibleSequenceNumberActionTypeValues returns an array of possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{SequenceNumberActionIncrement, SequenceNumberActionMax, SequenceNumberActionNone, SequenceNumberActionUpdate} +} + +// SkuNameType enumerates the values for sku name type. +type SkuNameType string + +const ( + // SkuNameNone represents an empty SkuNameType. + SkuNameNone SkuNameType = "" + // SkuNamePremiumLRS ... + SkuNamePremiumLRS SkuNameType = "Premium_LRS" + // SkuNameStandardGRS ... + SkuNameStandardGRS SkuNameType = "Standard_GRS" + // SkuNameStandardLRS ... + SkuNameStandardLRS SkuNameType = "Standard_LRS" + // SkuNameStandardRAGRS ... + SkuNameStandardRAGRS SkuNameType = "Standard_RAGRS" + // SkuNameStandardZRS ... + SkuNameStandardZRS SkuNameType = "Standard_ZRS" +) + +// PossibleSkuNameTypeValues returns an array of possible values for the SkuNameType const type. +func PossibleSkuNameTypeValues() []SkuNameType { + return []SkuNameType{SkuNameNone, SkuNamePremiumLRS, SkuNameStandardGRS, SkuNameStandardLRS, SkuNameStandardRAGRS, SkuNameStandardZRS} +} + +// StorageErrorCodeType enumerates the values for storage error code type. +type StorageErrorCodeType string + +const ( + // StorageErrorCodeAccountAlreadyExists ... + StorageErrorCodeAccountAlreadyExists StorageErrorCodeType = "AccountAlreadyExists" + // StorageErrorCodeAccountBeingCreated ... + StorageErrorCodeAccountBeingCreated StorageErrorCodeType = "AccountBeingCreated" + // StorageErrorCodeAccountIsDisabled ... + StorageErrorCodeAccountIsDisabled StorageErrorCodeType = "AccountIsDisabled" + // StorageErrorCodeAppendPositionConditionNotMet ... 
+ StorageErrorCodeAppendPositionConditionNotMet StorageErrorCodeType = "AppendPositionConditionNotMet" + // StorageErrorCodeAuthenticationFailed ... + StorageErrorCodeAuthenticationFailed StorageErrorCodeType = "AuthenticationFailed" + // StorageErrorCodeAuthorizationFailure ... + StorageErrorCodeAuthorizationFailure StorageErrorCodeType = "AuthorizationFailure" + // StorageErrorCodeAuthorizationPermissionMismatch ... + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCodeType = "AuthorizationPermissionMismatch" + // StorageErrorCodeAuthorizationProtocolMismatch ... + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCodeType = "AuthorizationProtocolMismatch" + // StorageErrorCodeAuthorizationResourceTypeMismatch ... + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCodeType = "AuthorizationResourceTypeMismatch" + // StorageErrorCodeAuthorizationServiceMismatch ... + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCodeType = "AuthorizationServiceMismatch" + // StorageErrorCodeAuthorizationSourceIPMismatch ... + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCodeType = "AuthorizationSourceIPMismatch" + // StorageErrorCodeBlobAlreadyExists ... + StorageErrorCodeBlobAlreadyExists StorageErrorCodeType = "BlobAlreadyExists" + // StorageErrorCodeBlobArchived ... + StorageErrorCodeBlobArchived StorageErrorCodeType = "BlobArchived" + // StorageErrorCodeBlobBeingRehydrated ... + StorageErrorCodeBlobBeingRehydrated StorageErrorCodeType = "BlobBeingRehydrated" + // StorageErrorCodeBlobImmutableDueToPolicy ... + StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCodeType = "BlobImmutableDueToPolicy" + // StorageErrorCodeBlobNotArchived ... + StorageErrorCodeBlobNotArchived StorageErrorCodeType = "BlobNotArchived" + // StorageErrorCodeBlobNotFound ... + StorageErrorCodeBlobNotFound StorageErrorCodeType = "BlobNotFound" + // StorageErrorCodeBlobOverwritten ... + StorageErrorCodeBlobOverwritten StorageErrorCodeType = "BlobOverwritten" + // StorageErrorCodeBlobTierInadequateForContentLength ... + StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCodeType = "BlobTierInadequateForContentLength" + // StorageErrorCodeBlobUsesCustomerSpecifiedEncryption ... + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCodeType = "BlobUsesCustomerSpecifiedEncryption" + // StorageErrorCodeBlockCountExceedsLimit ... + StorageErrorCodeBlockCountExceedsLimit StorageErrorCodeType = "BlockCountExceedsLimit" + // StorageErrorCodeBlockListTooLong ... + StorageErrorCodeBlockListTooLong StorageErrorCodeType = "BlockListTooLong" + // StorageErrorCodeCannotChangeToLowerTier ... + StorageErrorCodeCannotChangeToLowerTier StorageErrorCodeType = "CannotChangeToLowerTier" + // StorageErrorCodeCannotVerifyCopySource ... + StorageErrorCodeCannotVerifyCopySource StorageErrorCodeType = "CannotVerifyCopySource" + // StorageErrorCodeConditionHeadersNotSupported ... + StorageErrorCodeConditionHeadersNotSupported StorageErrorCodeType = "ConditionHeadersNotSupported" + // StorageErrorCodeConditionNotMet ... + StorageErrorCodeConditionNotMet StorageErrorCodeType = "ConditionNotMet" + // StorageErrorCodeContainerAlreadyExists ... + StorageErrorCodeContainerAlreadyExists StorageErrorCodeType = "ContainerAlreadyExists" + // StorageErrorCodeContainerBeingDeleted ... + StorageErrorCodeContainerBeingDeleted StorageErrorCodeType = "ContainerBeingDeleted" + // StorageErrorCodeContainerDisabled ... 
+ StorageErrorCodeContainerDisabled StorageErrorCodeType = "ContainerDisabled" + // StorageErrorCodeContainerNotFound ... + StorageErrorCodeContainerNotFound StorageErrorCodeType = "ContainerNotFound" + // StorageErrorCodeContentLengthLargerThanTierLimit ... + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCodeType = "ContentLengthLargerThanTierLimit" + // StorageErrorCodeCopyAcrossAccountsNotSupported ... + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCodeType = "CopyAcrossAccountsNotSupported" + // StorageErrorCodeCopyIDMismatch ... + StorageErrorCodeCopyIDMismatch StorageErrorCodeType = "CopyIdMismatch" + // StorageErrorCodeEmptyMetadataKey ... + StorageErrorCodeEmptyMetadataKey StorageErrorCodeType = "EmptyMetadataKey" + // StorageErrorCodeFeatureVersionMismatch ... + StorageErrorCodeFeatureVersionMismatch StorageErrorCodeType = "FeatureVersionMismatch" + // StorageErrorCodeIncrementalCopyBlobMismatch ... + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCodeType = "IncrementalCopyBlobMismatch" + // StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed ... + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCodeType = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + // StorageErrorCodeIncrementalCopySourceMustBeSnapshot ... + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCodeType = "IncrementalCopySourceMustBeSnapshot" + // StorageErrorCodeInfiniteLeaseDurationRequired ... + StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCodeType = "InfiniteLeaseDurationRequired" + // StorageErrorCodeInsufficientAccountPermissions ... + StorageErrorCodeInsufficientAccountPermissions StorageErrorCodeType = "InsufficientAccountPermissions" + // StorageErrorCodeInternalError ... + StorageErrorCodeInternalError StorageErrorCodeType = "InternalError" + // StorageErrorCodeInvalidAuthenticationInfo ... + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCodeType = "InvalidAuthenticationInfo" + // StorageErrorCodeInvalidBlobOrBlock ... + StorageErrorCodeInvalidBlobOrBlock StorageErrorCodeType = "InvalidBlobOrBlock" + // StorageErrorCodeInvalidBlobTier ... + StorageErrorCodeInvalidBlobTier StorageErrorCodeType = "InvalidBlobTier" + // StorageErrorCodeInvalidBlobType ... + StorageErrorCodeInvalidBlobType StorageErrorCodeType = "InvalidBlobType" + // StorageErrorCodeInvalidBlockID ... + StorageErrorCodeInvalidBlockID StorageErrorCodeType = "InvalidBlockId" + // StorageErrorCodeInvalidBlockList ... + StorageErrorCodeInvalidBlockList StorageErrorCodeType = "InvalidBlockList" + // StorageErrorCodeInvalidHeaderValue ... + StorageErrorCodeInvalidHeaderValue StorageErrorCodeType = "InvalidHeaderValue" + // StorageErrorCodeInvalidHTTPVerb ... + StorageErrorCodeInvalidHTTPVerb StorageErrorCodeType = "InvalidHttpVerb" + // StorageErrorCodeInvalidInput ... + StorageErrorCodeInvalidInput StorageErrorCodeType = "InvalidInput" + // StorageErrorCodeInvalidMd5 ... + StorageErrorCodeInvalidMd5 StorageErrorCodeType = "InvalidMd5" + // StorageErrorCodeInvalidMetadata ... + StorageErrorCodeInvalidMetadata StorageErrorCodeType = "InvalidMetadata" + // StorageErrorCodeInvalidOperation ... + StorageErrorCodeInvalidOperation StorageErrorCodeType = "InvalidOperation" + // StorageErrorCodeInvalidPageRange ... + StorageErrorCodeInvalidPageRange StorageErrorCodeType = "InvalidPageRange" + // StorageErrorCodeInvalidQueryParameterValue ... 
+ StorageErrorCodeInvalidQueryParameterValue StorageErrorCodeType = "InvalidQueryParameterValue" + // StorageErrorCodeInvalidRange ... + StorageErrorCodeInvalidRange StorageErrorCodeType = "InvalidRange" + // StorageErrorCodeInvalidResourceName ... + StorageErrorCodeInvalidResourceName StorageErrorCodeType = "InvalidResourceName" + // StorageErrorCodeInvalidSourceBlobType ... + StorageErrorCodeInvalidSourceBlobType StorageErrorCodeType = "InvalidSourceBlobType" + // StorageErrorCodeInvalidSourceBlobURL ... + StorageErrorCodeInvalidSourceBlobURL StorageErrorCodeType = "InvalidSourceBlobUrl" + // StorageErrorCodeInvalidURI ... + StorageErrorCodeInvalidURI StorageErrorCodeType = "InvalidUri" + // StorageErrorCodeInvalidVersionForPageBlobOperation ... + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCodeType = "InvalidVersionForPageBlobOperation" + // StorageErrorCodeInvalidXMLDocument ... + StorageErrorCodeInvalidXMLDocument StorageErrorCodeType = "InvalidXmlDocument" + // StorageErrorCodeInvalidXMLNodeValue ... + StorageErrorCodeInvalidXMLNodeValue StorageErrorCodeType = "InvalidXmlNodeValue" + // StorageErrorCodeLeaseAlreadyBroken ... + StorageErrorCodeLeaseAlreadyBroken StorageErrorCodeType = "LeaseAlreadyBroken" + // StorageErrorCodeLeaseAlreadyPresent ... + StorageErrorCodeLeaseAlreadyPresent StorageErrorCodeType = "LeaseAlreadyPresent" + // StorageErrorCodeLeaseIDMismatchWithBlobOperation ... + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCodeType = "LeaseIdMismatchWithBlobOperation" + // StorageErrorCodeLeaseIDMismatchWithContainerOperation ... + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCodeType = "LeaseIdMismatchWithContainerOperation" + // StorageErrorCodeLeaseIDMismatchWithLeaseOperation ... + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCodeType = "LeaseIdMismatchWithLeaseOperation" + // StorageErrorCodeLeaseIDMissing ... + StorageErrorCodeLeaseIDMissing StorageErrorCodeType = "LeaseIdMissing" + // StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired ... + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCodeType = "LeaseIsBreakingAndCannotBeAcquired" + // StorageErrorCodeLeaseIsBreakingAndCannotBeChanged ... + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCodeType = "LeaseIsBreakingAndCannotBeChanged" + // StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed ... + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCodeType = "LeaseIsBrokenAndCannotBeRenewed" + // StorageErrorCodeLeaseLost ... + StorageErrorCodeLeaseLost StorageErrorCodeType = "LeaseLost" + // StorageErrorCodeLeaseNotPresentWithBlobOperation ... + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCodeType = "LeaseNotPresentWithBlobOperation" + // StorageErrorCodeLeaseNotPresentWithContainerOperation ... + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCodeType = "LeaseNotPresentWithContainerOperation" + // StorageErrorCodeLeaseNotPresentWithLeaseOperation ... + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCodeType = "LeaseNotPresentWithLeaseOperation" + // StorageErrorCodeMaxBlobSizeConditionNotMet ... + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCodeType = "MaxBlobSizeConditionNotMet" + // StorageErrorCodeMd5Mismatch ... + StorageErrorCodeMd5Mismatch StorageErrorCodeType = "Md5Mismatch" + // StorageErrorCodeMetadataTooLarge ... 
+ StorageErrorCodeMetadataTooLarge StorageErrorCodeType = "MetadataTooLarge" + // StorageErrorCodeMissingContentLengthHeader ... + StorageErrorCodeMissingContentLengthHeader StorageErrorCodeType = "MissingContentLengthHeader" + // StorageErrorCodeMissingRequiredHeader ... + StorageErrorCodeMissingRequiredHeader StorageErrorCodeType = "MissingRequiredHeader" + // StorageErrorCodeMissingRequiredQueryParameter ... + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCodeType = "MissingRequiredQueryParameter" + // StorageErrorCodeMissingRequiredXMLNode ... + StorageErrorCodeMissingRequiredXMLNode StorageErrorCodeType = "MissingRequiredXmlNode" + // StorageErrorCodeMultipleConditionHeadersNotSupported ... + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCodeType = "MultipleConditionHeadersNotSupported" + // StorageErrorCodeNoAuthenticationInformation ... + StorageErrorCodeNoAuthenticationInformation StorageErrorCodeType = "NoAuthenticationInformation" + // StorageErrorCodeNone represents an empty StorageErrorCodeType. + StorageErrorCodeNone StorageErrorCodeType = "" + // StorageErrorCodeNoPendingCopyOperation ... + StorageErrorCodeNoPendingCopyOperation StorageErrorCodeType = "NoPendingCopyOperation" + // StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob ... + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCodeType = "OperationNotAllowedOnIncrementalCopyBlob" + // StorageErrorCodeOperationTimedOut ... + StorageErrorCodeOperationTimedOut StorageErrorCodeType = "OperationTimedOut" + // StorageErrorCodeOutOfRangeInput ... + StorageErrorCodeOutOfRangeInput StorageErrorCodeType = "OutOfRangeInput" + // StorageErrorCodeOutOfRangeQueryParameterValue ... + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCodeType = "OutOfRangeQueryParameterValue" + // StorageErrorCodePendingCopyOperation ... + StorageErrorCodePendingCopyOperation StorageErrorCodeType = "PendingCopyOperation" + // StorageErrorCodePreviousSnapshotCannotBeNewer ... + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCodeType = "PreviousSnapshotCannotBeNewer" + // StorageErrorCodePreviousSnapshotNotFound ... + StorageErrorCodePreviousSnapshotNotFound StorageErrorCodeType = "PreviousSnapshotNotFound" + // StorageErrorCodePreviousSnapshotOperationNotSupported ... + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCodeType = "PreviousSnapshotOperationNotSupported" + // StorageErrorCodeRequestBodyTooLarge ... + StorageErrorCodeRequestBodyTooLarge StorageErrorCodeType = "RequestBodyTooLarge" + // StorageErrorCodeRequestURLFailedToParse ... + StorageErrorCodeRequestURLFailedToParse StorageErrorCodeType = "RequestUrlFailedToParse" + // StorageErrorCodeResourceAlreadyExists ... + StorageErrorCodeResourceAlreadyExists StorageErrorCodeType = "ResourceAlreadyExists" + // StorageErrorCodeResourceNotFound ... + StorageErrorCodeResourceNotFound StorageErrorCodeType = "ResourceNotFound" + // StorageErrorCodeResourceTypeMismatch ... + StorageErrorCodeResourceTypeMismatch StorageErrorCodeType = "ResourceTypeMismatch" + // StorageErrorCodeSequenceNumberConditionNotMet ... + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCodeType = "SequenceNumberConditionNotMet" + // StorageErrorCodeSequenceNumberIncrementTooLarge ... + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCodeType = "SequenceNumberIncrementTooLarge" + // StorageErrorCodeServerBusy ... 
+ StorageErrorCodeServerBusy StorageErrorCodeType = "ServerBusy" + // StorageErrorCodeSnapshotCountExceeded ... + StorageErrorCodeSnapshotCountExceeded StorageErrorCodeType = "SnapshotCountExceeded" + // StorageErrorCodeSnapshotOperationRateExceeded ... + StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCodeType = "SnapshotOperationRateExceeded" + // StorageErrorCodeSnapshotsPresent ... + StorageErrorCodeSnapshotsPresent StorageErrorCodeType = "SnapshotsPresent" + // StorageErrorCodeSourceConditionNotMet ... + StorageErrorCodeSourceConditionNotMet StorageErrorCodeType = "SourceConditionNotMet" + // StorageErrorCodeSystemInUse ... + StorageErrorCodeSystemInUse StorageErrorCodeType = "SystemInUse" + // StorageErrorCodeTargetConditionNotMet ... + StorageErrorCodeTargetConditionNotMet StorageErrorCodeType = "TargetConditionNotMet" + // StorageErrorCodeUnauthorizedBlobOverwrite ... + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCodeType = "UnauthorizedBlobOverwrite" + // StorageErrorCodeUnsupportedHeader ... + StorageErrorCodeUnsupportedHeader StorageErrorCodeType = "UnsupportedHeader" + // StorageErrorCodeUnsupportedHTTPVerb ... + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCodeType = "UnsupportedHttpVerb" + // StorageErrorCodeUnsupportedQueryParameter ... + StorageErrorCodeUnsupportedQueryParameter StorageErrorCodeType = "UnsupportedQueryParameter" + // StorageErrorCodeUnsupportedXMLNode ... + StorageErrorCodeUnsupportedXMLNode StorageErrorCodeType = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeTypeValues returns an array of possible values for the StorageErrorCodeType const type. +func PossibleStorageErrorCodeTypeValues() []StorageErrorCodeType { + return []StorageErrorCodeType{StorageErrorCodeAccountAlreadyExists, StorageErrorCodeAccountBeingCreated, StorageErrorCodeAccountIsDisabled, StorageErrorCodeAppendPositionConditionNotMet, StorageErrorCodeAuthenticationFailed, StorageErrorCodeAuthorizationFailure, StorageErrorCodeAuthorizationPermissionMismatch, StorageErrorCodeAuthorizationProtocolMismatch, StorageErrorCodeAuthorizationResourceTypeMismatch, StorageErrorCodeAuthorizationServiceMismatch, StorageErrorCodeAuthorizationSourceIPMismatch, StorageErrorCodeBlobAlreadyExists, StorageErrorCodeBlobArchived, StorageErrorCodeBlobBeingRehydrated, StorageErrorCodeBlobImmutableDueToPolicy, StorageErrorCodeBlobNotArchived, StorageErrorCodeBlobNotFound, StorageErrorCodeBlobOverwritten, StorageErrorCodeBlobTierInadequateForContentLength, StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, StorageErrorCodeBlockCountExceedsLimit, StorageErrorCodeBlockListTooLong, StorageErrorCodeCannotChangeToLowerTier, StorageErrorCodeCannotVerifyCopySource, StorageErrorCodeConditionHeadersNotSupported, StorageErrorCodeConditionNotMet, StorageErrorCodeContainerAlreadyExists, StorageErrorCodeContainerBeingDeleted, StorageErrorCodeContainerDisabled, StorageErrorCodeContainerNotFound, StorageErrorCodeContentLengthLargerThanTierLimit, StorageErrorCodeCopyAcrossAccountsNotSupported, StorageErrorCodeCopyIDMismatch, StorageErrorCodeEmptyMetadataKey, StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInternalError, StorageErrorCodeInvalidAuthenticationInfo, StorageErrorCodeInvalidBlobOrBlock, StorageErrorCodeInvalidBlobTier, 
StorageErrorCodeInvalidBlobType, StorageErrorCodeInvalidBlockID, StorageErrorCodeInvalidBlockList, StorageErrorCodeInvalidHeaderValue, StorageErrorCodeInvalidHTTPVerb, StorageErrorCodeInvalidInput, StorageErrorCodeInvalidMd5, StorageErrorCodeInvalidMetadata, StorageErrorCodeInvalidOperation, StorageErrorCodeInvalidPageRange, StorageErrorCodeInvalidQueryParameterValue, StorageErrorCodeInvalidRange, StorageErrorCodeInvalidResourceName, StorageErrorCodeInvalidSourceBlobType, StorageErrorCodeInvalidSourceBlobURL, StorageErrorCodeInvalidURI, StorageErrorCodeInvalidVersionForPageBlobOperation, StorageErrorCodeInvalidXMLDocument, StorageErrorCodeInvalidXMLNodeValue, StorageErrorCodeLeaseAlreadyBroken, StorageErrorCodeLeaseAlreadyPresent, StorageErrorCodeLeaseIDMismatchWithBlobOperation, StorageErrorCodeLeaseIDMismatchWithContainerOperation, StorageErrorCodeLeaseIDMismatchWithLeaseOperation, StorageErrorCodeLeaseIDMissing, StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, StorageErrorCodeLeaseLost, StorageErrorCodeLeaseNotPresentWithBlobOperation, StorageErrorCodeLeaseNotPresentWithContainerOperation, StorageErrorCodeLeaseNotPresentWithLeaseOperation, StorageErrorCodeMaxBlobSizeConditionNotMet, StorageErrorCodeMd5Mismatch, StorageErrorCodeMetadataTooLarge, StorageErrorCodeMissingContentLengthHeader, StorageErrorCodeMissingRequiredHeader, StorageErrorCodeMissingRequiredQueryParameter, StorageErrorCodeMissingRequiredXMLNode, StorageErrorCodeMultipleConditionHeadersNotSupported, StorageErrorCodeNoAuthenticationInformation, StorageErrorCodeNone, StorageErrorCodeNoPendingCopyOperation, StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, StorageErrorCodeOperationTimedOut, StorageErrorCodeOutOfRangeInput, StorageErrorCodeOutOfRangeQueryParameterValue, StorageErrorCodePendingCopyOperation, StorageErrorCodePreviousSnapshotCannotBeNewer, StorageErrorCodePreviousSnapshotNotFound, StorageErrorCodePreviousSnapshotOperationNotSupported, StorageErrorCodeRequestBodyTooLarge, StorageErrorCodeRequestURLFailedToParse, StorageErrorCodeResourceAlreadyExists, StorageErrorCodeResourceNotFound, StorageErrorCodeResourceTypeMismatch, StorageErrorCodeSequenceNumberConditionNotMet, StorageErrorCodeSequenceNumberIncrementTooLarge, StorageErrorCodeServerBusy, StorageErrorCodeSnapshotCountExceeded, StorageErrorCodeSnapshotOperationRateExceeded, StorageErrorCodeSnapshotsPresent, StorageErrorCodeSourceConditionNotMet, StorageErrorCodeSystemInUse, StorageErrorCodeTargetConditionNotMet, StorageErrorCodeUnauthorizedBlobOverwrite, StorageErrorCodeUnsupportedHeader, StorageErrorCodeUnsupportedHTTPVerb, StorageErrorCodeUnsupportedQueryParameter, StorageErrorCodeUnsupportedXMLNode} +} + +// SyncCopyStatusType enumerates the values for sync copy status type. +type SyncCopyStatusType string + +const ( + // SyncCopyStatusNone represents an empty SyncCopyStatusType. + SyncCopyStatusNone SyncCopyStatusType = "" + // SyncCopyStatusSuccess ... + SyncCopyStatusSuccess SyncCopyStatusType = "success" +) + +// PossibleSyncCopyStatusTypeValues returns an array of possible values for the SyncCopyStatusType const type. 
+func PossibleSyncCopyStatusTypeValues() []SyncCopyStatusType { + return []SyncCopyStatusType{SyncCopyStatusNone, SyncCopyStatusSuccess} +} + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // Start - the date-time the policy is active + Start *time.Time `xml:"Start"` + // Expiry - the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + // Permission - the permissions for the acl policy + Permission *string `xml:"Permission"` +} + +// MarshalXML implements the xml.Marshaler interface for AccessPolicy. +func (ap AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + ap2 := (*accessPolicy)(unsafe.Pointer(&ap)) + return e.EncodeElement(*ap2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for AccessPolicy. +func (ap *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + ap2 := (*accessPolicy)(unsafe.Pointer(ap)) + return d.DecodeElement(ap2, &start) +} + +// AppendBlobAppendBlockFromURLResponse ... +type AppendBlobAppendBlockFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ababfur AppendBlobAppendBlockFromURLResponse) Response() *http.Response { + return ababfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ababfur AppendBlobAppendBlockFromURLResponse) StatusCode() int { + return ababfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ababfur AppendBlobAppendBlockFromURLResponse) Status() string { + return ababfur.rawResponse.Status +} + +// BlobAppendOffset returns the value for header x-ms-blob-append-offset. +func (ababfur AppendBlobAppendBlockFromURLResponse) BlobAppendOffset() string { + return ababfur.rawResponse.Header.Get("x-ms-blob-append-offset") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (ababfur AppendBlobAppendBlockFromURLResponse) BlobCommittedBlockCount() int32 { + s := ababfur.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// ContentMD5 returns the value for header Content-MD5. +func (ababfur AppendBlobAppendBlockFromURLResponse) ContentMD5() []byte { + s := ababfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (ababfur AppendBlobAppendBlockFromURLResponse) Date() time.Time { + s := ababfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionKeySha256() string { + return ababfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (ababfur AppendBlobAppendBlockFromURLResponse) EncryptionScope() string { + return ababfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ababfur AppendBlobAppendBlockFromURLResponse) ErrorCode() string { + return ababfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
+func (ababfur AppendBlobAppendBlockFromURLResponse) ETag() ETag { + return ETag(ababfur.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (ababfur AppendBlobAppendBlockFromURLResponse) IsServerEncrypted() string { + return ababfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (ababfur AppendBlobAppendBlockFromURLResponse) LastModified() time.Time { + s := ababfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ababfur AppendBlobAppendBlockFromURLResponse) RequestID() string { + return ababfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ababfur AppendBlobAppendBlockFromURLResponse) Version() string { + return ababfur.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (ababfur AppendBlobAppendBlockFromURLResponse) XMsContentCrc64() []byte { + s := ababfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// AppendBlobAppendBlockResponse ... +type AppendBlobAppendBlockResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ababr AppendBlobAppendBlockResponse) Response() *http.Response { + return ababr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ababr AppendBlobAppendBlockResponse) StatusCode() int { + return ababr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ababr AppendBlobAppendBlockResponse) Status() string { + return ababr.rawResponse.Status +} + +// BlobAppendOffset returns the value for header x-ms-blob-append-offset. +func (ababr AppendBlobAppendBlockResponse) BlobAppendOffset() string { + return ababr.rawResponse.Header.Get("x-ms-blob-append-offset") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (ababr AppendBlobAppendBlockResponse) BlobCommittedBlockCount() int32 { + s := ababr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ababr AppendBlobAppendBlockResponse) ClientRequestID() string { + return ababr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (ababr AppendBlobAppendBlockResponse) ContentMD5() []byte { + s := ababr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (ababr AppendBlobAppendBlockResponse) Date() time.Time { + s := ababr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
+func (ababr AppendBlobAppendBlockResponse) EncryptionKeySha256() string { + return ababr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (ababr AppendBlobAppendBlockResponse) EncryptionScope() string { + return ababr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ababr AppendBlobAppendBlockResponse) ErrorCode() string { + return ababr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ababr AppendBlobAppendBlockResponse) ETag() ETag { + return ETag(ababr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (ababr AppendBlobAppendBlockResponse) IsServerEncrypted() string { + return ababr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (ababr AppendBlobAppendBlockResponse) LastModified() time.Time { + s := ababr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ababr AppendBlobAppendBlockResponse) RequestID() string { + return ababr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ababr AppendBlobAppendBlockResponse) Version() string { + return ababr.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (ababr AppendBlobAppendBlockResponse) XMsContentCrc64() []byte { + s := ababr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// AppendBlobCreateResponse ... +type AppendBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (abcr AppendBlobCreateResponse) Response() *http.Response { + return abcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (abcr AppendBlobCreateResponse) StatusCode() int { + return abcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (abcr AppendBlobCreateResponse) Status() string { + return abcr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (abcr AppendBlobCreateResponse) ClientRequestID() string { + return abcr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (abcr AppendBlobCreateResponse) ContentMD5() []byte { + s := abcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (abcr AppendBlobCreateResponse) Date() time.Time { + s := abcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. 
+func (abcr AppendBlobCreateResponse) EncryptionKeySha256() string { + return abcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (abcr AppendBlobCreateResponse) EncryptionScope() string { + return abcr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (abcr AppendBlobCreateResponse) ErrorCode() string { + return abcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (abcr AppendBlobCreateResponse) ETag() ETag { + return ETag(abcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (abcr AppendBlobCreateResponse) IsServerEncrypted() string { + return abcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (abcr AppendBlobCreateResponse) LastModified() time.Time { + s := abcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (abcr AppendBlobCreateResponse) RequestID() string { + return abcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (abcr AppendBlobCreateResponse) Version() string { + return abcr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (abcr AppendBlobCreateResponse) VersionID() string { + return abcr.rawResponse.Header.Get("x-ms-version-id") +} + +// AppendBlobSealResponse ... +type AppendBlobSealResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (absr AppendBlobSealResponse) Response() *http.Response { + return absr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (absr AppendBlobSealResponse) StatusCode() int { + return absr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (absr AppendBlobSealResponse) Status() string { + return absr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (absr AppendBlobSealResponse) ClientRequestID() string { + return absr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (absr AppendBlobSealResponse) Date() time.Time { + s := absr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (absr AppendBlobSealResponse) ErrorCode() string { + return absr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (absr AppendBlobSealResponse) ETag() ETag { + return ETag(absr.rawResponse.Header.Get("ETag")) +} + +// IsSealed returns the value for header x-ms-blob-sealed. +func (absr AppendBlobSealResponse) IsSealed() string { + return absr.rawResponse.Header.Get("x-ms-blob-sealed") +} + +// LastModified returns the value for header Last-Modified. 
+func (absr AppendBlobSealResponse) LastModified() time.Time { + s := absr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (absr AppendBlobSealResponse) RequestID() string { + return absr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (absr AppendBlobSealResponse) Version() string { + return absr.rawResponse.Header.Get("x-ms-version") +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow +// formatted. +type ArrowConfiguration struct { + Schema []ArrowField `xml:"Schema>Field"` +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Field"` + Type string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +// BlobAbortCopyFromURLResponse ... +type BlobAbortCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bacfur BlobAbortCopyFromURLResponse) Response() *http.Response { + return bacfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bacfur BlobAbortCopyFromURLResponse) StatusCode() int { + return bacfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bacfur BlobAbortCopyFromURLResponse) Status() string { + return bacfur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bacfur BlobAbortCopyFromURLResponse) ClientRequestID() string { + return bacfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bacfur BlobAbortCopyFromURLResponse) Date() time.Time { + s := bacfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bacfur BlobAbortCopyFromURLResponse) ErrorCode() string { + return bacfur.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bacfur BlobAbortCopyFromURLResponse) RequestID() string { + return bacfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bacfur BlobAbortCopyFromURLResponse) Version() string { + return bacfur.rawResponse.Header.Get("x-ms-version") +} + +// BlobAcquireLeaseResponse ... +type BlobAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (balr BlobAcquireLeaseResponse) Response() *http.Response { + return balr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (balr BlobAcquireLeaseResponse) StatusCode() int { + return balr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (balr BlobAcquireLeaseResponse) Status() string { + return balr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (balr BlobAcquireLeaseResponse) ClientRequestID() string { + return balr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (balr BlobAcquireLeaseResponse) Date() time.Time { + s := balr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (balr BlobAcquireLeaseResponse) ErrorCode() string { + return balr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (balr BlobAcquireLeaseResponse) ETag() ETag { + return ETag(balr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (balr BlobAcquireLeaseResponse) LastModified() time.Time { + s := balr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (balr BlobAcquireLeaseResponse) LeaseID() string { + return balr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (balr BlobAcquireLeaseResponse) RequestID() string { + return balr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (balr BlobAcquireLeaseResponse) Version() string { + return balr.rawResponse.Header.Get("x-ms-version") +} + +// BlobBreakLeaseResponse ... +type BlobBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bblr BlobBreakLeaseResponse) Response() *http.Response { + return bblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bblr BlobBreakLeaseResponse) StatusCode() int { + return bblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bblr BlobBreakLeaseResponse) Status() string { + return bblr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bblr BlobBreakLeaseResponse) ClientRequestID() string { + return bblr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bblr BlobBreakLeaseResponse) Date() time.Time { + s := bblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bblr BlobBreakLeaseResponse) ErrorCode() string { + return bblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bblr BlobBreakLeaseResponse) ETag() ETag { + return ETag(bblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bblr BlobBreakLeaseResponse) LastModified() time.Time { + s := bblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. 
+func (bblr BlobBreakLeaseResponse) LeaseTime() int32 { + s := bblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (bblr BlobBreakLeaseResponse) RequestID() string { + return bblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bblr BlobBreakLeaseResponse) Version() string { + return bblr.rawResponse.Header.Get("x-ms-version") +} + +// BlobChangeLeaseResponse ... +type BlobChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bclr BlobChangeLeaseResponse) Response() *http.Response { + return bclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bclr BlobChangeLeaseResponse) StatusCode() int { + return bclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bclr BlobChangeLeaseResponse) Status() string { + return bclr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bclr BlobChangeLeaseResponse) ClientRequestID() string { + return bclr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bclr BlobChangeLeaseResponse) Date() time.Time { + s := bclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bclr BlobChangeLeaseResponse) ErrorCode() string { + return bclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bclr BlobChangeLeaseResponse) ETag() ETag { + return ETag(bclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bclr BlobChangeLeaseResponse) LastModified() time.Time { + s := bclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (bclr BlobChangeLeaseResponse) LeaseID() string { + return bclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (bclr BlobChangeLeaseResponse) RequestID() string { + return bclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bclr BlobChangeLeaseResponse) Version() string { + return bclr.rawResponse.Header.Get("x-ms-version") +} + +// BlobCopyFromURLResponse ... +type BlobCopyFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bcfur BlobCopyFromURLResponse) Response() *http.Response { + return bcfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bcfur BlobCopyFromURLResponse) StatusCode() int { + return bcfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bcfur BlobCopyFromURLResponse) Status() string { + return bcfur.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
+func (bcfur BlobCopyFromURLResponse) ClientRequestID() string { + return bcfur.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (bcfur BlobCopyFromURLResponse) ContentMD5() []byte { + s := bcfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// CopyID returns the value for header x-ms-copy-id. +func (bcfur BlobCopyFromURLResponse) CopyID() string { + return bcfur.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (bcfur BlobCopyFromURLResponse) CopyStatus() SyncCopyStatusType { + return SyncCopyStatusType(bcfur.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (bcfur BlobCopyFromURLResponse) Date() time.Time { + s := bcfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bcfur BlobCopyFromURLResponse) ErrorCode() string { + return bcfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bcfur BlobCopyFromURLResponse) ETag() ETag { + return ETag(bcfur.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (bcfur BlobCopyFromURLResponse) LastModified() time.Time { + s := bcfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bcfur BlobCopyFromURLResponse) RequestID() string { + return bcfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bcfur BlobCopyFromURLResponse) Version() string { + return bcfur.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bcfur BlobCopyFromURLResponse) VersionID() string { + return bcfur.rawResponse.Header.Get("x-ms-version-id") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (bcfur BlobCopyFromURLResponse) XMsContentCrc64() []byte { + s := bcfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobCreateSnapshotResponse ... +type BlobCreateSnapshotResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bcsr BlobCreateSnapshotResponse) Response() *http.Response { + return bcsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bcsr BlobCreateSnapshotResponse) StatusCode() int { + return bcsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bcsr BlobCreateSnapshotResponse) Status() string { + return bcsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bcsr BlobCreateSnapshotResponse) ClientRequestID() string { + return bcsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. 
+func (bcsr BlobCreateSnapshotResponse) Date() time.Time { + s := bcsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bcsr BlobCreateSnapshotResponse) ErrorCode() string { + return bcsr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (bcsr BlobCreateSnapshotResponse) ETag() ETag { + return ETag(bcsr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (bcsr BlobCreateSnapshotResponse) IsServerEncrypted() string { + return bcsr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (bcsr BlobCreateSnapshotResponse) LastModified() time.Time { + s := bcsr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bcsr BlobCreateSnapshotResponse) RequestID() string { + return bcsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Snapshot returns the value for header x-ms-snapshot. +func (bcsr BlobCreateSnapshotResponse) Snapshot() string { + return bcsr.rawResponse.Header.Get("x-ms-snapshot") +} + +// Version returns the value for header x-ms-version. +func (bcsr BlobCreateSnapshotResponse) Version() string { + return bcsr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (bcsr BlobCreateSnapshotResponse) VersionID() string { + return bcsr.rawResponse.Header.Get("x-ms-version-id") +} + +// BlobDeleteImmutabilityPolicyResponse ... +type BlobDeleteImmutabilityPolicyResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bdipr BlobDeleteImmutabilityPolicyResponse) Response() *http.Response { + return bdipr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bdipr BlobDeleteImmutabilityPolicyResponse) StatusCode() int { + return bdipr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bdipr BlobDeleteImmutabilityPolicyResponse) Status() string { + return bdipr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bdipr BlobDeleteImmutabilityPolicyResponse) ClientRequestID() string { + return bdipr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bdipr BlobDeleteImmutabilityPolicyResponse) Date() time.Time { + s := bdipr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bdipr BlobDeleteImmutabilityPolicyResponse) ErrorCode() string { + return bdipr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bdipr BlobDeleteImmutabilityPolicyResponse) RequestID() string { + return bdipr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (bdipr BlobDeleteImmutabilityPolicyResponse) Version() string { + return bdipr.rawResponse.Header.Get("x-ms-version") +} + +// BlobDeleteResponse ... +type BlobDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bdr BlobDeleteResponse) Response() *http.Response { + return bdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bdr BlobDeleteResponse) StatusCode() int { + return bdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bdr BlobDeleteResponse) Status() string { + return bdr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bdr BlobDeleteResponse) ClientRequestID() string { + return bdr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bdr BlobDeleteResponse) Date() time.Time { + s := bdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bdr BlobDeleteResponse) ErrorCode() string { + return bdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (bdr BlobDeleteResponse) RequestID() string { + return bdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bdr BlobDeleteResponse) Version() string { + return bdr.rawResponse.Header.Get("x-ms-version") +} + +// BlobFlatListSegment ... +type BlobFlatListSegment struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blobs"` + BlobItems []BlobItemInternal `xml:"Blob"` +} + +// BlobGetAccountInfoResponse ... +type BlobGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (bgair BlobGetAccountInfoResponse) Response() *http.Response { + return bgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgair BlobGetAccountInfoResponse) StatusCode() int { + return bgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgair BlobGetAccountInfoResponse) Status() string { + return bgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (bgair BlobGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(bgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (bgair BlobGetAccountInfoResponse) ClientRequestID() string { + return bgair.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (bgair BlobGetAccountInfoResponse) Date() time.Time { + s := bgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (bgair BlobGetAccountInfoResponse) ErrorCode() string { + return bgair.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. 
+func (bgair BlobGetAccountInfoResponse) RequestID() string { + return bgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (bgair BlobGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(bgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (bgair BlobGetAccountInfoResponse) Version() string { + return bgair.rawResponse.Header.Get("x-ms-version") +} + +// BlobGetPropertiesResponse ... +type BlobGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (bgpr BlobGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range bgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (bgpr BlobGetPropertiesResponse) Response() *http.Response { + return bgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (bgpr BlobGetPropertiesResponse) StatusCode() int { + return bgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (bgpr BlobGetPropertiesResponse) Status() string { + return bgpr.rawResponse.Status +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (bgpr BlobGetPropertiesResponse) AcceptRanges() string { + return bgpr.rawResponse.Header.Get("Accept-Ranges") +} + +// AccessTier returns the value for header x-ms-access-tier. +func (bgpr BlobGetPropertiesResponse) AccessTier() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier") +} + +// AccessTierChangeTime returns the value for header x-ms-access-tier-change-time. +func (bgpr BlobGetPropertiesResponse) AccessTierChangeTime() time.Time { + s := bgpr.rawResponse.Header.Get("x-ms-access-tier-change-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// AccessTierInferred returns the value for header x-ms-access-tier-inferred. +func (bgpr BlobGetPropertiesResponse) AccessTierInferred() string { + return bgpr.rawResponse.Header.Get("x-ms-access-tier-inferred") +} + +// ArchiveStatus returns the value for header x-ms-archive-status. +func (bgpr BlobGetPropertiesResponse) ArchiveStatus() string { + return bgpr.rawResponse.Header.Get("x-ms-archive-status") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (bgpr BlobGetPropertiesResponse) BlobCommittedBlockCount() int32 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (bgpr BlobGetPropertiesResponse) BlobSequenceNumber() int64 { + s := bgpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (bgpr BlobGetPropertiesResponse) BlobType() BlobType { + return BlobType(bgpr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. 
+// CacheControl returns the value for header Cache-Control.
+func (bgpr BlobGetPropertiesResponse) CacheControl() string {
+	return bgpr.rawResponse.Header.Get("Cache-Control")
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bgpr BlobGetPropertiesResponse) ClientRequestID() string {
+	return bgpr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentDisposition returns the value for header Content-Disposition.
+func (bgpr BlobGetPropertiesResponse) ContentDisposition() string {
+	return bgpr.rawResponse.Header.Get("Content-Disposition")
+}
+
+// ContentEncoding returns the value for header Content-Encoding.
+func (bgpr BlobGetPropertiesResponse) ContentEncoding() string {
+	return bgpr.rawResponse.Header.Get("Content-Encoding")
+}
+
+// ContentLanguage returns the value for header Content-Language.
+func (bgpr BlobGetPropertiesResponse) ContentLanguage() string {
+	return bgpr.rawResponse.Header.Get("Content-Language")
+}
+
+// ContentLength returns the value for header Content-Length.
+func (bgpr BlobGetPropertiesResponse) ContentLength() int64 {
+	s := bgpr.rawResponse.Header.Get("Content-Length")
+	if s == "" {
+		return -1
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (bgpr BlobGetPropertiesResponse) ContentMD5() []byte {
+	s := bgpr.rawResponse.Header.Get("Content-MD5")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// ContentType returns the value for header Content-Type.
+func (bgpr BlobGetPropertiesResponse) ContentType() string {
+	return bgpr.rawResponse.Header.Get("Content-Type")
+}
+
+// CopyCompletionTime returns the value for header x-ms-copy-completion-time.
+func (bgpr BlobGetPropertiesResponse) CopyCompletionTime() time.Time {
+	s := bgpr.rawResponse.Header.Get("x-ms-copy-completion-time")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// CopyID returns the value for header x-ms-copy-id.
+func (bgpr BlobGetPropertiesResponse) CopyID() string {
+	return bgpr.rawResponse.Header.Get("x-ms-copy-id")
+}
+
+// CopyProgress returns the value for header x-ms-copy-progress.
+func (bgpr BlobGetPropertiesResponse) CopyProgress() string {
+	return bgpr.rawResponse.Header.Get("x-ms-copy-progress")
+}
+
+// CopySource returns the value for header x-ms-copy-source.
+func (bgpr BlobGetPropertiesResponse) CopySource() string {
+	return bgpr.rawResponse.Header.Get("x-ms-copy-source")
+}
+
+// CopyStatus returns the value for header x-ms-copy-status.
+func (bgpr BlobGetPropertiesResponse) CopyStatus() CopyStatusType {
+	return CopyStatusType(bgpr.rawResponse.Header.Get("x-ms-copy-status"))
+}
+
+// CopyStatusDescription returns the value for header x-ms-copy-status-description.
+func (bgpr BlobGetPropertiesResponse) CopyStatusDescription() string {
+	return bgpr.rawResponse.Header.Get("x-ms-copy-status-description")
+}
+
+// CreationTime returns the value for header x-ms-creation-time.
+func (bgpr BlobGetPropertiesResponse) CreationTime() time.Time {
+	s := bgpr.rawResponse.Header.Get("x-ms-creation-time")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
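The ContentMD5 accessor above decodes the base64-encoded 16-byte digest the service reports, collapsing decode errors to nil. A self-contained sketch of that round trip (illustrative only; the payload is invented):

package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// decodeContentMD5 mirrors the accessor: empty or undecodable input yields nil.
func decodeContentMD5(s string) []byte {
	if s == "" {
		return nil
	}
	b, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return nil
	}
	return b
}

func main() {
	sum := md5.Sum([]byte("hello"))
	header := base64.StdEncoding.EncodeToString(sum[:])
	fmt.Printf("header=%s decoded=%x\n", header, decodeContentMD5(header))
}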
+// Date returns the value for header Date.
+func (bgpr BlobGetPropertiesResponse) Date() time.Time {
+	s := bgpr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// DestinationSnapshot returns the value for header x-ms-copy-destination-snapshot.
+func (bgpr BlobGetPropertiesResponse) DestinationSnapshot() string {
+	return bgpr.rawResponse.Header.Get("x-ms-copy-destination-snapshot")
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bgpr BlobGetPropertiesResponse) EncryptionKeySha256() string {
+	return bgpr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bgpr BlobGetPropertiesResponse) EncryptionScope() string {
+	return bgpr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bgpr BlobGetPropertiesResponse) ErrorCode() string {
+	return bgpr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bgpr BlobGetPropertiesResponse) ETag() ETag {
+	return ETag(bgpr.rawResponse.Header.Get("ETag"))
+}
+
+// ExpiresOn returns the value for header x-ms-expiry-time.
+func (bgpr BlobGetPropertiesResponse) ExpiresOn() time.Time {
+	s := bgpr.rawResponse.Header.Get("x-ms-expiry-time")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ImmutabilityPolicyExpiresOn returns the value for header x-ms-immutability-policy-until-date.
+func (bgpr BlobGetPropertiesResponse) ImmutabilityPolicyExpiresOn() time.Time {
+	s := bgpr.rawResponse.Header.Get("x-ms-immutability-policy-until-date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode.
+func (bgpr BlobGetPropertiesResponse) ImmutabilityPolicyMode() BlobImmutabilityPolicyModeType {
+	return BlobImmutabilityPolicyModeType(bgpr.rawResponse.Header.Get("x-ms-immutability-policy-mode"))
+}
+
+// IsCurrentVersion returns the value for header x-ms-is-current-version.
+func (bgpr BlobGetPropertiesResponse) IsCurrentVersion() string {
+	return bgpr.rawResponse.Header.Get("x-ms-is-current-version")
+}
+
+// IsIncrementalCopy returns the value for header x-ms-incremental-copy.
+func (bgpr BlobGetPropertiesResponse) IsIncrementalCopy() string {
+	return bgpr.rawResponse.Header.Get("x-ms-incremental-copy")
+}
+
+// IsSealed returns the value for header x-ms-blob-sealed.
+func (bgpr BlobGetPropertiesResponse) IsSealed() string {
+	return bgpr.rawResponse.Header.Get("x-ms-blob-sealed")
+}
+
+// IsServerEncrypted returns the value for header x-ms-server-encrypted.
+func (bgpr BlobGetPropertiesResponse) IsServerEncrypted() string {
+	return bgpr.rawResponse.Header.Get("x-ms-server-encrypted")
+}
+
+// LastAccessed returns the value for header x-ms-last-access-time.
+func (bgpr BlobGetPropertiesResponse) LastAccessed() time.Time {
+	s := bgpr.rawResponse.Header.Get("x-ms-last-access-time")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bgpr BlobGetPropertiesResponse) LastModified() time.Time {
+	s := bgpr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// LeaseDuration returns the value for header x-ms-lease-duration.
+func (bgpr BlobGetPropertiesResponse) LeaseDuration() LeaseDurationType {
+	return LeaseDurationType(bgpr.rawResponse.Header.Get("x-ms-lease-duration"))
+}
+
+// LeaseState returns the value for header x-ms-lease-state.
+func (bgpr BlobGetPropertiesResponse) LeaseState() LeaseStateType {
+	return LeaseStateType(bgpr.rawResponse.Header.Get("x-ms-lease-state"))
+}
+
+// LeaseStatus returns the value for header x-ms-lease-status.
+func (bgpr BlobGetPropertiesResponse) LeaseStatus() LeaseStatusType {
+	return LeaseStatusType(bgpr.rawResponse.Header.Get("x-ms-lease-status"))
+}
+
+// LegalHold returns the value for header x-ms-legal-hold.
+func (bgpr BlobGetPropertiesResponse) LegalHold() string {
+	return bgpr.rawResponse.Header.Get("x-ms-legal-hold")
+}
+
+// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id.
+func (bgpr BlobGetPropertiesResponse) ObjectReplicationPolicyID() string {
+	return bgpr.rawResponse.Header.Get("x-ms-or-policy-id")
+}
+
+// ObjectReplicationRules returns the value for header x-ms-or.
+func (bgpr BlobGetPropertiesResponse) ObjectReplicationRules() string {
+	return bgpr.rawResponse.Header.Get("x-ms-or")
+}
+
+// RehydratePriority returns the value for header x-ms-rehydrate-priority.
+func (bgpr BlobGetPropertiesResponse) RehydratePriority() string {
+	return bgpr.rawResponse.Header.Get("x-ms-rehydrate-priority")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bgpr BlobGetPropertiesResponse) RequestID() string {
+	return bgpr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// TagCount returns the value for header x-ms-tag-count.
+func (bgpr BlobGetPropertiesResponse) TagCount() int64 {
+	s := bgpr.rawResponse.Header.Get("x-ms-tag-count")
+	if s == "" {
+		return -1
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
+
+// Version returns the value for header x-ms-version.
+func (bgpr BlobGetPropertiesResponse) Version() string {
+	return bgpr.rawResponse.Header.Get("x-ms-version")
+}
+
+// VersionID returns the value for header x-ms-version-id.
+func (bgpr BlobGetPropertiesResponse) VersionID() string {
+	return bgpr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlobHierarchyListSegment ...
+type BlobHierarchyListSegment struct {
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName xml.Name `xml:"Blobs"`
+	BlobPrefixes []BlobPrefix `xml:"BlobPrefix"`
+	BlobItems []BlobItemInternal `xml:"Blob"`
+}
+
+// BlobItemInternal - An Azure Storage blob
+type BlobItemInternal struct {
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName xml.Name `xml:"Blob"`
+	Name string `xml:"Name"`
+	Deleted bool `xml:"Deleted"`
+	Snapshot string `xml:"Snapshot"`
+	VersionID *string `xml:"VersionId"`
+	IsCurrentVersion *bool `xml:"IsCurrentVersion"`
+	Properties BlobPropertiesInternal `xml:"Properties"`
+	Metadata Metadata `xml:"Metadata"`
+	BlobTags *BlobTags `xml:"Tags"`
+	ObjectReplicationMetadata map[string]string `xml:"ObjectReplicationMetadata"`
+	HasVersionsOnly *bool `xml:"HasVersionsOnly"`
+}
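BlobItemInternal above maps one <Blob> element of a List Blobs response onto struct fields via the xml tags. A trimmed-down, runnable sketch of that mapping (the local type and sample payload are illustrative stand-ins, not the vendored definitions):

package main

import (
	"encoding/xml"
	"fmt"
)

// blobItem keeps just enough of BlobItemInternal to show the tag mapping.
type blobItem struct {
	XMLName  xml.Name `xml:"Blob"`
	Name     string   `xml:"Name"`
	Deleted  bool     `xml:"Deleted"`
	Snapshot string   `xml:"Snapshot"`
}

func main() {
	// Hand-written sample payload; element names follow the struct tags.
	payload := `<Blob><Name>backups/db-0001.tar</Name><Deleted>false</Deleted><Snapshot></Snapshot></Blob>`
	var b blobItem
	if err := xml.Unmarshal([]byte(payload), &b); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", b)
}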
+// BlobPrefix ...
+type BlobPrefix struct {
+	Name string `xml:"Name"`
+}
+
+// BlobPropertiesInternal - Properties of a blob
+type BlobPropertiesInternal struct {
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName xml.Name `xml:"Properties"`
+	CreationTime *time.Time `xml:"Creation-Time"`
+	LastModified time.Time `xml:"Last-Modified"`
+	Etag ETag `xml:"Etag"`
+	// ContentLength - Size in bytes
+	ContentLength *int64 `xml:"Content-Length"`
+	ContentType *string `xml:"Content-Type"`
+	ContentEncoding *string `xml:"Content-Encoding"`
+	ContentLanguage *string `xml:"Content-Language"`
+	ContentMD5 []byte `xml:"Content-MD5"`
+	ContentDisposition *string `xml:"Content-Disposition"`
+	CacheControl *string `xml:"Cache-Control"`
+	BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"`
+	// BlobType - Possible values include: 'BlobBlockBlob', 'BlobPageBlob', 'BlobAppendBlob', 'BlobNone'
+	BlobType BlobType `xml:"BlobType"`
+	// LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone'
+	LeaseStatus LeaseStatusType `xml:"LeaseStatus"`
+	// LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone'
+	LeaseState LeaseStateType `xml:"LeaseState"`
+	// LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone'
+	LeaseDuration LeaseDurationType `xml:"LeaseDuration"`
+	CopyID *string `xml:"CopyId"`
+	// CopyStatus - Possible values include: 'CopyStatusPending', 'CopyStatusSuccess', 'CopyStatusAborted', 'CopyStatusFailed', 'CopyStatusNone'
+	CopyStatus CopyStatusType `xml:"CopyStatus"`
+	CopySource *string `xml:"CopySource"`
+	CopyProgress *string `xml:"CopyProgress"`
+	CopyCompletionTime *time.Time `xml:"CopyCompletionTime"`
+	CopyStatusDescription *string `xml:"CopyStatusDescription"`
+	ServerEncrypted *bool `xml:"ServerEncrypted"`
+	IncrementalCopy *bool `xml:"IncrementalCopy"`
+	DestinationSnapshot *string `xml:"DestinationSnapshot"`
+	DeletedTime *time.Time `xml:"DeletedTime"`
+	RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
+	// AccessTier - Possible values include: 'AccessTierP4', 'AccessTierP6', 'AccessTierP10', 'AccessTierP15', 'AccessTierP20', 'AccessTierP30', 'AccessTierP40', 'AccessTierP50', 'AccessTierP60', 'AccessTierP70', 'AccessTierP80', 'AccessTierHot', 'AccessTierCool', 'AccessTierArchive', 'AccessTierNone'
+	AccessTier AccessTierType `xml:"AccessTier"`
+	AccessTierInferred *bool `xml:"AccessTierInferred"`
+	// ArchiveStatus - Possible values include: 'ArchiveStatusRehydratePendingToHot', 'ArchiveStatusRehydratePendingToCool', 'ArchiveStatusNone'
+	ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"`
+	CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"`
+	// EncryptionScope - The name of the encryption scope under which the blob is encrypted.
+	EncryptionScope *string `xml:"EncryptionScope"`
+	AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"`
+	TagCount *int32 `xml:"TagCount"`
+	ExpiresOn *time.Time `xml:"Expiry-Time"`
+	IsSealed *bool `xml:"Sealed"`
+	// RehydratePriority - Possible values include: 'RehydratePriorityHigh', 'RehydratePriorityStandard', 'RehydratePriorityNone'
+	RehydratePriority RehydratePriorityType `xml:"RehydratePriority"`
+	LastAccessedOn *time.Time `xml:"LastAccessTime"`
+	ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"`
+	// ImmutabilityPolicyMode - Possible values include: 'BlobImmutabilityPolicyModeMutable', 'BlobImmutabilityPolicyModeUnlocked', 'BlobImmutabilityPolicyModeLocked', 'BlobImmutabilityPolicyModeNone'
+	ImmutabilityPolicyMode BlobImmutabilityPolicyModeType `xml:"ImmutabilityPolicyMode"`
+	LegalHold *bool `xml:"LegalHold"`
+	Owner *string `xml:"Owner"`
+	Group *string `xml:"Group"`
+	Permissions *string `xml:"Permissions"`
+	ACL *string `xml:"Acl"`
+}
+
+// MarshalXML implements the xml.Marshaler interface for BlobPropertiesInternal.
+func (bpi BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	bpi2 := (*blobPropertiesInternal)(unsafe.Pointer(&bpi))
+	return e.EncodeElement(*bpi2, start)
+}
+
+// UnmarshalXML implements the xml.Unmarshaler interface for BlobPropertiesInternal.
+func (bpi *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	bpi2 := (*blobPropertiesInternal)(unsafe.Pointer(bpi))
+	return d.DecodeElement(bpi2, &start)
+}
+
+// BlobReleaseLeaseResponse ...
+type BlobReleaseLeaseResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (brlr BlobReleaseLeaseResponse) Response() *http.Response {
+	return brlr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (brlr BlobReleaseLeaseResponse) StatusCode() int {
+	return brlr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (brlr BlobReleaseLeaseResponse) Status() string {
+	return brlr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (brlr BlobReleaseLeaseResponse) ClientRequestID() string {
+	return brlr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (brlr BlobReleaseLeaseResponse) Date() time.Time {
+	s := brlr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (brlr BlobReleaseLeaseResponse) ErrorCode() string {
+	return brlr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (brlr BlobReleaseLeaseResponse) ETag() ETag {
+	return ETag(brlr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (brlr BlobReleaseLeaseResponse) LastModified() time.Time {
+	s := brlr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (brlr BlobReleaseLeaseResponse) RequestID() string {
+	return brlr.rawResponse.Header.Get("x-ms-request-id")
+}
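The MarshalXML/UnmarshalXML pair on BlobPropertiesInternal above casts the exported struct to a layout-identical unexported shadow type (blobPropertiesInternal, defined elsewhere in the vendored file) via unsafe.Pointer, so the encoder sees the shadow's tags — typically to apply custom time formats. A minimal runnable sketch of that trick with two made-up types:

package main

import (
	"encoding/xml"
	"fmt"
	"unsafe"
)

// Public type with one set of XML tags.
type props struct {
	Etag string `xml:"Etag"`
}

// Layout-identical shadow type with different tags; the cast below is only
// safe because the field layouts match exactly.
type propsShadow struct {
	Etag string `xml:"ETag"`
}

func (p props) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	p2 := (*propsShadow)(unsafe.Pointer(&p))
	return e.EncodeElement(*p2, start)
}

func main() {
	out, _ := xml.Marshal(props{Etag: "0x8DB0"})
	fmt.Println(string(out)) // <props><ETag>0x8DB0</ETag></props>
}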
+// Version returns the value for header x-ms-version.
+func (brlr BlobReleaseLeaseResponse) Version() string {
+	return brlr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobRenewLeaseResponse ...
+type BlobRenewLeaseResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (brlr BlobRenewLeaseResponse) Response() *http.Response {
+	return brlr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (brlr BlobRenewLeaseResponse) StatusCode() int {
+	return brlr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (brlr BlobRenewLeaseResponse) Status() string {
+	return brlr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (brlr BlobRenewLeaseResponse) ClientRequestID() string {
+	return brlr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (brlr BlobRenewLeaseResponse) Date() time.Time {
+	s := brlr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (brlr BlobRenewLeaseResponse) ErrorCode() string {
+	return brlr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (brlr BlobRenewLeaseResponse) ETag() ETag {
+	return ETag(brlr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (brlr BlobRenewLeaseResponse) LastModified() time.Time {
+	s := brlr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// LeaseID returns the value for header x-ms-lease-id.
+func (brlr BlobRenewLeaseResponse) LeaseID() string {
+	return brlr.rawResponse.Header.Get("x-ms-lease-id")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (brlr BlobRenewLeaseResponse) RequestID() string {
+	return brlr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (brlr BlobRenewLeaseResponse) Version() string {
+	return brlr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobSetExpiryResponse ...
+type BlobSetExpiryResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bser BlobSetExpiryResponse) Response() *http.Response {
+	return bser.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bser BlobSetExpiryResponse) StatusCode() int {
+	return bser.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bser BlobSetExpiryResponse) Status() string {
+	return bser.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bser BlobSetExpiryResponse) ClientRequestID() string {
+	return bser.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bser BlobSetExpiryResponse) Date() time.Time {
+	s := bser.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bser BlobSetExpiryResponse) ErrorCode() string {
+	return bser.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bser BlobSetExpiryResponse) ETag() ETag {
+	return ETag(bser.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bser BlobSetExpiryResponse) LastModified() time.Time {
+	s := bser.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bser BlobSetExpiryResponse) RequestID() string {
+	return bser.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bser BlobSetExpiryResponse) Version() string {
+	return bser.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobSetHTTPHeadersResponse ...
+type BlobSetHTTPHeadersResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bshhr BlobSetHTTPHeadersResponse) Response() *http.Response {
+	return bshhr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bshhr BlobSetHTTPHeadersResponse) StatusCode() int {
+	return bshhr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bshhr BlobSetHTTPHeadersResponse) Status() string {
+	return bshhr.rawResponse.Status
+}
+
+// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
+func (bshhr BlobSetHTTPHeadersResponse) BlobSequenceNumber() int64 {
+	s := bshhr.rawResponse.Header.Get("x-ms-blob-sequence-number")
+	if s == "" {
+		return -1
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bshhr BlobSetHTTPHeadersResponse) ClientRequestID() string {
+	return bshhr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bshhr BlobSetHTTPHeadersResponse) Date() time.Time {
+	s := bshhr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bshhr BlobSetHTTPHeadersResponse) ErrorCode() string {
+	return bshhr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bshhr BlobSetHTTPHeadersResponse) ETag() ETag {
+	return ETag(bshhr.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bshhr BlobSetHTTPHeadersResponse) LastModified() time.Time {
+	s := bshhr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bshhr BlobSetHTTPHeadersResponse) RequestID() string {
+	return bshhr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bshhr BlobSetHTTPHeadersResponse) Version() string {
+	return bshhr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobSetImmutabilityPolicyResponse ...
+type BlobSetImmutabilityPolicyResponse struct {
+	rawResponse *http.Response
+}
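Worth noting about the integer accessors above (BlobSequenceNumber, BlobCommittedBlockCount, TagCount, ...): an absent header yields -1 while a malformed one yields 0, so callers cannot distinguish a parse failure from a genuine 0. A standalone sketch of that convention (the helper name and sample values are invented):

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// headerInt64 mirrors the generated accessors: -1 when absent, 0 when malformed.
func headerInt64(h http.Header, key string) int64 {
	s := h.Get(key)
	if s == "" {
		return -1
	}
	i, err := strconv.ParseInt(s, 10, 64)
	if err != nil {
		return 0
	}
	return i
}

func main() {
	h := http.Header{}
	fmt.Println(headerInt64(h, "x-ms-blob-sequence-number")) // -1 (absent)
	h.Set("x-ms-blob-sequence-number", "7")
	fmt.Println(headerInt64(h, "x-ms-blob-sequence-number")) // 7
	h.Set("x-ms-blob-sequence-number", "seven")
	fmt.Println(headerInt64(h, "x-ms-blob-sequence-number")) // 0 (malformed)
}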
+// Response returns the raw HTTP response object.
+func (bsipr BlobSetImmutabilityPolicyResponse) Response() *http.Response {
+	return bsipr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bsipr BlobSetImmutabilityPolicyResponse) StatusCode() int {
+	return bsipr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bsipr BlobSetImmutabilityPolicyResponse) Status() string {
+	return bsipr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bsipr BlobSetImmutabilityPolicyResponse) ClientRequestID() string {
+	return bsipr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bsipr BlobSetImmutabilityPolicyResponse) Date() time.Time {
+	s := bsipr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bsipr BlobSetImmutabilityPolicyResponse) ErrorCode() string {
+	return bsipr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ImmutabilityPolicyExpiry returns the value for header x-ms-immutability-policy-until-date.
+func (bsipr BlobSetImmutabilityPolicyResponse) ImmutabilityPolicyExpiry() time.Time {
+	s := bsipr.rawResponse.Header.Get("x-ms-immutability-policy-until-date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode.
+func (bsipr BlobSetImmutabilityPolicyResponse) ImmutabilityPolicyMode() BlobImmutabilityPolicyModeType {
+	return BlobImmutabilityPolicyModeType(bsipr.rawResponse.Header.Get("x-ms-immutability-policy-mode"))
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bsipr BlobSetImmutabilityPolicyResponse) RequestID() string {
+	return bsipr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bsipr BlobSetImmutabilityPolicyResponse) Version() string {
+	return bsipr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobSetLegalHoldResponse ...
+type BlobSetLegalHoldResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bslhr BlobSetLegalHoldResponse) Response() *http.Response {
+	return bslhr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bslhr BlobSetLegalHoldResponse) StatusCode() int {
+	return bslhr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bslhr BlobSetLegalHoldResponse) Status() string {
+	return bslhr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bslhr BlobSetLegalHoldResponse) ClientRequestID() string {
+	return bslhr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bslhr BlobSetLegalHoldResponse) Date() time.Time {
+	s := bslhr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bslhr BlobSetLegalHoldResponse) ErrorCode() string {
+	return bslhr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// LegalHold returns the value for header x-ms-legal-hold.
+func (bslhr BlobSetLegalHoldResponse) LegalHold() string {
+	return bslhr.rawResponse.Header.Get("x-ms-legal-hold")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bslhr BlobSetLegalHoldResponse) RequestID() string {
+	return bslhr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bslhr BlobSetLegalHoldResponse) Version() string {
+	return bslhr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobSetMetadataResponse ...
+type BlobSetMetadataResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bsmr BlobSetMetadataResponse) Response() *http.Response {
+	return bsmr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bsmr BlobSetMetadataResponse) StatusCode() int {
+	return bsmr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bsmr BlobSetMetadataResponse) Status() string {
+	return bsmr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bsmr BlobSetMetadataResponse) ClientRequestID() string {
+	return bsmr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bsmr BlobSetMetadataResponse) Date() time.Time {
+	s := bsmr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bsmr BlobSetMetadataResponse) EncryptionKeySha256() string {
+	return bsmr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bsmr BlobSetMetadataResponse) EncryptionScope() string {
+	return bsmr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bsmr BlobSetMetadataResponse) ErrorCode() string {
+	return bsmr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bsmr BlobSetMetadataResponse) ETag() ETag {
+	return ETag(bsmr.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bsmr BlobSetMetadataResponse) IsServerEncrypted() string {
+	return bsmr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bsmr BlobSetMetadataResponse) LastModified() time.Time {
+	s := bsmr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bsmr BlobSetMetadataResponse) RequestID() string {
+	return bsmr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bsmr BlobSetMetadataResponse) Version() string {
+	return bsmr.rawResponse.Header.Get("x-ms-version")
+}
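Note that boolean-valued headers (IsServerEncrypted, LegalHold, IsSealed, ...) are surfaced as raw strings by these wrappers; any conversion is left to the caller. A tolerant caller-side helper, as a sketch (the helper and its absent-means-false policy are an assumption, not part of the vendored API):

package main

import (
	"fmt"
	"strconv"
)

// headerBool converts a boolean-ish header string; strconv.ParseBool
// accepts "true", "TRUE", "1", etc. Absent or malformed counts as false.
func headerBool(s string) bool {
	b, err := strconv.ParseBool(s)
	if err != nil {
		return false
	}
	return b
}

func main() {
	fmt.Println(headerBool("true"), headerBool(""), headerBool("nonsense"))
	// Output: true false false
}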
+// VersionID returns the value for header x-ms-version-id.
+func (bsmr BlobSetMetadataResponse) VersionID() string {
+	return bsmr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlobSetTagsResponse ...
+type BlobSetTagsResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bstr BlobSetTagsResponse) Response() *http.Response {
+	return bstr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bstr BlobSetTagsResponse) StatusCode() int {
+	return bstr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bstr BlobSetTagsResponse) Status() string {
+	return bstr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bstr BlobSetTagsResponse) ClientRequestID() string {
+	return bstr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bstr BlobSetTagsResponse) Date() time.Time {
+	s := bstr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bstr BlobSetTagsResponse) ErrorCode() string {
+	return bstr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bstr BlobSetTagsResponse) RequestID() string {
+	return bstr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bstr BlobSetTagsResponse) Version() string {
+	return bstr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobSetTierResponse ...
+type BlobSetTierResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bstr BlobSetTierResponse) Response() *http.Response {
+	return bstr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bstr BlobSetTierResponse) StatusCode() int {
+	return bstr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bstr BlobSetTierResponse) Status() string {
+	return bstr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bstr BlobSetTierResponse) ClientRequestID() string {
+	return bstr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bstr BlobSetTierResponse) ErrorCode() string {
+	return bstr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bstr BlobSetTierResponse) RequestID() string {
+	return bstr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bstr BlobSetTierResponse) Version() string {
+	return bstr.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobStartCopyFromURLResponse ...
+type BlobStartCopyFromURLResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bscfur BlobStartCopyFromURLResponse) Response() *http.Response {
+	return bscfur.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bscfur BlobStartCopyFromURLResponse) StatusCode() int {
+	return bscfur.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bscfur BlobStartCopyFromURLResponse) Status() string {
+	return bscfur.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bscfur BlobStartCopyFromURLResponse) ClientRequestID() string {
+	return bscfur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// CopyID returns the value for header x-ms-copy-id.
+func (bscfur BlobStartCopyFromURLResponse) CopyID() string {
+	return bscfur.rawResponse.Header.Get("x-ms-copy-id")
+}
+
+// CopyStatus returns the value for header x-ms-copy-status.
+func (bscfur BlobStartCopyFromURLResponse) CopyStatus() CopyStatusType {
+	return CopyStatusType(bscfur.rawResponse.Header.Get("x-ms-copy-status"))
+}
+
+// Date returns the value for header Date.
+func (bscfur BlobStartCopyFromURLResponse) Date() time.Time {
+	s := bscfur.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bscfur BlobStartCopyFromURLResponse) ErrorCode() string {
+	return bscfur.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bscfur BlobStartCopyFromURLResponse) ETag() ETag {
+	return ETag(bscfur.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bscfur BlobStartCopyFromURLResponse) LastModified() time.Time {
+	s := bscfur.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bscfur BlobStartCopyFromURLResponse) RequestID() string {
+	return bscfur.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bscfur BlobStartCopyFromURLResponse) Version() string {
+	return bscfur.rawResponse.Header.Get("x-ms-version")
+}
+
+// VersionID returns the value for header x-ms-version-id.
+func (bscfur BlobStartCopyFromURLResponse) VersionID() string {
+	return bscfur.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlobTag ...
+type BlobTag struct {
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName xml.Name `xml:"Tag"`
+	Key string `xml:"Key"`
+	Value string `xml:"Value"`
+}
+
+// BlobTags - Blob tags
+type BlobTags struct {
+	rawResponse *http.Response
+	// XMLName is used for marshalling and is subject to removal in a future release.
+	XMLName xml.Name `xml:"Tags"`
+	BlobTagSet []BlobTag `xml:"TagSet>Tag"`
+}
+
+// Response returns the raw HTTP response object.
+func (bt BlobTags) Response() *http.Response {
+	return bt.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bt BlobTags) StatusCode() int {
+	return bt.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bt BlobTags) Status() string {
+	return bt.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bt BlobTags) ClientRequestID() string {
+	return bt.rawResponse.Header.Get("x-ms-client-request-id")
+}
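BlobTags above uses the `xml:"TagSet>Tag"` path tag, so each tag is nested under a shared <TagSet> parent inside <Tags>. A runnable sketch of the resulting wire shape, using local stand-in types (the key/value pair is invented):

package main

import (
	"encoding/xml"
	"fmt"
)

// Local stand-ins mirroring BlobTag/BlobTags above.
type tag struct {
	Key   string `xml:"Key"`
	Value string `xml:"Value"`
}

type tags struct {
	XMLName    xml.Name `xml:"Tags"`
	BlobTagSet []tag    `xml:"TagSet>Tag"`
}

func main() {
	out, _ := xml.Marshal(tags{BlobTagSet: []tag{{Key: "env", Value: "ci"}}})
	fmt.Println(string(out))
	// <Tags><TagSet><Tag><Key>env</Key><Value>ci</Value></Tag></TagSet></Tags>
}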
+// Date returns the value for header Date.
+func (bt BlobTags) Date() time.Time {
+	s := bt.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bt BlobTags) ErrorCode() string {
+	return bt.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bt BlobTags) RequestID() string {
+	return bt.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bt BlobTags) Version() string {
+	return bt.rawResponse.Header.Get("x-ms-version")
+}
+
+// BlobUndeleteResponse ...
+type BlobUndeleteResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bur BlobUndeleteResponse) Response() *http.Response {
+	return bur.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bur BlobUndeleteResponse) StatusCode() int {
+	return bur.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bur BlobUndeleteResponse) Status() string {
+	return bur.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bur BlobUndeleteResponse) ClientRequestID() string {
+	return bur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// Date returns the value for header Date.
+func (bur BlobUndeleteResponse) Date() time.Time {
+	s := bur.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bur BlobUndeleteResponse) ErrorCode() string {
+	return bur.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bur BlobUndeleteResponse) RequestID() string {
+	return bur.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bur BlobUndeleteResponse) Version() string {
+	return bur.rawResponse.Header.Get("x-ms-version")
+}
+
+// Block - Represents a single block in a block blob. It describes the block's ID and size.
+type Block struct {
+	// Name - The base64 encoded block ID.
+	Name string `xml:"Name"`
+	// Size - The block size in bytes.
+	Size int64 `xml:"Size"`
+}
+
+// BlockBlobCommitBlockListResponse ...
+type BlockBlobCommitBlockListResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bbcblr BlockBlobCommitBlockListResponse) Response() *http.Response {
+	return bbcblr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bbcblr BlockBlobCommitBlockListResponse) StatusCode() int {
+	return bbcblr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bbcblr BlockBlobCommitBlockListResponse) Status() string {
+	return bbcblr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbcblr BlockBlobCommitBlockListResponse) ClientRequestID() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (bbcblr BlockBlobCommitBlockListResponse) ContentMD5() []byte {
+	s := bbcblr.rawResponse.Header.Get("Content-MD5")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// Date returns the value for header Date.
+func (bbcblr BlockBlobCommitBlockListResponse) Date() time.Time {
+	s := bbcblr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbcblr BlockBlobCommitBlockListResponse) EncryptionKeySha256() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbcblr BlockBlobCommitBlockListResponse) EncryptionScope() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bbcblr BlockBlobCommitBlockListResponse) ErrorCode() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bbcblr BlockBlobCommitBlockListResponse) ETag() ETag {
+	return ETag(bbcblr.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bbcblr BlockBlobCommitBlockListResponse) IsServerEncrypted() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bbcblr BlockBlobCommitBlockListResponse) LastModified() time.Time {
+	s := bbcblr.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bbcblr BlockBlobCommitBlockListResponse) RequestID() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bbcblr BlockBlobCommitBlockListResponse) Version() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-version")
+}
+
+// VersionID returns the value for header x-ms-version-id.
+func (bbcblr BlockBlobCommitBlockListResponse) VersionID() string {
+	return bbcblr.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (bbcblr BlockBlobCommitBlockListResponse) XMsContentCrc64() []byte {
+	s := bbcblr.rawResponse.Header.Get("x-ms-content-crc64")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// BlockBlobPutBlobFromURLResponse ...
+type BlockBlobPutBlobFromURLResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) Response() *http.Response {
+	return bbpbfur.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) StatusCode() int {
+	return bbpbfur.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bbpbfur BlockBlobPutBlobFromURLResponse) Status() string {
+	return bbpbfur.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) ClientRequestID() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) ContentMD5() []byte {
+	s := bbpbfur.rawResponse.Header.Get("Content-MD5")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// Date returns the value for header Date.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) Date() time.Time {
+	s := bbpbfur.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) EncryptionKeySha256() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) EncryptionScope() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) ErrorCode() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) ETag() ETag {
+	return ETag(bbpbfur.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) IsServerEncrypted() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) LastModified() time.Time {
+	s := bbpbfur.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) RequestID() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) Version() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-version")
+}
+
+// VersionID returns the value for header x-ms-version-id.
+func (bbpbfur BlockBlobPutBlobFromURLResponse) VersionID() string {
+	return bbpbfur.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlockBlobStageBlockFromURLResponse ...
+type BlockBlobStageBlockFromURLResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) Response() *http.Response {
+	return bbsbfur.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) StatusCode() int {
+	return bbsbfur.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bbsbfur BlockBlobStageBlockFromURLResponse) Status() string {
+	return bbsbfur.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) ClientRequestID() string {
+	return bbsbfur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) ContentMD5() []byte {
+	s := bbsbfur.rawResponse.Header.Get("Content-MD5")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// Date returns the value for header Date.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) Date() time.Time {
+	s := bbsbfur.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionKeySha256() string {
+	return bbsbfur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) EncryptionScope() string {
+	return bbsbfur.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) ErrorCode() string {
+	return bbsbfur.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) IsServerEncrypted() string {
+	return bbsbfur.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) RequestID() string {
+	return bbsbfur.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) Version() string {
+	return bbsbfur.rawResponse.Header.Get("x-ms-version")
+}
+
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (bbsbfur BlockBlobStageBlockFromURLResponse) XMsContentCrc64() []byte {
+	s := bbsbfur.rawResponse.Header.Get("x-ms-content-crc64")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// BlockBlobStageBlockResponse ...
+type BlockBlobStageBlockResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bbsbr BlockBlobStageBlockResponse) Response() *http.Response {
+	return bbsbr.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bbsbr BlockBlobStageBlockResponse) StatusCode() int {
+	return bbsbr.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bbsbr BlockBlobStageBlockResponse) Status() string {
+	return bbsbr.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbsbr BlockBlobStageBlockResponse) ClientRequestID() string {
+	return bbsbr.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (bbsbr BlockBlobStageBlockResponse) ContentMD5() []byte {
+	s := bbsbr.rawResponse.Header.Get("Content-MD5")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// Date returns the value for header Date.
+func (bbsbr BlockBlobStageBlockResponse) Date() time.Time {
+	s := bbsbr.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbsbr BlockBlobStageBlockResponse) EncryptionKeySha256() string {
+	return bbsbr.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbsbr BlockBlobStageBlockResponse) EncryptionScope() string {
+	return bbsbr.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bbsbr BlockBlobStageBlockResponse) ErrorCode() string {
+	return bbsbr.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bbsbr BlockBlobStageBlockResponse) IsServerEncrypted() string {
+	return bbsbr.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bbsbr BlockBlobStageBlockResponse) RequestID() string {
+	return bbsbr.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bbsbr BlockBlobStageBlockResponse) Version() string {
+	return bbsbr.rawResponse.Header.Get("x-ms-version")
+}
+
+// XMsContentCrc64 returns the value for header x-ms-content-crc64.
+func (bbsbr BlockBlobStageBlockResponse) XMsContentCrc64() []byte {
+	s := bbsbr.rawResponse.Header.Get("x-ms-content-crc64")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// BlockBlobUploadResponse ...
+type BlockBlobUploadResponse struct {
+	rawResponse *http.Response
+}
+
+// Response returns the raw HTTP response object.
+func (bbur BlockBlobUploadResponse) Response() *http.Response {
+	return bbur.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bbur BlockBlobUploadResponse) StatusCode() int {
+	return bbur.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bbur BlockBlobUploadResponse) Status() string {
+	return bbur.rawResponse.Status
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bbur BlockBlobUploadResponse) ClientRequestID() string {
+	return bbur.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (bbur BlockBlobUploadResponse) ContentMD5() []byte {
+	s := bbur.rawResponse.Header.Get("Content-MD5")
+	if s == "" {
+		return nil
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		b = nil
+	}
+	return b
+}
+
+// Date returns the value for header Date.
+func (bbur BlockBlobUploadResponse) Date() time.Time {
+	s := bbur.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256.
+func (bbur BlockBlobUploadResponse) EncryptionKeySha256() string {
+	return bbur.rawResponse.Header.Get("x-ms-encryption-key-sha256")
+}
+
+// EncryptionScope returns the value for header x-ms-encryption-scope.
+func (bbur BlockBlobUploadResponse) EncryptionScope() string {
+	return bbur.rawResponse.Header.Get("x-ms-encryption-scope")
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bbur BlockBlobUploadResponse) ErrorCode() string {
+	return bbur.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bbur BlockBlobUploadResponse) ETag() ETag {
+	return ETag(bbur.rawResponse.Header.Get("ETag"))
+}
+
+// IsServerEncrypted returns the value for header x-ms-request-server-encrypted.
+func (bbur BlockBlobUploadResponse) IsServerEncrypted() string {
+	return bbur.rawResponse.Header.Get("x-ms-request-server-encrypted")
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bbur BlockBlobUploadResponse) LastModified() time.Time {
+	s := bbur.rawResponse.Header.Get("Last-Modified")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// RequestID returns the value for header x-ms-request-id.
+func (bbur BlockBlobUploadResponse) RequestID() string {
+	return bbur.rawResponse.Header.Get("x-ms-request-id")
+}
+
+// Version returns the value for header x-ms-version.
+func (bbur BlockBlobUploadResponse) Version() string {
+	return bbur.rawResponse.Header.Get("x-ms-version")
+}
+
+// VersionID returns the value for header x-ms-version-id.
+func (bbur BlockBlobUploadResponse) VersionID() string {
+	return bbur.rawResponse.Header.Get("x-ms-version-id")
+}
+
+// BlockList ...
+type BlockList struct {
+	rawResponse *http.Response
+	CommittedBlocks []Block `xml:"CommittedBlocks>Block"`
+	UncommittedBlocks []Block `xml:"UncommittedBlocks>Block"`
+}
+
+// Response returns the raw HTTP response object.
+func (bl BlockList) Response() *http.Response {
+	return bl.rawResponse
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (bl BlockList) StatusCode() int {
+	return bl.rawResponse.StatusCode
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (bl BlockList) Status() string {
+	return bl.rawResponse.Status
+}
+
+// BlobContentLength returns the value for header x-ms-blob-content-length.
+func (bl BlockList) BlobContentLength() int64 {
+	s := bl.rawResponse.Header.Get("x-ms-blob-content-length")
+	if s == "" {
+		return -1
+	}
+	i, err := strconv.ParseInt(s, 10, 64)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
+
+// ClientRequestID returns the value for header x-ms-client-request-id.
+func (bl BlockList) ClientRequestID() string {
+	return bl.rawResponse.Header.Get("x-ms-client-request-id")
+}
+
+// ContentType returns the value for header Content-Type.
+func (bl BlockList) ContentType() string {
+	return bl.rawResponse.Header.Get("Content-Type")
+}
+
+// Date returns the value for header Date.
+func (bl BlockList) Date() time.Time {
+	s := bl.rawResponse.Header.Get("Date")
+	if s == "" {
+		return time.Time{}
+	}
+	t, err := time.Parse(time.RFC1123, s)
+	if err != nil {
+		t = time.Time{}
+	}
+	return t
+}
+
+// ErrorCode returns the value for header x-ms-error-code.
+func (bl BlockList) ErrorCode() string {
+	return bl.rawResponse.Header.Get("x-ms-error-code")
+}
+
+// ETag returns the value for header ETag.
+func (bl BlockList) ETag() ETag {
+	return ETag(bl.rawResponse.Header.Get("ETag"))
+}
+
+// LastModified returns the value for header Last-Modified.
+func (bl BlockList) LastModified() time.Time { + s := bl.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (bl BlockList) RequestID() string { + return bl.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (bl BlockList) Version() string { + return bl.rawResponse.Header.Get("x-ms-version") +} + +// BlockLookupList ... +type BlockLookupList struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"BlockList"` + Committed []string `xml:"Committed"` + Uncommitted []string `xml:"Uncommitted"` + Latest []string `xml:"Latest"` +} + +// ClearRange ... +type ClearRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// ContainerAcquireLeaseResponse ... +type ContainerAcquireLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (calr ContainerAcquireLeaseResponse) Response() *http.Response { + return calr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (calr ContainerAcquireLeaseResponse) StatusCode() int { + return calr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (calr ContainerAcquireLeaseResponse) Status() string { + return calr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (calr ContainerAcquireLeaseResponse) ClientRequestID() string { + return calr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (calr ContainerAcquireLeaseResponse) Date() time.Time { + s := calr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (calr ContainerAcquireLeaseResponse) ErrorCode() string { + return calr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (calr ContainerAcquireLeaseResponse) ETag() ETag { + return ETag(calr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (calr ContainerAcquireLeaseResponse) LastModified() time.Time { + s := calr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (calr ContainerAcquireLeaseResponse) LeaseID() string { + return calr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (calr ContainerAcquireLeaseResponse) RequestID() string { + return calr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (calr ContainerAcquireLeaseResponse) Version() string { + return calr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerBreakLeaseResponse ... +type ContainerBreakLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
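
BlockLookupList above is the request body for the Put Block List operation: each block ID appears as a repeated <Committed>, <Uncommitted>, or <Latest> child of <BlockList>, telling the service where to find each block. A rough sketch of that wire shape using a stand-in struct (the lowercase type and the sample base64 block IDs are invented for illustration):

package main

import (
	"encoding/xml"
	"fmt"
)

// blockLookupList mirrors the XML tags on BlockLookupList above.
type blockLookupList struct {
	XMLName     xml.Name `xml:"BlockList"`
	Committed   []string `xml:"Committed"`
	Uncommitted []string `xml:"Uncommitted"`
	Latest      []string `xml:"Latest"`
}

func main() {
	// Block IDs are base64 strings; "Latest" means "use whichever copy of
	// this block was staged most recently, committed or not".
	body, err := xml.Marshal(blockLookupList{Latest: []string{"AAAA", "AAAB"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
	// <BlockList><Latest>AAAA</Latest><Latest>AAAB</Latest></BlockList>
}
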
+func (cblr ContainerBreakLeaseResponse) Response() *http.Response { + return cblr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cblr ContainerBreakLeaseResponse) StatusCode() int { + return cblr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cblr ContainerBreakLeaseResponse) Status() string { + return cblr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cblr ContainerBreakLeaseResponse) ClientRequestID() string { + return cblr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cblr ContainerBreakLeaseResponse) Date() time.Time { + s := cblr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cblr ContainerBreakLeaseResponse) ErrorCode() string { + return cblr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cblr ContainerBreakLeaseResponse) ETag() ETag { + return ETag(cblr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cblr ContainerBreakLeaseResponse) LastModified() time.Time { + s := cblr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseTime returns the value for header x-ms-lease-time. +func (cblr ContainerBreakLeaseResponse) LeaseTime() int32 { + s := cblr.rawResponse.Header.Get("x-ms-lease-time") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// RequestID returns the value for header x-ms-request-id. +func (cblr ContainerBreakLeaseResponse) RequestID() string { + return cblr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cblr ContainerBreakLeaseResponse) Version() string { + return cblr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerChangeLeaseResponse ... +type ContainerChangeLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cclr ContainerChangeLeaseResponse) Response() *http.Response { + return cclr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cclr ContainerChangeLeaseResponse) StatusCode() int { + return cclr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cclr ContainerChangeLeaseResponse) Status() string { + return cclr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cclr ContainerChangeLeaseResponse) ClientRequestID() string { + return cclr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cclr ContainerChangeLeaseResponse) Date() time.Time { + s := cclr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
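
LeaseTime() above also shows how the generated code distinguishes "header absent" from "header present but malformed": absence returns the sentinel -1, while a parse failure returns 0. Callers that care about the difference have to remember both conventions. A compact sketch of that behavior (the helper name is illustrative):

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// leaseTime mirrors the generated accessor: -1 means the header was not
// present at all; 0 can mean either a real zero or a malformed value.
func leaseTime(h http.Header) int32 {
	s := h.Get("x-ms-lease-time")
	if s == "" {
		return -1
	}
	i, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		return 0
	}
	return int32(i)
}

func main() {
	h := http.Header{}
	fmt.Println(leaseTime(h)) // -1: absent
	h.Set("x-ms-lease-time", "25")
	fmt.Println(leaseTime(h)) // 25
	h.Set("x-ms-lease-time", "not-a-number")
	fmt.Println(leaseTime(h)) // 0: present but malformed
}
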
+func (cclr ContainerChangeLeaseResponse) ErrorCode() string { + return cclr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cclr ContainerChangeLeaseResponse) ETag() ETag { + return ETag(cclr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (cclr ContainerChangeLeaseResponse) LastModified() time.Time { + s := cclr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (cclr ContainerChangeLeaseResponse) LeaseID() string { + return cclr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (cclr ContainerChangeLeaseResponse) RequestID() string { + return cclr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cclr ContainerChangeLeaseResponse) Version() string { + return cclr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerCreateResponse ... +type ContainerCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (ccr ContainerCreateResponse) Response() *http.Response { + return ccr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ccr ContainerCreateResponse) StatusCode() int { + return ccr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ccr ContainerCreateResponse) Status() string { + return ccr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ccr ContainerCreateResponse) ClientRequestID() string { + return ccr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (ccr ContainerCreateResponse) Date() time.Time { + s := ccr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ccr ContainerCreateResponse) ErrorCode() string { + return ccr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (ccr ContainerCreateResponse) ETag() ETag { + return ETag(ccr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (ccr ContainerCreateResponse) LastModified() time.Time { + s := ccr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (ccr ContainerCreateResponse) RequestID() string { + return ccr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ccr ContainerCreateResponse) Version() string { + return ccr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerDeleteResponse ... +type ContainerDeleteResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cdr ContainerDeleteResponse) Response() *http.Response { + return cdr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. 
+func (cdr ContainerDeleteResponse) StatusCode() int { + return cdr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cdr ContainerDeleteResponse) Status() string { + return cdr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cdr ContainerDeleteResponse) ClientRequestID() string { + return cdr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cdr ContainerDeleteResponse) Date() time.Time { + s := cdr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cdr ContainerDeleteResponse) ErrorCode() string { + return cdr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (cdr ContainerDeleteResponse) RequestID() string { + return cdr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cdr ContainerDeleteResponse) Version() string { + return cdr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerGetAccountInfoResponse ... +type ContainerGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (cgair ContainerGetAccountInfoResponse) Response() *http.Response { + return cgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cgair ContainerGetAccountInfoResponse) StatusCode() int { + return cgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cgair ContainerGetAccountInfoResponse) Status() string { + return cgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (cgair ContainerGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(cgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cgair ContainerGetAccountInfoResponse) ClientRequestID() string { + return cgair.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cgair ContainerGetAccountInfoResponse) Date() time.Time { + s := cgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cgair ContainerGetAccountInfoResponse) ErrorCode() string { + return cgair.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (cgair ContainerGetAccountInfoResponse) RequestID() string { + return cgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (cgair ContainerGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(cgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (cgair ContainerGetAccountInfoResponse) Version() string { + return cgair.rawResponse.Header.Get("x-ms-version") +} + +// ContainerGetPropertiesResponse ... +type ContainerGetPropertiesResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. 
+func (cgpr ContainerGetPropertiesResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range cgpr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (cgpr ContainerGetPropertiesResponse) Response() *http.Response { + return cgpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (cgpr ContainerGetPropertiesResponse) StatusCode() int { + return cgpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (cgpr ContainerGetPropertiesResponse) Status() string { + return cgpr.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (cgpr ContainerGetPropertiesResponse) BlobPublicAccess() PublicAccessType { + return PublicAccessType(cgpr.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (cgpr ContainerGetPropertiesResponse) ClientRequestID() string { + return cgpr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (cgpr ContainerGetPropertiesResponse) Date() time.Time { + s := cgpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// DefaultEncryptionScope returns the value for header x-ms-default-encryption-scope. +func (cgpr ContainerGetPropertiesResponse) DefaultEncryptionScope() string { + return cgpr.rawResponse.Header.Get("x-ms-default-encryption-scope") +} + +// DenyEncryptionScopeOverride returns the value for header x-ms-deny-encryption-scope-override. +func (cgpr ContainerGetPropertiesResponse) DenyEncryptionScopeOverride() string { + return cgpr.rawResponse.Header.Get("x-ms-deny-encryption-scope-override") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (cgpr ContainerGetPropertiesResponse) ErrorCode() string { + return cgpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (cgpr ContainerGetPropertiesResponse) ETag() ETag { + return ETag(cgpr.rawResponse.Header.Get("ETag")) +} + +// HasImmutabilityPolicy returns the value for header x-ms-has-immutability-policy. +func (cgpr ContainerGetPropertiesResponse) HasImmutabilityPolicy() string { + return cgpr.rawResponse.Header.Get("x-ms-has-immutability-policy") +} + +// HasLegalHold returns the value for header x-ms-has-legal-hold. +func (cgpr ContainerGetPropertiesResponse) HasLegalHold() string { + return cgpr.rawResponse.Header.Get("x-ms-has-legal-hold") +} + +// IsImmutableStorageWithVersioningEnabled returns the value for header x-ms-immutable-storage-with-versioning-enabled. +func (cgpr ContainerGetPropertiesResponse) IsImmutableStorageWithVersioningEnabled() string { + return cgpr.rawResponse.Header.Get("x-ms-immutable-storage-with-versioning-enabled") +} + +// LastModified returns the value for header Last-Modified. +func (cgpr ContainerGetPropertiesResponse) LastModified() time.Time { + s := cgpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. 
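
NewMetadata above recovers user-defined metadata by scanning every response header for the metadata prefix; mdPrefix and mdPrefixLen are package-level constants defined elsewhere in this vendored file. A standalone sketch of the same scan, assuming mdPrefix is "x-ms-meta-"; the case-insensitive EqualFold comparison is what makes it survive Go's canonical X-Ms-Meta-* header keys:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// Assumed value of the package-level constant used by NewMetadata.
const mdPrefix = "x-ms-meta-"

var mdPrefixLen = len(mdPrefix)

func newMetadata(h http.Header) map[string]string {
	md := map[string]string{}
	for k, v := range h {
		// Case-insensitive prefix match: Go canonicalizes the key to
		// "X-Ms-Meta-...", so a plain strings.HasPrefix would never match.
		if len(k) > mdPrefixLen && strings.EqualFold(k[:mdPrefixLen], mdPrefix) {
			md[strings.ToLower(k[mdPrefixLen:])] = v[0]
		}
	}
	return md
}

func main() {
	h := http.Header{}
	h.Set("x-ms-meta-Owner", "storage-team")
	fmt.Println(newMetadata(h)) // map[owner:storage-team]
}
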
+func (cgpr ContainerGetPropertiesResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(cgpr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (cgpr ContainerGetPropertiesResponse) LeaseState() LeaseStateType { + return LeaseStateType(cgpr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (cgpr ContainerGetPropertiesResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(cgpr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (cgpr ContainerGetPropertiesResponse) RequestID() string { + return cgpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (cgpr ContainerGetPropertiesResponse) Version() string { + return cgpr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Container"` + Name string `xml:"Name"` + Deleted *bool `xml:"Deleted"` + Version *string `xml:"Version"` + Properties ContainerProperties `xml:"Properties"` + Metadata Metadata `xml:"Metadata"` +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + LastModified time.Time `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + // LeaseStatus - Possible values include: 'LeaseStatusLocked', 'LeaseStatusUnlocked', 'LeaseStatusNone' + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + // LeaseState - Possible values include: 'LeaseStateAvailable', 'LeaseStateLeased', 'LeaseStateExpired', 'LeaseStateBreaking', 'LeaseStateBroken', 'LeaseStateNone' + LeaseState LeaseStateType `xml:"LeaseState"` + // LeaseDuration - Possible values include: 'LeaseDurationInfinite', 'LeaseDurationFixed', 'LeaseDurationNone' + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + // PublicAccess - Possible values include: 'PublicAccessContainer', 'PublicAccessBlob', 'PublicAccessNone' + PublicAccess PublicAccessType `xml:"PublicAccess"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + DeletedTime *time.Time `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + // IsImmutableStorageWithVersioningEnabled - Indicates if version level worm is enabled on this container. + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` +} + +// MarshalXML implements the xml.Marshaler interface for ContainerProperties. +func (cp ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + cp2 := (*containerProperties)(unsafe.Pointer(&cp)) + return e.EncodeElement(*cp2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for ContainerProperties. +func (cp *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + cp2 := (*containerProperties)(unsafe.Pointer(cp)) + return d.DecodeElement(cp2, &start) +} + +// ContainerReleaseLeaseResponse ... +type ContainerReleaseLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. 
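
The MarshalXML/UnmarshalXML pair above uses an unsafe.Pointer cast to reinterpret ContainerProperties as containerProperties, an unexported twin type (defined elsewhere in this file) whose fields swap time.Time for a wrapper that formats as RFC1123. Because the two structs share an identical memory layout, the cast is free and the XML encoder simply sees the twin's field types. A reduced sketch of the technique with invented types:

package main

import (
	"encoding/xml"
	"fmt"
	"time"
	"unsafe"
)

// timeRFC1123 wraps time.Time purely to change its XML text encoding.
type timeRFC1123 time.Time

func (t timeRFC1123) MarshalText() ([]byte, error) {
	return []byte(time.Time(t).Format(time.RFC1123)), nil
}

// Public type with the field type callers want to work with.
type Properties struct {
	LastModified time.Time `xml:"Last-Modified"`
}

// Unexported twin with an identical layout but the wrapper field type.
type properties struct {
	LastModified timeRFC1123 `xml:"Last-Modified"`
}

// MarshalXML reinterprets the struct as its twin so the encoder picks up
// timeRFC1123's text format; this is safe only because the layouts match.
func (p Properties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	p2 := (*properties)(unsafe.Pointer(&p))
	return e.EncodeElement(*p2, start)
}

func main() {
	out, _ := xml.Marshal(Properties{LastModified: time.Unix(0, 0).UTC()})
	fmt.Println(string(out))
	// <Properties><Last-Modified>Thu, 01 Jan 1970 00:00:00 UTC</Last-Modified></Properties>
}
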
+func (crlr ContainerReleaseLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerReleaseLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerReleaseLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crlr ContainerReleaseLeaseResponse) ClientRequestID() string { + return crlr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crlr ContainerReleaseLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerReleaseLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerReleaseLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (crlr ContainerReleaseLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (crlr ContainerReleaseLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerReleaseLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRenameResponse ... +type ContainerRenameResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crr ContainerRenameResponse) Response() *http.Response { + return crr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crr ContainerRenameResponse) StatusCode() int { + return crr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crr ContainerRenameResponse) Status() string { + return crr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crr ContainerRenameResponse) ClientRequestID() string { + return crr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crr ContainerRenameResponse) Date() time.Time { + s := crr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crr ContainerRenameResponse) ErrorCode() string { + return crr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (crr ContainerRenameResponse) RequestID() string { + return crr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
+func (crr ContainerRenameResponse) Version() string { + return crr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRenewLeaseResponse ... +type ContainerRenewLeaseResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crlr ContainerRenewLeaseResponse) Response() *http.Response { + return crlr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crlr ContainerRenewLeaseResponse) StatusCode() int { + return crlr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crlr ContainerRenewLeaseResponse) Status() string { + return crlr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crlr ContainerRenewLeaseResponse) ClientRequestID() string { + return crlr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crlr ContainerRenewLeaseResponse) Date() time.Time { + s := crlr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (crlr ContainerRenewLeaseResponse) ErrorCode() string { + return crlr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (crlr ContainerRenewLeaseResponse) ETag() ETag { + return ETag(crlr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (crlr ContainerRenewLeaseResponse) LastModified() time.Time { + s := crlr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseID returns the value for header x-ms-lease-id. +func (crlr ContainerRenewLeaseResponse) LeaseID() string { + return crlr.rawResponse.Header.Get("x-ms-lease-id") +} + +// RequestID returns the value for header x-ms-request-id. +func (crlr ContainerRenewLeaseResponse) RequestID() string { + return crlr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crlr ContainerRenewLeaseResponse) Version() string { + return crlr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerRestoreResponse ... +type ContainerRestoreResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (crr ContainerRestoreResponse) Response() *http.Response { + return crr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (crr ContainerRestoreResponse) StatusCode() int { + return crr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (crr ContainerRestoreResponse) Status() string { + return crr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (crr ContainerRestoreResponse) ClientRequestID() string { + return crr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (crr ContainerRestoreResponse) Date() time.Time { + s := crr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. 
+func (crr ContainerRestoreResponse) ErrorCode() string { + return crr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (crr ContainerRestoreResponse) RequestID() string { + return crr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (crr ContainerRestoreResponse) Version() string { + return crr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetAccessPolicyResponse ... +type ContainerSetAccessPolicyResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csapr ContainerSetAccessPolicyResponse) Response() *http.Response { + return csapr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csapr ContainerSetAccessPolicyResponse) StatusCode() int { + return csapr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csapr ContainerSetAccessPolicyResponse) Status() string { + return csapr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (csapr ContainerSetAccessPolicyResponse) ClientRequestID() string { + return csapr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (csapr ContainerSetAccessPolicyResponse) Date() time.Time { + s := csapr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (csapr ContainerSetAccessPolicyResponse) ErrorCode() string { + return csapr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (csapr ContainerSetAccessPolicyResponse) ETag() ETag { + return ETag(csapr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (csapr ContainerSetAccessPolicyResponse) LastModified() time.Time { + s := csapr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (csapr ContainerSetAccessPolicyResponse) RequestID() string { + return csapr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (csapr ContainerSetAccessPolicyResponse) Version() string { + return csapr.rawResponse.Header.Get("x-ms-version") +} + +// ContainerSetMetadataResponse ... +type ContainerSetMetadataResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (csmr ContainerSetMetadataResponse) Response() *http.Response { + return csmr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (csmr ContainerSetMetadataResponse) StatusCode() int { + return csmr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (csmr ContainerSetMetadataResponse) Status() string { + return csmr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (csmr ContainerSetMetadataResponse) ClientRequestID() string { + return csmr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. 
+func (csmr ContainerSetMetadataResponse) Date() time.Time { + s := csmr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (csmr ContainerSetMetadataResponse) ErrorCode() string { + return csmr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (csmr ContainerSetMetadataResponse) ETag() ETag { + return ETag(csmr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (csmr ContainerSetMetadataResponse) LastModified() time.Time { + s := csmr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (csmr ContainerSetMetadataResponse) RequestID() string { + return csmr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (csmr ContainerSetMetadataResponse) Version() string { + return csmr.rawResponse.Header.Get("x-ms-version") +} + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access +// resources in another domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain +// (the origin domain) to call APIs in another domain. +type CorsRule struct { + // AllowedOrigins - The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS. + AllowedOrigins string `xml:"AllowedOrigins"` + // AllowedMethods - The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma-separated) + AllowedMethods string `xml:"AllowedMethods"` + // AllowedHeaders - The request headers that the origin domain may specify on the CORS request. + AllowedHeaders string `xml:"AllowedHeaders"` + // ExposedHeaders - The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer. + ExposedHeaders string `xml:"ExposedHeaders"` + // MaxAgeInSeconds - The maximum amount of time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds int32 `xml:"MaxAgeInSeconds"` +} + +// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is +// delimited text formatted. +type DelimitedTextConfiguration struct { + // ColumnSeparator - The string used to separate columns. + ColumnSeparator *string `xml:"ColumnSeparator"` + // FieldQuote - The string used to quote a specific field. + FieldQuote *string `xml:"FieldQuote"` + // RecordSeparator - The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` + // EscapeChar - The string used as an escape character. + EscapeChar *string `xml:"EscapeChar"` + // HeadersPresent - Represents whether the data has headers.
+ HeadersPresent *bool `xml:"HasHeaders"` +} + +// downloadResponse - Wraps the response from the blobClient.Download method. +type downloadResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (dr downloadResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range dr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (dr downloadResponse) Response() *http.Response { + return dr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (dr downloadResponse) StatusCode() int { + return dr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (dr downloadResponse) Status() string { + return dr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (dr downloadResponse) Body() io.ReadCloser { + return dr.rawResponse.Body +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (dr downloadResponse) AcceptRanges() string { + return dr.rawResponse.Header.Get("Accept-Ranges") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. +func (dr downloadResponse) BlobCommittedBlockCount() int32 { + s := dr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (dr downloadResponse) BlobContentMD5() []byte { + s := dr.rawResponse.Header.Get("x-ms-blob-content-md5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (dr downloadResponse) BlobSequenceNumber() int64 { + s := dr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (dr downloadResponse) BlobType() BlobType { + return BlobType(dr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (dr downloadResponse) CacheControl() string { + return dr.rawResponse.Header.Get("Cache-Control") +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (dr downloadResponse) ClientRequestID() string { + return dr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentCrc64 returns the value for header x-ms-content-crc64. +func (dr downloadResponse) ContentCrc64() []byte { + s := dr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentDisposition returns the value for header Content-Disposition. +func (dr downloadResponse) ContentDisposition() string { + return dr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (dr downloadResponse) ContentEncoding() string { + return dr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. 
+func (dr downloadResponse) ContentLanguage() string { + return dr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (dr downloadResponse) ContentLength() int64 { + s := dr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (dr downloadResponse) ContentMD5() []byte { + s := dr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentRange returns the value for header Content-Range. +func (dr downloadResponse) ContentRange() string { + return dr.rawResponse.Header.Get("Content-Range") +} + +// ContentType returns the value for header Content-Type. +func (dr downloadResponse) ContentType() string { + return dr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (dr downloadResponse) CopyCompletionTime() time.Time { + s := dr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. +func (dr downloadResponse) CopyID() string { + return dr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (dr downloadResponse) CopyProgress() string { + return dr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (dr downloadResponse) CopySource() string { + return dr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (dr downloadResponse) CopyStatus() CopyStatusType { + return CopyStatusType(dr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (dr downloadResponse) CopyStatusDescription() string { + return dr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. +func (dr downloadResponse) Date() time.Time { + s := dr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (dr downloadResponse) EncryptionKeySha256() string { + return dr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (dr downloadResponse) EncryptionScope() string { + return dr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (dr downloadResponse) ErrorCode() string { + return dr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (dr downloadResponse) ETag() ETag { + return ETag(dr.rawResponse.Header.Get("ETag")) +} + +// ImmutabilityPolicyExpiresOn returns the value for header x-ms-immutability-policy-until-date. 
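
downloadResponse pairs a streaming Body() with integrity headers such as Content-MD5, so a caller can verify the payload it just read; note the digest arrives base64-encoded, exactly as ContentMD5() above decodes it. A minimal end-to-end sketch against a fabricated response (verifyMD5 is an invented helper, not part of the vendored API):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
)

// verifyMD5 drains a download body and checks it against the Content-MD5
// header, mirroring how ContentMD5() exposes the decoded digest.
func verifyMD5(resp *http.Response) (bool, error) {
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return false, err
	}
	want, err := base64.StdEncoding.DecodeString(resp.Header.Get("Content-MD5"))
	if err != nil {
		return false, err
	}
	got := md5.Sum(data)
	return bytes.Equal(got[:], want), nil
}

func main() {
	h := http.Header{}
	sum := md5.Sum([]byte("hello"))
	h.Set("Content-MD5", base64.StdEncoding.EncodeToString(sum[:]))
	body := io.NopCloser(bytes.NewReader([]byte("hello")))
	ok, _ := verifyMD5(&http.Response{Header: h, Body: body})
	fmt.Println(ok) // true
}
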
+func (dr downloadResponse) ImmutabilityPolicyExpiresOn() time.Time { + s := dr.rawResponse.Header.Get("x-ms-immutability-policy-until-date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ImmutabilityPolicyMode returns the value for header x-ms-immutability-policy-mode. +func (dr downloadResponse) ImmutabilityPolicyMode() string { + return string(dr.rawResponse.Header.Get("x-ms-immutability-policy-mode")) +} + +// IsCurrentVersion returns the value for header x-ms-is-current-version. +func (dr downloadResponse) IsCurrentVersion() string { + return dr.rawResponse.Header.Get("x-ms-is-current-version") +} + +// IsSealed returns the value for header x-ms-blob-sealed. +func (dr downloadResponse) IsSealed() string { + return dr.rawResponse.Header.Get("x-ms-blob-sealed") +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (dr downloadResponse) IsServerEncrypted() string { + return dr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastAccessed returns the value for header x-ms-last-access-time. +func (dr downloadResponse) LastAccessed() time.Time { + s := dr.rawResponse.Header.Get("x-ms-last-access-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LastModified returns the value for header Last-Modified. +func (dr downloadResponse) LastModified() time.Time { + s := dr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (dr downloadResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(dr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (dr downloadResponse) LeaseState() LeaseStateType { + return LeaseStateType(dr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (dr downloadResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(dr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// LegalHold returns the value for header x-ms-legal-hold. +func (dr downloadResponse) LegalHold() string { + return dr.rawResponse.Header.Get("x-ms-legal-hold") +} + +// ObjectReplicationPolicyID returns the value for header x-ms-or-policy-id. +func (dr downloadResponse) ObjectReplicationPolicyID() string { + return dr.rawResponse.Header.Get("x-ms-or-policy-id") +} + +// ObjectReplicationRules returns the value for header x-ms-or. +func (dr downloadResponse) ObjectReplicationRules() string { + return dr.rawResponse.Header.Get("x-ms-or") +} + +// RequestID returns the value for header x-ms-request-id. +func (dr downloadResponse) RequestID() string { + return dr.rawResponse.Header.Get("x-ms-request-id") +} + +// TagCount returns the value for header x-ms-tag-count. +func (dr downloadResponse) TagCount() int64 { + s := dr.rawResponse.Header.Get("x-ms-tag-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// Version returns the value for header x-ms-version. +func (dr downloadResponse) Version() string { + return dr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. 
+func (dr downloadResponse) VersionID() string { + return dr.rawResponse.Header.Get("x-ms-version-id") +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"Blob"` + Name string `xml:"Name"` + ContainerName string `xml:"ContainerName"` + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + Where string `xml:"Where"` + Blobs []FilterBlobItem `xml:"Blobs>Blob"` + NextMarker *string `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (fbs FilterBlobSegment) Response() *http.Response { + return fbs.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (fbs FilterBlobSegment) StatusCode() int { + return fbs.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (fbs FilterBlobSegment) Status() string { + return fbs.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (fbs FilterBlobSegment) ClientRequestID() string { + return fbs.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (fbs FilterBlobSegment) Date() time.Time { + s := fbs.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (fbs FilterBlobSegment) ErrorCode() string { + return fbs.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (fbs FilterBlobSegment) RequestID() string { + return fbs.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (fbs FilterBlobSegment) Version() string { + return fbs.rawResponse.Header.Get("x-ms-version") +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // Status - The status of the secondary location. Possible values include: 'GeoReplicationStatusLive', 'GeoReplicationStatusBootstrap', 'GeoReplicationStatusUnavailable', 'GeoReplicationStatusNone' + Status GeoReplicationStatusType `xml:"Status"` + // LastSyncTime - A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads. + LastSyncTime time.Time `xml:"LastSyncTime"` +} + +// MarshalXML implements the xml.Marshaler interface for GeoReplication. +func (gr GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + gr2 := (*geoReplication)(unsafe.Pointer(&gr)) + return e.EncodeElement(*gr2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for GeoReplication. 
+func (gr *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + gr2 := (*geoReplication)(unsafe.Pointer(gr)) + return d.DecodeElement(gr2, &start) +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"JsonTextConfiguration"` + // RecordSeparator - The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // Start - The date-time the key is active in ISO 8601 UTC time + Start string `xml:"Start"` + // Expiry - The date-time the key expires in ISO 8601 UTC time + Expiry string `xml:"Expiry"` +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix *string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + Segment BlobFlatListSegment `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbfsr ListBlobsFlatSegmentResponse) Response() *http.Response { + return lbfsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lbfsr ListBlobsFlatSegmentResponse) StatusCode() int { + return lbfsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lbfsr ListBlobsFlatSegmentResponse) Status() string { + return lbfsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lbfsr ListBlobsFlatSegmentResponse) ClientRequestID() string { + return lbfsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentType returns the value for header Content-Type. +func (lbfsr ListBlobsFlatSegmentResponse) ContentType() string { + return lbfsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbfsr ListBlobsFlatSegmentResponse) Date() time.Time { + s := lbfsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbfsr ListBlobsFlatSegmentResponse) ErrorCode() string { + return lbfsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbfsr ListBlobsFlatSegmentResponse) RequestID() string { + return lbfsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbfsr ListBlobsFlatSegmentResponse) Version() string { + return lbfsr.rawResponse.Header.Get("x-ms-version") +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + ContainerName string `xml:"ContainerName,attr"` + Prefix *string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + Delimiter *string `xml:"Delimiter"` + Segment BlobHierarchyListSegment `xml:"Blobs"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lbhsr ListBlobsHierarchySegmentResponse) Response() *http.Response { + return lbhsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lbhsr ListBlobsHierarchySegmentResponse) StatusCode() int { + return lbhsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lbhsr ListBlobsHierarchySegmentResponse) Status() string { + return lbhsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lbhsr ListBlobsHierarchySegmentResponse) ClientRequestID() string { + return lbhsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentType returns the value for header Content-Type. +func (lbhsr ListBlobsHierarchySegmentResponse) ContentType() string { + return lbhsr.rawResponse.Header.Get("Content-Type") +} + +// Date returns the value for header Date. +func (lbhsr ListBlobsHierarchySegmentResponse) Date() time.Time { + s := lbhsr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (lbhsr ListBlobsHierarchySegmentResponse) ErrorCode() string { + return lbhsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lbhsr ListBlobsHierarchySegmentResponse) RequestID() string { + return lbhsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lbhsr ListBlobsHierarchySegmentResponse) Version() string { + return lbhsr.rawResponse.Header.Get("x-ms-version") +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + rawResponse *http.Response + // XMLName is used for marshalling and is subject to removal in a future release. + XMLName xml.Name `xml:"EnumerationResults"` + ServiceEndpoint string `xml:"ServiceEndpoint,attr"` + Prefix *string `xml:"Prefix"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + ContainerItems []ContainerItem `xml:"Containers>Container"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (lcsr ListContainersSegmentResponse) Response() *http.Response { + return lcsr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (lcsr ListContainersSegmentResponse) StatusCode() int { + return lcsr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (lcsr ListContainersSegmentResponse) Status() string { + return lcsr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (lcsr ListContainersSegmentResponse) ClientRequestID() string { + return lcsr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ErrorCode returns the value for header x-ms-error-code. 
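
The List*SegmentResponse types above all paginate the same way: each response carries a NextMarker, and the caller feeds it back into the next request until the service stops returning a continuation marker. A schematic drain loop; listSegment and the page type are stand-ins for a real service call such as the ListBlobsFlatSegment operation:

package main

import "fmt"

// page is a stand-in for a List*SegmentResponse: one segment of results
// plus the continuation token for the next call.
type page struct {
	Items      []string
	NextMarker string // empty means no more pages
}

// listSegment fakes a service call: it returns the page named by marker.
func listSegment(marker string, pages map[string]page) page {
	return pages[marker]
}

func main() {
	pages := map[string]page{
		"":   {Items: []string{"a", "b"}, NextMarker: "m1"},
		"m1": {Items: []string{"c"}, NextMarker: ""},
	}
	// The canonical drain loop: start with the zero marker and keep calling
	// until the service returns an empty continuation marker.
	for marker := ""; ; {
		p := listSegment(marker, pages)
		fmt.Println(p.Items)
		if p.NextMarker == "" {
			break
		}
		marker = p.NextMarker
	}
}
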
+func (lcsr ListContainersSegmentResponse) ErrorCode() string { + return lcsr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (lcsr ListContainersSegmentResponse) RequestID() string { + return lcsr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (lcsr ListContainersSegmentResponse) Version() string { + return lcsr.rawResponse.Header.Get("x-ms-version") +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // Version - The version of Storage Analytics to configure. + Version string `xml:"Version"` + // Delete - Indicates whether all delete requests should be logged. + Delete bool `xml:"Delete"` + // Read - Indicates whether all read requests should be logged. + Read bool `xml:"Read"` + // Write - Indicates whether all write requests should be logged. + Write bool `xml:"Write"` + RetentionPolicy RetentionPolicy `xml:"RetentionPolicy"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // Version - The version of Storage Analytics to configure. + Version *string `xml:"Version"` + // Enabled - Indicates whether metrics are enabled for the Blob service. + Enabled bool `xml:"Enabled"` + // IncludeAPIs - Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` +} + +// PageBlobClearPagesResponse ... +type PageBlobClearPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcpr PageBlobClearPagesResponse) Response() *http.Response { + return pbcpr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcpr PageBlobClearPagesResponse) StatusCode() int { + return pbcpr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcpr PageBlobClearPagesResponse) Status() string { + return pbcpr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbcpr PageBlobClearPagesResponse) BlobSequenceNumber() int64 { + s := pbcpr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcpr PageBlobClearPagesResponse) ClientRequestID() string { + return pbcpr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcpr PageBlobClearPagesResponse) ContentMD5() []byte { + s := pbcpr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbcpr PageBlobClearPagesResponse) Date() time.Time { + s := pbcpr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcpr PageBlobClearPagesResponse) ErrorCode() string { + return pbcpr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. 
+func (pbcpr PageBlobClearPagesResponse) ETag() ETag { + return ETag(pbcpr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcpr PageBlobClearPagesResponse) LastModified() time.Time { + s := pbcpr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcpr PageBlobClearPagesResponse) RequestID() string { + return pbcpr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcpr PageBlobClearPagesResponse) Version() string { + return pbcpr.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbcpr PageBlobClearPagesResponse) XMsContentCrc64() []byte { + s := pbcpr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// PageBlobCopyIncrementalResponse ... +type PageBlobCopyIncrementalResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcir PageBlobCopyIncrementalResponse) Response() *http.Response { + return pbcir.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcir PageBlobCopyIncrementalResponse) StatusCode() int { + return pbcir.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcir PageBlobCopyIncrementalResponse) Status() string { + return pbcir.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcir PageBlobCopyIncrementalResponse) ClientRequestID() string { + return pbcir.rawResponse.Header.Get("x-ms-client-request-id") +} + +// CopyID returns the value for header x-ms-copy-id. +func (pbcir PageBlobCopyIncrementalResponse) CopyID() string { + return pbcir.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (pbcir PageBlobCopyIncrementalResponse) CopyStatus() CopyStatusType { + return CopyStatusType(pbcir.rawResponse.Header.Get("x-ms-copy-status")) +} + +// Date returns the value for header Date. +func (pbcir PageBlobCopyIncrementalResponse) Date() time.Time { + s := pbcir.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcir PageBlobCopyIncrementalResponse) ErrorCode() string { + return pbcir.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcir PageBlobCopyIncrementalResponse) ETag() ETag { + return ETag(pbcir.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbcir PageBlobCopyIncrementalResponse) LastModified() time.Time { + s := pbcir.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. 
+func (pbcir PageBlobCopyIncrementalResponse) RequestID() string { + return pbcir.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcir PageBlobCopyIncrementalResponse) Version() string { + return pbcir.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobCreateResponse ... +type PageBlobCreateResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbcr PageBlobCreateResponse) Response() *http.Response { + return pbcr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbcr PageBlobCreateResponse) StatusCode() int { + return pbcr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbcr PageBlobCreateResponse) Status() string { + return pbcr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbcr PageBlobCreateResponse) ClientRequestID() string { + return pbcr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbcr PageBlobCreateResponse) ContentMD5() []byte { + s := pbcr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbcr PageBlobCreateResponse) Date() time.Time { + s := pbcr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbcr PageBlobCreateResponse) EncryptionKeySha256() string { + return pbcr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (pbcr PageBlobCreateResponse) EncryptionScope() string { + return pbcr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbcr PageBlobCreateResponse) ErrorCode() string { + return pbcr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbcr PageBlobCreateResponse) ETag() ETag { + return ETag(pbcr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbcr PageBlobCreateResponse) IsServerEncrypted() string { + return pbcr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbcr PageBlobCreateResponse) LastModified() time.Time { + s := pbcr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbcr PageBlobCreateResponse) RequestID() string { + return pbcr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbcr PageBlobCreateResponse) Version() string { + return pbcr.rawResponse.Header.Get("x-ms-version") +} + +// VersionID returns the value for header x-ms-version-id. +func (pbcr PageBlobCreateResponse) VersionID() string { + return pbcr.rawResponse.Header.Get("x-ms-version-id") +} + +// PageBlobResizeResponse ... 
+type PageBlobResizeResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbrr PageBlobResizeResponse) Response() *http.Response { + return pbrr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbrr PageBlobResizeResponse) StatusCode() int { + return pbrr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbrr PageBlobResizeResponse) Status() string { + return pbrr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbrr PageBlobResizeResponse) BlobSequenceNumber() int64 { + s := pbrr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbrr PageBlobResizeResponse) ClientRequestID() string { + return pbrr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (pbrr PageBlobResizeResponse) Date() time.Time { + s := pbrr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbrr PageBlobResizeResponse) ErrorCode() string { + return pbrr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbrr PageBlobResizeResponse) ETag() ETag { + return ETag(pbrr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbrr PageBlobResizeResponse) LastModified() time.Time { + s := pbrr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbrr PageBlobResizeResponse) RequestID() string { + return pbrr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbrr PageBlobResizeResponse) Version() string { + return pbrr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUpdateSequenceNumberResponse ... +type PageBlobUpdateSequenceNumberResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Response() *http.Response { + return pbusnr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbusnr PageBlobUpdateSequenceNumberResponse) StatusCode() int { + return pbusnr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbusnr PageBlobUpdateSequenceNumberResponse) Status() string { + return pbusnr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbusnr PageBlobUpdateSequenceNumberResponse) BlobSequenceNumber() int64 { + s := pbusnr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
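// Editorial note on the numeric accessors above: a missing header yields -1,
// while a malformed header is swallowed and yields 0 -- indistinguishable from
// a genuine sequence number of 0. Callers that must tell those cases apart can
// fall back to the raw header, as in this sketch (pbrr is any of the page-blob
// responses defined in this file):
func rawSequenceNumber(pbrr PageBlobResizeResponse) (string, bool) {
	s := pbrr.Response().Header.Get("x-ms-blob-sequence-number")
	return s, s != "" // ok == false means the header was absent entirely
}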
+func (pbusnr PageBlobUpdateSequenceNumberResponse) ClientRequestID() string { + return pbusnr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Date() time.Time { + s := pbusnr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ErrorCode() string { + return pbusnr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbusnr PageBlobUpdateSequenceNumberResponse) ETag() ETag { + return ETag(pbusnr.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pbusnr PageBlobUpdateSequenceNumberResponse) LastModified() time.Time { + s := pbusnr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbusnr PageBlobUpdateSequenceNumberResponse) RequestID() string { + return pbusnr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbusnr PageBlobUpdateSequenceNumberResponse) Version() string { + return pbusnr.rawResponse.Header.Get("x-ms-version") +} + +// PageBlobUploadPagesFromURLResponse ... +type PageBlobUploadPagesFromURLResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbupfur PageBlobUploadPagesFromURLResponse) Response() *http.Response { + return pbupfur.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbupfur PageBlobUploadPagesFromURLResponse) StatusCode() int { + return pbupfur.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbupfur PageBlobUploadPagesFromURLResponse) Status() string { + return pbupfur.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbupfur PageBlobUploadPagesFromURLResponse) BlobSequenceNumber() int64 { + s := pbupfur.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbupfur PageBlobUploadPagesFromURLResponse) ContentMD5() []byte { + s := pbupfur.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. +func (pbupfur PageBlobUploadPagesFromURLResponse) Date() time.Time { + s := pbupfur.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionKeySha256() string { + return pbupfur.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. 
+func (pbupfur PageBlobUploadPagesFromURLResponse) EncryptionScope() string { + return pbupfur.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbupfur PageBlobUploadPagesFromURLResponse) ErrorCode() string { + return pbupfur.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbupfur PageBlobUploadPagesFromURLResponse) ETag() ETag { + return ETag(pbupfur.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbupfur PageBlobUploadPagesFromURLResponse) IsServerEncrypted() string { + return pbupfur.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbupfur PageBlobUploadPagesFromURLResponse) LastModified() time.Time { + s := pbupfur.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbupfur PageBlobUploadPagesFromURLResponse) RequestID() string { + return pbupfur.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbupfur PageBlobUploadPagesFromURLResponse) Version() string { + return pbupfur.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbupfur PageBlobUploadPagesFromURLResponse) XMsContentCrc64() []byte { + s := pbupfur.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// PageBlobUploadPagesResponse ... +type PageBlobUploadPagesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (pbupr PageBlobUploadPagesResponse) Response() *http.Response { + return pbupr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pbupr PageBlobUploadPagesResponse) StatusCode() int { + return pbupr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pbupr PageBlobUploadPagesResponse) Status() string { + return pbupr.rawResponse.Status +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (pbupr PageBlobUploadPagesResponse) BlobSequenceNumber() int64 { + s := pbupr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pbupr PageBlobUploadPagesResponse) ClientRequestID() string { + return pbupr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentMD5 returns the value for header Content-MD5. +func (pbupr PageBlobUploadPagesResponse) ContentMD5() []byte { + s := pbupr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// Date returns the value for header Date. 
+func (pbupr PageBlobUploadPagesResponse) Date() time.Time { + s := pbupr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (pbupr PageBlobUploadPagesResponse) EncryptionKeySha256() string { + return pbupr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (pbupr PageBlobUploadPagesResponse) EncryptionScope() string { + return pbupr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pbupr PageBlobUploadPagesResponse) ErrorCode() string { + return pbupr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pbupr PageBlobUploadPagesResponse) ETag() ETag { + return ETag(pbupr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-request-server-encrypted. +func (pbupr PageBlobUploadPagesResponse) IsServerEncrypted() string { + return pbupr.rawResponse.Header.Get("x-ms-request-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (pbupr PageBlobUploadPagesResponse) LastModified() time.Time { + s := pbupr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pbupr PageBlobUploadPagesResponse) RequestID() string { + return pbupr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pbupr PageBlobUploadPagesResponse) Version() string { + return pbupr.rawResponse.Header.Get("x-ms-version") +} + +// XMsContentCrc64 returns the value for header x-ms-content-crc64. +func (pbupr PageBlobUploadPagesResponse) XMsContentCrc64() []byte { + s := pbupr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// PageList - the list of pages +type PageList struct { + rawResponse *http.Response + PageRange []PageRange `xml:"PageRange"` + ClearRange []ClearRange `xml:"ClearRange"` + NextMarker Marker `xml:"NextMarker"` +} + +// Response returns the raw HTTP response object. +func (pl PageList) Response() *http.Response { + return pl.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (pl PageList) StatusCode() int { + return pl.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (pl PageList) Status() string { + return pl.rawResponse.Status +} + +// BlobContentLength returns the value for header x-ms-blob-content-length. +func (pl PageList) BlobContentLength() int64 { + s := pl.rawResponse.Header.Get("x-ms-blob-content-length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (pl PageList) ClientRequestID() string { + return pl.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. 
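// Illustrative sketch (editorial, not part of the vendored file): consuming the
// PageList defined below. The value would come from a GetPageRanges-style call
// on the package's PageBlobURL wrapper, which is outside this diff; ClearRange
// mirrors PageRange's Start/End byte offsets as reported by the service.
func dumpPageRanges(pl PageList) {
	for _, pr := range pl.PageRange {
		fmt.Printf("valid:   bytes %d-%d\n", pr.Start, pr.End)
	}
	for _, cr := range pl.ClearRange {
		fmt.Printf("cleared: bytes %d-%d\n", cr.Start, cr.End)
	}
}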
+func (pl PageList) Date() time.Time { + s := pl.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (pl PageList) ErrorCode() string { + return pl.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (pl PageList) ETag() ETag { + return ETag(pl.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (pl PageList) LastModified() time.Time { + s := pl.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (pl PageList) RequestID() string { + return pl.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (pl PageList) Version() string { + return pl.rawResponse.Header.Get("x-ms-version") +} + +// PageRange ... +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +// QueryFormat ... +type QueryFormat struct { + // Type - Possible values include: 'QueryFormatDelimited', 'QueryFormatJSON', 'QueryFormatArrow', 'QueryFormatParquet', 'QueryFormatNone' + Type QueryFormatType `xml:"Type"` + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + ParquetTextConfiguration map[string]interface{} `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // QueryType - Required. The type of the provided query expression. + QueryType string `xml:"QueryType"` + // Expression - The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression string `xml:"Expression"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +// QueryResponse - Wraps the response from the blobClient.Query method. +type QueryResponse struct { + rawResponse *http.Response +} + +// NewMetadata returns user-defined key/value pairs. +func (qr QueryResponse) NewMetadata() Metadata { + md := Metadata{} + for k, v := range qr.rawResponse.Header { + if len(k) > mdPrefixLen { + if prefix := k[0:mdPrefixLen]; strings.EqualFold(prefix, mdPrefix) { + md[strings.ToLower(k[mdPrefixLen:])] = v[0] + } + } + } + return md +} + +// Response returns the raw HTTP response object. +func (qr QueryResponse) Response() *http.Response { + return qr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (qr QueryResponse) StatusCode() int { + return qr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (qr QueryResponse) Status() string { + return qr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (qr QueryResponse) Body() io.ReadCloser { + return qr.rawResponse.Body +} + +// AcceptRanges returns the value for header Accept-Ranges. +func (qr QueryResponse) AcceptRanges() string { + return qr.rawResponse.Header.Get("Accept-Ranges") +} + +// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count. 
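// A sketch (editorial, not part of the vendored file) of the request shape the
// QueryRequest type above describes. QuerySerialization appears later in this
// file and wraps a single QueryFormat; the delimited/JSON configuration structs
// are defined elsewhere in the package, so only the format type is set here.
// "SQL" is the only query type the service accepts at this API version.
func exampleQueryRequest() QueryRequest {
	return QueryRequest{
		QueryType:           "SQL",
		Expression:          "SELECT * FROM BlobStorage",
		InputSerialization:  &QuerySerialization{Format: QueryFormat{Type: QueryFormatDelimited}},
		OutputSerialization: &QuerySerialization{Format: QueryFormat{Type: QueryFormatJSON}},
	}
}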
+func (qr QueryResponse) BlobCommittedBlockCount() int32 { + s := qr.rawResponse.Header.Get("x-ms-blob-committed-block-count") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + i = 0 + } + return int32(i) +} + +// BlobContentMD5 returns the value for header x-ms-blob-content-md5. +func (qr QueryResponse) BlobContentMD5() []byte { + s := qr.rawResponse.Header.Get("x-ms-blob-content-md5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number. +func (qr QueryResponse) BlobSequenceNumber() int64 { + s := qr.rawResponse.Header.Get("x-ms-blob-sequence-number") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// BlobType returns the value for header x-ms-blob-type. +func (qr QueryResponse) BlobType() BlobType { + return BlobType(qr.rawResponse.Header.Get("x-ms-blob-type")) +} + +// CacheControl returns the value for header Cache-Control. +func (qr QueryResponse) CacheControl() string { + return qr.rawResponse.Header.Get("Cache-Control") +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (qr QueryResponse) ClientRequestID() string { + return qr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ContentCrc64 returns the value for header x-ms-content-crc64. +func (qr QueryResponse) ContentCrc64() []byte { + s := qr.rawResponse.Header.Get("x-ms-content-crc64") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentDisposition returns the value for header Content-Disposition. +func (qr QueryResponse) ContentDisposition() string { + return qr.rawResponse.Header.Get("Content-Disposition") +} + +// ContentEncoding returns the value for header Content-Encoding. +func (qr QueryResponse) ContentEncoding() string { + return qr.rawResponse.Header.Get("Content-Encoding") +} + +// ContentLanguage returns the value for header Content-Language. +func (qr QueryResponse) ContentLanguage() string { + return qr.rawResponse.Header.Get("Content-Language") +} + +// ContentLength returns the value for header Content-Length. +func (qr QueryResponse) ContentLength() int64 { + s := qr.rawResponse.Header.Get("Content-Length") + if s == "" { + return -1 + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + i = 0 + } + return i +} + +// ContentMD5 returns the value for header Content-MD5. +func (qr QueryResponse) ContentMD5() []byte { + s := qr.rawResponse.Header.Get("Content-MD5") + if s == "" { + return nil + } + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b = nil + } + return b +} + +// ContentRange returns the value for header Content-Range. +func (qr QueryResponse) ContentRange() string { + return qr.rawResponse.Header.Get("Content-Range") +} + +// ContentType returns the value for header Content-Type. +func (qr QueryResponse) ContentType() string { + return qr.rawResponse.Header.Get("Content-Type") +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (qr QueryResponse) CopyCompletionTime() time.Time { + s := qr.rawResponse.Header.Get("x-ms-copy-completion-time") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// CopyID returns the value for header x-ms-copy-id. 
+func (qr QueryResponse) CopyID() string { + return qr.rawResponse.Header.Get("x-ms-copy-id") +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (qr QueryResponse) CopyProgress() string { + return qr.rawResponse.Header.Get("x-ms-copy-progress") +} + +// CopySource returns the value for header x-ms-copy-source. +func (qr QueryResponse) CopySource() string { + return qr.rawResponse.Header.Get("x-ms-copy-source") +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (qr QueryResponse) CopyStatus() CopyStatusType { + return CopyStatusType(qr.rawResponse.Header.Get("x-ms-copy-status")) +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (qr QueryResponse) CopyStatusDescription() string { + return qr.rawResponse.Header.Get("x-ms-copy-status-description") +} + +// Date returns the value for header Date. +func (qr QueryResponse) Date() time.Time { + s := qr.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// EncryptionKeySha256 returns the value for header x-ms-encryption-key-sha256. +func (qr QueryResponse) EncryptionKeySha256() string { + return qr.rawResponse.Header.Get("x-ms-encryption-key-sha256") +} + +// EncryptionScope returns the value for header x-ms-encryption-scope. +func (qr QueryResponse) EncryptionScope() string { + return qr.rawResponse.Header.Get("x-ms-encryption-scope") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (qr QueryResponse) ErrorCode() string { + return qr.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (qr QueryResponse) ETag() ETag { + return ETag(qr.rawResponse.Header.Get("ETag")) +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (qr QueryResponse) IsServerEncrypted() string { + return qr.rawResponse.Header.Get("x-ms-server-encrypted") +} + +// LastModified returns the value for header Last-Modified. +func (qr QueryResponse) LastModified() time.Time { + s := qr.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (qr QueryResponse) LeaseDuration() LeaseDurationType { + return LeaseDurationType(qr.rawResponse.Header.Get("x-ms-lease-duration")) +} + +// LeaseState returns the value for header x-ms-lease-state. +func (qr QueryResponse) LeaseState() LeaseStateType { + return LeaseStateType(qr.rawResponse.Header.Get("x-ms-lease-state")) +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (qr QueryResponse) LeaseStatus() LeaseStatusType { + return LeaseStatusType(qr.rawResponse.Header.Get("x-ms-lease-status")) +} + +// RequestID returns the value for header x-ms-request-id. +func (qr QueryResponse) RequestID() string { + return qr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (qr QueryResponse) Version() string { + return qr.rawResponse.Header.Get("x-ms-version") +} + +// QuerySerialization ... 
+type QuerySerialization struct { + Format QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // Enabled - Indicates whether a retention policy is enabled for the storage service + Enabled bool `xml:"Enabled"` + // Days - Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted + Days *int32 `xml:"Days"` + // AllowPermanentDelete - Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` +} + +// ServiceGetAccountInfoResponse ... +type ServiceGetAccountInfoResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sgair ServiceGetAccountInfoResponse) Response() *http.Response { + return sgair.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sgair ServiceGetAccountInfoResponse) StatusCode() int { + return sgair.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sgair ServiceGetAccountInfoResponse) Status() string { + return sgair.rawResponse.Status +} + +// AccountKind returns the value for header x-ms-account-kind. +func (sgair ServiceGetAccountInfoResponse) AccountKind() AccountKindType { + return AccountKindType(sgair.rawResponse.Header.Get("x-ms-account-kind")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (sgair ServiceGetAccountInfoResponse) ClientRequestID() string { + return sgair.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (sgair ServiceGetAccountInfoResponse) Date() time.Time { + s := sgair.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sgair ServiceGetAccountInfoResponse) ErrorCode() string { + return sgair.rawResponse.Header.Get("x-ms-error-code") +} + +// IsHierarchicalNamespaceEnabled returns the value for header x-ms-is-hns-enabled. +func (sgair ServiceGetAccountInfoResponse) IsHierarchicalNamespaceEnabled() string { + return sgair.rawResponse.Header.Get("x-ms-is-hns-enabled") +} + +// RequestID returns the value for header x-ms-request-id. +func (sgair ServiceGetAccountInfoResponse) RequestID() string { + return sgair.rawResponse.Header.Get("x-ms-request-id") +} + +// SkuName returns the value for header x-ms-sku-name. +func (sgair ServiceGetAccountInfoResponse) SkuName() SkuNameType { + return SkuNameType(sgair.rawResponse.Header.Get("x-ms-sku-name")) +} + +// Version returns the value for header x-ms-version. +func (sgair ServiceGetAccountInfoResponse) Version() string { + return sgair.rawResponse.Header.Get("x-ms-version") +} + +// ServiceSetPropertiesResponse ... +type ServiceSetPropertiesResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sspr ServiceSetPropertiesResponse) Response() *http.Response { + return sspr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sspr ServiceSetPropertiesResponse) StatusCode() int { + return sspr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". 
+func (sspr ServiceSetPropertiesResponse) Status() string { + return sspr.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (sspr ServiceSetPropertiesResponse) ClientRequestID() string { + return sspr.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sspr ServiceSetPropertiesResponse) ErrorCode() string { + return sspr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sspr ServiceSetPropertiesResponse) RequestID() string { + return sspr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (sspr ServiceSetPropertiesResponse) Version() string { + return sspr.rawResponse.Header.Get("x-ms-version") +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // ID - a unique id + ID string `xml:"Id"` + AccessPolicy AccessPolicy `xml:"AccessPolicy"` +} + +// SignedIdentifiers - Wraps the response from the containerClient.GetAccessPolicy method. +type SignedIdentifiers struct { + rawResponse *http.Response + Items []SignedIdentifier `xml:"SignedIdentifier"` +} + +// Response returns the raw HTTP response object. +func (si SignedIdentifiers) Response() *http.Response { + return si.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (si SignedIdentifiers) StatusCode() int { + return si.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (si SignedIdentifiers) Status() string { + return si.rawResponse.Status +} + +// BlobPublicAccess returns the value for header x-ms-blob-public-access. +func (si SignedIdentifiers) BlobPublicAccess() PublicAccessType { + return PublicAccessType(si.rawResponse.Header.Get("x-ms-blob-public-access")) +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (si SignedIdentifiers) ClientRequestID() string { + return si.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (si SignedIdentifiers) Date() time.Time { + s := si.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (si SignedIdentifiers) ErrorCode() string { + return si.rawResponse.Header.Get("x-ms-error-code") +} + +// ETag returns the value for header ETag. +func (si SignedIdentifiers) ETag() ETag { + return ETag(si.rawResponse.Header.Get("ETag")) +} + +// LastModified returns the value for header Last-Modified. +func (si SignedIdentifiers) LastModified() time.Time { + s := si.rawResponse.Header.Get("Last-Modified") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// RequestID returns the value for header x-ms-request-id. +func (si SignedIdentifiers) RequestID() string { + return si.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. 
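// Illustrative sketch (editorial, not part of the vendored file): building the
// container access policy that the SignedIdentifiers response above wraps.
// AccessPolicy itself is defined elsewhere in the package; judging from the
// internal accessPolicy shadow type later in this file, it carries pointer
// Start/Expiry/Permission fields, which is what this sketch assumes.
func exampleSignedIdentifier() SignedIdentifier {
	start := time.Now().UTC()
	expiry := start.Add(48 * time.Hour)
	perm := "r" // read-only
	return SignedIdentifier{
		ID:           "policy-1", // unique within the container, at most 64 characters
		AccessPolicy: AccessPolicy{Start: &start, Expiry: &expiry, Permission: &perm},
	}
}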
+func (si SignedIdentifiers) Version() string { + return si.rawResponse.Header.Get("x-ms-version") +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // Enabled - Indicates whether this account is hosting a static website + Enabled bool `xml:"Enabled"` + // IndexDocument - The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` + // ErrorDocument404Path - The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + // DefaultIndexDocumentPath - Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` +} + +// // StorageError ... +// type StorageError struct { +// Message *string `xml:"Message"` +// } + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + rawResponse *http.Response + Logging *Logging `xml:"Logging"` + HourMetrics *Metrics `xml:"HourMetrics"` + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + // Cors - The set of CORS rules. + Cors []CorsRule `xml:"Cors>CorsRule"` + // DefaultServiceVersion - The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// Response returns the raw HTTP response object. +func (ssp StorageServiceProperties) Response() *http.Response { + return ssp.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (ssp StorageServiceProperties) StatusCode() int { + return ssp.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (ssp StorageServiceProperties) Status() string { + return ssp.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (ssp StorageServiceProperties) ClientRequestID() string { + return ssp.rawResponse.Header.Get("x-ms-client-request-id") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (ssp StorageServiceProperties) ErrorCode() string { + return ssp.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (ssp StorageServiceProperties) RequestID() string { + return ssp.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (ssp StorageServiceProperties) Version() string { + return ssp.rawResponse.Header.Get("x-ms-version") +} + +// StorageServiceStats - Stats for the storage service. +type StorageServiceStats struct { + rawResponse *http.Response + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// Response returns the raw HTTP response object. +func (sss StorageServiceStats) Response() *http.Response { + return sss.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sss StorageServiceStats) StatusCode() int { + return sss.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sss StorageServiceStats) Status() string { + return sss.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. 
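// A sketch (editorial, not part of the vendored file) of enabling static-website
// hosting and a soft-delete window through the StorageServiceProperties struct
// above; the value would be sent via the package's SetProperties wrapper, which
// is outside this diff. Optional settings stay nil and are left untouched.
func exampleServiceProperties() StorageServiceProperties {
	index, errPage := "index.html", "404.html"
	days := int32(7)
	return StorageServiceProperties{
		DeleteRetentionPolicy: &RetentionPolicy{Enabled: true, Days: &days},
		StaticWebsite: &StaticWebsite{
			Enabled:              true,
			IndexDocument:        &index,
			ErrorDocument404Path: &errPage,
		},
	}
}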
+func (sss StorageServiceStats) ClientRequestID() string { + return sss.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (sss StorageServiceStats) Date() time.Time { + s := sss.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sss StorageServiceStats) ErrorCode() string { + return sss.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sss StorageServiceStats) RequestID() string { + return sss.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (sss StorageServiceStats) Version() string { + return sss.rawResponse.Header.Get("x-ms-version") +} + +// SubmitBatchResponse - Wraps the response from the containerClient.SubmitBatch method. +type SubmitBatchResponse struct { + rawResponse *http.Response +} + +// Response returns the raw HTTP response object. +func (sbr SubmitBatchResponse) Response() *http.Response { + return sbr.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (sbr SubmitBatchResponse) StatusCode() int { + return sbr.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (sbr SubmitBatchResponse) Status() string { + return sbr.rawResponse.Status +} + +// Body returns the raw HTTP response object's Body. +func (sbr SubmitBatchResponse) Body() io.ReadCloser { + return sbr.rawResponse.Body +} + +// ContentType returns the value for header Content-Type. +func (sbr SubmitBatchResponse) ContentType() string { + return sbr.rawResponse.Header.Get("Content-Type") +} + +// ErrorCode returns the value for header x-ms-error-code. +func (sbr SubmitBatchResponse) ErrorCode() string { + return sbr.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (sbr SubmitBatchResponse) RequestID() string { + return sbr.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (sbr SubmitBatchResponse) Version() string { + return sbr.rawResponse.Header.Get("x-ms-version") +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + rawResponse *http.Response + // SignedOid - The Azure Active Directory object ID in GUID format. + SignedOid string `xml:"SignedOid"` + // SignedTid - The Azure Active Directory tenant ID in GUID format + SignedTid string `xml:"SignedTid"` + // SignedStart - The date-time the key is active + SignedStart time.Time `xml:"SignedStart"` + // SignedExpiry - The date-time the key expires + SignedExpiry time.Time `xml:"SignedExpiry"` + // SignedService - Abbreviation of the Azure Storage service that accepts the key + SignedService string `xml:"SignedService"` + // SignedVersion - The service version that created the key + SignedVersion string `xml:"SignedVersion"` + // Value - The key as a base64 string + Value string `xml:"Value"` +} + +// MarshalXML implements the xml.Marshaler interface for UserDelegationKey. +func (udk UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + udk2 := (*userDelegationKey)(unsafe.Pointer(&udk)) + return e.EncodeElement(*udk2, start) +} + +// UnmarshalXML implements the xml.Unmarshaler interface for UserDelegationKey. 
+func (udk *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + udk2 := (*userDelegationKey)(unsafe.Pointer(udk)) + return d.DecodeElement(udk2, &start) +} + +// Response returns the raw HTTP response object. +func (udk UserDelegationKey) Response() *http.Response { + return udk.rawResponse +} + +// StatusCode returns the HTTP status code of the response, e.g. 200. +func (udk UserDelegationKey) StatusCode() int { + return udk.rawResponse.StatusCode +} + +// Status returns the HTTP status message of the response, e.g. "200 OK". +func (udk UserDelegationKey) Status() string { + return udk.rawResponse.Status +} + +// ClientRequestID returns the value for header x-ms-client-request-id. +func (udk UserDelegationKey) ClientRequestID() string { + return udk.rawResponse.Header.Get("x-ms-client-request-id") +} + +// Date returns the value for header Date. +func (udk UserDelegationKey) Date() time.Time { + s := udk.rawResponse.Header.Get("Date") + if s == "" { + return time.Time{} + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + t = time.Time{} + } + return t +} + +// ErrorCode returns the value for header x-ms-error-code. +func (udk UserDelegationKey) ErrorCode() string { + return udk.rawResponse.Header.Get("x-ms-error-code") +} + +// RequestID returns the value for header x-ms-request-id. +func (udk UserDelegationKey) RequestID() string { + return udk.rawResponse.Header.Get("x-ms-request-id") +} + +// Version returns the value for header x-ms-version. +func (udk UserDelegationKey) Version() string { + return udk.rawResponse.Header.Get("x-ms-version") +} + +func init() { + if reflect.TypeOf((*UserDelegationKey)(nil)).Elem().Size() != reflect.TypeOf((*userDelegationKey)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between UserDelegationKey and userDelegationKey")) + } + if reflect.TypeOf((*AccessPolicy)(nil)).Elem().Size() != reflect.TypeOf((*accessPolicy)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between AccessPolicy and accessPolicy")) + } + if reflect.TypeOf((*BlobPropertiesInternal)(nil)).Elem().Size() != reflect.TypeOf((*blobPropertiesInternal)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between BlobPropertiesInternal and blobPropertiesInternal")) + } + if reflect.TypeOf((*ContainerProperties)(nil)).Elem().Size() != reflect.TypeOf((*containerProperties)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between ContainerProperties and containerProperties")) + } + if reflect.TypeOf((*GeoReplication)(nil)).Elem().Size() != reflect.TypeOf((*geoReplication)(nil)).Elem().Size() { + validateError(errors.New("size mismatch between GeoReplication and geoReplication")) + } +} + +const ( + rfc3339Format = "2006-01-02T15:04:05Z" +) + +// used to convert times from UTC to GMT before sending across the wire +var gmt = time.FixedZone("GMT", 0) + +// internal type used for marshalling time in RFC1123 format +type timeRFC1123 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC1123. +func (t timeRFC1123) MarshalText() ([]byte, error) { + return []byte(t.Format(time.RFC1123)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC1123. 
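// Editorial note on the pattern above: the exported type keeps plain time.Time
// fields for callers, while an unexported shadow type with an identical memory
// layout swaps in the timeRFC3339/timeRFC1123 wrappers so the XML encoder emits
// the wire format. The unsafe.Pointer cast is only sound because the layouts
// match, which is exactly what the reflect size checks in init guard. A minimal
// standalone version of the same trick, assuming this file's imports
// (encoding/xml, time, unsafe):
type public struct{ When time.Time }
type wire struct{ When timeRFC3339 } // same size and layout as public

func (p public) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(*(*wire)(unsafe.Pointer(&p)), start)
}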
+func (t *timeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(time.RFC1123, string(data)) + return +} + +// internal type used for marshalling time in RFC3339 format +type timeRFC3339 struct { + time.Time +} + +// MarshalText implements the encoding.TextMarshaler interface for timeRFC3339. +func (t timeRFC3339) MarshalText() ([]byte, error) { + return []byte(t.Format(rfc3339Format)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for timeRFC3339. +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + t.Time, err = time.Parse(rfc3339Format, string(data)) + return +} + +// internal type used for marshalling base64 encoded strings +type base64Encoded struct { + b []byte +} + +// MarshalText implements the encoding.TextMarshaler interface for base64Encoded. +func (c base64Encoded) MarshalText() ([]byte, error) { + return []byte(base64.StdEncoding.EncodeToString(c.b)), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for base64Encoded. +func (c *base64Encoded) UnmarshalText(data []byte) error { + b, err := base64.StdEncoding.DecodeString(string(data)) + if err != nil { + return err + } + c.b = b + return nil +} + +// internal type used for marshalling +type userDelegationKey struct { + rawResponse *http.Response + SignedOid string `xml:"SignedOid"` + SignedTid string `xml:"SignedTid"` + SignedStart timeRFC3339 `xml:"SignedStart"` + SignedExpiry timeRFC3339 `xml:"SignedExpiry"` + SignedService string `xml:"SignedService"` + SignedVersion string `xml:"SignedVersion"` + Value string `xml:"Value"` +} + +// internal type used for marshalling +type accessPolicy struct { + Start *timeRFC3339 `xml:"Start"` + Expiry *timeRFC3339 `xml:"Expiry"` + Permission *string `xml:"Permission"` +} + +// internal type used for marshalling +type blobPropertiesInternal struct { + // XMLName is used for marshalling and is subject to removal in a future release. 
+ XMLName xml.Name `xml:"Properties"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + ContentLength *int64 `xml:"Content-Length"` + ContentType *string `xml:"Content-Type"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + ContentMD5 base64Encoded `xml:"Content-MD5"` + ContentDisposition *string `xml:"Content-Disposition"` + CacheControl *string `xml:"Cache-Control"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType BlobType `xml:"BlobType"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + CopyID *string `xml:"CopyId"` + CopyStatus CopyStatusType `xml:"CopyStatus"` + CopySource *string `xml:"CopySource"` + CopyProgress *string `xml:"CopyProgress"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + AccessTier AccessTierType `xml:"AccessTier"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus ArchiveStatusType `xml:"ArchiveStatus"` + CustomerProvidedKeySha256 *string `xml:"CustomerProvidedKeySha256"` + EncryptionScope *string `xml:"EncryptionScope"` + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + TagCount *int32 `xml:"TagCount"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + IsSealed *bool `xml:"Sealed"` + RehydratePriority RehydratePriorityType `xml:"RehydratePriority"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode BlobImmutabilityPolicyModeType `xml:"ImmutabilityPolicyMode"` + LegalHold *bool `xml:"LegalHold"` + Owner *string `xml:"Owner"` + Group *string `xml:"Group"` + Permissions *string `xml:"Permissions"` + ACL *string `xml:"Acl"` +} + +// internal type used for marshalling +type containerProperties struct { + LastModified timeRFC1123 `xml:"Last-Modified"` + Etag ETag `xml:"Etag"` + LeaseStatus LeaseStatusType `xml:"LeaseStatus"` + LeaseState LeaseStateType `xml:"LeaseState"` + LeaseDuration LeaseDurationType `xml:"LeaseDuration"` + PublicAccess PublicAccessType `xml:"PublicAccess"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` +} + +// internal type used for marshalling +type geoReplication struct { + Status GeoReplicationStatusType `xml:"Status"` + LastSyncTime timeRFC1123 `xml:"LastSyncTime"` +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go new file mode 100644 index 00000000000..01a81fdfb1c --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_page_blob.go @@ -0,0 +1,1065 @@ +package azblob + +// 
Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/base64" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "time" +) + +// pageBlobClient is the client for the PageBlob methods of the Azblob service. +type pageBlobClient struct { + managementClient +} + +// newPageBlobClient creates an instance of the pageBlobClient client. +func newPageBlobClient(url url.URL, p pipeline.Pipeline) pageBlobClient { + return pageBlobClient{newManagementClient(url, p)} +} + +// ClearPages the Clear Pages operation clears a set of pages from a page blob +// +// contentLength is the length of the request. timeout is the timeout parameter is expressed in seconds. For more +// information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. +// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number +// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob +// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate +// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled. 
+func (client pageBlobClient) ClearPages(ctx context.Context, contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobClearPagesResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.clearPagesPreparer(contentLength, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.clearPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobClearPagesResponse), err +} + +// clearPagesPreparer prepares the ClearPages request. +func (client pageBlobClient) clearPagesPreparer(contentLength int64, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", 
(*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "clear") + return req, nil +} + +// clearPagesResponder handles the response to the ClearPages request. +func (client pageBlobClient) clearPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobClearPagesResponse{rawResponse: resp.Response()}, err +} + +// CopyIncremental the Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied snapshot are +// transferred to the destination. The copied snapshots are complete copies of the original snapshot and can be read or +// copied from as usual. This API is supported since REST version 2016-05-31. +// +// copySource is specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that +// specifies a page blob snapshot. The value should be URL-encoded as it would appear in a request URI. The source blob +// must either be public or must be authenticated via a shared access signature. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. ifModifiedSince is specify this header value to operate only on a blob if +// it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only +// on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate +// only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a +// matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. +// requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics +// logs when storage analytics logging is enabled. +func (client pageBlobClient) CopyIncremental(ctx context.Context, copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobCopyIncrementalResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.copyIncrementalPreparer(copySource, timeout, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.copyIncrementalResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCopyIncrementalResponse), err +} + +// copyIncrementalPreparer prepares the CopyIncremental request. 
+func (client pageBlobClient) copyIncrementalPreparer(copySource string, timeout *int32, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "incrementalcopy") + req.URL.RawQuery = params.Encode() + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-copy-source", copySource) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// copyIncrementalResponder handles the response to the CopyIncremental request. +func (client pageBlobClient) copyIncrementalResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCopyIncrementalResponse{rawResponse: resp.Response()}, err +} + +// Create the Create operation creates a new page blob. +// +// contentLength is the length of the request. blobContentLength is this header specifies the maximum size for the page +// blob, up to 1 TB. The page blob size must be aligned to a 512-byte boundary. timeout is the timeout parameter is +// expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. tier is optional. Indicates the tier to be set on the page blob. +// blobContentType is optional. Sets the blob's content type. If specified, this property is stored with the blob and +// returned with a read request. blobContentEncoding is optional. Sets the blob's content encoding. If specified, this +// property is stored with the blob and returned with a read request. blobContentLanguage is optional. Set the blob's +// content language. If specified, this property is stored with the blob and returned with a read request. +// blobContentMD5 is optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for +// the individual blocks were validated when each was uploaded. blobCacheControl is optional. Sets the blob's cache +// control. If specified, this property is stored with the blob and returned with a read request. metadata is optional. +// Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the +// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value +// pairs are specified, the destination blob is created with the specified metadata, and metadata is not copied from +// the source blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules +// for C# identifiers. 
See Naming and Referencing Containers, Blobs, and Metadata for more information. leaseID is if +// specified, the operation only succeeds if the resource's lease is active and matches this ID. blobContentDisposition +// is optional. Sets the blob's Content-Disposition header. encryptionKey is optional. Specifies the encryption key to +// use to encrypt the data provided in the request. If not specified, encryption is performed with the root account +// encryption key. For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the +// SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. +// encryptionAlgorithm is the algorithm used to produce the encryption key hash. Currently, the only accepted value is +// "AES256". Must be provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version +// 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided in the +// request. If not specified, encryption is performed with the default account encryption scope. For more information, +// see Encryption at Rest for Azure Storage Services. ifModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. blobSequenceNumber is set for page blobs only. The sequence number is a user-controlled value that you can +// use to track requests. The value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. blobTagsString is optional. Used to set blob tags in various blob operations. +// immutabilityPolicyExpiry is specifies the date time when the blobs immutability policy is set to expire. +// immutabilityPolicyMode is specifies the immutability policy mode to set on the blob. legalHold is specified if a +// legal hold should be set on the blob. 
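A hedged sketch of a Create call (editor's illustration, not part of the vendored file; in-package only, helper name hypothetical): the request body is empty, so contentLength is 0, while blobContentLength, the size of the new blob, must be 512-byte aligned. The grouping comments map the nil arguments onto the parameter list of the Create signature below.

	// createPageBlobExample creates an empty 1 MiB page blob with no optional headers.
	func createPageBlobExample(ctx context.Context, u url.URL, p pipeline.Pipeline) error {
		client := newPageBlobClient(u, p)
		_, err := client.Create(ctx, 0, 1024*1024, nil, PremiumPageBlobAccessTierNone,
			nil, nil, nil, nil, nil, // content type/encoding/language/MD5/cache control
			nil, nil, nil, nil, nil, // metadata, lease ID, content disposition, CPK key and hash
			EncryptionAlgorithmNone, nil, // encryption algorithm and scope
			nil, nil, nil, nil, nil, // date, ETag and tag preconditions
			nil, nil, nil, // sequence number, request ID, blob tags
			nil, BlobImmutabilityPolicyModeNone, nil) // immutability policy and legal hold
		return err
	}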
+func (client pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (*PageBlobCreateResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.createPreparer(contentLength, blobContentLength, timeout, tier, blobContentType, blobContentEncoding, blobContentLanguage, blobContentMD5, blobCacheControl, metadata, leaseID, blobContentDisposition, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID, blobTagsString, immutabilityPolicyExpiry, immutabilityPolicyMode, legalHold) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.createResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobCreateResponse), err +} + +// createPreparer prepares the Create request. 
+func (client pageBlobClient) createPreparer(contentLength int64, blobContentLength int64, timeout *int32, tier PremiumPageBlobAccessTierType, blobContentType *string, blobContentEncoding *string, blobContentLanguage *string, blobContentMD5 []byte, blobCacheControl *string, metadata map[string]string, leaseID *string, blobContentDisposition *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string, blobTagsString *string, immutabilityPolicyExpiry *time.Time, immutabilityPolicyMode BlobImmutabilityPolicyModeType, legalHold *bool) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if tier != PremiumPageBlobAccessTierNone { + req.Header.Set("x-ms-access-tier", string(tier)) + } + if blobContentType != nil { + req.Header.Set("x-ms-blob-content-type", *blobContentType) + } + if blobContentEncoding != nil { + req.Header.Set("x-ms-blob-content-encoding", *blobContentEncoding) + } + if blobContentLanguage != nil { + req.Header.Set("x-ms-blob-content-language", *blobContentLanguage) + } + if blobContentMD5 != nil { + req.Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobContentMD5)) + } + if blobCacheControl != nil { + req.Header.Set("x-ms-blob-cache-control", *blobCacheControl) + } + if metadata != nil { + for k, v := range metadata { + req.Header.Set("x-ms-meta-"+k, v) + } + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if blobContentDisposition != nil { + req.Header.Set("x-ms-blob-content-disposition", *blobContentDisposition) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + if blobSequenceNumber != nil { + req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if blobTagsString != nil { + req.Header.Set("x-ms-tags", *blobTagsString) + } + if immutabilityPolicyExpiry != nil { + req.Header.Set("x-ms-immutability-policy-until-date", (*immutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)) 
+ } + if immutabilityPolicyMode != BlobImmutabilityPolicyModeNone { + req.Header.Set("x-ms-immutability-policy-mode", string(immutabilityPolicyMode)) + } + if legalHold != nil { + req.Header.Set("x-ms-legal-hold", strconv.FormatBool(*legalHold)) + } + req.Header.Set("x-ms-blob-type", "PageBlob") + return req, nil +} + +// createResponder handles the response to the Create request. +func (client pageBlobClient) createResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobCreateResponse{rawResponse: resp.Response()}, err +} + +// GetPageRanges the Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a +// page blob +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the specified +// date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been modified +// since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching value. +// ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a SQL +// where clause on blob tags to operate only on blobs with a matching value. requestID is provides a client-generated, +// opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is +// enabled. +func (client pageBlobClient) GetPageRanges(ctx context.Context, snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesPreparer(snapshot, timeout, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesPreparer prepares the GetPageRanges request. 
+func (client pageBlobClient) getPageRangesPreparer(snapshot *string, timeout *int32, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "pagelist") + req.URL.RawQuery = params.Encode() + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPageRangesResponder handles the response to the GetPageRanges request. +func (client pageBlobClient) getPageRangesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &PageList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetPageRangesDiff the Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were +// changed between target blob and previous snapshot. +// +// snapshot is the snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to +// retrieve. For more information on working with blob snapshots, see Creating +// a Snapshot of a Blob. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. prevsnapshot is optional in version 2015-07-08 and newer. The prevsnapshot +// parameter is a DateTime value that specifies that the response will contain only pages that were changed between +// target blob and previous snapshot. Changed pages include both updated and cleared pages. The target blob may be a +// snapshot, as long as the snapshot specified by prevsnapshot is the older of the two. Note that incremental snapshots +// are currently supported only for blobs created on or after January 1, 2016. prevSnapshotURL is optional. This header +// is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot of the +// target blob. The response will only contain pages that were changed between the target blob and its previous +// snapshot. 
rangeParameter is return only the bytes of the blob in the specified range. leaseID is if specified, the +// operation only succeeds if the resource's lease is active and matches this ID. ifModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is +// specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag +// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to +// operate only on blobs with a matching value. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) GetPageRangesDiff(ctx context.Context, snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageList, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPageRangesDiffPreparer(snapshot, timeout, prevsnapshot, prevSnapshotURL, rangeParameter, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPageRangesDiffResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageList), err +} + +// getPageRangesDiffPreparer prepares the GetPageRangesDiff request. 
+func (client pageBlobClient) getPageRangesDiffPreparer(snapshot *string, timeout *int32, prevsnapshot *string, prevSnapshotURL *string, rangeParameter *string, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if snapshot != nil && len(*snapshot) > 0 { + params.Set("snapshot", *snapshot) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if prevsnapshot != nil && len(*prevsnapshot) > 0 { + params.Set("prevsnapshot", *prevsnapshot) + } + params.Set("comp", "pagelist") + req.URL.RawQuery = params.Encode() + if prevSnapshotURL != nil { + req.Header.Set("x-ms-previous-snapshot-url", *prevSnapshotURL) + } + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPageRangesDiffResponder handles the response to the GetPageRangesDiff request. +func (client pageBlobClient) getPageRangesDiffResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &PageList{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// Resize resize the Blob +// +// blobContentLength is this header specifies the maximum size for the page blob, up to 1 TB. The page blob size must +// be aligned to a 512-byte boundary. timeout is the timeout parameter is expressed in seconds. For more information, +// see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. encryptionKey is optional. Specifies the encryption key to use to encrypt the +// data provided in the request. If not specified, encryption is performed with the root account encryption key. For +// more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the +// provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the +// algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided +// if the x-ms-encryption-key header is provided. encryptionScope is optional. 
Version 2019-07-07 and later. Specifies +// the name of the encryption scope to use to encrypt the data provided in the request. If not specified, encryption is +// performed with the default account encryption scope. For more information, see Encryption at Rest for Azure Storage +// Services. ifModifiedSince is specify this header value to operate only on a blob if it has been modified since the +// specified date/time. ifUnmodifiedSince is specify this header value to operate only on a blob if it has not been +// modified since the specified date/time. ifMatch is specify an ETag value to operate only on blobs with a matching +// value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. ifTags is specify a +// SQL where clause on blob tags to operate only on blobs with a matching value. requestID is provides a +// client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage +// analytics logging is enabled. +func (client pageBlobClient) Resize(ctx context.Context, blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobResizeResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.resizePreparer(blobContentLength, timeout, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.resizeResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobResizeResponse), err +} + +// resizePreparer prepares the Resize request. 
+func (client pageBlobClient) resizePreparer(blobContentLength int64, timeout *int32, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// resizeResponder handles the response to the Resize request. +func (client pageBlobClient) resizeResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobResizeResponse{rawResponse: resp.Response()}, err +} + +// UpdateSequenceNumber update the sequence number of the blob +// +// sequenceNumberAction is required if the x-ms-blob-sequence-number header is set for the request. This property +// applies to page blobs only. This property indicates how the service should modify the blob's sequence number timeout +// is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. leaseID is if specified, the operation only succeeds if the resource's +// lease is active and matches this ID. ifModifiedSince is specify this header value to operate only on a blob if it +// has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to operate only on a +// blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value to operate only on +// blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs without a matching value. +// ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching value. blobSequenceNumber +// is set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. 
The +// value of the sequence number must be between 0 and 2^63 - 1. requestID is provides a client-generated, opaque value +// with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string) (*PageBlobUpdateSequenceNumberResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.updateSequenceNumberPreparer(sequenceNumberAction, timeout, leaseID, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, blobSequenceNumber, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.updateSequenceNumberResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUpdateSequenceNumberResponse), err +} + +// updateSequenceNumberPreparer prepares the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberPreparer(sequenceNumberAction SequenceNumberActionType, timeout *int32, leaseID *string, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, blobSequenceNumber *int64, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) + if blobSequenceNumber != nil { + req.Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*blobSequenceNumber, 10)) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// updateSequenceNumberResponder handles the response to the UpdateSequenceNumber request. +func (client pageBlobClient) updateSequenceNumberResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUpdateSequenceNumberResponse{rawResponse: resp.Response()}, err +} + +// UploadPages the Upload Pages operation writes a range of pages to a page blob +// +// body is initial data body will be closed upon successful return. 
Callers should ensure closure when receiving an +// error. contentLength is the length of the request. transactionalContentMD5 is specify the transactional md5 for the +// body, to be validated by the service. transactionalContentCrc64 is specify the transactional crc64 for the body, to +// be validated by the service. timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. rangeParameter is return only the bytes of the blob in the specified +// range. leaseID is if specified, the operation only succeeds if the resource's lease is active and matches this ID. +// encryptionKey is optional. Specifies the encryption key to use to encrypt the data provided in the request. If not +// specified, encryption is performed with the root account encryption key. For more information, see Encryption at +// Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of the provided encryption key. Must be +// provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is the algorithm used to produce the +// encryption key hash. Currently, the only accepted value is "AES256". Must be provided if the x-ms-encryption-key +// header is provided. encryptionScope is optional. Version 2019-07-07 and later. Specifies the name of the encryption +// scope to use to encrypt the data provided in the request. If not specified, encryption is performed with the default +// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. +// ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has a sequence number +// less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to operate only on a blob +// if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this header value to operate +// only on a blob if it has the specified sequence number. ifModifiedSince is specify this header value to operate only +// on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. ifMatch is specify an ETag value +// to operate only on blobs with a matching value. ifNoneMatch is specify an ETag value to operate only on blobs +// without a matching value. ifTags is specify a SQL where clause on blob tags to operate only on blobs with a matching +// value. requestID is provides a client-generated, opaque value with a 1 KB character limit that is recorded in the +// analytics logs when storage analytics logging is enabled.
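A similar hedged sketch for UploadPages (editor's illustration, not part of the vendored file; in-package only, helper name hypothetical, and it needs an extra "bytes" import beyond this file's import list): bytes.Reader satisfies io.ReadSeeker, and the body length, the Content-Length header, and the x-ms-range header must all describe the same 512-byte-aligned range.

	// uploadFirstPageExample writes one zeroed 512-byte page at offset 0.
	func uploadFirstPageExample(ctx context.Context, u url.URL, p pipeline.Pipeline) error {
		client := newPageBlobClient(u, p)
		page := bytes.NewReader(make([]byte, 512)) // request body; length matches contentLength
		rng := "bytes=0-511"
		_, err := client.UploadPages(ctx, page, 512, nil, nil, nil, &rng,
			nil, nil, nil, EncryptionAlgorithmNone, nil, // lease ID and CPK options
			nil, nil, nil, // sequence-number preconditions
			nil, nil, nil, nil, nil, nil) // date/ETag/tag preconditions, request ID
		return err
	}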
+func (client pageBlobClient) UploadPages(ctx context.Context, body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (*PageBlobUploadPagesResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPagesPreparer(body, contentLength, transactionalContentMD5, transactionalContentCrc64, timeout, rangeParameter, leaseID, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUploadPagesResponse), err +} + +// uploadPagesPreparer prepares the UploadPages request. +func (client pageBlobClient) uploadPagesPreparer(body io.ReadSeeker, contentLength int64, transactionalContentMD5 []byte, transactionalContentCrc64 []byte, timeout *int32, rangeParameter *string, leaseID *string, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if transactionalContentMD5 != nil { + req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(transactionalContentMD5)) + } + if transactionalContentCrc64 != nil { + req.Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(transactionalContentCrc64)) + } + if rangeParameter != nil { + req.Header.Set("x-ms-range", *rangeParameter) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if 
ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + req.Header.Set("x-ms-page-write", "update") + return req, nil +} + +// uploadPagesResponder handles the response to the UploadPages request. +func (client pageBlobClient) uploadPagesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUploadPagesResponse{rawResponse: resp.Response()}, err +} + +// UploadPagesFromURL the Upload Pages operation writes a range of pages to a page blob where the contents are read +// from a URL +// +// sourceURL is specify a URL to the copy source. sourceRange is bytes of source data in the specified range. The +// length of this range should match the ContentLength header and x-ms-range/Range destination range header. +// contentLength is the length of the request. rangeParameter is the range of bytes to which the source range would be +// written. The range should be 512 aligned and range-end is required. sourceContentMD5 is specify the md5 calculated +// for the range of bytes that must be read from the copy source. sourceContentcrc64 is specify the crc64 calculated +// for the range of bytes that must be read from the copy source. timeout is the timeout parameter is expressed in +// seconds. For more information, see Setting +// Timeouts for Blob Service Operations. encryptionKey is optional. Specifies the encryption key to use to encrypt +// the data provided in the request. If not specified, encryption is performed with the root account encryption key. +// For more information, see Encryption at Rest for Azure Storage Services. encryptionKeySha256 is the SHA-256 hash of +// the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. encryptionAlgorithm is +// the algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be +// provided if the x-ms-encryption-key header is provided. encryptionScope is optional. Version 2019-07-07 and later. +// Specifies the name of the encryption scope to use to encrypt the data provided in the request. If not specified, +// encryption is performed with the default account encryption scope. For more information, see Encryption at Rest for +// Azure Storage Services. leaseID is if specified, the operation only succeeds if the resource's lease is active and +// matches this ID. 
ifSequenceNumberLessThanOrEqualTo is specify this header value to operate only on a blob if it has +// a sequence number less than or equal to the specified. ifSequenceNumberLessThan is specify this header value to +// operate only on a blob if it has a sequence number less than the specified. ifSequenceNumberEqualTo is specify this +// header value to operate only on a blob if it has the specified sequence number. ifModifiedSince is specify this +// header value to operate only on a blob if it has been modified since the specified date/time. ifUnmodifiedSince is +// specify this header value to operate only on a blob if it has not been modified since the specified date/time. +// ifMatch is specify an ETag value to operate only on blobs with a matching value. ifNoneMatch is specify an ETag +// value to operate only on blobs without a matching value. ifTags is specify a SQL where clause on blob tags to +// operate only on blobs with a matching value. sourceIfModifiedSince is specify this header value to operate only on a +// blob if it has been modified since the specified date/time. sourceIfUnmodifiedSince is specify this header value to +// operate only on a blob if it has not been modified since the specified date/time. sourceIfMatch is specify an ETag +// value to operate only on blobs with a matching value. sourceIfNoneMatch is specify an ETag value to operate only on +// blobs without a matching value. requestID is provides a client-generated, opaque value with a 1 KB character limit +// that is recorded in the analytics logs when storage analytics logging is enabled. copySourceAuthorization is only +// Bearer type is supported. Credentials should be a valid OAuth access token to copy source. +func (client pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (*PageBlobUploadPagesFromURLResponse, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.uploadPagesFromURLPreparer(sourceURL, sourceRange, contentLength, rangeParameter, sourceContentMD5, sourceContentcrc64, timeout, encryptionKey, encryptionKeySha256, encryptionAlgorithm, encryptionScope, leaseID, ifSequenceNumberLessThanOrEqualTo, ifSequenceNumberLessThan, ifSequenceNumberEqualTo, ifModifiedSince, ifUnmodifiedSince, ifMatch, ifNoneMatch, ifTags, sourceIfModifiedSince, sourceIfUnmodifiedSince, sourceIfMatch, sourceIfNoneMatch, requestID, copySourceAuthorization) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.uploadPagesFromURLResponder}, req) + if err != nil { + return nil, err + } + return resp.(*PageBlobUploadPagesFromURLResponse), err +} + +// 
uploadPagesFromURLPreparer prepares the UploadPagesFromURL request. +func (client pageBlobClient) uploadPagesFromURLPreparer(sourceURL string, sourceRange string, contentLength int64, rangeParameter string, sourceContentMD5 []byte, sourceContentcrc64 []byte, timeout *int32, encryptionKey *string, encryptionKeySha256 *string, encryptionAlgorithm EncryptionAlgorithmType, encryptionScope *string, leaseID *string, ifSequenceNumberLessThanOrEqualTo *int64, ifSequenceNumberLessThan *int64, ifSequenceNumberEqualTo *int64, ifModifiedSince *time.Time, ifUnmodifiedSince *time.Time, ifMatch *ETag, ifNoneMatch *ETag, ifTags *string, sourceIfModifiedSince *time.Time, sourceIfUnmodifiedSince *time.Time, sourceIfMatch *ETag, sourceIfNoneMatch *ETag, requestID *string, copySourceAuthorization *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "page") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-copy-source", sourceURL) + req.Header.Set("x-ms-source-range", sourceRange) + if sourceContentMD5 != nil { + req.Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(sourceContentMD5)) + } + if sourceContentcrc64 != nil { + req.Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(sourceContentcrc64)) + } + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("x-ms-range", rangeParameter) + if encryptionKey != nil { + req.Header.Set("x-ms-encryption-key", *encryptionKey) + } + if encryptionKeySha256 != nil { + req.Header.Set("x-ms-encryption-key-sha256", *encryptionKeySha256) + } + if encryptionAlgorithm != EncryptionAlgorithmNone { + req.Header.Set("x-ms-encryption-algorithm", string(encryptionAlgorithm)) + } + if encryptionScope != nil { + req.Header.Set("x-ms-encryption-scope", *encryptionScope) + } + if leaseID != nil { + req.Header.Set("x-ms-lease-id", *leaseID) + } + if ifSequenceNumberLessThanOrEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*ifSequenceNumberLessThanOrEqualTo, 10)) + } + if ifSequenceNumberLessThan != nil { + req.Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*ifSequenceNumberLessThan, 10)) + } + if ifSequenceNumberEqualTo != nil { + req.Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*ifSequenceNumberEqualTo, 10)) + } + if ifModifiedSince != nil { + req.Header.Set("If-Modified-Since", (*ifModifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifUnmodifiedSince != nil { + req.Header.Set("If-Unmodified-Since", (*ifUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if ifMatch != nil { + req.Header.Set("If-Match", string(*ifMatch)) + } + if ifNoneMatch != nil { + req.Header.Set("If-None-Match", string(*ifNoneMatch)) + } + if ifTags != nil { + req.Header.Set("x-ms-if-tags", *ifTags) + } + if sourceIfModifiedSince != nil { + req.Header.Set("x-ms-source-if-modified-since", (*sourceIfModifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfUnmodifiedSince != nil { + req.Header.Set("x-ms-source-if-unmodified-since", (*sourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)) + } + if sourceIfMatch != nil { + req.Header.Set("x-ms-source-if-match", string(*sourceIfMatch)) + } + if sourceIfNoneMatch != nil { + req.Header.Set("x-ms-source-if-none-match", string(*sourceIfNoneMatch)) + } + 
req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + if copySourceAuthorization != nil { + req.Header.Set("x-ms-copy-source-authorization", *copySourceAuthorization) + } + req.Header.Set("x-ms-page-write", "update") + return req, nil +} + +// uploadPagesFromURLResponder handles the response to the UploadPagesFromURL request. +func (client pageBlobClient) uploadPagesFromURLResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusCreated) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &PageBlobUploadPagesFromURLResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go new file mode 100644 index 00000000000..8a023d0a02c --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_responder_policy.go @@ -0,0 +1,74 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "context" + "encoding/xml" + "github.com/Azure/azure-pipeline-go/pipeline" + "io/ioutil" +) + +type responder func(resp pipeline.Response) (result pipeline.Response, err error) + +// ResponderPolicyFactory is a Factory capable of creating a responder pipeline. +type responderPolicyFactory struct { + responder responder +} + +// New creates a responder policy factory. +func (arpf responderPolicyFactory) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy { + return responderPolicy{next: next, responder: arpf.responder} +} + +type responderPolicy struct { + next pipeline.Policy + responder responder +} + +// Do sends the request to the service and validates/deserializes the HTTP response. +func (arp responderPolicy) Do(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + resp, err := arp.next.Do(ctx, request) + if err != nil { + return resp, err + } + return arp.responder(resp) +} + +// validateResponse checks an HTTP response's status code against a legal set of codes. +// If the response code is not legal, then validateResponse reads all of the response's body +// (containing error information) and returns a response error. +func validateResponse(resp pipeline.Response, successStatusCodes ...int) error { + if resp == nil { + return NewResponseError(nil, nil, "nil response") + } + responseCode := resp.Response().StatusCode + for _, i := range successStatusCodes { + if i == responseCode { + return nil + } + } + // only close the body in the failure case. in the + // success case responders will close the body as required. 
+ defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return err + } + // the service code, description and details will be populated during unmarshalling + responseError := NewResponseError(nil, resp.Response(), resp.Response().Status) + if len(b) > 0 { + if err = xml.Unmarshal(b, &responseError); err != nil { + return NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return responseError +} + +// removes any BOM from the byte slice +func removeBOM(b []byte) []byte { + // UTF8 + return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go new file mode 100644 index 00000000000..3dcc75bb52b --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_response_error.go @@ -0,0 +1,95 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "bytes" + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "net" + "net/http" +) + +// if you want to provide custom error handling set this variable to your constructor function +var responseErrorFactory func(cause error, response *http.Response, description string) error + +// ResponseError identifies a responder-generated network or response parsing error. +type ResponseError interface { + // Error exposes the Error(), Temporary() and Timeout() methods. + net.Error // Includes the Go error interface + // Response returns the HTTP response. You may examine this but you should not modify it. + Response() *http.Response +} + +// NewResponseError creates an error object that implements the error interface. +func NewResponseError(cause error, response *http.Response, description string) error { + if responseErrorFactory != nil { + return responseErrorFactory(cause, response, description) + } + return &responseError{ + ErrorNode: pipeline.ErrorNode{}.Initialize(cause, 3), + response: response, + description: description, + } +} + +// responseError is the internal struct that implements the public ResponseError interface. +type responseError struct { + pipeline.ErrorNode // This is embedded so that responseError "inherits" Error, Temporary, Timeout, and Cause + response *http.Response + description string +} + +// Error implements the error interface's Error method to return a string representation of the error. +func (e *responseError) Error() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "===== RESPONSE ERROR (Code=%v) =====\n", e.response.StatusCode) + fmt.Fprintf(b, "Status=%s, Description: %s\n", e.response.Status, e.description) + s := b.String() + return e.ErrorNode.Error(s) +} + +// Response implements the ResponseError interface's method to return the HTTP response. +func (e *responseError) Response() *http.Response { + return e.response +} + +// RFC7807 PROBLEM ------------------------------------------------------------------------------------ +// RFC7807Problem ... This type can be publicly embedded in another type that wants to add additional members. +/*type RFC7807Problem struct { + // Mandatory: A (relative) URI reference identifying the problem type (it MAY refer to human-readable documentation). + typeURI string // Should default to "about:blank" + // Optional: Short, human-readable summary (maybe localized). 
+	title string
+	// Optional: HTTP status code generated by the origin server
+	status int
+	// Optional: Human-readable explanation for this problem occurrence.
+	// Should help client correct the problem. Clients should NOT parse this string.
+	detail string
+	// Optional: A (relative) URI identifying this specific problem occurrence (it may or may not be dereferenced).
+	instance string
+}
+// NewRFC7807Problem ...
+func NewRFC7807Problem(typeURI string, status int, titleFormat string, a ...interface{}) error {
+	return &RFC7807Problem{
+		typeURI: typeURI,
+		status:  status,
+		title:   fmt.Sprintf(titleFormat, a...),
+	}
+}
+// Error returns the error information as a string.
+func (e *RFC7807Problem) Error() string {
+	return e.title
+}
+// TypeURI ...
+func (e *RFC7807Problem) TypeURI() string {
+	if e.typeURI == "" {
+		e.typeURI = "about:blank"
+	}
+	return e.typeURI
+}
+// Members ...
+func (e *RFC7807Problem) Members() (status int, title, detail, instance string) {
+	return e.status, e.title, e.detail, e.instance
+}*/
diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
new file mode 100644
index 00000000000..3072da0e64f
--- /dev/null
+++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_service.go
@@ -0,0 +1,618 @@
+package azblob
+
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"bytes"
+	"context"
+	"encoding/xml"
+	"github.com/Azure/azure-pipeline-go/pipeline"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// serviceClient is the client for the Service methods of the Azblob service.
+type serviceClient struct {
+	managementClient
+}
+
+// newServiceClient creates an instance of the serviceClient client.
+func newServiceClient(url url.URL, p pipeline.Pipeline) serviceClient {
+	return serviceClient{newManagementClient(url, p)}
+}
+
+// FilterBlobs the Filter Blobs operation enables callers to list blobs across all containers whose tags match a given
+// search expression. Filter blobs searches across all containers within a storage account but can be scoped within
+// the expression to a single container.
+//
+// timeout is the timeout parameter is expressed in seconds. For more information, see Setting
+// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB
+// character limit that is recorded in the analytics logs when storage analytics logging is enabled. where is filters
+// the results to return only blobs whose tags match the specified expression. marker is a string value
+// that identifies the portion of the list of containers to be returned with the next listing operation. The operation
+// returns the NextMarker value within the response body if the listing operation did not return all containers
+// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter
+// in a subsequent call to request the next page of list items. The marker value is opaque to the client. maxresults is
+// specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a
+// value greater than 5000, the server will return up to 5000 items.
Note that if the listing operation crosses a +// partition boundary, then the service will return a continuation token for retrieving the remainder of the results. +// For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the +// default of 5000. +func (client serviceClient) FilterBlobs(ctx context.Context, timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (*FilterBlobSegment, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}, + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.filterBlobsPreparer(timeout, requestID, where, marker, maxresults) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.filterBlobsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*FilterBlobSegment), err +} + +// filterBlobsPreparer prepares the FilterBlobs request. +func (client serviceClient) filterBlobsPreparer(timeout *int32, requestID *string, where *string, marker *string, maxresults *int32) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + if where != nil && len(*where) > 0 { + params.Set("where", *where) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + params.Set("comp", "blobs") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// filterBlobsResponder handles the response to the FilterBlobs request. +func (client serviceClient) filterBlobsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &FilterBlobSegment{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetAccountInfo returns the sku name and account kind +func (client serviceClient) GetAccountInfo(ctx context.Context) (*ServiceGetAccountInfoResponse, error) { + req, err := client.getAccountInfoPreparer() + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getAccountInfoResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ServiceGetAccountInfoResponse), err +} + +// getAccountInfoPreparer prepares the GetAccountInfo request. 
+func (client serviceClient) getAccountInfoPreparer() (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + params.Set("restype", "account") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + return req, nil +} + +// getAccountInfoResponder handles the response to the GetAccountInfo request. +func (client serviceClient) getAccountInfoResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ServiceGetAccountInfoResponse{rawResponse: resp.Response()}, err +} + +// GetProperties gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetProperties(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceProperties, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getPropertiesPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceProperties), err +} + +// getPropertiesPreparer prepares the GetProperties request. +func (client serviceClient) getPropertiesPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getPropertiesResponder handles the response to the GetProperties request. 
+func (client serviceClient) getPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceProperties{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetStatistics retrieves statistics related to replication for the Blob service. It is only available on the +// secondary location endpoint when read-access geo-redundant replication is enabled for the storage account. +// +// timeout is the timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetStatistics(ctx context.Context, timeout *int32, requestID *string) (*StorageServiceStats, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getStatisticsPreparer(timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getStatisticsResponder}, req) + if err != nil { + return nil, err + } + return resp.(*StorageServiceStats), err +} + +// getStatisticsPreparer prepares the GetStatistics request. +func (client serviceClient) getStatisticsPreparer(timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "stats") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// getStatisticsResponder handles the response to the GetStatistics request. +func (client serviceClient) getStatisticsResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &StorageServiceStats{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// GetUserDelegationKey retrieves a user delegation key for the Blob service. This is only a valid operation when using +// bearer token authentication. +// +// keyInfo is key information timeout is the timeout parameter is expressed in seconds. 
For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, timeout *int32, requestID *string) (*UserDelegationKey, error) { + if err := validate([]validation{ + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.getUserDelegationKeyPreparer(keyInfo, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.getUserDelegationKeyResponder}, req) + if err != nil { + return nil, err + } + return resp.(*UserDelegationKey), err +} + +// getUserDelegationKeyPreparer prepares the GetUserDelegationKey request. +func (client serviceClient) getUserDelegationKeyPreparer(keyInfo KeyInfo, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("POST", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "userdelegationkey") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(keyInfo) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// getUserDelegationKeyResponder handles the response to the GetUserDelegationKey request. +func (client serviceClient) getUserDelegationKeyResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &UserDelegationKey{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// ListContainersSegment the List Containers Segment operation returns a list of the containers under the specified +// account +// +// prefix is filters the results to return only containers whose name begins with the specified prefix. marker is a +// string value that identifies the portion of the list of containers to be returned with the next listing operation. +// The operation returns the NextMarker value within the response body if the listing operation did not return all +// containers remaining to be listed with the current page. The NextMarker value can be used as the value for the +// marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the +// client. 
maxresults is specifies the maximum number of containers to return. If the request does not specify +// maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the +// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the +// remainder of the results. For this reason, it is possible that the service will return fewer results than specified +// by maxresults, or than the default of 5000. include is include this parameter to specify that the container's +// metadata be returned as part of the response body. timeout is the timeout parameter is expressed in seconds. For +// more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) ListContainersSegment(ctx context.Context, prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (*ListContainersSegmentResponse, error) { + if err := validate([]validation{ + {targetValue: maxresults, + constraints: []constraint{{target: "maxresults", name: null, rule: false, + chain: []constraint{{target: "maxresults", name: inclusiveMinimum, rule: 1, chain: nil}}}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.listContainersSegmentPreparer(prefix, marker, maxresults, include, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.listContainersSegmentResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ListContainersSegmentResponse), err +} + +// listContainersSegmentPreparer prepares the ListContainersSegment request. +func (client serviceClient) listContainersSegmentPreparer(prefix *string, marker *string, maxresults *int32, include []ListContainersIncludeType, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("GET", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if prefix != nil && len(*prefix) > 0 { + params.Set("prefix", *prefix) + } + if marker != nil && len(*marker) > 0 { + params.Set("marker", *marker) + } + if maxresults != nil { + params.Set("maxresults", strconv.FormatInt(int64(*maxresults), 10)) + } + if include != nil && len(include) > 0 { + params.Set("include", joinConst(include, ",")) + } + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "list") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// listContainersSegmentResponder handles the response to the ListContainersSegment request. 
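The marker/NextMarker handshake documented above is the package's standard continuation-token pattern, and the exported `ServiceURL` wrapper drives this same operation. Below is a minimal sketch of paging through every container with it, assuming the exported `azblob` surface (`NewServiceURL`, `Marker.NotDone()`, `ContainerItems`) matches this vendored revision; the account name is hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

func main() {
	// Anonymous credential keeps the sketch self-contained; real callers
	// would use a SharedKeyCredential or a token credential.
	p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
	u, _ := url.Parse("https://myaccount.blob.core.windows.net") // hypothetical account
	serviceURL := azblob.NewServiceURL(*u, p)

	ctx := context.Background()
	// An empty Marker requests the first page; NotDone() stays true until
	// the service returns an empty NextMarker on the final page.
	for marker := (azblob.Marker{}); marker.NotDone(); {
		page, err := serviceURL.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
		if err != nil {
			log.Fatal(err)
		}
		for _, c := range page.ContainerItems {
			fmt.Println(c.Name)
		}
		// Feed the returned NextMarker back in to request the next page.
		marker = page.NextMarker
	}
}
```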
+func (client serviceClient) listContainersSegmentResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + result := &ListContainersSegmentResponse{rawResponse: resp.Response()} + if err != nil { + return result, err + } + defer resp.Response().Body.Close() + b, err := ioutil.ReadAll(resp.Response().Body) + if err != nil { + return result, err + } + if len(b) > 0 { + b = removeBOM(b) + err = xml.Unmarshal(b, result) + if err != nil { + return result, NewResponseError(err, resp.Response(), "failed to unmarshal response body") + } + } + return result, nil +} + +// SetProperties sets properties for a storage account's Blob service endpoint, including properties for Storage +// Analytics and CORS (Cross-Origin Resource Sharing) rules +// +// storageServiceProperties is the StorageService properties. timeout is the timeout parameter is expressed in seconds. +// For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (*ServiceSetPropertiesResponse, error) { + if err := validate([]validation{ + {targetValue: storageServiceProperties, + constraints: []constraint{{target: "storageServiceProperties.Logging", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy", name: null, rule: true, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.Logging.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.HourMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.HourMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.MinuteMetrics", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.MinuteMetrics.RetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}, + }}, + {target: "storageServiceProperties.DeleteRetentionPolicy", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: null, rule: false, + chain: []constraint{{target: "storageServiceProperties.DeleteRetentionPolicy.Days", name: inclusiveMinimum, rule: 1, chain: nil}}}, + }}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.setPropertiesPreparer(storageServiceProperties, timeout, requestID) + if err != nil { + return nil, 
err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.setPropertiesResponder}, req) + if err != nil { + return nil, err + } + return resp.(*ServiceSetPropertiesResponse), err +} + +// setPropertiesPreparer prepares the SetProperties request. +func (client serviceClient) setPropertiesPreparer(storageServiceProperties StorageServiceProperties, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("PUT", client.url, nil) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("restype", "service") + params.Set("comp", "properties") + req.URL.RawQuery = params.Encode() + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + b, err := xml.Marshal(storageServiceProperties) + if err != nil { + return req, pipeline.NewError(err, "failed to marshal request body") + } + req.Header.Set("Content-Type", "application/xml") + err = req.SetBody(bytes.NewReader(b)) + if err != nil { + return req, pipeline.NewError(err, "failed to set request body") + } + return req, nil +} + +// setPropertiesResponder handles the response to the SetProperties request. +func (client serviceClient) setPropertiesResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK, http.StatusAccepted) + if resp == nil { + return nil, err + } + io.Copy(ioutil.Discard, resp.Response().Body) + resp.Response().Body.Close() + return &ServiceSetPropertiesResponse{rawResponse: resp.Response()}, err +} + +// SubmitBatch the Batch operation allows multiple API calls to be embedded into a single HTTP request. +// +// body is initial data body will be closed upon successful return. Callers should ensure closure when receiving an +// error.contentLength is the length of the request. multipartContentType is required. The value of this header must be +// multipart/mixed with a batch boundary. Example header value: multipart/mixed; boundary=batch_ timeout is the +// timeout parameter is expressed in seconds. For more information, see Setting +// Timeouts for Blob Service Operations. requestID is provides a client-generated, opaque value with a 1 KB +// character limit that is recorded in the analytics logs when storage analytics logging is enabled. +func (client serviceClient) SubmitBatch(ctx context.Context, body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (*SubmitBatchResponse, error) { + if err := validate([]validation{ + {targetValue: body, + constraints: []constraint{{target: "body", name: null, rule: true, chain: nil}}}, + {targetValue: timeout, + constraints: []constraint{{target: "timeout", name: null, rule: false, + chain: []constraint{{target: "timeout", name: inclusiveMinimum, rule: 0, chain: nil}}}}}}); err != nil { + return nil, err + } + req, err := client.submitBatchPreparer(body, contentLength, multipartContentType, timeout, requestID) + if err != nil { + return nil, err + } + resp, err := client.Pipeline().Do(ctx, responderPolicyFactory{responder: client.submitBatchResponder}, req) + if err != nil { + return nil, err + } + return resp.(*SubmitBatchResponse), err +} + +// submitBatchPreparer prepares the SubmitBatch request. 
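SubmitBatch requires the caller to hand it a pre-assembled `multipart/mixed` body whose Content-Type carries the batch boundary, as the comment above describes. The following sketch produces such a body with the standard library; the helper name and the boundary GUID are hypothetical, and the `application/http` sub-request framing follows the Blob Batch convention rather than anything defined in this file:

```go
package example

import (
	"bytes"
	"fmt"
	"mime/multipart"
	"net/textproto"
)

// buildBatchBody (hypothetical helper) assembles a multipart/mixed payload
// and returns an io.ReadSeeker-compatible body, its length, and the exact
// Content-Type value SubmitBatch expects: "multipart/mixed; boundary=batch_...".
func buildBatchBody(subrequests [][]byte) (*bytes.Reader, int64, string, error) {
	buf := &bytes.Buffer{}
	w := multipart.NewWriter(buf)
	// Match the documented boundary convention ("batch_" prefix); the GUID is made up.
	if err := w.SetBoundary("batch_a81be89f-c684-4a21-8d77-f16c22d0a7c7"); err != nil {
		return nil, 0, "", err
	}
	for _, r := range subrequests {
		part, err := w.CreatePart(textproto.MIMEHeader{
			"Content-Type":              {"application/http"},
			"Content-Transfer-Encoding": {"binary"},
		})
		if err != nil {
			return nil, 0, "", err
		}
		if _, err := part.Write(r); err != nil {
			return nil, 0, "", err
		}
	}
	if err := w.Close(); err != nil {
		return nil, 0, "", err
	}
	contentType := fmt.Sprintf("multipart/mixed; boundary=%s", w.Boundary())
	return bytes.NewReader(buf.Bytes()), int64(buf.Len()), contentType, nil
}
```

The three return values line up with SubmitBatch's `body`, `contentLength`, and `multipartContentType` parameters.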
+func (client serviceClient) submitBatchPreparer(body io.ReadSeeker, contentLength int64, multipartContentType string, timeout *int32, requestID *string) (pipeline.Request, error) { + req, err := pipeline.NewRequest("POST", client.url, body) + if err != nil { + return req, pipeline.NewError(err, "failed to create request") + } + params := req.URL.Query() + if timeout != nil { + params.Set("timeout", strconv.FormatInt(int64(*timeout), 10)) + } + params.Set("comp", "batch") + req.URL.RawQuery = params.Encode() + req.Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Header.Set("Content-Type", multipartContentType) + req.Header.Set("x-ms-version", ServiceVersion) + if requestID != nil { + req.Header.Set("x-ms-client-request-id", *requestID) + } + return req, nil +} + +// submitBatchResponder handles the response to the SubmitBatch request. +func (client serviceClient) submitBatchResponder(resp pipeline.Response) (pipeline.Response, error) { + err := validateResponse(resp, http.StatusOK) + if resp == nil { + return nil, err + } + return &SubmitBatchResponse{rawResponse: resp.Response()}, err +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go new file mode 100644 index 00000000000..98a2614e606 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_validation.go @@ -0,0 +1,367 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "fmt" + "github.com/Azure/azure-pipeline-go/pipeline" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type constraint struct { + // Target field name for validation. + target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. + rule interface{} + + // Chain validations for struct type + chain []constraint +} + +// Validation stores parameter-wise validation. +type validation struct { + targetValue interface{} + constraints []constraint +} + +// Constraint list +const ( + empty = "Empty" + null = "Null" + readOnly = "ReadOnly" + pattern = "Pattern" + maxLength = "MaxLength" + minLength = "MinLength" + maxItems = "MaxItems" + minItems = "MinItems" + multipleOf = "MultipleOf" + uniqueItems = "UniqueItems" + inclusiveMaximum = "InclusiveMaximum" + exclusiveMaximum = "ExclusiveMaximum" + exclusiveMinimum = "ExclusiveMinimum" + inclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. 
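The `constraint`/`validation` types above encode a small declarative rule tree: each parameter gets a `targetValue` plus a list of constraints, and `chain` descends into dereferenced or nested values. A minimal sketch of composing them by hand follows; it must live inside `package azblob` because the types are unexported, and the function and parameter names are hypothetical:

```go
package azblob

// validateNamePtr (hypothetical) rejects a nil pointer (null with rule=true)
// and, via the chain, a dereferenced string shorter than 3 characters.
func validateNamePtr(name *string) error {
	return validate([]validation{
		{targetValue: name,
			constraints: []constraint{{target: "name", name: null, rule: true,
				chain: []constraint{{target: "name", name: minLength, rule: 3, chain: nil}}}}},
	})
}
```

This mirrors the timeout/maxresults checks the generated operations above perform, where `rule: false` marks the pointer itself optional while the chained `inclusiveMinimum` still applies whenever a value is supplied.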
+func validate(m []validation) error { + for _, item := range m { + v := reflect.ValueOf(item.targetValue) + for _, constraint := range item.constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.target)) + } + err := validate([]validation{ + { + targetValue: getInterfaceValue(f), + constraints: []constraint{v}, + }, + }) + return err +} + +func validatePtr(x reflect.Value, v constraint) error { + if v.name == readOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x.Elem()), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v constraint) error { + i := x.Int() + r, ok := v.rule.(int) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case multipleOf: + if i%int64(r) != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case exclusiveMinimum: + if i <= int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if i >= int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if i < int64(r) { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if i > int64(r) { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.name)) + } + return nil +} + +func validateFloat(x reflect.Value, v constraint) error { + f := x.Float() + r, ok := v.rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.name, v.rule)) + } + switch v.name { + case exclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case exclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case inclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case inclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.name)) + } + return nil +} + +func 
validateString(x reflect.Value, v constraint) error { + s := x.String() + switch v.name { + case empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.rule)) + } + case maxLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) > v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than %v", v.rule)) + } + case minLength: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.name, v.rule)) + } + if len(s) < v.rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than %v", v.rule)) + } + case readOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v constraint) error { + switch v.name { + case null: + if x.IsNil() { + return checkNil(x, v) + } + case empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case maxItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() > v.rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.rule, x.Len())) + } + case minItems: + if _, ok := v.rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.name, v.rule)) + } + if x.Len() < v.rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.rule, x.Len())) + } + case uniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.name, x.Kind())) + } + case readOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case pattern: + reg, err := regexp.Compile(v.rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.name)) + } + if v.chain != nil { + return validate([]validation{ + { + targetValue: getInterfaceValue(x), + constraints: v.chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must 
be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v constraint) error { + if _, ok := v.rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.name, v.rule)) + } + if v.rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v constraint, message string) error { + return pipeline.NewError(nil, fmt.Sprintf("validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.target, v.name, getInterfaceValue(x), message)) +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go new file mode 100644 index 00000000000..d7672a1dfd2 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_generated_version.go @@ -0,0 +1,14 @@ +package azblob + +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/0.0.0 azblob/2020-10-02" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return "0.0.0" +} diff --git a/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go new file mode 100644 index 00000000000..45be7e0fdc7 --- /dev/null +++ b/vendor/github.com/Azure/azure-storage-blob-go/azblob/zz_response_helpers.go @@ -0,0 +1,240 @@ +package azblob + +import ( + "context" + "io" + "net/http" + "time" +) + +// BlobHTTPHeaders contains read/writeable blob properties. +type BlobHTTPHeaders struct { + ContentType string + ContentMD5 []byte + ContentEncoding string + ContentLanguage string + ContentDisposition string + CacheControl string +} + +// NewHTTPHeaders returns the user-modifiable properties for this blob. 
+func (bgpr BlobGetPropertiesResponse) NewHTTPHeaders() BlobHTTPHeaders {
+	return BlobHTTPHeaders{
+		ContentType:        bgpr.ContentType(),
+		ContentEncoding:    bgpr.ContentEncoding(),
+		ContentLanguage:    bgpr.ContentLanguage(),
+		ContentDisposition: bgpr.ContentDisposition(),
+		CacheControl:       bgpr.CacheControl(),
+		ContentMD5:         bgpr.ContentMD5(),
+	}
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// NewHTTPHeaders returns the user-modifiable properties for this blob.
+func (dr downloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
+	return BlobHTTPHeaders{
+		ContentType:        dr.ContentType(),
+		ContentEncoding:    dr.ContentEncoding(),
+		ContentLanguage:    dr.ContentLanguage(),
+		ContentDisposition: dr.ContentDisposition(),
+		CacheControl:       dr.CacheControl(),
+		ContentMD5:         dr.ContentMD5(),
+	}
+}
+
+// /////////////////////////////////////////////////////////////////////////////
+
+// DownloadResponse wraps AutoRest generated downloadResponse and helps to provide info for retry.
+type DownloadResponse struct {
+	r       *downloadResponse
+	ctx     context.Context
+	b       BlobURL
+	getInfo HTTPGetterInfo
+}
+
+// Body constructs a new RetryReader stream for reading data. If a connection fails
+// while reading, it will make additional requests to reestablish a connection and
+// continue reading. Specifying RetryReaderOptions with MaxRetryRequests set to 0
+// (the default) returns the original response body, and no retries will be performed.
+func (r *DownloadResponse) Body(o RetryReaderOptions) io.ReadCloser {
+	if o.MaxRetryRequests == 0 { // No additional retries
+		return r.Response().Body
+	}
+	return NewRetryReader(r.ctx, r.Response(), r.getInfo, o,
+		func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) {
+			resp, err := r.b.Download(ctx, getInfo.Offset, getInfo.Count, BlobAccessConditions{
+				ModifiedAccessConditions: ModifiedAccessConditions{IfMatch: getInfo.ETag},
+			}, false, o.ClientProvidedKeyOptions)
+			if err != nil {
+				return nil, err
+			}
+			return resp.Response(), err
+		},
+	)
+}
+
+// Response returns the raw HTTP response object.
+func (r DownloadResponse) Response() *http.Response {
+	return r.r.Response()
+}
+
+// NewHTTPHeaders returns the user-modifiable properties for this blob.
+func (r DownloadResponse) NewHTTPHeaders() BlobHTTPHeaders {
+	return r.r.NewHTTPHeaders()
+}
+
+// BlobContentMD5 returns the value for header x-ms-blob-content-md5.
+func (r DownloadResponse) BlobContentMD5() []byte {
+	return r.r.BlobContentMD5()
+}
+
+// ContentMD5 returns the value for header Content-MD5.
+func (r DownloadResponse) ContentMD5() []byte {
+	return r.r.ContentMD5()
+}
+
+// StatusCode returns the HTTP status code of the response, e.g. 200.
+func (r DownloadResponse) StatusCode() int {
+	return r.r.StatusCode()
+}
+
+// Status returns the HTTP status message of the response, e.g. "200 OK".
+func (r DownloadResponse) Status() string {
+	return r.r.Status()
+}
+
+// AcceptRanges returns the value for header Accept-Ranges.
+func (r DownloadResponse) AcceptRanges() string {
+	return r.r.AcceptRanges()
+}
+
+// BlobCommittedBlockCount returns the value for header x-ms-blob-committed-block-count.
+func (r DownloadResponse) BlobCommittedBlockCount() int32 {
+	return r.r.BlobCommittedBlockCount()
+}
+
+// BlobSequenceNumber returns the value for header x-ms-blob-sequence-number.
+func (r DownloadResponse) BlobSequenceNumber() int64 {
+	return r.r.BlobSequenceNumber()
+}
+
+// BlobType returns the value for header x-ms-blob-type.
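The wrapper above exists mostly for `Body(RetryReaderOptions)`: once `MaxRetryRequests` is non-zero, a dropped connection mid-stream triggers ranged re-GETs pinned to the original ETag instead of failing the whole transfer. A sketch of a download that tolerates transient network failures, assuming the exported `Download` signature shown in the retry callback above; the helper name is hypothetical:

```go
package example

import (
	"context"
	"io"
	"os"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// downloadWithRetries (hypothetical helper) copies a whole blob to dst,
// letting the retry reader re-establish the stream up to three times.
func downloadWithRetries(ctx context.Context, blobURL azblob.BlobURL, dst *os.File) error {
	resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return err
	}
	// MaxRetryRequests: 0 (the default) would hand back the raw body with
	// no retry behavior, per the Body documentation above.
	body := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: 3})
	defer body.Close()
	_, err = io.Copy(dst, body)
	return err
}
```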
+func (r DownloadResponse) BlobType() BlobType { + return r.r.BlobType() +} + +// CacheControl returns the value for header Cache-Control. +func (r DownloadResponse) CacheControl() string { + return r.r.CacheControl() +} + +// ContentDisposition returns the value for header Content-Disposition. +func (r DownloadResponse) ContentDisposition() string { + return r.r.ContentDisposition() +} + +// ContentEncoding returns the value for header Content-Encoding. +func (r DownloadResponse) ContentEncoding() string { + return r.r.ContentEncoding() +} + +// ContentLanguage returns the value for header Content-Language. +func (r DownloadResponse) ContentLanguage() string { + return r.r.ContentLanguage() +} + +// ContentLength returns the value for header Content-Length. +func (r DownloadResponse) ContentLength() int64 { + return r.r.ContentLength() +} + +// ContentRange returns the value for header Content-Range. +func (r DownloadResponse) ContentRange() string { + return r.r.ContentRange() +} + +// ContentType returns the value for header Content-Type. +func (r DownloadResponse) ContentType() string { + return r.r.ContentType() +} + +// CopyCompletionTime returns the value for header x-ms-copy-completion-time. +func (r DownloadResponse) CopyCompletionTime() time.Time { + return r.r.CopyCompletionTime() +} + +// CopyID returns the value for header x-ms-copy-id. +func (r DownloadResponse) CopyID() string { + return r.r.CopyID() +} + +// CopyProgress returns the value for header x-ms-copy-progress. +func (r DownloadResponse) CopyProgress() string { + return r.r.CopyProgress() +} + +// CopySource returns the value for header x-ms-copy-source. +func (r DownloadResponse) CopySource() string { + return r.r.CopySource() +} + +// CopyStatus returns the value for header x-ms-copy-status. +func (r DownloadResponse) CopyStatus() CopyStatusType { + return r.r.CopyStatus() +} + +// CopyStatusDescription returns the value for header x-ms-copy-status-description. +func (r DownloadResponse) CopyStatusDescription() string { + return r.r.CopyStatusDescription() +} + +// Date returns the value for header Date. +func (r DownloadResponse) Date() time.Time { + return r.r.Date() +} + +// ETag returns the value for header ETag. +func (r DownloadResponse) ETag() ETag { + return ETag(r.r.ETag()) +} + +// IsServerEncrypted returns the value for header x-ms-server-encrypted. +func (r DownloadResponse) IsServerEncrypted() string { + return r.r.IsServerEncrypted() +} + +// LastModified returns the value for header Last-Modified. +func (r DownloadResponse) LastModified() time.Time { + return r.r.LastModified() +} + +// LeaseDuration returns the value for header x-ms-lease-duration. +func (r DownloadResponse) LeaseDuration() LeaseDurationType { + return r.r.LeaseDuration() +} + +// LeaseState returns the value for header x-ms-lease-state. +func (r DownloadResponse) LeaseState() LeaseStateType { + return r.r.LeaseState() +} + +// LeaseStatus returns the value for header x-ms-lease-status. +func (r DownloadResponse) LeaseStatus() LeaseStatusType { + return r.r.LeaseStatus() +} + +// RequestID returns the value for header x-ms-request-id. +func (r DownloadResponse) RequestID() string { + return r.r.RequestID() +} + +// Version returns the value for header x-ms-version. +func (r DownloadResponse) Version() string { + return r.r.Version() +} + +// NewMetadata returns user-defined key/value pairs. 
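NewHTTPHeaders packages every user-modifiable header into one struct, which matters because the corresponding set operation on the service replaces the whole set at once. A sketch of the resulting get-modify-set round trip, assuming the exported `BlobURL.GetProperties`/`SetHTTPHeaders` signatures in this vendored revision; the helper name is hypothetical:

```go
package example

import (
	"context"

	"github.com/Azure/azure-storage-blob-go/azblob"
)

// updateContentType (hypothetical helper) changes one header while keeping
// the rest intact by starting from the full NewHTTPHeaders() snapshot.
func updateContentType(ctx context.Context, blobURL azblob.BlobURL) error {
	props, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{})
	if err != nil {
		return err
	}
	headers := props.NewHTTPHeaders() // snapshot of all user-modifiable headers
	headers.ContentType = "application/json"
	// SetHTTPHeaders replaces the entire header set, so fields we did not
	// touch are re-sent with their current values rather than cleared.
	_, err = blobURL.SetHTTPHeaders(ctx, headers, azblob.BlobAccessConditions{})
	return err
}
```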
+func (r DownloadResponse) NewMetadata() Metadata {
+	return r.r.NewMetadata()
+}
diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore
new file mode 100644
index 00000000000..3350aaf7064
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/.gitignore
@@ -0,0 +1,32 @@
+# The standard Go .gitignore file follows. (Sourced from: github.com/github/gitignore/master/Go.gitignore)
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.DS_Store
+.idea/
+.vscode/
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+# go-autorest specific
+vendor/
+autorest/azure/example/example
diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
new file mode 100644
index 00000000000..d1f596bfc9b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md
@@ -0,0 +1,1004 @@
+# CHANGELOG
+
+## v14.2.0
+
+- Added package comment to make `github.com/Azure/go-autorest` importable.
+
+## v14.1.1
+
+### Bug Fixes
+
+- Change `x-ms-authorization-auxiliary` header value separator to comma.
+
+## v14.1.0
+
+### New Features
+
+- Added `azure.SetEnvironment()` that will update the global environments map with the specified values.
+
+## v14.0.1
+
+### Bug Fixes
+
+- Fix race condition when refreshing token.
+- Fixed some tests to work with Go 1.14.
+
+## v14.0.0
+
+### Breaking Changes
+
+- By default, the `DoRetryForStatusCodes` functions will no longer infinitely retry a request when the response returns an HTTP status code of 429 (StatusTooManyRequests). To opt in to the old behavior set `autorest.Count429AsRetry` to `false`.
+
+### New Features
+
+- Variable `autorest.Max429Delay` can be used to control the maximum delay between retries when a 429 is received with no `Retry-After` header. The default is zero which means there is no cap.
+
+## v13.4.0
+
+### New Features
+
+- Added field `SendDecorators` to the `Client` type. This can be used to specify a custom chain of SendDecorators per client.
+- Added method `Client.Send()` which includes logic for selecting the preferred chain of SendDecorators.
+
+## v13.3.3
+
+### Bug Fixes
+
+- Fixed connection leak when retrying requests.
+- Enabled exponential back-off with a 2-minute cap when retrying on 429.
+- Fixed some cases where errors were inadvertently dropped.
+
+## v13.3.2
+
+### Bug Fixes
+
+- Updated `autorest.AsStringSlice()` to convert slice elements to their string representation.
+
+## v13.3.1
+
+- Updated external dependencies.
+
+## v13.3.0
+
+### New Features
+
+- Added support for shared key and shared access signature token authorization.
+  - `autorest.NewSharedKeyAuthorizer()` and dependent types.
+  - `autorest.NewSASTokenAuthorizer()` and dependent types.
+- Added `ServicePrincipalToken.SetCustomRefresh()` so a custom refresh function can be invoked when a token has expired.
+
+### Bug Fixes
+
+- Fixed `cli.AccessTokensPath()` to respect `AZURE_CONFIG_DIR` when set.
+- Support parsing error messages in XML responses.
+
+## v13.2.0
+
+### New Features
+
+- Added the following functions to replace their versions that don't take a context.
+  - `adal.InitiateDeviceAuthWithContext()`
+  - `adal.CheckForUserCompletionWithContext()`
+  - `adal.WaitForUserCompletionWithContext()`
+
+## v13.1.0
+
+### New Features
+
+- Added support for MSI authentication on Azure App Service and Azure Functions.
+
+## v13.0.2
+
+### Bug Fixes
+
+- Always retry a request even if the sender returns a non-nil error.
+
+## v13.0.1
+
+### Bug Fixes
+
+- Fixed `autorest.WithQueryParameters()` so that it properly encodes multi-value query parameters.
+
+## v13.0.0
+
+### Breaking Changes
+
+The `tracing` package has been rewritten to provide a common interface for consumers to wire in the tracing package of their choice.
+What this means is that by default no tracing provider will be compiled into your program and setting the `AZURE_SDK_TRACING_ENABLED`
+environment variable will have no effect. To restore the previous behavior you must now add the following import to your source file.
+```go
+  import _ "github.com/Azure/go-autorest/tracing/opencensus"
+```
+The APIs required by autorest-generated code have remained but some APIs have been removed and new ones added.
+The following APIs and variables have been removed (the majority of them were moved to the `opencensus` package).
+- tracing.Transport
+- tracing.Enable()
+- tracing.EnableWithAIForwarding()
+- tracing.Disable()
+
+The following APIs and types have been added
+- tracing.Tracer
+- tracing.Register()
+
+To hook up a tracer simply call `tracing.Register()` passing in a type that satisfies the `tracing.Tracer` interface.
+
+## v12.4.3
+
+### Bug Fixes
+
+- `autorest.MultiTenantServicePrincipalTokenAuthorizer` will now properly add its auxiliary bearer tokens.
+
+## v12.4.2
+
+### Bug Fixes
+
+- Improvements to the fixes made in v12.4.1.
+  - Remove `override` stanza from Gopkg.toml and `replace` directive from go.mod as they don't apply when being consumed as a dependency.
+  - Switched to latest version of `ocagent` that still depends on protobuf v1.2.
+  - Add indirect dependencies to the `required` clause with matching `constraint` stanzas so that `dep` dependencies match go.sum.
+
+## v12.4.1
+
+### Bug Fixes
+
+- Updated OpenCensus and OCAgent versions to versions that don't depend on v1.3+ of protobuf as it was breaking kubernetes.
+- Pinned opencensus-proto to a version that's compatible with our versions of OpenCensus and OCAgent.
+
+## v12.4.0
+
+### New Features
+
+- Added `autorest.WithPrepareDecorators` and `autorest.GetPrepareDecorators` for adding and retrieving a custom chain of PrepareDecorators to the provided context.
+
+## v12.3.0
+
+### New Features
+
+- Support for multi-tenant via x-ms-authorization-auxiliary header has been added for client credentials with
+  secret scenario; this bundles multiple OAuthConfig and ServicePrincipalToken types into corresponding
+  MultiTenant* types along with a new authorizer that adds the primary and auxiliary token headers to the request.
+  The authentication helpers have been updated to support this scenario; if environment var AZURE_AUXILIARY_TENANT_IDS
+  is set with a semicolon delimited list of tenants the multi-tenant codepath will kick in to create the appropriate authorizer.
+  See `adal.NewMultiTenantOAuthConfig`, `adal.NewMultiTenantServicePrincipalToken` and `autorest.NewMultiTenantServicePrincipalTokenAuthorizer`
+  along with their supporting types and methods.
+- Added `autorest.WithSendDecorators` and `autorest.GetSendDecorators` for adding and retrieving a custom chain of SendDecorators to the provided context.
+- Added `autorest.DoRetryForStatusCodesWithCap` and `autorest.DelayForBackoffWithCap` to enforce an upper bound on the duration between retries. + +## v12.2.0 + +### New Features + +- Added `autorest.WithXML`, `autorest.AsMerge`, `autorest.WithBytes` preparer decorators. +- Added `autorest.ByUnmarshallingBytes` response decorator. +- Added `Response.IsHTTPStatus` and `Response.HasHTTPStatus` helper methods for inspecting HTTP status code in `autorest.Response` types. + +### Bug Fixes + +- `autorest.DelayWithRetryAfter` now supports HTTP-Dates in the `Retry-After` header and is not limited to just 429 status codes. + +## v12.1.0 + +### New Features + +- Added `to.ByteSlicePtr()`. +- Added blob/queue storage resource ID to `azure.ResourceIdentifier`. + +## v12.0.0 + +### Breaking Changes + +In preparation for modules the following deprecated content has been removed. + + - async.NewFuture() + - async.Future.Done() + - async.Future.WaitForCompletion() + - async.DoPollForAsynchronous() + - The `utils` package + - validation.NewErrorWithValidationError() + - The `version` package + +## v11.9.0 + +### New Features + +- Add `ResourceIdentifiers` field to `azure.Environment` containing resource IDs for public and sovereign clouds. + +## v11.8.0 + +### New Features + +- Added `autorest.NewClientWithOptions()` to support endpoints that require free renegotiation. + +## v11.7.1 + +### Bug Fixes + +- Fix missing support for http(s) proxy when using the default sender. + +## v11.7.0 + +### New Features + +- Added methods to obtain a ServicePrincipalToken on the various credential configuration types in the `auth` package. + +## v11.6.1 + +### Bug Fixes + +- Fix ACR DNS endpoint for government clouds. +- Add Cosmos DB DNS endpoints. +- Update dependencies to resolve build breaks in OpenCensus. + +## v11.6.0 + +### New Features + +- Added type `autorest.BasicAuthorizer` to support Basic authentication. + +## v11.5.2 + +### Bug Fixes + +- Fixed `GetTokenFromCLI` did not work with zsh. + +## v11.5.1 + +### Bug Fixes + +- In `Client.sender()` set the minimum TLS version on HTTP clients to 1.2. + +## v11.5.0 + +### New Features + +- The `auth` package has been refactored so that the environment and file settings are now available. +- The methods used in `auth.NewAuthorizerFromEnvironment()` are now exported so that custom authorization chains can be created. +- Added support for certificate authorization for file-based config. + +## v11.4.0 + +### New Features + +- Added `adal.AddToUserAgent()` so callers can append custom data to the user-agent header used for ADAL requests. +- Exported `adal.UserAgent()` for parity with `autorest.Client`. + +## v11.3.2 + +### Bug Fixes + +- In `Future.WaitForCompletionRef()` if the provided context has a deadline don't add the default deadline. + +## v11.3.1 + +### Bug Fixes + +- For an LRO PUT operation the final GET URL was incorrectly set to the Location polling header in some cases. + +## v11.3.0 + +### New Features + +- Added method `ServicePrincipalToken()` to `DeviceFlowConfig` type. + +## v11.2.8 + +### Bug Fixes + +- Deprecate content in the `version` package. The functionality has been superseded by content in the `autorest` package. + +## v11.2.7 + +### Bug Fixes + +- Fix environment variable name for enabling tracing from `AZURE_SDK_TRACING_ENABELD` to `AZURE_SDK_TRACING_ENABLED`. + Note that for backward compatibility reasons, both will work until the next major version release of the package. 
+
+## v11.2.6
+
+### Bug Fixes
+
+- If zero bytes are read from a polling response body don't attempt to unmarshal them.
+
+## v11.2.5
+
+### Bug Fixes
+
+- Removed race condition in `autorest.DoRetryForStatusCodes`.
+
+## v11.2.4
+
+### Bug Fixes
+
+- Function `cli.ProfilePath` now respects the environment variable `AZURE_CONFIG_DIR` if available.
+
+## v11.2.1
+
+NOTE: Versions of Go prior to 1.10 have been removed from CI as they no
+longer work with golint.
+
+### Bug Fixes
+
+- Method `MSIConfig.Authorizer` now supports user-assigned identities.
+- The adal package now reports its own user-agent string.
+
+## v11.2.0
+
+### New Features
+
+- Added `tracing` package that enables instrumentation of HTTP and API calls.
+  Setting the env variable `AZURE_SDK_TRACING_ENABLED` or calling `tracing.Enable`
+  will start instrumenting the code for metrics and traces.
+  Additionally, setting the env variable `OCAGENT_TRACE_EXPORTER_ENDPOINT` or
+  calling `tracing.EnableWithAIForwarding` will start the instrumentation and connect to an
+  App Insights Local Forwarder that needs to be running. Note that if the
+  AI Local Forwarder is not running, tracing will still be enabled.
+  By default, instrumentation is disabled. Once enabled, instrumentation can also
+  be programmatically disabled by calling `Disable`.
+- Added `DoneWithContext` call for checking LRO status. `Done` has been deprecated.
+
+### Bug Fixes
+
+- Don't use the initial request's context for LRO polling.
+- Don't override the `refreshLock` and the `http.Client` when unmarshalling `ServicePrincipalToken` if
+  they are already set.
+
+## v11.1.1
+
+### Bug Fixes
+
+- When creating a future always include the polling tracker even if there's a failure; this allows the underlying response to be obtained by the caller.
+
+## v11.1.0
+
+### New Features
+
+- Added `auth.NewAuthorizerFromCLI` to create an authorizer configured from the Azure CLI 2.0.
+- Added `adal.NewOAuthConfigWithAPIVersion` to create an OAuthConfig with the specified API version.
+
+## v11.0.1
+
+### New Features
+
+- Added `x5c` header to client assertion for certificate Issuer+Subject Name authentication.
+
+## v11.0.0
+
+### Breaking Changes
+
+- To handle differences between ADFS and AAD the following fields have had their types changed from `string` to `json.Number`:
+  - ExpiresIn
+  - ExpiresOn
+  - NotBefore
+
+### New Features
+
+- Added `auth.NewAuthorizerFromFileWithResource` to create an authorizer from the config file with the specified resource.
+- Setting a client's `PollingDuration` to zero will use the provided context to control an LRO's polling duration.
+
+## v10.15.5
+
+### Bug Fixes
+
+- In `DoRetryForStatusCodes`, if a request's context is cancelled return the last response.
+
+## v10.15.4
+
+### Bug Fixes
+
+- If a polling operation returns a failure status code return the associated error.
+
+## v10.15.3
+
+### Bug Fixes
+
+- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header.
+
+## v10.15.2
+
+### Bug Fixes
+
+- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers.
+
+## v10.15.1
+
+### Bug Fixes
+
+- If an LRO API returns a `Failed` provisioning state in the initial response return an error at that point so the caller doesn't have to poll.
+- For failed LROs without an OData v4 error include the response body in the error's `AdditionalInfo` field to aid in diagnosing the failure.
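+
+Many of the LRO fixes above revolve around `azure.Future`. As a reference, a minimal sketch of the polling pattern using the `NewFutureFromResponse()`/`WaitForCompletionRef()`/`GetResult()` APIs described in the v10.9.0 entry below; `resp`, `ctx`, and `client` are assumed to come from the caller:
+
+```go
+// resp is the initial *http.Response that started the LRO and
+// client is the autorest.Client that issued it (both assumed here).
+future, err := azure.NewFutureFromResponse(resp)
+if err != nil {
+	return err
+}
+// Block until the operation reaches a terminal state, honoring ctx's deadline.
+if err := future.WaitForCompletionRef(ctx, client); err != nil {
+	return err
+}
+// Issue the final GET (when one is required) to fetch the result.
+finalResp, err := future.GetResult(client)
+if err != nil {
+	return err
+}
+defer finalResp.Body.Close()
+// finalResp now holds the operation's result.
+```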
+
+## v10.15.0
+
+### New Features
+
+- Add initial support for request/response logging via setting environment variables.
+  Setting `AZURE_GO_SDK_LOG_LEVEL` to `LogInfo` will log request/response
+  without their bodies. To include the bodies set the log level to `LogDebug`.
+  By default the logger writes to stderr, however it can also write to stdout or a file
+  if specified in `AZURE_GO_SDK_LOG_FILE`. Note that if the specified file
+  already exists it will be truncated.
+  IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key
+  headers. Any other secrets will _not_ be redacted.
+
+## v10.14.0
+
+### New Features
+
+- Added package version that contains version constants and user-agent data.
+
+### Bug Fixes
+
+- Add the user-agent to token requests.
+
+## v10.13.0
+
+- Added support for additionalInfo in ServiceError type.
+
+## v10.12.0
+
+### New Features
+
+- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximum number of attempts to refresh an MSI token.
+
+## v10.11.4
+
+### Bug Fixes
+
+- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult().
+- If there is no "final GET URL" return an error from Future.GetResult().
+
+## v10.11.3
+
+### Bug Fixes
+
+- In IMDS retry logic, if we don't receive a response don't retry.
+  - Renamed the retry function so it's clear it's meant for IMDS only.
+- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost.
+  - Also add the raw HTTP response to the DetailedResponse.
+- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration().
+
+## v10.11.2
+
+### Bug Fixes
+
+- Validation for integers handles int and int64 types.
+
+## v10.11.1
+
+### Bug Fixes
+
+- Adding User information to authorization config as parsed from CLI cache.
+
+## v10.11.0
+
+### New Features
+
+- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret.
+- Added method ServicePrincipalToken.MarshalTokenJSON() to marshal the inner Token.
+
+## v10.10.0
+
+### New Features
+
+- Most ServicePrincipalTokens can now be marshalled/unmarshalled to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported).
+- Added method ServicePrincipalToken.SetRefreshCallbacks().
+
+## v10.9.2
+
+### Bug Fixes
+
+- Refreshing a refresh token obtained from a web app authorization code now works.
+
+## v10.9.1
+
+### Bug Fixes
+
+- The retry logic for MSI token requests now uses exponential backoff per the guidelines.
+- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface.
+
+## v10.9.0
+
+### Deprecated Methods
+
+|                 Old Method |          New Method           |
+| -------------------------: | :---------------------------: |
+|          azure.NewFuture() | azure.NewFutureFromResponse() |
+| Future.WaitForCompletion() | Future.WaitForCompletionRef() |
+
+### New Features
+
+- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation.
+- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation.
+
+### Bug Fixes
+
+- Some futures failed to return their results; this should now be fixed.
+
+## v10.8.2
+
+### Bug Fixes
+
+- Add nil-guard to token retry logic.
+
+## v10.8.1
+
+### Bug Fixes
+
+- Return a TokenRefreshError if the sender fails on the initial request.
+- Don't retry on non-temporary network errors.
+
+## v10.8.0
+
+- Added NewAuthorizerFromEnvironmentWithResource() helper function.
+
+## v10.7.0
+
+### New Features
+
+- Added \*WithContext() methods to ADAL token refresh operations.
+
+## v10.6.2
+
+- Fixed a bug in device authentication.
+
+## v10.6.1
+
+- Added retries to the MSI token get request.
+
+## v10.6.0
+
+- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint.
+
+## v10.5.1
+
+### Bug Fixes
+
+- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`; the `-v` flag is required.
+
+## v10.5.0
+
+### New Features
+
+- Added NewPollingRequestWithContext() for use with polling asynchronous operations.
+
+### Bug Fixes
+
+- Make retry logic use the request's context instead of the deprecated Cancel object.
+
+## v10.4.0
+
+### New Features
+
+- Added helper for parsing Azure Resource IDs.
+- Added deprecation message to utils.GetEnvVarOrExit().
+
+## v10.3.0
+
+### New Features
+
+- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid cloud model, where one may define their own endpoints.
+- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where the TokenAudience endpoint can be different from the ResourceManagerEndpoint.
+
+## v10.2.0
+
+### New Features
+
+- Added endpoints for batch management.
+
+## v10.1.3
+
+### Bug Fixes
+
+- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization().
+- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers.
+
+## v10.1.2
+
+- Corrected comment for auth.NewAuthorizerFromFile() function.
+
+## v10.1.1
+
+- Updated version number to match current release.
+
+## v10.1.0
+
+### New Features
+
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated).
+
+## v10.0.0
+
+### New Features
+
+- Added target and innererror fields to ServiceError to comply with the OData v4 spec.
+- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors).
+- Added helper methods for obtaining authorizers.
+- Expose the polling URL for futures.
+
+### Bug Fixes
+
+- Switched from glide to dep for dependency management.
+- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec.
+- Fixed a race condition in token refresh.
+
+### Breaking Changes
+
+- The ServiceError.Details field type has been changed to match the OData v4 spec.
+- Go v1.7 has been dropped from CI.
+- API parameter validation failures will now return a unique error type validation.Error.
+- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race).
+
+## v9.10.0
+
+- Fix the Service Bus suffix in the Azure public environment.
+- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control).
+
+## v9.9.0
+
+### New Features
+
+- Added EventGridKeyAuthorizer for key authorization with event grid topics.
+
+### Bug Fixes
+
+- Fixed race condition when auto-refreshing service principal tokens.
+
+## v9.8.1
+
+### Bug Fixes
+
+- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations.
+- Updated runtime version info so it's current.
+
+## v9.8.0
+
+### New Features
+
+- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed.
+
+## v9.7.1
+
+### Bug Fixes
+
+- Use correct AAD and Graph endpoints for US Gov environment.
+
+## v9.7.0
+
+### New Features
+
+- Added support for application/octet-stream MIME types.
+
+## v9.6.1
+
+### Bug Fixes
+
+- Ensure Authorization header is added to request when polling for registration status.
+
+## v9.6.0
+
+### New Features
+
+- Added support for acquiring tokens via MSI with a user-assigned identity.
+
+## v9.5.3
+
+### Bug Fixes
+
+- Don't remove encoding of existing URL query parameters when calling autorest.WithQueryParameters.
+- Set the correct Content-Type when using autorest.WithFormData.
+
+## v9.5.2
+
+### Bug Fixes
+
+- Check for nil \*http.Response before dereferencing it.
+
+## v9.5.1
+
+### Bug Fixes
+
+- Don't count http.StatusTooManyRequests (429) against the retry cap.
+- Use retry logic when SkipResourceProviderRegistration is set to true.
+
+## v9.5.0
+
+### New Features
+
+- Added support for username + password, API key, authorization code and cognitive services authentication.
+- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs.
+- Added utility function AsStringSlice() to convert its parameters to a string slice.
+
+### Bug Fixes
+
+- When checking for authentication failures look at the error type, not the status code, as it could vary.
+
+## v9.4.2
+
+### Bug Fixes
+
+- Validate parameters when creating credentials.
+- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed.
+
+## v9.4.1
+
+### Bug Fixes
+
+- Updated AccessTokensPath() to read the access tokens path from AZURE_ACCESS_TOKEN_FILE. If this
+  environment variable is not set, it falls back to the default path set by the Azure CLI.
+- Use case-insensitive string comparison for polling states.
+
+## v9.4.0
+
+### New Features
+
+- Added WaitForCompletion() to Future as a default polling implementation.
+
+### Bug Fixes
+
+- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes.
+
+## v9.3.1
+
+### Bug Fixes
+
+- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error.
+
+## v9.3.0
+
+### New Features
+
+- Added PollingMethod() to Future so callers know what kind of polling mechanism is used.
+- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs).
+
+## v9.2.0
+
+### New Features
+
+- Added support for custom Azure Stack endpoints.
+- Added type azure.Future used to track the status of long-running operations.
+
+### Bug Fixes
+
+- Preserve the original error in DoRetryWithRegistration when registration fails.
+
+## v9.1.1
+
+- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`.
+
+## v9.1.0
+
+### New Features
+
+- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error.
+- Support for loading Azure CLI Authentication files.
+- Automatically register your subscription with the Azure Resource Provider if it hasn't been registered previously.
+
+### Bug Fixes
+
+- RetriableRequest can now tolerate a ReadSeekable body being read but not reset.
+- Added missing Apache headers.
+
+## v9.0.0
+
+> **IMPORTANT:** This release was initially labeled incorrectly as `v8.4.0`.
From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We apologize for any inconvenience this causes.
+
+Adding MSI Endpoint Support and CLI token rehydration.
+
+## v8.3.1
+
+Pick up bug fix in adal for MSI support.
+
+## v8.3.0
+
+Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience.
+
+## v8.2.0
+
+### New Features
+
+- Add support for bearer authentication callbacks
+- Support 429 response codes that include "Retry-After" header
+- Support validation constraint "Pattern" for map keys
+
+### Bug Fixes
+
+- Make RetriableRequest work with multiple versions of Go
+
+## v8.1.1
+
+Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8.
+
+## v8.1.0
+
+Adds RetriableRequest type for more efficient handling of retrying HTTP requests.
+
+## v8.0.0
+
+ADAL refactored into its own package.
+Support for UNIX time.
+
+## v7.3.1
+
+- Version Testing now removed from production bits that are shipped with the library.
+
+## v7.3.0
+
+- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations
+  to acknowledge that they do not need either the entire or a trailing portion
+  of a response body. In doing so, Go's http library can reuse HTTP
+  connections more readily.
+- Adding `PrepareDecorator` to target custom BaseURLs.
+- Adding ACR suffix to public cloud environment.
+- Updating Glide dependencies.
+
+## v7.2.5
+
+- Fixed the Active Directory endpoint for the China cloud.
+- Removes UTF-8 BOM if present in response payload.
+- Added telemetry.
+
+## v7.2.3
+
+- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay
+  duration.
+
+## v7.2.2
+
+- autorest/azure: added ASM and ARM VM DNS suffixes.
+
+## v7.2.1
+
+- Fixed parsing of UTC times that are not RFC3339 conformant.
+
+## v7.2.0
+
+- autorest/validation: Reformat validation error for better error message.
+
+## v7.1.0
+
+- preparer: Added support for multipart formdata - WithMultiPartFormdata()
+- preparer: Added support for sending a file in the request body - WithFile
+- client: Added RetryDuration parameter.
+- autorest/validation: new package for validation code for the Azure Go SDK.
+
+## v7.0.7
+
+- Add trailing / to endpoint
+- azure: add EnvironmentFromName
+
+## v7.0.6
+
+- Add retry logic for 408, 500, 502, 503 and 504 status codes.
+- Change url path and query encoding logic.
+- Fix DelayForBackoff for proper exponential delay.
+- Add CookieJar in Client.
+
+## v7.0.5
+
+- Add check to start polling only when status is in [200,201,202].
+- Refactoring for unchecked errors.
+- azure/persist changes.
+- Fix 'file in use' issue in renewing token in deviceflow.
+- Store header RetryAfter for subsequent requests in polling.
+- Add attribute details in service error.
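+
+As a reference for the v7.3.0 `ByDiscardingBody` entry above, a minimal sketch of a respond chain that discards an unneeded body so the connection can be reused; `resp` is assumed to be an `*http.Response` obtained elsewhere:
+
+```go
+// Discarding (rather than just closing) the body lets Go's HTTP
+// library reuse the underlying connection.
+err = autorest.Respond(resp,
+	azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+	autorest.ByDiscardingBody(),
+	autorest.ByClosing())
+```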
+
+## v7.0.4
+
+- Better error messages for long-running operation failures
+
+## v7.0.3
+
+- Corrected DoPollForAsynchronous to properly handle the initial response
+
+## v7.0.2
+
+- Corrected DoPollForAsynchronous to continue using the polling method first discovered
+
+## v7.0.1
+
+- Fixed empty JSON input error in ByUnmarshallingJSON
+- Fixed polling support for GET calls
+- Changed format name from TimeRfc1123 to TimeRFC1123
+
+## v7.0.0
+
+- Added ByCopying responder with supporting TeeReadCloser
+- Rewrote Azure asynchronous handling
+- Reverted to only unmarshalling JSON
+- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format
+
+The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since
+`encoding/json` successfully deserializes all core types, and extended types normally provide
+their custom JSON serialization handlers, the code has been reverted back to using
+`json.Unmarshal`. The original change to use `json.Decoder` was made to reduce duplicate
+code; there is no loss of function, and there is a gain in accuracy, by reverting.
+
+Additionally, Azure services indicate requests to be polled by multiple means. The existing code
+only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header).
+The new code correctly covers all cases and aligns with the other Azure SDKs.
+
+## v6.1.0
+
+- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON-encoded values.
+
+## v6.0.0
+
+- Completely reworked the handling of polled and asynchronous requests
+- Removed unnecessary routines
+- Reworked `mocks.Sender` to replay a series of `http.Response` objects
+- Added `PrepareDecorators` for primitive types (e.g., bool, int32)
+
+Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new
+`SendDecorators` implement different styles of polled behavior. See `autorest.DoPollForStatusCodes`
+and `azure.DoPollForAsynchronous` for examples.
+
+## v5.0.0
+
+- Added new RespondDecorators unmarshalling primitive types
+- Corrected application of inspection and authorization PrependDecorators
+
+## v4.0.0
+
+- Added support for Azure long-running operations.
+- Added cancelation support to all decorators and functions that may delay.
+- Breaking: `DelayForBackoff` now accepts a channel, which may be nil.
+
+## v3.1.0
+
+- Add support for OAuth Device Flow authorization.
+- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material.
+- Add helpers for persisting and restoring Tokens.
+- Increased code coverage in the github.com/Azure/autorest/azure package
+
+## v3.0.0
+
+- Breaking: `NewErrorWithError` no longer takes `statusCode int`.
+- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`.
+- Breaking: `Client#Send()` no longer takes `codes ...int` argument.
+- Add: XML unmarshaling support with `ByUnmarshallingXML()`
+- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide).
+  Applications using this library should either use Glide or vendor dependencies locally some other way.
+- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors.
+- Fix: use `net/http.DefaultClient` as base client.
+- Fix: Missing inspection for polling responses added.
+- Add: CopyAndDecode helpers.
+- Improved `./autorest/to` with `[]string` helpers.
+- Removed golint suppressions in .travis.yml.
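+
+As a reference for the v6.0.0 note above about `SendDecorators`, a minimal sketch of composing retry-style decorators at send time; `req` is assumed to be a prepared `*http.Request`:
+
+```go
+// The decorators run in order: retry on failure, surface 500s as errors,
+// and close the response body when an error occurs.
+resp, err := autorest.Send(req,
+	autorest.DoRetryForAttempts(3, time.Second),
+	autorest.DoErrorIfStatusCode(http.StatusInternalServerError),
+	autorest.DoCloseIfError())
+```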
+
+## v2.1.0
+
+- Added `StatusCode` to `Error` for more easily obtaining the HTTP Response StatusCode (if any).
+
+## v2.0.0
+
+- Changed `to.StringMapPtr` method signature to return a pointer
+- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificates and private keys
+
+## v1.0.0
+
+- Added Logging inspectors to trace http.Request / Response
+- Added support for User-Agent header
+- Changed WithHeader PrepareDecorator to use set vs. add
+- Added JSON to error when unmarshalling fails
+- Added Client#Send method
+- Corrected case of "Azure" in package paths
+- Added "to" helpers, Azure helpers, and improved ease-of-use
+- Corrected golint issues
+
+## v1.0.1
+
+- Added CHANGELOG.md
+
+## v1.1.0
+
+- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT
+- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate
+
+## v1.1.1
+
+- Introduce godeps and vendor dependencies introduced in v1.1.1
diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile
new file mode 100644
index 00000000000..a434e73ac49
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/GNUmakefile
@@ -0,0 +1,23 @@
+DIR?=./autorest/
+
+default: build
+
+build: fmt
+	go install $(DIR)
+
+test:
+	go test $(DIR) || exit 1
+
+vet:
+	@echo "go vet ."
+	@go vet $(DIR)... ; if [ $$? -eq 1 ]; then \
+		echo ""; \
+		echo "Vet found suspicious constructs. Please check the reported constructs"; \
+		echo "and fix them if necessary before submitting the code for review."; \
+		exit 1; \
+	fi
+
+fmt:
+	gofmt -w $(DIR)
+
+.PHONY: build test vet fmt
diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock
new file mode 100644
index 00000000000..dc6e3e633e6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock
@@ -0,0 +1,324 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + digest = "1:892e39e5c083d0943f1e80ab8351690f183c6a5ab24e1d280adcad424c26255e" + name = "contrib.go.opencensus.io/exporter/ocagent" + packages = ["."] + pruneopts = "UT" + revision = "a8a6f458bbc1d5042322ad1f9b65eeb0b69be9ea" + version = "v0.6.0" + +[[projects]] + digest = "1:8f5acd4d4462b5136af644d25101f0968a7a94ee90fcb2059cec5b7cc42e0b20" + name = "github.com/census-instrumentation/opencensus-proto" + packages = [ + "gen-go/agent/common/v1", + "gen-go/agent/metrics/v1", + "gen-go/agent/trace/v1", + "gen-go/metrics/v1", + "gen-go/resource/v1", + "gen-go/trace/v1", + ] + pruneopts = "UT" + revision = "d89fa54de508111353cb0b06403c00569be780d8" + version = "v0.2.1" + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:76dc72490af7174349349838f2fe118996381b31ea83243812a97e5a0fd5ed55" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "UT" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + digest = "1:cf0d2e435fd4ce45b789e93ef24b5f08e86be0e9807a16beb3694e2d8c9af965" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "UT" + revision = "d2133a1ce379ef6fa992b0514a77146c60db9d1c" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8" + name = "github.com/golang/groupcache" + packages = ["lru"] + pruneopts = "UT" + revision = "611e8accdfc92c4187d399e95ce826046d4c8d73" + +[[projects]] + digest = "1:e3839df32927e8d3403cd5aa7253d966e8ff80fc8f10e2e35d146461cd83fcfa" + name = "github.com/golang/protobuf" + packages = [ + "descriptor", + "jsonpb", + "proto", + "protoc-gen-go/descriptor", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/struct", + "ptypes/timestamp", + "ptypes/wrappers", + ] + pruneopts = "UT" + revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7" + version = "v1.3.2" + +[[projects]] + digest = "1:c560cd79300fac84f124b96225181a637a70b60155919a3c36db50b7cca6b806" + name = "github.com/grpc-ecosystem/grpc-gateway" + packages = [ + "internal", + "runtime", + "utilities", + ] + pruneopts = "UT" + revision = "f7120437bb4f6c71f7f5076ad65a45310de2c009" + version = "v1.12.1" + +[[projects]] + digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "UT" + revision = "af06845cf3004701891bf4fdb884bfe4920b3727" + version = "v1.1.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:99d32780e5238c2621fff621123997c3e3cca96db8be13179013aea77dfab551" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "UT" + revision = "221dbe5ed46703ee255b1da0dec05086f5035f62" + version = "v1.4.0" + +[[projects]] + digest = "1:7c5e00383399fe13de0b4b65c9fdde16275407ce8ac02d867eafeaa916edcc71" + name = "go.opencensus.io" + packages = [ + ".", + "internal", + "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", + "plugin/ocgrpc", + "plugin/ochttp", + "plugin/ochttp/propagation/b3", + 
"plugin/ochttp/propagation/tracecontext", + "resource", + "stats", + "stats/internal", + "stats/view", + "tag", + "trace", + "trace/internal", + "trace/propagation", + "trace/tracestate", + ] + pruneopts = "UT" + revision = "aad2c527c5defcf89b5afab7f37274304195a6b2" + version = "v0.22.2" + +[[projects]] + branch = "master" + digest = "1:f604f5e2ee721b6757d962dfe7bab4f28aae50c456e39cfb2f3819762a44a6ae" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "UT" + revision = "e9b2fee46413994441b28dfca259d911d963dfed" + +[[projects]] + branch = "master" + digest = "1:334b27eac455cb6567ea28cd424230b07b1a64334a2f861a8075ac26ce10af43" + name = "golang.org/x/lint" + packages = [ + ".", + "golint", + ] + pruneopts = "UT" + revision = "fdd1cda4f05fd1fd86124f0ef9ce31a0b72c8448" + +[[projects]] + branch = "master" + digest = "1:257a75d024975428ab9192bfc334c3490882f8cb21322ea5784ca8eca000a910" + name = "golang.org/x/net" + packages = [ + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "1ddd1de85cb0337b623b740a609d35817d516a8d" + +[[projects]] + branch = "master" + digest = "1:382bb5a7fb4034db3b6a2d19e5a4a6bcf52f4750530603c01ca18a172fa3089b" + name = "golang.org/x/sync" + packages = ["semaphore"] + pruneopts = "UT" + revision = "cd5d95a43a6e21273425c7ae415d3df9ea832eeb" + +[[projects]] + branch = "master" + digest = "1:4da420ceda5f68e8d748aa2169d0ed44ffadb1bbd6537cf778a49563104189b8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "ce4227a45e2eb77e5c847278dcc6a626742e2945" + +[[projects]] + digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/language", + "internal/language/compact", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" + +[[projects]] + branch = "master" + digest = "1:4eb5ea8395fb60212dd58b92c9db80bab59d5e99c7435f9a6a0a528c373b60e7" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "go/gcexportdata", + "go/internal/gcimporter", + "go/types/typeutil", + ] + pruneopts = "UT" + revision = "259af5ff87bdcd4abf2ecda8edc3f13f04f26a42" + +[[projects]] + digest = "1:964bb30febc27fabfbec4759fa530c6ec35e77a7c85fed90b9317ea39a054877" + name = "google.golang.org/api" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "8a410c21381766a810817fd6200fce8838ecb277" + version = "v0.14.0" + +[[projects]] + branch = "master" + digest = "1:a8d5c2c6e746b3485e36908ab2a9e3d77b86b81f8156d88403c7d2b462431dfd" + name = "google.golang.org/genproto" + packages = [ + "googleapis/api/httpbody", + "googleapis/rpc/status", + "protobuf/field_mask", + ] + pruneopts = "UT" + revision = "51378566eb590fa106d1025ea12835a4416dda84" + +[[projects]] + digest = "1:b59ce3ddb11daeeccccc9cb3183b58ebf8e9a779f1c853308cd91612e817a301" + name = "google.golang.org/grpc" + packages = [ + ".", + "backoff", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/balancerload", 
+ "internal/binarylog", + "internal/buffer", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/resolver/dns", + "internal/resolver/passthrough", + "internal/syscall", + "internal/transport", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "serviceconfig", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "1a3960e4bd028ac0cec0a2afd27d7d8e67c11514" + version = "v1.25.1" + +[[projects]] + digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" + version = "v2.2.7" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "contrib.go.opencensus.io/exporter/ocagent", + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "go.opencensus.io/plugin/ochttp", + "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "go.opencensus.io/stats/view", + "go.opencensus.io/trace", + "golang.org/x/crypto/pkcs12", + "golang.org/x/lint/golint", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 00000000000..1fc28659696 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,59 @@ +# Gopkg.toml example +# +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + +required = ["golang.org/x/lint/golint"] + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + name = "contrib.go.opencensus.io/exporter/ocagent" + version = "0.6.0" + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.2.0" + +[[constraint]] + name = "github.com/dimchansky/utfbom" + version = "1.1.0" + +[[constraint]] + name = "github.com/mitchellh/go-homedir" + version = "1.1.0" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.3.0" + +[[constraint]] + name = "go.opencensus.io" + version = "0.22.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md
new file mode 100644
index 00000000000..de1e19a44df
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/README.md
@@ -0,0 +1,165 @@
+# go-autorest
+
+[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest)
+[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/Azure.go-autorest?branchName=master)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=625&branchName=master)
+[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest)
+
+Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages.
+
+An authentication client tested with Azure Active Directory (AAD) is also
+provided in this repo in the package
+`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package
+is maintained only as part of the Azure Go SDK and is not related to other
+"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD).
+
+## Overview
+
+Package go-autorest implements an HTTP request pipeline suitable for use across
+multiple goroutines and provides the shared routines used by packages generated
+by [Autorest](https://github.com/Azure/autorest.go).
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+```go
+  req, err := Prepare(&http.Request{},
+    token.WithAuthorization())
+
+  resp, err := Send(req,
+    WithLogging(logger),
+    DoErrorIfStatusCode(http.StatusInternalServerError),
+    DoCloseIfError(),
+    DoRetryForAttempts(5, time.Second))
+
+  err = Respond(resp,
+    ByDiscardingBody(),
+    ByClosing())
+```
+
+Each phase relies on decorators to modify and/or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. For
+example, the following:
+
+```go
+  req, err := Prepare(&http.Request{},
+    WithBaseURL("https://microsoft.com/"),
+    WithPath("a"),
+    WithPath("b"),
+    WithPath("c"))
+```
+
+will set the URL to:
+
+```
+  https://microsoft.com/a/b/c
+```
+
+Preparers and Responders may be shared and re-used (assuming the underlying decorators support
+sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders
+shared among multiple goroutines, and a single Sender shared among multiple sending goroutines,
+all bound together by means of input / output channels.
+
+Decorators hold their passed state within a closure (such as the path components in the example
+above). Be careful to share Preparers and Responders only in a context where such held state
+applies. For example, it may not make sense to share a Preparer that applies a query string from a
+fixed set of values. Similarly, sharing a Responder that reads the response body into a passed
+struct (e.g., `ByUnmarshallingJSON`) is likely incorrect.
+
+Errors raised by autorest objects and methods will conform to the `autorest.Error` interface.
+
+See the included examples for more detail. For details on the suggested use of this package by
+generated clients, see the Client described below.
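+
+As a reference, a minimal sketch of a custom PrepareDecorator in the style described above; the `WithCorrelationID` name and header are hypothetical, not part of this package:
+
+```go
+// WithCorrelationID first lets the inner Preparer run, then stamps a
+// header on the outgoing request.
+func WithCorrelationID(id string) autorest.PrepareDecorator {
+	return func(p autorest.Preparer) autorest.Preparer {
+		return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				r.Header.Set("X-Correlation-Id", id) // header name is illustrative
+			}
+			return r, err
+		})
+	}
+}
+```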
+
+## Helpers
+
+### Handling Swagger Dates
+
+The Swagger specification (https://swagger.io) that drives AutoRest
+(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The
+github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct
+parsing and formatting.
+
+### Handling Empty Values
+
+In JSON, missing values have different semantics than empty values. This is especially true for
+services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains
+only those values to modify. Missing values are to be left unchanged. Developers, then, require a
+means to both specify an empty value and to leave the value out of the submitted JSON.
+
+The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits
+empty values from the rendered JSON. Since Go defines default values for all base types (such as ""
+for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package
+treats default values as meaning empty, omitting them from the rendered JSON. This means that, using
+the Go base types encoded through the default JSON package, it is not possible to create JSON to
+clear a value at the server.
+
+The workaround within the Go community is to use pointers to base types in lieu of base types within
+structures that map to JSON. For example, instead of a value of type `string`, the workaround uses
+`*string`. While this enables distinguishing empty values from those to be unchanged, creating
+pointers to a base type (notably constant, in-line values) requires additional variables. This, for
+example,
+
+```go
+  s := struct {
+    S *string
+  }{ S: &"foo" }
+```
+fails, while this
+
+```go
+  v := "foo"
+  s := struct {
+    S *string
+  }{ S: &v }
+```
+succeeds.
+
+To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for
+Go base types which have Swagger analogs. It also provides a helper that converts between
+`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value
+associated with a key should be cleared. With the helpers, the previous example becomes
+
+```go
+  s := struct {
+    S *string
+  }{ S: to.StringPtr("foo") }
+```
+
+## Install
+
+```bash
+go get github.com/Azure/go-autorest/autorest
+go get github.com/Azure/go-autorest/autorest/azure
+go get github.com/Azure/go-autorest/autorest/date
+go get github.com/Azure/go-autorest/autorest/to
+```
+
+### Using with Go Modules
+In [v12.0.1](https://github.com/Azure/go-autorest/pull/386), this repository introduced the following modules.
+
+- autorest/adal
+- autorest/azure/auth
+- autorest/azure/cli
+- autorest/date
+- autorest/mocks
+- autorest/to
+- autorest/validation
+- autorest
+- logger
+- tracing
+
+Tagging cumulative SDK releases as a whole (e.g. `v12.3.0`) is still enabled to support consumers of this repo that have not yet migrated to modules.
+
+## License
+
+See LICENSE file.
+
+-----
+
+This project has adopted the [Microsoft Open Source Code of
+Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
+see the [Code of Conduct
+FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
+[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
+questions or comments.
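+
+As a footnote to the "Handling Empty Values" section above, a minimal runnable sketch showing how pointer fields distinguish "omit" from "clear"; the `widget` type is hypothetical:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/Azure/go-autorest/autorest/to"
+)
+
+type widget struct {
+	Name  *string `json:"name,omitempty"` // nil => omitted from the PATCH body
+	Color *string `json:"color"`          // nil => sent as null, clearing the value
+}
+
+func main() {
+	b, _ := json.Marshal(widget{Name: to.StringPtr("gear")})
+	fmt.Println(string(b)) // {"name":"gear","color":null}
+}
+```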
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 00000000000..b11eb07884b
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,294 @@
+# NOTE: This module will go out of support by March 31, 2023. For authenticating with Azure AD, use module [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) instead. For help migrating from `adal` to `azidentity` please consult the [migration guide](https://aka.ms/azsdk/go/identity/migration). General information about the retirement of this and other legacy modules can be found [here](https://azure.microsoft.com/updates/support-for-azure-sdk-libraries-that-do-not-conform-to-our-current-azure-sdk-guidelines-will-be-retired-as-of-31-march-2023/).
+
+# Azure Active Directory authentication for Go
+
+This is a standalone package for authenticating with Azure Active
+Directory from other Go libraries and applications, in particular the [Azure SDK
+for Go](https://github.com/Azure/azure-sdk-for-go).
+
+Note: Despite the package's name, it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callbacks := []adal.TokenRefreshCallback{
+	func(token adal.Token) error {
+		// This is called after the token is acquired
+		return nil
+	},
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace `TENANT_ID` with your tenant ID.
+* Replace `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+	*oauthConfig,
+	applicationID,
+	applicationSecret,
+	resource,
+	callbacks...)
+if err != nil {
+	return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token()
+}
+```
+
+* Replace `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from the pfx file
+certificate, rsaPrivateKey, err := adal.DecodePfxCertificateData(certData, "")
+if err != nil {
+	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	*oauthConfig,
+	applicationID,
+	certificate,
+	rsaPrivateKey,
+	resource,
+	callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token()
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+	oauthClient,
+	*oauthConfig,
+	applicationID,
+	resource)
+if err != nil {
+	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+	*oauthConfig,
+	applicationID,
+	resource,
+	*token,
+	callbacks...)
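+
+// The device-flow token can also be cached so later runs skip the prompt.
+// A minimal sketch using this package's SaveToken; the cache path below is
+// an arbitrary placeholder assumption, not part of the original example:
+//
+//	err = adal.SaveToken("/tmp/adal-token-cache.json", 0600, spt.Token())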
+
+if err == nil {
+	token := spt.Token()
+}
+```
+
+#### Username and password authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+	*oauthConfig,
+	applicationID,
+	username,
+	password,
+	resource,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token()
+}
+```
+
+#### Authorization code authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+	*oauthConfig,
+	applicationID,
+	clientSecret,
+	authorizationCode,
+	redirectURI,
+	resource,
+	callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token()
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pk12/PFX application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of OAuth token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 00000000000..fa5964742fc
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+const (
+	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+	AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
+	AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
+	TokenEndpoint      url.URL `json:"tokenEndpoint"`
+	DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+	return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+	if len(param) == 0 {
+		return fmt.Errorf("parameter '" + name + "' cannot be empty")
+	}
+	return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant-specific URLs
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+	apiVer := "1.0"
+	return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant-specific URLs.
+// If apiVersion is not nil, the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+		return nil, err
+	}
+	api := ""
+	// it's legal for tenantID to be empty so don't validate it
+	if apiVersion != nil {
+		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+			return nil, err
+		}
+		api = fmt.Sprintf("?api-version=%s", *apiVersion)
+	}
+	u, err := url.Parse(activeDirectoryEndpoint)
+	if err != nil {
+		return nil, err
+	}
+	authorityURL, err := u.Parse(tenantID)
+	if err != nil {
+		return nil, err
+	}
+	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+	if err != nil {
+		return nil, err
+	}
+	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+	if err != nil {
+		return nil, err
+	}
+	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+	if err != nil {
+		return nil, err
+	}
+
+	return &OAuthConfig{
+		AuthorityEndpoint:  *authorityURL,
+		AuthorizeEndpoint:  *authorizeURL,
+		TokenEndpoint:      *tokenURL,
+		DeviceCodeEndpoint: *deviceCodeURL,
+	}, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+	PrimaryTenant() *OAuthConfig
+	AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+	APIVersion string
+}
+
+func (c OAuthOptions) apiVersion() string {
+	if c.APIVersion != "" {
+		// return the raw value; it's formatted into the "?api-version=%s"
+		// query string downstream by NewOAuthConfigWithAPIVersion
+		return c.APIVersion
+	}
+	return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
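+//
+// A minimal usage sketch (illustrative; the endpoint and tenant IDs are
+// placeholder assumptions):
+//
+//	cfg, err := adal.NewMultiTenantOAuthConfig(
+//		"https://login.microsoftonline.com/",
+//		"PRIMARY_TENANT_ID",
+//		[]string{"AUX_TENANT_ID_1"},
+//		adal.OAuthOptions{},
+//	)
+//	if err == nil {
+//		_ = cfg.PrimaryTenant()
+//	}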
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 00000000000..9daa4b58b88 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,273 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+/*
+	This file is largely based on rjw57/oauth2device's code, with the following differences:
+	   * scope -> resource, and only allow a single one
+	   * receive "Message" in the DeviceCode struct and show it to users as the prompt
+	   * azure-xplat-cli has the following behavior that this emulates:
+	     - does not send client_secret during the token exchange
+	     - sends resource again in the token exchange request
+*/
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+	// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+	ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+	// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+	ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
+	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
+	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
+	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+	errStatusNotOK        = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint.
+// It contains information to instruct the user to complete the auth flow
+type DeviceCode struct {
+	DeviceCode      *string `json:"device_code,omitempty"`
+	UserCode        *string `json:"user_code,omitempty"`
+	VerificationURL *string `json:"verification_url,omitempty"`
+	ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+	Interval        *int64  `json:"interval,string,omitempty"`
+
+	Message *string `json:"message"` // Azure specific
+
+	// the following fields are stored when the flow is initiated and reused
+	// when exchanging the device code for a token
+	Resource    string
+	OAuthConfig OAuthConfig
+	ClientID    string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+	Error            *string `json:"error,omitempty"`
+	ErrorCodes       []int   `json:"error_codes,omitempty"`
+	ErrorDescription *string `json:"error_description,omitempty"`
+	Timestamp        *string `json:"timestamp,omitempty"`
+	TraceID          *string `json:"trace_id,omitempty"`
+}
+
+// deviceToken is the object returned by the token exchange endpoint.
+// It can either look like a Token or a TokenError, so put both here
+// and check for presence of "Error" to know if we are in error state
+type deviceToken struct {
+	Token
+	TokenError
+}
+
+// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+// Deprecated: use InitiateDeviceAuthWithContext() instead.
+func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+	return InitiateDeviceAuthWithContext(context.Background(), sender, oauthConfig, clientID, resource)
+}
+
+// InitiateDeviceAuthWithContext initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+func InitiateDeviceAuthWithContext(ctx context.Context, sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+	v := url.Values{
+		"client_id": []string{clientID},
+		"resource":  []string{resource},
+	}
+
+	s := v.Encode()
+	body := ioutil.NopCloser(strings.NewReader(s))
+
+	req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+	}
+
+	req.ContentLength = int64(len(s))
+	req.Header.Set(contentType, mimeTypeFormPost)
+	resp, err := sender.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+	}
+	defer resp.Body.Close()
+
+	rb, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
+	}
+
+	if len(strings.Trim(string(rb), " ")) == 0 {
+		return nil, ErrDeviceCodeEmpty
+	}
+
+	var code DeviceCode
+	err = json.Unmarshal(rb, &code)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+	}
+
+	code.ClientID = clientID
+	code.Resource = resource
+	code.OAuthConfig = oauthConfig
+
+	return &code, nil
+}
+
+// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed
+// Deprecated: use CheckForUserCompletionWithContext() instead.
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return CheckForUserCompletionWithContext(context.Background(), sender, code) +} + +// CheckForUserCompletionWithContext takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + // return a more meaningful error message if available + if token.ErrorDescription != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, *token.Error, *token.ErrorDescription) + } + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +// Deprecated: use WaitForUserCompletionWithContext() instead. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + return WaitForUserCompletionWithContext(context.Background(), sender, code) +} + +// WaitForUserCompletionWithContext calls CheckForUserCompletion repeatedly until a token is granted or an error +// state occurs. This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. 
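+//
+// A short usage sketch (illustrative; the timeout is an arbitrary assumption,
+// and http.DefaultClient satisfies the Sender interface):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+//	defer cancel()
+//	token, err := adal.WaitForUserCompletionWithContext(ctx, http.DefaultClient, deviceCode)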
+func WaitForUserCompletionWithContext(ctx context.Context, sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletionWithContext(ctx, sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + select { + case <-time.After(waitDuration): + // noop + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go new file mode 100644 index 00000000000..647a61bb8c9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go_mod_tidy_hack.go @@ -0,0 +1,25 @@ +//go:build modhack +// +build modhack + +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 00000000000..2a974a39b3c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,135 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "golang.org/x/crypto/pkcs12" +) + +var ( + // ErrMissingCertificate is returned when no local certificate is found in the provided PFX data. + ErrMissingCertificate = errors.New("adal: certificate missing") + + // ErrMissingPrivateKey is returned when no private key is found in the provided PFX data. 
+	ErrMissingPrivateKey = errors.New("adal: private key missing")
+)
+
+// LoadToken restores a Token object from a file located at 'path'.
+func LoadToken(path string) (*Token, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+	}
+	defer file.Close()
+
+	var token Token
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&token); err != nil {
+		return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+	}
+	return &token, nil
+}
+
+// SaveToken persists an OAuth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+	dir := filepath.Dir(path)
+	err := os.MkdirAll(dir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+	}
+
+	newFile, err := ioutil.TempFile(dir, "token")
+	if err != nil {
+		return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+	}
+	tempPath := newFile.Name()
+
+	if err := json.NewEncoder(newFile).Encode(token); err != nil {
+		return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+	}
+	if err := newFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+	}
+
+	// Atomic replace to avoid multi-writer file corruptions
+	if err := os.Rename(tempPath, path); err != nil {
+		return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+	}
+	if err := os.Chmod(path, mode); err != nil {
+		return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+	}
+	return nil
+}
+
+// DecodePfxCertificateData extracts the x509 certificate and RSA private key from the provided PFX data.
+// The PFX data must contain a private key along with a certificate whose public key matches that of the
+// private key; otherwise an error is returned.
+// If the private key is not password protected, pass the empty string for password.
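+//
+// A minimal usage sketch (illustrative; the .pfx path is a placeholder
+// assumption):
+//
+//	pfxData, err := ioutil.ReadFile("example-app.pfx")
+//	if err != nil {
+//		// handle the read error
+//	}
+//	cert, key, err := adal.DecodePfxCertificateData(pfxData, "")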
+func DecodePfxCertificateData(pfxData []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
+	blocks, err := pkcs12.ToPEM(pfxData, password)
+	if err != nil {
+		return nil, nil, err
+	}
+	// first extract the private key
+	var priv *rsa.PrivateKey
+	for _, block := range blocks {
+		if block.Type == "PRIVATE KEY" {
+			priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			break
+		}
+	}
+	if priv == nil {
+		return nil, nil, ErrMissingPrivateKey
+	}
+	// now find the certificate with the matching public key of our private key
+	var cert *x509.Certificate
+	for _, block := range blocks {
+		if block.Type == "CERTIFICATE" {
+			pcert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, nil, err
+			}
+			certKey, ok := pcert.PublicKey.(*rsa.PublicKey)
+			if !ok {
+				// keep looking
+				continue
+			}
+			if priv.E == certKey.E && priv.N.Cmp(certKey.N) == 0 {
+				// found a match
+				cert = pcert
+				break
+			}
+		}
+	}
+	if cert == nil {
+		return nil, nil, ErrMissingCertificate
+	}
+	return cert, priv, nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 00000000000..eb649bce9f7
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,101 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"crypto/tls"
+	"net"
+	"net/http"
+	"net/http/cookiejar"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+const (
+	contentType      = "Content-Type"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// DO NOT ACCESS THIS DIRECTLY. go through sender()
+var defaultSender Sender
+var defaultSenderInit = &sync.Once{}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(sender(), decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+func sender() Sender {
+	// note that we can't init defaultSender in init() since it will
+	// execute before calling code has had a chance to enable tracing
+	defaultSenderInit.Do(func() {
+		// copied from http.DefaultTransport with a TLS minimum version.
+		transport := &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: (&net.Dialer{
+				Timeout:   30 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).DialContext,
+			ForceAttemptHTTP2:     true,
+			MaxIdleConns:          100,
+			IdleConnTimeout:       90 * time.Second,
+			TLSHandshakeTimeout:   10 * time.Second,
+			ExpectContinueTimeout: 1 * time.Second,
+			TLSClientConfig: &tls.Config{
+				MinVersion: tls.VersionTLS12,
+			},
+		}
+		var roundTripper http.RoundTripper = transport
+		if tracing.IsEnabled() {
+			roundTripper = tracing.NewTransport(transport)
+		}
+		j, _ := cookiejar.New(nil)
+		defaultSender = &http.Client{Jar: j, Transport: roundTripper}
+	})
+	return defaultSender
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 00000000000..2a24ab80cf1
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1430 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/Azure/go-autorest/logger"
+	"github.com/golang-jwt/jwt/v4"
+)
+
+const (
+	defaultRefresh = 5 * time.Minute
+
+	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+	OAuthGrantTypeDeviceCode = "device_code"
+
+	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+	OAuthGrantTypeClientCredentials = "client_credentials"
+
+	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+	OAuthGrantTypeUserPass = "password"
+
+	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+	OAuthGrantTypeRefreshToken = "refresh_token"
+
+	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+	OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+	// metadataHeader is the header required by MSI extension
+	metadataHeader = "Metadata"
+
+	// msiEndpoint is the well known endpoint for getting MSI authentication tokens
+	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+	// the API version to use for the MSI endpoint
+	msiAPIVersion = "2018-02-01"
+
+	// the default number of attempts to refresh an MSI authentication token
+	defaultMaxMSIRefreshAttempts = 5
+
+	// msiEndpointEnv is the environment variable used to store the endpoint on App Service and Functions
+	msiEndpointEnv = "MSI_ENDPOINT"
+
+	// msiSecretEnv is the environment variable used to store the request secret on App Service and Functions
+	msiSecretEnv = "MSI_SECRET"
+
+	// the API version to use for the legacy App Service MSI endpoint
+	appServiceAPIVersion2017 = "2017-09-01"
+
+	// secret header used when authenticating against app service MSI endpoint
+	secretHeader = "Secret"
+
+	// the format for expires_on in UTC with AM/PM
+	expiresOnDateFormatPM = "1/2/2006 15:04:05 PM +00:00"
+
+	// the format for expires_on in UTC without AM/PM
+	expiresOnDateFormat = "1/2/2006 15:04:05 +00:00"
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+	OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+	PrimaryOAuthToken() string
+	AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
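+//
+// A short usage sketch (illustrative; spt stands in for any token type
+// created by this package):
+//
+//	if err := spt.Refresh(); err != nil {
+//		if tre, ok := err.(adal.TokenRefreshError); ok && tre.Response() != nil {
+//			// inspect tre.Response().StatusCode to decide whether to retry
+//		}
+//	}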
+type TokenRefreshError interface {
+	error
+	Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+	Refresh() error
+	RefreshExchange(resource string) error
+	EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+	RefreshWithContext(ctx context.Context) error
+	RefreshExchangeWithContext(ctx context.Context, resource string) error
+	EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// TokenRefresh is a type representing a custom callback to refresh a token
+type TokenRefresh func(ctx context.Context, resource string) (*Token, error)
+
+// JWTCallback is the type representing the callback that will be called to get the federated OIDC JWT
+type JWTCallback func() (string, error)
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	ExpiresIn json.Number `json:"expires_in"`
+	ExpiresOn json.Number `json:"expires_on"`
+	NotBefore json.Number `json:"not_before"`
+
+	Resource string `json:"resource"`
+	Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+	return Token{
+		ExpiresIn: "0",
+		ExpiresOn: "0",
+		NotBefore: "0",
+	}
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+	return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+	s, err := t.ExpiresOn.Float64()
+	if err != nil {
+		s = -3600
+	}
+
+	expiration := date.NewUnixTimeFromSeconds(s)
+
+	return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+	return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+	return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token
+func (t *Token) OAuthToken() string {
+	return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an OAuth token.
+type ServicePrincipalSecret interface {
+	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. It is used by tokens
+// created manually, e.g. via NewServicePrincipalTokenFromManualToken.
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// For ServicePrincipalNoSecret it always returns an error, as there is no secret material.
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(24 * time.Hour).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { + msiType msiType + clientResourceID string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalFederatedSecret implements ServicePrincipalSecret for Federated JWTs. +type ServicePrincipalFederatedSecret struct { + jwtCallback JWTCallback +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during OAuth Token Acquisition using a JWT signed by an OIDC issuer. +func (secret *ServicePrincipalFederatedSecret) SetAuthenticationValues(_ *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.jwtCallback() + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalFederatedSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalFederatedSecret is not supported") +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. 
+type ServicePrincipalToken struct {
+	inner             servicePrincipalToken
+	refreshLock       *sync.RWMutex
+	sender            Sender
+	customRefreshFunc TokenRefresh
+	refreshCallbacks  []TokenRefreshCallback
+	// MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token.
+	// Setting this to a value less than 1 will use the default value.
+	MaxMSIRefreshAttempts int
+}
+
+// MarshalTokenJSON returns the marshalled inner token.
+func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) {
+	return json.Marshal(spt.inner.Token)
+}
+
+// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks.
+func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) {
+	spt.refreshCallbacks = callbacks
+}
+
+// SetCustomRefreshFunc sets a custom refresh function used to refresh the token.
+func (spt *ServicePrincipalToken) SetCustomRefreshFunc(customRefreshFunc TokenRefresh) {
+	spt.customRefreshFunc = customRefreshFunc
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) {
+	return json.Marshal(spt.inner)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+	// need to determine the token type
+	raw := map[string]interface{}{}
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+	secret := raw["secret"].(map[string]interface{})
+	switch secret["type"] {
+	case "ServicePrincipalNoSecret":
+		spt.inner.Secret = &ServicePrincipalNoSecret{}
+	case "ServicePrincipalTokenSecret":
+		spt.inner.Secret = &ServicePrincipalTokenSecret{}
+	case "ServicePrincipalCertificateSecret":
+		return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+	case "ServicePrincipalMSISecret":
+		return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+	case "ServicePrincipalUsernamePasswordSecret":
+		spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+	case "ServicePrincipalAuthorizationCodeSecret":
+		spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+	case "ServicePrincipalFederatedSecret":
+		return errors.New("unmarshalling ServicePrincipalFederatedSecret is not supported")
+	default:
+		return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+	}
+	err = json.Unmarshal(data, &spt.inner)
+	if err != nil {
+		return err
+	}
+	// Don't override the refreshLock or the sender if those have already been set.
+	if spt.refreshLock == nil {
+		spt.refreshLock = &sync.RWMutex{}
+	}
+	if spt.sender == nil {
+		spt.sender = sender()
+	}
+	return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+	Token         Token                  `json:"token"`
+	Secret        ServicePrincipalSecret `json:"secret"`
+	OauthConfig   OAuthConfig            `json:"oauth"`
+	ClientID      string                 `json:"clientID"`
+	Resource      string                 `json:"resource"`
+	AutoRefresh   bool                   `json:"autoRefresh"`
+	RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+	if oac.IsZero() {
+		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+	}
+	return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
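+//
+// A minimal usage sketch (illustrative; oauthConfig is assumed to come from
+// adal.NewOAuthConfig, and the IDs are placeholders):
+//
+//	spt, err := adal.NewServicePrincipalTokenWithSecret(
+//		*oauthConfig,
+//		"CLIENT_ID",
+//		"https://management.core.windows.net/",
+//		&adal.ServicePrincipalTokenSecret{ClientSecret: "CLIENT_SECRET"},
+//	)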
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(secret, "secret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalTokenSecret{
+			ClientSecret: secret,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and
+// RSA private key, e.g. as extracted from PKCS#12/PFX data by DecodePfxCertificateData.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if certificate == nil {
+		return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+	}
+	if privateKey == nil {
+		return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalCertificateSecret{
+			PrivateKey:  privateKey,
+			Certificate: certificate,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromFederatedToken creates a ServicePrincipalToken from the supplied federated OIDC JWT.
+//
+// Deprecated: use NewServicePrincipalTokenFromFederatedTokenCallback to refresh the JWT dynamically.
+func NewServicePrincipalTokenFromFederatedToken(oauthConfig OAuthConfig, clientID string, jwt string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if jwt == "" {
+		return nil, fmt.Errorf("parameter 'jwt' cannot be empty")
+	}
+	return NewServicePrincipalTokenFromFederatedTokenCallback(
+		oauthConfig,
+		clientID,
+		func() (string, error) {
+			return jwt, nil
+		},
+		resource,
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromFederatedTokenCallback creates a ServicePrincipalToken from the supplied federated OIDC JWTCallback.
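+//
+// A minimal sketch of a JWTCallback that re-reads a federated token file on
+// each refresh (illustrative; the path is a placeholder assumption, such as
+// a projected service account token):
+//
+//	jwtCallback := func() (string, error) {
+//		b, err := ioutil.ReadFile("/path/to/federated-token")
+//		return string(b), err
+//	}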
+func NewServicePrincipalTokenFromFederatedTokenCallback(oauthConfig OAuthConfig, clientID string, jwtCallback JWTCallback, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if jwtCallback == nil {
+		return nil, fmt.Errorf("parameter 'jwtCallback' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalFederatedSecret{
+			jwtCallback: jwtCallback,
+		},
+		callbacks...,
+	)
+}
+
+type msiType int
+
+const (
+	msiTypeUnavailable msiType = iota
+	msiTypeAppServiceV20170901
+	msiTypeCloudShell
+	msiTypeIMDS
+)
+
+func (m msiType) String() string {
+	switch m {
+	case msiTypeAppServiceV20170901:
+		return "AppServiceV20170901"
+	case msiTypeCloudShell:
+		return "CloudShell"
+	case msiTypeIMDS:
+		return "IMDS"
+	default:
+		return fmt.Sprintf("unhandled MSI type %d", m)
+	}
+}
+
+// getMSIType returns the MSI type and endpoint, or an error.
+func getMSIType() (msiType, string, error) {
+	if endpointEnvVar := os.Getenv(msiEndpointEnv); endpointEnvVar != "" {
+		// if the env var MSI_ENDPOINT is set
+		if secretEnvVar := os.Getenv(msiSecretEnv); secretEnvVar != "" {
+			// if BOTH the env vars MSI_ENDPOINT and MSI_SECRET are set the msiType is AppService
+			return msiTypeAppServiceV20170901, endpointEnvVar, nil
+		}
+		// if ONLY the env var MSI_ENDPOINT is set the msiType is CloudShell
+		return msiTypeCloudShell, endpointEnvVar, nil
+	}
+	// if MSI_ENDPOINT is NOT set assume the msiType is IMDS
+	return msiTypeIMDS, msiEndpoint, nil
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+// NOTE: this always returns the IMDS endpoint; it does not work for App Service or Cloud Shell.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIVMEndpoint() (string, error) {
+	return msiEndpoint, nil
+}
+
+// GetMSIAppServiceEndpoint gets the MSI endpoint for App Service and Functions.
+// It will return an error when not running in an App Service/Functions environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIAppServiceEndpoint() (string, error) {
+	msiType, endpoint, err := getMSIType()
+	if err != nil {
+		return "", err
+	}
+	switch msiType {
+	case msiTypeAppServiceV20170901:
+		return endpoint, nil
+	default:
+		return "", fmt.Errorf("%s is not an App Service environment", msiType)
+	}
+}
+
+// GetMSIEndpoint gets the appropriate MSI endpoint depending on the runtime environment.
+// Deprecated: NewServicePrincipalTokenFromMSI() and variants will automatically detect the endpoint.
+func GetMSIEndpoint() (string, error) {
+	_, endpoint, err := getMSIType()
+	return endpoint, err
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+// msiEndpoint - empty string, or pass a non-empty string to override the default value.
+// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead.
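+// A migration sketch to the replacement constructor, using the system-assigned
+// identity and automatic endpoint detection:
+//
+//	spt, err := NewServicePrincipalTokenFromManagedIdentity(resource, nil)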
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", "", callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the clientID of specified user assigned identity when creating the token. +// msiEndpoint - empty string, or pass a non-empty string to override the default value. +// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(userAssignedID, "userAssignedID"); err != nil { + return nil, err + } + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, "", callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithIdentityResourceID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the azure resource id of user assigned identity when creating the token. +// msiEndpoint - empty string, or pass a non-empty string to override the default value. +// Deprecated: use NewServicePrincipalTokenFromManagedIdentity() instead. +func NewServicePrincipalTokenFromMSIWithIdentityResourceID(msiEndpoint, resource string, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(identityResourceID, "identityResourceID"); err != nil { + return nil, err + } + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, "", identityResourceID, callbacks...) +} + +// ManagedIdentityOptions contains optional values for configuring managed identity authentication. +type ManagedIdentityOptions struct { + // ClientID is the user-assigned identity to use during authentication. + // It is mutually exclusive with IdentityResourceID. + ClientID string + + // IdentityResourceID is the resource ID of the user-assigned identity to use during authentication. + // It is mutually exclusive with ClientID. + IdentityResourceID string +} + +// NewServicePrincipalTokenFromManagedIdentity creates a ServicePrincipalToken using a managed identity. +// It supports the following managed identity environments. +// - App Service Environment (API version 2017-09-01 only) +// - Cloud shell +// - IMDS with a system or user assigned identity +func NewServicePrincipalTokenFromManagedIdentity(resource string, options *ManagedIdentityOptions, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if options == nil { + options = &ManagedIdentityOptions{} + } + return newServicePrincipalTokenFromMSI("", resource, options.ClientID, options.IdentityResourceID, callbacks...) 
+} + +func newServicePrincipalTokenFromMSI(msiEndpoint, resource, userAssignedID, identityResourceID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if userAssignedID != "" && identityResourceID != "" { + return nil, errors.New("cannot specify userAssignedID and identityResourceID") + } + msiType, endpoint, err := getMSIType() + if err != nil { + logger.Instance.Writef(logger.LogError, "Error determining managed identity environment: %v\n", err) + return nil, err + } + logger.Instance.Writef(logger.LogInfo, "Managed identity environment is %s, endpoint is %s\n", msiType, endpoint) + if msiEndpoint != "" { + endpoint = msiEndpoint + logger.Instance.Writef(logger.LogInfo, "Managed identity custom endpoint is %s\n", endpoint) + } + msiEndpointURL, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + // cloud shell sends its data in the request body + if msiType != msiTypeCloudShell { + v := url.Values{} + v.Set("resource", resource) + clientIDParam := "client_id" + switch msiType { + case msiTypeAppServiceV20170901: + clientIDParam = "clientid" + v.Set("api-version", appServiceAPIVersion2017) + break + case msiTypeIMDS: + v.Set("api-version", msiAPIVersion) + } + if userAssignedID != "" { + v.Set(clientIDParam, userAssignedID) + } else if identityResourceID != "" { + v.Set("mi_res_id", identityResourceID) + } + msiEndpointURL.RawQuery = v.Encode() + } + + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: OAuthConfig{ + TokenEndpoint: *msiEndpointURL, + }, + Secret: &ServicePrincipalMSISecret{ + msiType: msiType, + clientResourceID: identityResourceID, + }, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + ClientID: userAssignedID, + }, + refreshLock: &sync.RWMutex{}, + sender: sender(), + refreshCallbacks: callbacks, + MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts, + } + + return spt, nil +} + +// internal type that implements TokenRefreshError +type tokenRefreshError struct { + message string + resp *http.Response +} + +// Error implements the error interface which is part of the TokenRefreshError interface. +func (tre tokenRefreshError) Error() string { + return tre.message +} + +// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation. +func (tre tokenRefreshError) Response() *http.Response { + return tre.resp +} + +func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError { + return tokenRefreshError{message: message, resp: resp} +} + +// EnsureFresh will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (spt *ServicePrincipalToken) EnsureFresh() error { + return spt.EnsureFreshWithContext(context.Background()) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. 
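+// A typical call-site sketch, run before each use of the token so that a
+// near-expiry token is renewed transparently:
+//
+//	if err := spt.EnsureFreshWithContext(ctx); err != nil {
+//		// handle the refresh failure
+//	}
+//	accessToken := spt.OAuthToken()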
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + // must take the read lock when initially checking the token's expiration + if spt.inner.AutoRefresh && spt.Token().WillExpireIn(spt.inner.RefreshWithin) { + // take the write lock then check again to see if the token was already refreshed + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) { + return spt.refreshInternal(ctx, spt.inner.Resource) + } + } + return nil +} + +// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization +func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error { + if spt.refreshCallbacks != nil { + for _, callback := range spt.refreshCallbacks { + err := callback(spt.inner.Token) + if err != nil { + return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err) + } + } + } + return nil +} + +// Refresh obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) Refresh() error { + return spt.RefreshWithContext(context.Background()) +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, spt.inner.Resource) +} + +// RefreshExchange refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { + return spt.RefreshExchangeWithContext(context.Background(), resource) +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. +// This method is safe for concurrent use. +func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + spt.refreshLock.Lock() + defer spt.refreshLock.Unlock() + return spt.refreshInternal(ctx, resource) +} + +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.inner.Secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + +func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error { + if spt.customRefreshFunc != nil { + token, err := spt.customRefreshFunc(ctx, resource) + if err != nil { + return err + } + spt.inner.Token = *token + return spt.InvokeRefreshCallbacks(spt.inner.Token) + } + req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil) + if err != nil { + return fmt.Errorf("adal: Failed to build the refresh request. 
Error = '%v'", err) + } + req.Header.Add("User-Agent", UserAgent()) + req = req.WithContext(ctx) + var resp *http.Response + authBodyFilter := func(b []byte) []byte { + if logger.Level() != logger.LogAuth { + return []byte("**REDACTED** authentication body") + } + return b + } + if msiSecret, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok { + switch msiSecret.msiType { + case msiTypeAppServiceV20170901: + req.Method = http.MethodGet + req.Header.Set("secret", os.Getenv(msiSecretEnv)) + break + case msiTypeCloudShell: + req.Header.Set("Metadata", "true") + data := url.Values{} + data.Set("resource", spt.inner.Resource) + if spt.inner.ClientID != "" { + data.Set("client_id", spt.inner.ClientID) + } else if msiSecret.clientResourceID != "" { + data.Set("msi_res_id", msiSecret.clientResourceID) + } + req.Body = ioutil.NopCloser(strings.NewReader(data.Encode())) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + break + case msiTypeIMDS: + req.Method = http.MethodGet + req.Header.Set("Metadata", "true") + break + } + logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) + resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts) + } else { + v := url.Values{} + v.Set("client_id", spt.inner.ClientID) + v.Set("resource", resource) + + if spt.inner.Token.RefreshToken != "" { + v.Set("grant_type", OAuthGrantTypeRefreshToken) + v.Set("refresh_token", spt.inner.Token.RefreshToken) + // web apps must specify client_secret when refreshing tokens + // see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens + if spt.getGrantType() == OAuthGrantTypeAuthorizationCode { + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + } else { + v.Set("grant_type", spt.getGrantType()) + err := spt.inner.Secret.SetAuthenticationValues(spt, &v) + if err != nil { + return err + } + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + req.Body = body + logger.Instance.WriteRequest(req, logger.Filter{Body: authBodyFilter}) + resp, err = spt.sender.Do(req) + } + + // don't return a TokenRefreshError here; this will allow retry logic to apply + if err != nil { + return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) + } else if resp == nil { + return fmt.Errorf("adal: received nil response and error") + } + + logger.Instance.WriteResponse(resp, logger.Filter{Body: authBodyFilter}) + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v Endpoint %s", resp.StatusCode, err, req.URL.String()), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s Endpoint %s", resp.StatusCode, string(rb), req.URL.String()), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. 
Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + token := struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + // AAD returns expires_in as a string, ADFS returns it as an int + ExpiresIn json.Number `json:"expires_in"` + // expires_on can be in three formats, a UTC time stamp, or the number of seconds as a string *or* int. + ExpiresOn interface{} `json:"expires_on"` + NotBefore json.Number `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` + }{} + // return a TokenRefreshError in the follow error cases as the token is in an unexpected format + err = json.Unmarshal(rb, &token) + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)), resp) + } + expiresOn := json.Number("") + // ADFS doesn't include the expires_on field + if token.ExpiresOn != nil { + if expiresOn, err = parseExpiresOn(token.ExpiresOn); err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: failed to parse expires_on: %v value '%s'", err, token.ExpiresOn), resp) + } + } + spt.inner.Token.AccessToken = token.AccessToken + spt.inner.Token.RefreshToken = token.RefreshToken + spt.inner.Token.ExpiresIn = token.ExpiresIn + spt.inner.Token.ExpiresOn = expiresOn + spt.inner.Token.NotBefore = token.NotBefore + spt.inner.Token.Resource = token.Resource + spt.inner.Token.Type = token.Type + + return spt.InvokeRefreshCallbacks(spt.inner.Token) +} + +// converts expires_on to the number of seconds +func parseExpiresOn(s interface{}) (json.Number, error) { + // the JSON unmarshaler treats JSON numbers unmarshaled into an interface{} as float64 + asFloat64, ok := s.(float64) + if ok { + // this is the number of seconds as int case + return json.Number(strconv.FormatInt(int64(asFloat64), 10)), nil + } + asStr, ok := s.(string) + if !ok { + return "", fmt.Errorf("unexpected expires_on type %T", s) + } + // convert the expiration date to the number of seconds from the unix epoch + timeToDuration := func(t time.Time) json.Number { + return json.Number(strconv.FormatInt(t.UTC().Unix(), 10)) + } + if _, err := json.Number(asStr).Int64(); err == nil { + // this is the number of seconds case, no conversion required + return json.Number(asStr), nil + } else if eo, err := time.Parse(expiresOnDateFormatPM, asStr); err == nil { + return timeToDuration(eo), nil + } else if eo, err := time.Parse(expiresOnDateFormat, asStr); err == nil { + return timeToDuration(eo), nil + } else { + // unknown format + return json.Number(""), err + } +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + 
http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + // maxAttempts is user-specified, ensure that its value is greater than zero else no request will be made + if maxAttempts < 1 { + maxAttempts = defaultMaxMSIRefreshAttempts + } + + for attempt < maxAttempts { + if resp != nil && resp.Body != nil { + io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } + resp, err = sender.Do(req) + // we want to retry if err is not nil or the status code is in the list of retry codes + if err == nil && !responseHasStatusCode(resp, retries...) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +func responseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp != nil { + for _, i := range codes { + if i == resp.StatusCode { + return true + } + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} + +// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization. +type MultiTenantServicePrincipalToken struct { + PrimaryToken *ServicePrincipalToken + AuxiliaryTokens []*ServicePrincipalToken +} + +// PrimaryOAuthToken returns the primary authorization token. +func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string { + return mt.PrimaryToken.OAuthToken() +} + +// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens. +func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string { + tokens := make([]string, len(mt.AuxiliaryTokens)) + for i := range mt.AuxiliaryTokens { + tokens[i] = mt.AuxiliaryTokens[i].OAuthToken() + } + return tokens +} + +// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource. 
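+// A usage sketch, assuming a MultiTenantOAuthConfig built elsewhere in the
+// package's configuration code:
+//
+//	mt, err := NewMultiTenantServicePrincipalToken(cfg, clientID, secret, resource)
+//	if err != nil {
+//		// handle the error
+//	}
+//	primary := mt.PrimaryOAuthToken()      // token for the primary tenant
+//	auxiliary := mt.AuxiliaryOAuthTokens() // tokens for the auxiliary tenants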
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// NewMultiTenantServicePrincipalTokenFromCertificate creates a new MultiTenantServicePrincipalToken with the specified certificate credentials and resource. +func NewMultiTenantServicePrincipalTokenFromCertificate(multiTenantCfg MultiTenantOAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalTokenWithSecret( + *multiTenantCfg.PrimaryTenant(), + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalTokenWithSecret( + *auxTenants[i], + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + ) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} + +// MSIAvailable returns true if the MSI endpoint is available for authentication. 
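+// A probe sketch; a nil Sender falls back to the package default, and the
+// underlying IMDS probe is internally capped at two seconds:
+//
+//	if MSIAvailable(context.Background(), nil) {
+//		// a managed identity endpoint is reachable on this host
+//	}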
+func MSIAvailable(ctx context.Context, s Sender) bool { + msiType, _, err := getMSIType() + + if err != nil { + return false + } + + if msiType != msiTypeIMDS { + return true + } + + if s == nil { + s = sender() + } + + resp, err := getMSIEndpoint(ctx, s) + + if err == nil { + resp.Body.Close() + } + + return err == nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go new file mode 100644 index 00000000000..89190a4213c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_1.13.go @@ -0,0 +1,76 @@ +//go:build go1.13 +// +build go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "fmt" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + // http.NewRequestWithContext() was added in Go 1.13 + req, _ := http.NewRequestWithContext(tempCtx, http.MethodGet, msiEndpoint, nil) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
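+// For example, exchanging every tenant's token for a different audience
+// (the resource URI below is illustrative):
+//
+//	err := mt.RefreshExchangeWithContext(ctx, "https://vault.azure.net")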
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh primary token: %w", err) + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return fmt.Errorf("failed to refresh auxiliary token: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go new file mode 100644 index 00000000000..27ec4efad7c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_legacy.go @@ -0,0 +1,75 @@ +//go:build !go1.13 +// +build !go1.13 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adal + +import ( + "context" + "net/http" + "time" +) + +func getMSIEndpoint(ctx context.Context, sender Sender) (*http.Response, error) { + tempCtx, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + req, _ := http.NewRequest(http.MethodGet, msiEndpoint, nil) + req = req.WithContext(tempCtx) + q := req.URL.Query() + q.Add("api-version", msiAPIVersion) + req.URL.RawQuery = q.Encode() + return sender.Do(req) +} + +// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by +// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use. +func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.EnsureFreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshWithContext obtains a fresh token for the Service Principal. +func (mt *MultiTenantServicePrincipalToken) RefreshWithContext(ctx context.Context) error { + if err := mt.PrimaryToken.RefreshWithContext(ctx); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshWithContext(ctx); err != nil { + return err + } + } + return nil +} + +// RefreshExchangeWithContext refreshes the token, but for a different resource. 
+func (mt *MultiTenantServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error { + if err := mt.PrimaryToken.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + for _, aux := range mt.AuxiliaryTokens { + if err := aux.RefreshExchangeWithContext(ctx, resource); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 00000000000..c867b348439 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 00000000000..c4571065685 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. 
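+// For example (a sketch):
+//
+//	d, err := ParseDate("2001-02-03")
+//	// on success, d.String() == "2001-02-03"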
+func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go new file mode 100644 index 00000000000..4e054320717 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. 
+// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 00000000000..b453fad0491 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000000..48fb39ba9b9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. 
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 00000000000..7073959b2a9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. 
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+	var unmarshaled time.Time
+
+	if err := unmarshaled.UnmarshalText(raw); err != nil {
+		return err
+	}
+
+	*t = UnixTime(unmarshaled)
+	return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+	buf := &bytes.Buffer{}
+
+	payload := int64(t.Duration())
+
+	if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+	var nanosecondsSinceEpoch int64
+
+	if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+		return err
+	}
+	*t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 00000000000..12addf0ebb4
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"strings"
+	"time"
+)
+
+// ParseTime parses a time string in the specified format; the input is upper-cased before parsing.
+func ParseTime(format string, t string) (d time.Time, err error) {
+	return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/azure-pipelines.yml b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml
new file mode 100644
index 00000000000..6fb8404fd01
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/azure-pipelines.yml
@@ -0,0 +1,105 @@
+variables:
+  GOPATH: '$(system.defaultWorkingDirectory)/work'
+  sdkPath: '$(GOPATH)/src/github.com/$(build.repository.name)'
+
+jobs:
+  - job: 'goautorest'
+    displayName: 'Run go-autorest CI Checks'
+
+    strategy:
+      matrix:
+        Linux_Go113:
+          vm.image: 'ubuntu-18.04'
+          go.version: '1.13'
+        Linux_Go114:
+          vm.image: 'ubuntu-18.04'
+          go.version: '1.14'
+
+    pool:
+      vmImage: '$(vm.image)'
+
+    steps:
+      - task: GoTool@0
+        inputs:
+          version: '$(go.version)'
+        displayName: "Select Go Version"
+
+      - script: |
+          set -e
+          mkdir -p '$(GOPATH)/bin'
+          mkdir -p '$(sdkPath)'
+          shopt -s extglob
+          mv !(work) '$(sdkPath)'
+          echo '##vso[task.prependpath]$(GOPATH)/bin'
+        displayName: 'Create Go Workspace'
+
+      - script: |
+          set -e
+          curl -sSL https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+          dep ensure -v
+          go install ./vendor/golang.org/x/lint/golint
+          go get github.com/jstemmer/go-junit-report
+          go get github.com/axw/gocov/gocov
+          go get github.com/AlekSi/gocov-xml
+          go get -u github.com/matm/gocov-html
+        workingDirectory: '$(sdkPath)'
+        displayName: 'Install Dependencies'
+
+      - script: |
+          go vet ./autorest/...
+          go vet ./logger/...
+          go vet ./tracing/...
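+          # autorest, logger, and tracing are separate module trees in this repo, so each is vetted explicitly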
+ workingDirectory: '$(sdkPath)' + displayName: 'Vet' + + - script: | + go build -v ./autorest/... + go build -v ./logger/... + go build -v ./tracing/... + workingDirectory: '$(sdkPath)' + displayName: 'Build' + + - script: | + set -e + go test -race -v -coverprofile=coverage.txt -covermode atomic ./autorest/... ./logger/... ./tracing/... 2>&1 | go-junit-report > report.xml + gocov convert coverage.txt > coverage.json + gocov-xml < coverage.json > coverage.xml + gocov-html < coverage.json > coverage.html + workingDirectory: '$(sdkPath)' + displayName: 'Run Tests' + + - script: grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Copyright Header Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + gofmt -s -l -w ./autorest/. >&2 + gofmt -s -l -w ./logger/. >&2 + gofmt -s -l -w ./tracing/. >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Format Check' + failOnStderr: true + condition: succeededOrFailed() + + - script: | + golint ./autorest/... >&2 + golint ./logger/... >&2 + golint ./tracing/... >&2 + workingDirectory: '$(sdkPath)' + displayName: 'Linter Check' + failOnStderr: true + condition: succeededOrFailed() + + - task: PublishTestResults@2 + inputs: + testRunner: JUnit + testResultsFiles: $(sdkPath)/report.xml + failTaskOnFailedTests: true + + - task: PublishCodeCoverageResults@1 + inputs: + codeCoverageTool: Cobertura + summaryFileLocation: $(sdkPath)/coverage.xml + additionalCodeCoverageFiles: $(sdkPath)/coverage.html diff --git a/vendor/github.com/Azure/go-autorest/doc.go b/vendor/github.com/Azure/go-autorest/doc.go new file mode 100644 index 00000000000..99ae6ca988a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/doc.go @@ -0,0 +1,18 @@ +/* +Package go-autorest provides an HTTP request client for use with Autorest-generated API client packages. +*/ +package go_autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/LICENSE b/vendor/github.com/Azure/go-autorest/logger/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go new file mode 100644 index 00000000000..0aa27680db9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 00000000000..2f5d8cc1a19 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,337 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug + + // LogAuth is a special case of LogDebug, it tells a logger to also log the body of an authentication request and response. + // NOTE: this can disclose sensitive information, use with care. + LogAuth +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logAuth = "AUTH" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. 
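+// Matching is case-insensitive, so e.g. "info" and "INFO" both map to LogInfo;
+// unrecognized values yield an error.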
+func ParseLevel(s string) (lt LevelType, err error) {
+	switch strings.ToUpper(s) {
+	case logFatal:
+		lt = LogFatal
+	case logPanic:
+		lt = LogPanic
+	case logError:
+		lt = LogError
+	case logWarning:
+		lt = LogWarning
+	case logInfo:
+		lt = LogInfo
+	case logDebug:
+		lt = LogDebug
+	case logAuth:
+		lt = LogAuth
+	default:
+		err = fmt.Errorf("bad log level '%s'", s)
+	}
+	return
+}
+
+// String implements the stringer interface for LevelType.
+func (lt LevelType) String() string {
+	switch lt {
+	case LogNone:
+		return logNone
+	case LogFatal:
+		return logFatal
+	case LogPanic:
+		return logPanic
+	case LogError:
+		return logError
+	case LogWarning:
+		return logWarning
+	case LogInfo:
+		return logInfo
+	case LogDebug:
+		return logDebug
+	case LogAuth:
+		return logAuth
+	default:
+		return logUnknown
+	}
+}
+
+// Filter defines functions for filtering HTTP request/response content.
+type Filter struct {
+	// URL returns a potentially modified string representation of a request URL.
+	URL func(u *url.URL) string
+
+	// Header returns a potentially modified set of values for the specified key.
+	// To completely exclude the header key/values return false.
+	Header func(key string, val []string) (bool, []string)
+
+	// Body returns a potentially modified request/response body.
+	Body func(b []byte) []byte
+}
+
+func (f Filter) processURL(u *url.URL) string {
+	if f.URL == nil {
+		return u.String()
+	}
+	return f.URL(u)
+}
+
+func (f Filter) processHeader(k string, val []string) (bool, []string) {
+	if f.Header == nil {
+		return true, val
+	}
+	return f.Header(k, val)
+}
+
+func (f Filter) processBody(b []byte) []byte {
+	if f.Body == nil {
+		return b
+	}
+	return f.Body(b)
+}
+
+// Writer defines methods for writing to a logging facility.
+type Writer interface {
+	// Writeln writes the specified message with the standard log entry header and new-line character.
+	Writeln(level LevelType, message string)
+
+	// Writef writes the specified format specifier with the standard log entry header and no new-line character.
+	Writef(level LevelType, format string, a ...interface{})
+
+	// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
+	// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
+	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
+	// By default no request content is excluded.
+	WriteRequest(req *http.Request, filter Filter)
+
+	// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
+	// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
+	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
+	// By default no response content is excluded.
+	WriteResponse(resp *http.Response, filter Filter)
+}
+
+// Instance is the default log writer initialized during package init.
+// This can be replaced with a custom implementation as required.
+var Instance Writer
+
+// default log level
+var logLevel = LogNone
+
+// Level returns the value specified in AZURE_GO_SDK_LOG_LEVEL.
+// If no value was specified the default value is LogNone.
+// Custom loggers can call this to retrieve the configured log level.
+func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. +type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to 
read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/LICENSE b/vendor/github.com/Azure/go-autorest/tracing/LICENSE new file mode 100644 index 00000000000..b9d6a27ea92 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go new file mode 100644 index 00000000000..e163975cd4e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go_mod_tidy_hack.go @@ -0,0 +1,24 @@ +// +build modhack + +package tracing + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the github.com/Azure/go-autorest import, won't actually become part of +// the resultant binary. + +// Necessary for safely adding multi-module repo. +// See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "github.com/Azure/go-autorest" diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 00000000000..0e7a6e96254 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,67 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" +) + +// Tracer represents an HTTP tracing facility. +type Tracer interface { + NewTransport(base *http.Transport) http.RoundTripper + StartSpan(ctx context.Context, name string) context.Context + EndSpan(ctx context.Context, httpStatusCode int, err error) +} + +var ( + tracer Tracer +) + +// Register will register the provided Tracer. Pass nil to unregister a Tracer. +func Register(t Tracer) { + tracer = t +} + +// IsEnabled returns true if a Tracer has been registered. +func IsEnabled() bool { + return tracer != nil +} + +// NewTransport creates a new instrumenting http.RoundTripper for the +// registered Tracer. If no Tracer has been registered it returns nil. 
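+// Callers should be prepared to fall back to a default transport when nil is returned.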
+func NewTransport(base *http.Transport) http.RoundTripper { + if tracer != nil { + return tracer.NewTransport(base) + } + return nil +} + +// StartSpan starts a trace span with the specified name, associating it with the +// provided context. Has no effect if a Tracer has not been registered. +func StartSpan(ctx context.Context, name string) context.Context { + if tracer != nil { + return tracer.StartSpan(ctx, name) + } + return ctx +} + +// EndSpan ends a previously started span stored in the context. +// Has no effect if a Tracer has not been registered. +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + if tracer != nil { + tracer.EndSpan(ctx, httpStatusCode, err) + } +} diff --git a/vendor/github.com/Unknwon/goconfig/.gitignore b/vendor/github.com/Unknwon/goconfig/.gitignore new file mode 100644 index 00000000000..c81d5b37479 --- /dev/null +++ b/vendor/github.com/Unknwon/goconfig/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +*.iml +.idea \ No newline at end of file diff --git a/vendor/github.com/Unknwon/goconfig/LICENSE b/vendor/github.com/Unknwon/goconfig/LICENSE new file mode 100644 index 00000000000..8405e89a0b1 --- /dev/null +++ b/vendor/github.com/Unknwon/goconfig/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/Unknwon/goconfig/README.md b/vendor/github.com/Unknwon/goconfig/README.md
new file mode 100644
index 00000000000..3a2218cb076
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/README.md
@@ -0,0 +1,68 @@
+goconfig [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig)
+========
+
+[中文文档](README_ZH.md)
+
+**IMPORTANT**
+
+- This library is in bug-fix-only mode, which means no more features will be added.
+- I'm continuing to work on better Go code with a different library: [ini](https://github.com/go-ini/ini).
+
+## About
+
+Package goconfig is an easy-to-use, comment-supporting configuration file parser for the Go Programming Language, which provides a structure similar to what you would find in Microsoft Windows INI files.
+
+The configuration file consists of sections, led by a `[section]` header and followed by `name:value` or `name=value` entries. Note that leading whitespace is removed from values.
+The optional values can contain format strings which refer to other values in the same section, or values in a special DEFAULT section. Comments are indicated by ";" or "#"; comments may begin anywhere on a single line.
+
+## Features
+
+- It simplifies the operation process and is easy to use and understand; therefore, there is less chance of errors.
+- It accesses a configuration file in exactly the same way as the Windows APIs, so you don't need to change your code style.
+- It supports reading recursive sections.
+- It supports auto-incrementing key names.
+- It supports **READ** and **WRITE** operations on configuration files with comments on each section or key, which the other parsers don't support!
+- It supports getting values as bool, float64, int, int64 and string; methods that start with "Must" ignore errors and return the zero value if an error occurs, or you can specify a default value.
+- It's able to load multiple files to overwrite key values.
+
+## Installation
+
+	go get github.com/unknwon/goconfig
+
+## API Documentation
+
+[Go Walker](http://gowalker.org/github.com/unknwon/goconfig).
+
+## Example
+
+Please see [conf.ini](testdata/conf.ini) as an example.
+
+### Usage
+
+- Function `LoadConfigFile` loads one or more files, depending on your situation, and returns a variable of type `ConfigFile`.
+- `GetValue` provides the basic functionality of getting a value for a given section and key.
+- Methods like `Bool`, `Int`, `Int64` return values of the corresponding type.
+- Methods starting with `Must` return a value of the corresponding type, or the zero value of that type if something goes wrong.
+- `SetValue` sets a value for the given section and key, inserting it if it does not exist.
+- `DeleteKey` deletes the given key from the given section.
+- Finally, `SaveConfigFile` saves your configuration to the local file system.
+- Use the `Reload` method in case someone else modified your file(s).
+- Methods containing `Comment` help you manipulate comments.
+- `LoadFromReader` allows loading data without an intermediate file.
+- `SaveConfigData` writes the configuration to an arbitrary writer.
+- `ReloadData` allows reloading data from memory.
+
+Note that you cannot mix in-memory configuration with on-disk configuration.
+
+## More Information
+
+- All characters are CASE SENSITIVE, BE CAREFUL!
+
+## Credits
+
+- [goconf](http://code.google.com/p/goconf/)
+- [robfig/config](https://github.com/robfig/config)
+- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/Unknwon/goconfig/README_ZH.md b/vendor/github.com/Unknwon/goconfig/README_ZH.md
new file mode 100644
index 00000000000..d5fcbb95b68
--- /dev/null
+++ b/vendor/github.com/Unknwon/goconfig/README_ZH.md
@@ -0,0 +1,64 @@
+goconfig [![Build Status](https://drone.io/github.com/Unknwon/goconfig/status.png)](https://drone.io/github.com/Unknwon/goconfig/latest) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/goconfig)
+========
+
+This library is featured and explained in [《Go名库讲解》](https://github.com/Unknwon/go-rock-libraries-showcases/tree/master/lectures/01-goconfig); you are welcome to visit it to learn how to use goconfig!
+ +Coding style: based on the [Go code convention](https://github.com/Unknwon/go-code-convention) + +## About + +Package goconfig is an easy-to-use, comment-supporting configuration file parser for the Go language; the file format is the same as that of INI files under Windows. + +The configuration file consists of sections of the form `[section]`, containing key-value pairs like `name:value` or `name=value`; whitespace at the beginning and end of each line is ignored; if no section is specified, entries are placed by default in a section named `DEFAULT`; ";" or "#" can be used to start a comment, which may be placed on any separate line. + +## Features + +- Simplified workflow, easy to understand, fewer mistakes. +- Provides exactly the same style of operation as the Windows APIs. +- Supports reading recursive sections. +- Supports auto-increment key names. +- Supports **reading** and **writing** comments on sections and keys, which most other parsers don't support! +- Can directly return values of type bool, float64, int, int64 and string; methods starting with "Must" always return a value of that type instead of an error, returning the zero value if an error occurs. +- Supports loading multiple files to override values. + +## Installation + + go get github.com/Unknwon/goconfig + +or + + gopm get github.com/Unknwon/goconfig + + +## API Documentation + +[Go Walker](http://gowalker.org/github.com/Unknwon/goconfig). + +## Example + +Please see [conf.ini](testdata/conf.ini) as a usage example. + +### Usage + +- Function `LoadConfigFile` loads one or more files and returns a variable of type `ConfigFile`. +- `GetValue` simply gets a value. +- Methods like `Bool`, `Int` and `Int64` directly return a value of the specified type. +- Methods starting with `Must` do not return an error, but return the zero value when an error occurs. +- `SetValue` sets a value. +- `DeleteKey` deletes a key. +- Finally, `SaveConfigFile` saves your configuration to the local file system. +- Use the method `Reload` to reload your configuration file(s). + +## More Information + +- All characters are case sensitive! + +## Credits + +- [goconf](http://code.google.com/p/goconf/) +- [robfig/config](https://github.com/robfig/config) +- [Delete an item from a slice](https://groups.google.com/forum/?fromgroups=#!topic/golang-nuts/lYz8ftASMQ0) + +## License + +This project is under the Apache v2 open source license; the full license text is in the [LICENSE](LICENSE) file. diff --git a/vendor/github.com/Unknwon/goconfig/conf.go b/vendor/github.com/Unknwon/goconfig/conf.go new file mode 100644 index 00000000000..657b465649c --- /dev/null +++ b/vendor/github.com/Unknwon/goconfig/conf.go @@ -0,0 +1,562 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package goconfig is a fully functional, comment-supporting configuration file (.ini) parser. +package goconfig + +import ( + "fmt" + "regexp" + "runtime" + "strconv" + "strings" + "sync" +) + +const ( + // Default section name. + DEFAULT_SECTION = "DEFAULT" + // Maximum allowed depth when recursively substituting variable names. + _DEPTH_VALUES = 200 +) + +type ParseError int + +const ( + ERR_SECTION_NOT_FOUND ParseError = iota + 1 + ERR_KEY_NOT_FOUND + ERR_BLANK_SECTION_NAME + ERR_COULD_NOT_PARSE +) + +var LineBreak = "\n" + +// Variable regexp pattern: %(variable)s +var varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +// A ConfigFile represents an INI format configuration file. +type ConfigFile struct { + lock sync.RWMutex // Go maps are not safe for concurrent use. + fileNames []string // Supports multiple files. + data map[string]map[string]string // Section -> key : value + + // Lists can keep sections and keys in order. + sectionList []string // Section name list. + keyList map[string][]string // Section -> key name list + + sectionComments map[string]string // Section comments. + keyComments map[string]map[string]string // Key comments. + BlockMode bool // Indicates whether to use locking or not.
+ prettyFormat bool // Write spaces around "=" to look better. +} + +// newConfigFile creates an empty configuration representation. +func newConfigFile(fileNames []string) *ConfigFile { + c := new(ConfigFile) + c.fileNames = fileNames + c.data = make(map[string]map[string]string) + c.keyList = make(map[string][]string) + c.sectionComments = make(map[string]string) + c.keyComments = make(map[string]map[string]string) + c.BlockMode = true + c.prettyFormat = true + return c +} + +// SetValue adds a new section-key-value to the configuration. +// It returns true if the key and value were inserted, +// or returns false if the value was overwritten. +// If the section does not exist in advance, it will be created. +func (c *ConfigFile) SetValue(section, key, value string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + if len(key) == 0 { + return false + } + + if c.BlockMode { + c.lock.Lock() + defer c.lock.Unlock() + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + // Execute add operation. + c.data[section] = make(map[string]string) + // Append section to list. + c.sectionList = append(c.sectionList, section) + } + + // Check if key exists. + _, ok := c.data[section][key] + c.data[section][key] = value + if !ok { + // If not exists, append to key list. + c.keyList[section] = append(c.keyList[section], key) + } + return !ok +} + +// DeleteKey deletes the key in given section. +// It returns true if the key was deleted, +// or returns false if the section or key didn't exist. +func (c *ConfigFile) DeleteKey(section, key string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if c.BlockMode { + c.lock.Lock() + defer c.lock.Unlock() + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + return false + } + + // Check if key exists. + if _, ok := c.data[section][key]; ok { + delete(c.data[section], key) + // Remove comments of key. + c.SetKeyComments(section, key, "") + // Get index of key. + i := 0 + for _, keyName := range c.keyList[section] { + if keyName == key { + break + } + i++ + } + // Remove from key list. + c.keyList[section] = append(c.keyList[section][:i], c.keyList[section][i+1:]...) + return true + } + return false +} + +// GetValue returns the value of key available in the given section. +// If the value needs to be unfolded +// (see e.g. %(google)s example in the GoConfig_test.go), +// then String does this unfolding automatically, up to +// _DEPTH_VALUES number of iterations. +// It returns an error and empty string value if the section does not exist, +// or key does not exist in DEFAULT and current sections. +func (c *ConfigFile) GetValue(section, key string) (string, error) { + if c.BlockMode { + c.lock.RLock() + defer c.lock.RUnlock() + } + + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists + if _, ok := c.data[section]; !ok { + // Section does not exist. + return "", GetError{ERR_SECTION_NOT_FOUND, section} + } + + // Section exists. + // Check if key exists or empty value. + value, ok := c.data[section][key] + if !ok { + // Check if it is a sub-section. + if i := strings.LastIndex(section, "."); i > -1 { + return c.GetValue(section[:i], key) + } + + // Return empty value. + return "", GetError{ERR_KEY_NOT_FOUND, key} + } + + // Key exists. 
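+ // The substitution loop below expands %(name)s-style references in the value: each round resolves the referenced name against the DEFAULT section first and then the current section, and it runs at most _DEPTH_VALUES times to bound recursive definitions.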
+ var i int + for i = 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(value) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search variable in default section. + nvalue, err := c.GetValue(DEFAULT_SECTION, noption) + if err != nil && section != DEFAULT_SECTION { + // Search in the same section. + if _, ok := c.data[section][noption]; ok { + nvalue = c.data[section][noption] + } + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + value = strings.Replace(value, vr, nvalue, -1) + } + return value, nil +} + +// Bool returns bool type value. +func (c *ConfigFile) Bool(section, key string) (bool, error) { + value, err := c.GetValue(section, key) + if err != nil { + return false, err + } + return strconv.ParseBool(value) +} + +// Float64 returns float64 type value. +func (c *ConfigFile) Float64(section, key string) (float64, error) { + value, err := c.GetValue(section, key) + if err != nil { + return 0.0, err + } + return strconv.ParseFloat(value, 64) +} + +// Int returns int type value. +func (c *ConfigFile) Int(section, key string) (int, error) { + value, err := c.GetValue(section, key) + if err != nil { + return 0, err + } + return strconv.Atoi(value) +} + +// Int64 returns int64 type value. +func (c *ConfigFile) Int64(section, key string) (int64, error) { + value, err := c.GetValue(section, key) + if err != nil { + return 0, err + } + return strconv.ParseInt(value, 10, 64) +} + +// MustValue always returns value without error. +// It returns empty string if error occurs, or the default value if given. +func (c *ConfigFile) MustValue(section, key string, defaultVal ...string) string { + val, err := c.GetValue(section, key) + if len(defaultVal) > 0 && (err != nil || len(val) == 0) { + return defaultVal[0] + } + return val +} + +// MustValueSet always returns value without error, +// It returns empty string if error occurs, or the default value if given, +// and a bool value indicates whether default value is returned. +func (c *ConfigFile) MustValueSet(section, key string, defaultVal ...string) (string, bool) { + val, err := c.GetValue(section, key) + if len(defaultVal) > 0 && (err != nil || len(val) == 0) { + c.SetValue(section, key, defaultVal[0]) + return defaultVal[0], true + } + return val, false +} + +// MustValueRange always returns value without error, +// it returns default value if error occurs or doesn't fit into range. +func (c *ConfigFile) MustValueRange(section, key, defaultVal string, candidates []string) string { + val, err := c.GetValue(section, key) + if err != nil || len(val) == 0 { + return defaultVal + } + + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// MustValueArray always returns value array without error, +// it returns empty array if error occurs, split by delimiter otherwise. +func (c *ConfigFile) MustValueArray(section, key, delim string) []string { + val, err := c.GetValue(section, key) + if err != nil || len(val) == 0 { + return []string{} + } + + vals := strings.Split(val, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// MustBool always returns value without error, +// it returns false if error occurs. 
+func (c *ConfigFile) MustBool(section, key string, defaultVal ...bool) bool { + val, err := c.Bool(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (c *ConfigFile) MustFloat64(section, key string, defaultVal ...float64) float64 { + value, err := c.Float64(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return value +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (c *ConfigFile) MustInt(section, key string, defaultVal ...int) int { + value, err := c.Int(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return value +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (c *ConfigFile) MustInt64(section, key string, defaultVal ...int64) int64 { + value, err := c.Int64(section, key) + if len(defaultVal) > 0 && err != nil { + return defaultVal[0] + } + return value +} + +// GetSectionList returns the list of all sections +// in the same order in the file. +func (c *ConfigFile) GetSectionList() []string { + list := make([]string, len(c.sectionList)) + copy(list, c.sectionList) + return list +} + +// GetKeyList returns the list of all keys in give section +// in the same order in the file. +// It returns nil if given section does not exist. +func (c *ConfigFile) GetKeyList(section string) []string { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if c.BlockMode { + c.lock.RLock() + defer c.lock.RUnlock() + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + return nil + } + + // Non-default section has a blank key as section keeper. + list := make([]string, 0, len(c.keyList[section])) + for _, key := range c.keyList[section] { + if key != " " { + list = append(list, key) + } + } + return list +} + +// DeleteSection deletes the entire section by given name. +// It returns true if the section was deleted, and false if the section didn't exist. +func (c *ConfigFile) DeleteSection(section string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if c.BlockMode { + c.lock.Lock() + defer c.lock.Unlock() + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + return false + } + + delete(c.data, section) + // Remove comments of section. + c.SetSectionComments(section, "") + // Get index of section. + i := 0 + for _, secName := range c.sectionList { + if secName == section { + break + } + i++ + } + // Remove from section and key list. + c.sectionList = append(c.sectionList[:i], c.sectionList[i+1:]...) + delete(c.keyList, section) + return true +} + +// GetSection returns key-value pairs in given section. +// If section does not exist, returns nil and error. +func (c *ConfigFile) GetSection(section string) (map[string]string, error) { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if c.BlockMode { + c.lock.Lock() + defer c.lock.Unlock() + } + + // Check if section exists. + if _, ok := c.data[section]; !ok { + // Section does not exist. + return nil, GetError{ERR_SECTION_NOT_FOUND, section} + } + + // Remove pre-defined key. + secMap := deepCopy(c.data[section]) + delete(secMap, " ") + + // Section exists. 
+ return secMap, nil +} + +// SetSectionComments adds new section comments to the configuration. +// If comments are empty(0 length), it will remove its section comments! +// It returns true if the comments were inserted or removed, +// or returns false if the comments were overwritten. +func (c *ConfigFile) SetSectionComments(section, comments string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if len(comments) == 0 { + if _, ok := c.sectionComments[section]; ok { + delete(c.sectionComments, section) + } + + // Not exists can be seen as remove. + return true + } + + // Check if comments exists. + _, ok := c.sectionComments[section] + if comments[0] != '#' && comments[0] != ';' { + comments = "; " + comments + } + c.sectionComments[section] = comments + return !ok +} + +// SetKeyComments adds new section-key comments to the configuration. +// If comments are empty(0 length), it will remove its section-key comments! +// It returns true if the comments were inserted or removed, +// or returns false if the comments were overwritten. +// If the section does not exist in advance, it is created. +func (c *ConfigFile) SetKeyComments(section, key, comments string) bool { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + // Check if section exists. + if _, ok := c.keyComments[section]; ok { + if len(comments) == 0 { + if _, ok := c.keyComments[section][key]; ok { + delete(c.keyComments[section], key) + } + + // Not exists can be seen as remove. + return true + } + } else { + if len(comments) == 0 { + // Not exists can be seen as remove. + return true + } else { + // Execute add operation. + c.keyComments[section] = make(map[string]string) + } + } + + // Check if key exists. + _, ok := c.keyComments[section][key] + if comments[0] != '#' && comments[0] != ';' { + comments = "; " + comments + } + c.keyComments[section][key] = comments + return !ok +} + +// GetSectionComments returns the comments in the given section. +// It returns an empty string(0 length) if the comments do not exist. +func (c *ConfigFile) GetSectionComments(section string) (comments string) { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + return c.sectionComments[section] +} + +// GetKeyComments returns the comments of key in the given section. +// It returns an empty string(0 length) if the comments do not exist. +func (c *ConfigFile) GetKeyComments(section, key string) (comments string) { + // Blank section name represents DEFAULT section. + if len(section) == 0 { + section = DEFAULT_SECTION + } + + if _, ok := c.keyComments[section]; ok { + return c.keyComments[section][key] + } + return "" +} + +// SetPrettyFormat set the prettyFormat to decide whether write spaces around "=". +func (c *ConfigFile) SetPrettyFormat(pretty bool) { + c.prettyFormat = pretty +} + +// GetError occurs when get value in configuration file with invalid parameter. +type GetError struct { + Reason ParseError + Name string +} + +// Error implements Error interface. 
+func (err GetError) Error() string { + switch err.Reason { + case ERR_SECTION_NOT_FOUND: + return fmt.Sprintf("section '%s' not found", err.Name) + case ERR_KEY_NOT_FOUND: + return fmt.Sprintf("key '%s' not found", err.Name) + } + return "invalid get error" +} diff --git a/vendor/github.com/Unknwon/goconfig/read.go b/vendor/github.com/Unknwon/goconfig/read.go new file mode 100644 index 00000000000..17b4dbc37fe --- /dev/null +++ b/vendor/github.com/Unknwon/goconfig/read.go @@ -0,0 +1,294 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package goconfig + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "strings" + "time" +) + +// Read reads an io.Reader and returns a configuration representation. +// This representation can be queried with GetValue. +func (c *ConfigFile) read(reader io.Reader) (err error) { + buf := bufio.NewReader(reader) + + // Handle BOM-UTF8. + // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding + mask, err := buf.Peek(3) + if err == nil && len(mask) >= 3 && + mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + buf.Read(mask) + } + + count := 1 // Counter for auto increment. + // Current section name. + section := DEFAULT_SECTION + var comments string + // Parse line-by-line + for { + line, err := buf.ReadString('\n') + line = strings.TrimSpace(line) + lineLengh := len(line) //[SWH|+] + if err != nil { + if err != io.EOF { + return err + } + + // Reached end of file, if nothing to read then break, + // otherwise handle the last line. + if lineLengh == 0 { + break + } + } + + // switch written for readability (not performance) + switch { + case lineLengh == 0: // Empty line + continue + case line[0] == '#' || line[0] == ';': // Comment + // Append comments + if len(comments) == 0 { + comments = line + } else { + comments += LineBreak + line + } + continue + case line[0] == '[' && line[lineLengh-1] == ']': // New section. + // Get section name. + section = strings.TrimSpace(line[1 : lineLengh-1]) + // Set section comments and empty if it has comments. + if len(comments) > 0 { + c.SetSectionComments(section, comments) + comments = "" + } + // Make section exist even though it does not have any key. + c.SetValue(section, " ", " ") + // Reset counter. 
+ count = 1 + continue + case section == "": // No section defined so far + return ReadError{ERR_BLANK_SECTION_NAME, line} + default: // Other alternatives + var ( + i int + keyQuote string + key string + valQuote string + value string + ) + //[SWH|+]: support quote-enclosed strings + if line[0] == '"' { + if lineLengh >= 6 && line[0:3] == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + if keyQuote != "" { + qLen := len(keyQuote) + pos := strings.Index(line[qLen:], keyQuote) + if pos == -1 { + return ReadError{ERR_COULD_NOT_PARSE, line} + } + pos = pos + qLen + i = strings.IndexAny(line[pos:], "=:") + if i <= 0 { + return ReadError{ERR_COULD_NOT_PARSE, line} + } + i = i + pos + key = line[qLen:pos] // keep the whitespace at both ends inside the quotes + } else { + i = strings.IndexAny(line, "=:") + if i <= 0 { + return ReadError{ERR_COULD_NOT_PARSE, line} + } + key = strings.TrimSpace(line[0:i]) + } + //[SWH|+]; + + // Check if it needs auto increment. + if key == "-" { + key = "#" + fmt.Sprint(count) + count++ + } + + //[SWH|+]: support quote-enclosed strings + lineRight := strings.TrimSpace(line[i+1:]) + lineRightLength := len(lineRight) + firstChar := "" + if lineRightLength >= 2 { + firstChar = lineRight[0:1] + } + if firstChar == "`" { + valQuote = "`" + } else if lineRightLength >= 6 && lineRight[0:3] == `"""` { + valQuote = `"""` + } + if valQuote != "" { + qLen := len(valQuote) + pos := strings.LastIndex(lineRight[qLen:], valQuote) + if pos == -1 { + return ReadError{ERR_COULD_NOT_PARSE, line} + } + pos = pos + qLen + value = lineRight[qLen:pos] + } else { + value = strings.TrimSpace(lineRight[0:]) + } + //[SWH|+]; + + c.SetValue(section, key, value) + // Set key comments and empty if it has comments. + if len(comments) > 0 { + c.SetKeyComments(section, key, comments) + comments = "" + } + } + + // Reached end of file. + if err == io.EOF { + break + } + } + return nil +} + +// LoadFromData accepts raw data directly from memory +// and returns a new configuration representation. +// Note that the configuration is written to the system +// temporary folder, so your file should not contain +// sensitive information. +func LoadFromData(data []byte) (c *ConfigFile, err error) { + // Save memory data to temporary file to support further operations. + tmpName := path.Join(os.TempDir(), "goconfig", fmt.Sprintf("%d", time.Now().Nanosecond())) + if err = os.MkdirAll(path.Dir(tmpName), os.ModePerm); err != nil { + return nil, err + } + if err = ioutil.WriteFile(tmpName, data, 0655); err != nil { + return nil, err + } + + c = newConfigFile([]string{tmpName}) + err = c.read(bytes.NewBuffer(data)) + return c, err +} + +// LoadFromReader accepts raw data directly from a reader +// and returns a new configuration representation. +// You must use ReloadData to reload. +// You cannot append files to a ConfigFile read this way. +func LoadFromReader(in io.Reader) (c *ConfigFile, err error) { + c = newConfigFile([]string{""}) + err = c.read(in) + return c, err +} + +func (c *ConfigFile) loadFile(fileName string) (err error) { + f, err := os.Open(fileName) + if err != nil { + return err + } + defer f.Close() + + return c.read(f) +} + +// LoadConfigFile reads a file and returns a new configuration representation. +// This representation can be queried with GetValue. +func LoadConfigFile(fileName string, moreFiles ...string) (c *ConfigFile, err error) { + // Collect the file names together.
+ fileNames := make([]string, 1, len(moreFiles)+1) + fileNames[0] = fileName + if len(moreFiles) > 0 { + fileNames = append(fileNames, moreFiles...) + } + + c = newConfigFile(fileNames) + + for _, name := range fileNames { + if err = c.loadFile(name); err != nil { + return nil, err + } + } + + return c, nil +} + +// Reload reloads the configuration file(s) in case they have changed. +func (c *ConfigFile) Reload() (err error) { + var cfg *ConfigFile + if len(c.fileNames) == 1 { + if c.fileNames[0] == "" { + return fmt.Errorf("file opened from in-memory data, use ReloadData to reload") + } + cfg, err = LoadConfigFile(c.fileNames[0]) + } else { + cfg, err = LoadConfigFile(c.fileNames[0], c.fileNames[1:]...) + } + + if err == nil { + *c = *cfg + } + return err +} + +// ReloadData reloads configuration data from memory. +func (c *ConfigFile) ReloadData(in io.Reader) (err error) { + var cfg *ConfigFile + if len(c.fileNames) != 1 { + return fmt.Errorf("Multiple files loaded, unable to mix in-memory and file data") + } + + cfg, err = LoadFromReader(in) + if err == nil { + *c = *cfg + } + return err +} + +// AppendFiles appends more files to the ConfigFile and reloads automatically. +func (c *ConfigFile) AppendFiles(files ...string) error { + if len(c.fileNames) == 1 && c.fileNames[0] == "" { + return fmt.Errorf("Cannot append file data to in-memory data") + } + c.fileNames = append(c.fileNames, files...) + return c.Reload() +} + +// ReadError occurs when reading a configuration file with the wrong format. +type ReadError struct { + Reason ParseError + Content string // Line content +} + +// Error implements the error interface. +func (err ReadError) Error() string { + switch err.Reason { + case ERR_BLANK_SECTION_NAME: + return "empty section name not allowed" + case ERR_COULD_NOT_PARSE: + return fmt.Sprintf("could not parse line: %s", string(err.Content)) + } + return "invalid read error" +} diff --git a/vendor/github.com/Unknwon/goconfig/util.go b/vendor/github.com/Unknwon/goconfig/util.go new file mode 100644 index 00000000000..9b33fbb66c4 --- /dev/null +++ b/vendor/github.com/Unknwon/goconfig/util.go @@ -0,0 +1,25 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package goconfig + +// deepCopy returns a copy of the map at a new address. +func deepCopy(d map[string]string) map[string]string { + rs := make(map[string]string) + for k, v := range d { + rs[k] = v + } + + return rs +} diff --git a/vendor/github.com/Unknwon/goconfig/write.go b/vendor/github.com/Unknwon/goconfig/write.go new file mode 100644 index 00000000000..63057c42691 --- /dev/null +++ b/vendor/github.com/Unknwon/goconfig/write.go @@ -0,0 +1,114 @@ +// Copyright 2013 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License.
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package goconfig + +import ( + "bytes" + "io" + "os" + "strings" +) + +// SaveConfigData writes the configuration to a writer. +func SaveConfigData(c *ConfigFile, out io.Writer) (err error) { + equalSign := "=" + if c.prettyFormat { + equalSign = " = " + } + + buf := bytes.NewBuffer(nil) + for _, section := range c.sectionList { + // Write section comments. + if len(c.GetSectionComments(section)) > 0 { + if _, err = buf.WriteString(c.GetSectionComments(section) + LineBreak); err != nil { + return err + } + } + + if section != DEFAULT_SECTION { + // Write section name. + if _, err = buf.WriteString("[" + section + "]" + LineBreak); err != nil { + return err + } + } + + for _, key := range c.keyList[section] { + if key != " " { + // Write key comments. + if len(c.GetKeyComments(section, key)) > 0 { + if _, err = buf.WriteString(c.GetKeyComments(section, key) + LineBreak); err != nil { + return err + } + } + + keyName := key + // Check if it's auto increment. + if keyName[0] == '#' { + keyName = "-" + } + //[SWH|+]: support key names that contain '=' or ':' + if strings.Contains(keyName, `=`) || strings.Contains(keyName, `:`) { + if strings.Contains(keyName, "`") { + if strings.Contains(keyName, `"`) { + keyName = `"""` + keyName + `"""` + } else { + keyName = `"` + keyName + `"` + } + } else { + keyName = "`" + keyName + "`" + } + } + value := c.data[section][key] + // In case the key value contains "`" or "\"". + if strings.Contains(value, "`") { + if strings.Contains(value, `"`) { + value = `"""` + value + `"""` + } else { + value = `"` + value + `"` + } + } + + // Write key and value. + if _, err = buf.WriteString(keyName + equalSign + value + LineBreak); err != nil { + return err + } + } + } + + // Put a line between sections. + if _, err = buf.WriteString(LineBreak); err != nil { + return err + } + } + + if _, err := buf.WriteTo(out); err != nil { + return err + } + return nil +} + +// SaveConfigFile writes the configuration file to the local file system. +func SaveConfigFile(c *ConfigFile, filename string) (err error) { + // Write configuration file by filename. + var f *os.File + if f, err = os.Create(filename); err != nil { + return err + } + + if err := SaveConfigData(c, f); err != nil { + return err + } + return f.Close() +} diff --git a/vendor/github.com/aalpar/deheap/LICENSE.txt b/vendor/github.com/aalpar/deheap/LICENSE.txt new file mode 100644 index 00000000000..d8ef07fed67 --- /dev/null +++ b/vendor/github.com/aalpar/deheap/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright 2019 Aaron H. Alpar + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files +(the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software.
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/aalpar/deheap/README.md b/vendor/github.com/aalpar/deheap/README.md new file mode 100644 index 00000000000..a3a24f0ca27 --- /dev/null +++ b/vendor/github.com/aalpar/deheap/README.md @@ -0,0 +1,14 @@ +# deheap + +Package deheap provides the implementation of a doubly ended heap. +Doubly ended heaps are heaps with two sides, a min side and a max side. +Like normal single-sided heaps, elements can be pushed onto and pulled +off of a deheap. Deheaps have an additional pop function, `PopMax`, that +returns elements from the opposite end of the ordering. + +This implementation has emphasized compatibility with existing libraries +in the sort and heap packages. + +Performance of the deheap functions should be very close to the +performance of the functions of the `heap` library. + diff --git a/vendor/github.com/aalpar/deheap/VERSION b/vendor/github.com/aalpar/deheap/VERSION new file mode 100644 index 00000000000..6b3126cee74 --- /dev/null +++ b/vendor/github.com/aalpar/deheap/VERSION @@ -0,0 +1 @@ +v1.0 diff --git a/vendor/github.com/aalpar/deheap/deheap.go b/vendor/github.com/aalpar/deheap/deheap.go new file mode 100644 index 00000000000..6f53f6feaf9 --- /dev/null +++ b/vendor/github.com/aalpar/deheap/deheap.go @@ -0,0 +1,228 @@ +// +// Copyright 2019 Aaron H. Alpar +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files +// (the "Software"), to deal in the Software without restriction, +// including without limitation the rights to use, copy, modify, merge, +// publish, distribute, sublicense, and/or sell copies of the Software, +// and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included +// in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +// IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +// CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +// TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +// SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +// + +// +// Package deheap provides the implementation of a doubly ended heap. +// Doubly ended heaps are heaps with two sides, a min side and a max side. +// Like normal single-sided heaps, elements can be pushed onto and pulled +// off of a deheap. deheaps have an additional Pop function, PopMax, that +// returns elements from the opposite side of the ordering. +// +// This implementation has emphasized compatibility with existing libraries +// in the sort and heap packages.
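+//
+// A minimal usage sketch (byteDeheap is the heap.Interface implementation
+// over a byte slice defined in fuzz.go below; any heap.Interface works):
+//
+//	h := &byteDeheap{3, 1, 2}
+//	deheap.Init(h)           // establish the deheap ordering
+//	min := deheap.Pop(h)     // smallest element: byte(1)
+//	max := deheap.PopMax(h)  // largest element: byte(3)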
+// +// Performace of the deheap functions should be very close to the +// performance of the functions of the heap library +// +package deheap + +import ( + "container/heap" + "math/bits" +) + +func hparent(i int) int { + return (i - 1) / 2 +} + +func hlchild(i int) int { + return (i * 2) + 1 +} + +func parent(i int) int { + return ((i + 1) / 4) - 1 +} + +func lchild(i int) int { + return ((i + 1 ) * 4) - 1 +} + +func level(i int) int { + return bits.Len(uint(i)+1) - 1 +} + +func isMinHeap(i int) bool { + return level(i) % 2 == 0 +} + +func min4(h heap.Interface, l int, min bool, i int) int { + q := i + i++ + if i >= l { + return q + } + if min == h.Less(i, q) { + q = i + } + i++ + if i >= l { + return q + } + if min == h.Less(i, q) { + q = i + } + i++ + if i >= l { + return q + } + if min == h.Less(i, q) { + q = i + } + return q +} + +// min2 +func min2(h heap.Interface, l int, min bool, i int) int { + if i+1 >= l { + return i + } + if min != h.Less(i+1, i) { + return i + } + return i + 1 +} + +// min3 +func min3(h heap.Interface, l int, min bool, i, j, k int) int { + q := i + if j < l && h.Less(j, q) == min { + q = j + } + if k < l && h.Less(k, q) == min { + q = k + } + return q +} + +// bubbledown +func bubbledown(h heap.Interface, l int, min bool, i int) (q int, r int) { + q = i + r = i + for { + // find min of children + j := min2(h, l, min, hlchild(i)) + if j >= l { + break + } + // find min of grandchildren + k := min4(h, l, min, lchild(i)) + // swap of less than the element at i + v := min3(h, l, min, i, j, k) + if v == i || v >= l { + break + } + // v == k + q = v + h.Swap(v, i) + if v == j { + break + } + p := hparent(v) + if h.Less(p, v) == min { + h.Swap(p, v) + r = p + } + i = v + } + return q, r +} + +// bubbleup +func bubbleup(h heap.Interface, min bool, i int) (q bool) { + if i < 0 { + return false + } + j := parent(i) + for j >= 0 && min == h.Less(i, j) { + q = true + h.Swap(i, j) + i = j + j = parent(i) + } + min = !min + j = hparent(i) + for j >= 0 && min == h.Less(i, j) { + q = true + h.Swap(i, j) + i = j + j = parent(i) + } + return q +} + +// Pop the smallest value off the heap. See heap.Pop(). +// Time complexity is O(log n), where n = h.Len() +func Pop(h heap.Interface) interface{} { + l := h.Len()-1 + h.Swap(0, l) + q := h.Pop() + bubbledown(h, l, true, 0) + return q +} + +// Pop the largest value off the heap. See heap.Pop(). +// Time complexity is O(log n), where n = h.Len() +func PopMax(h heap.Interface) interface{} { + l := h.Len() + j := 0 + if l > 1 { + j = min2(h, l,false, 1) + } + l = l - 1 + h.Swap(j, l) + q := h.Pop() + bubbledown(h, l,false, j) + return q +} + +// Remove element at index i. See heap.Remove(). +// The complexity is O(log n) where n = h.Len(). +func Remove(h heap.Interface, i int) (q interface{}) { + l := h.Len() - 1 + h.Swap(i, l) + q = h.Pop() + if l != i { + q, r := bubbledown(h, l, isMinHeap(i), i) + bubbleup(h, isMinHeap(q), q) + bubbleup(h, isMinHeap(r), r) + } + return q +} + +// Push an element onto the heap. See heap.Push() +// Time complexity is O(log n), where n = h.Len() +func Push(h heap.Interface, o interface{}) { + h.Push(o) + l := h.Len() + i := l - 1 + bubbleup(h, isMinHeap(i), i) +} + +// Init initializes the heap. +// This should be called once on non-empty heaps before calling Pop(), PopMax() or Push(). 
See heap.Init() +func Init(h heap.Interface) { + l := h.Len() + for i := 0; i < l; i++ { + bubbleup(h, isMinHeap(i), i) + } +} diff --git a/vendor/github.com/aalpar/deheap/fuzz.go b/vendor/github.com/aalpar/deheap/fuzz.go new file mode 100644 index 00000000000..87de107f48f --- /dev/null +++ b/vendor/github.com/aalpar/deheap/fuzz.go @@ -0,0 +1,113 @@ +// Run fuzz tests on the package +// +// This is done with a byte heap to test and a simple reimplementation +// to check correctness against. +// +// First install go-fuzz +// +// go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build +// +// Next build the instrumented package +// +// go-fuzz-build +// +// Finally fuzz away +// +// go-fuzz +// +// See https://github.com/dvyukov/go-fuzz for more instructions + +//+build gofuzz + +package deheap + +import ( + "fmt" + "sort" +) + +// An byteHeap is a double ended heap of bytes +type byteDeheap []byte + +func (h byteDeheap) Len() int { return len(h) } +func (h byteDeheap) Less(i, j int) bool { return h[i] < h[j] } +func (h byteDeheap) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +func (h *byteDeheap) Push(x interface{}) { + *h = append(*h, x.(byte)) +} + +func (h *byteDeheap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[:n-1] + return x +} + +// sortedHeap is an inefficient reimplementation for test purposes +type sortedHeap []byte + +func (h *sortedHeap) Push(x byte) { + data := *h + i := sort.Search(len(data), func(i int) bool { return data[i] >= x }) + // i is the either the position of x or where it should be inserted + data = append(data, 0) + copy(data[i+1:], data[i:]) + data[i] = x + *h = data +} + +func (h *sortedHeap) Pop() (x byte) { + data := *h + x = data[0] + *h = data[1:] + return x +} + +func (h *sortedHeap) PopMax() (x byte) { + data := *h + x = data[len(data)-1] + *h = data[:len(data)-1] + return x +} + +// Fuzzer input is a string of bytes. +// +// If the byte is one of these, then the action is performed +// '<' Pop (minimum) +// '>' PopMax +// Otherwise the bytes is Pushed onto the heap +func Fuzz(data []byte) int { + h := &byteDeheap{} + Init(h) + s := sortedHeap{} + + for _, c := range data { + switch c { + case '<': + if h.Len() > 0 { + got := Pop(h) + want := s.Pop() + if got != want { + panic(fmt.Sprintf("Pop: want = %d, got = %d", want, got)) + } + } + case '>': + if h.Len() > 0 { + got := PopMax(h) + want := s.PopMax() + if got != want { + panic(fmt.Sprintf("PopMax: want = %d, got = %d", want, got)) + } + } + default: + Push(h, c) + s.Push(c) + } + if len(s) != h.Len() { + panic("wrong length") + } + } + return 1 +} diff --git a/vendor/github.com/abbot/go-http-auth/.gitignore b/vendor/github.com/abbot/go-http-auth/.gitignore new file mode 100644 index 00000000000..112ea3951cd --- /dev/null +++ b/vendor/github.com/abbot/go-http-auth/.gitignore @@ -0,0 +1,5 @@ +*~ +*.a +*.6 +*.out +_testmain.go diff --git a/vendor/github.com/abbot/go-http-auth/LICENSE b/vendor/github.com/abbot/go-http-auth/LICENSE new file mode 100644 index 00000000000..e454a52586f --- /dev/null +++ b/vendor/github.com/abbot/go-http-auth/LICENSE @@ -0,0 +1,178 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + diff --git a/vendor/github.com/abbot/go-http-auth/Makefile b/vendor/github.com/abbot/go-http-auth/Makefile new file mode 100644 index 00000000000..25f208da078 --- /dev/null +++ b/vendor/github.com/abbot/go-http-auth/Makefile @@ -0,0 +1,12 @@ +include $(GOROOT)/src/Make.inc + +TARG=auth_digest +GOFILES=\ + auth.go\ + digest.go\ + basic.go\ + misc.go\ + md5crypt.go\ + users.go\ + +include $(GOROOT)/src/Make.pkg diff --git a/vendor/github.com/abbot/go-http-auth/README.md b/vendor/github.com/abbot/go-http-auth/README.md new file mode 100644 index 00000000000..73ae9852252 --- /dev/null +++ b/vendor/github.com/abbot/go-http-auth/README.md @@ -0,0 +1,71 @@ +HTTP Authentication implementation in Go +======================================== + +This is an implementation of HTTP Basic and HTTP Digest authentication +in Go language. It is designed as a simple wrapper for +http.RequestHandler functions. + +Features +-------- + + * Supports HTTP Basic and HTTP Digest authentication. + * Supports htpasswd and htdigest formatted files. + * Automatic reloading of password files. + * Pluggable interface for user/password storage. + * Supports MD5, SHA1 and BCrypt for Basic authentication password storage. 
+ * Configurable Digest nonce cache size with expiration. + * Wrapper for legacy http handlers (http.HandlerFunc interface) + +Example usage +------------- + +This is a complete working example for Basic auth: + + package main + + import ( + "fmt" + "net/http" + + auth "github.com/abbot/go-http-auth" + ) + + func Secret(user, realm string) string { + if user == "john" { + // password is "hello" + return "$1$dlPL2MqE$oQmn16q49SqdmhenQuNgs1" + } + return "" + } + + func handle(w http.ResponseWriter, r *auth.AuthenticatedRequest) { + fmt.Fprintf(w, "
<html><body><h1>Hello, %s!</h1></body></html>
", r.Username) + } + + func main() { + authenticator := auth.NewBasicAuthenticator("example.com", Secret) + http.HandleFunc("/", authenticator.Wrap(handle)) + http.ListenAndServe(":8080", nil) + } + +See more examples in the "examples" directory. + +Legal +----- + +This module is developed under Apache 2.0 license, and can be used for +open and proprietary projects. + +Copyright 2012-2013 Lev Shamardin + +Licensed under the Apache License, Version 2.0 (the "License"); you +may not use this file or any other part of this project except in +compliance with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +implied. See the License for the specific language governing +permissions and limitations under the License. diff --git a/vendor/github.com/abbot/go-http-auth/auth.go b/vendor/github.com/abbot/go-http-auth/auth.go new file mode 100644 index 00000000000..05ded165e67 --- /dev/null +++ b/vendor/github.com/abbot/go-http-auth/auth.go @@ -0,0 +1,109 @@ +// Package auth is an implementation of HTTP Basic and HTTP Digest authentication. +package auth + +import ( + "net/http" + + "golang.org/x/net/context" +) + +/* + Request handlers must take AuthenticatedRequest instead of http.Request +*/ +type AuthenticatedRequest struct { + http.Request + /* + Authenticated user name. Current API implies that Username is + never empty, which means that authentication is always done + before calling the request handler. + */ + Username string +} + +/* + AuthenticatedHandlerFunc is like http.HandlerFunc, but takes + AuthenticatedRequest instead of http.Request +*/ +type AuthenticatedHandlerFunc func(http.ResponseWriter, *AuthenticatedRequest) + +/* + Authenticator wraps an AuthenticatedHandlerFunc with + authentication-checking code. + + Typical Authenticator usage is something like: + + authenticator := SomeAuthenticator(...) + http.HandleFunc("/", authenticator(my_handler)) + + Authenticator wrapper checks the user authentication and calls the + wrapped function only after authentication has succeeded. Otherwise, + it returns a handler which initiates the authentication procedure. +*/ +type Authenticator func(AuthenticatedHandlerFunc) http.HandlerFunc + +// Info contains authentication information for the request. +type Info struct { + // Authenticated is set to true when request was authenticated + // successfully, i.e. username and password passed in request did + // pass the check. + Authenticated bool + + // Username contains a user name passed in the request when + // Authenticated is true. It's value is undefined if Authenticated + // is false. + Username string + + // ResponseHeaders contains extra headers that must be set by server + // when sending back HTTP response. + ResponseHeaders http.Header +} + +// UpdateHeaders updates headers with this Info's ResponseHeaders. It is +// safe to call this function on nil Info. +func (i *Info) UpdateHeaders(headers http.Header) { + if i == nil { + return + } + for k, values := range i.ResponseHeaders { + for _, v := range values { + headers.Add(k, v) + } + } +} + +type key int // used for context keys + +var infoKey key = 0 + +type AuthenticatorInterface interface { + // NewContext returns a new context carrying authentication + // information extracted from the request. 
+ NewContext(ctx context.Context, r *http.Request) context.Context + + // Wrap returns an http.HandlerFunc which wraps + // AuthenticatedHandlerFunc with this authenticator's + // authentication checks. + Wrap(AuthenticatedHandlerFunc) http.HandlerFunc +} + +// FromContext returns authentication information from the context or +// nil if no such information present. +func FromContext(ctx context.Context) *Info { + info, ok := ctx.Value(infoKey).(*Info) + if !ok { + return nil + } + return info +} + +// AuthUsernameHeader is the header set by JustCheck functions. It +// contains an authenticated username (if authentication was +// successful). +const AuthUsernameHeader = "X-Authenticated-Username" + +func JustCheck(auth AuthenticatorInterface, wrapped http.HandlerFunc) http.HandlerFunc { + return auth.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) { + ar.Header.Set(AuthUsernameHeader, ar.Username) + wrapped(w, &ar.Request) + }) +} diff --git a/vendor/github.com/abbot/go-http-auth/basic.go b/vendor/github.com/abbot/go-http-auth/basic.go new file mode 100644 index 00000000000..b03dd582349 --- /dev/null +++ b/vendor/github.com/abbot/go-http-auth/basic.go @@ -0,0 +1,163 @@ +package auth + +import ( + "bytes" + "crypto/sha1" + "crypto/subtle" + "encoding/base64" + "errors" + "net/http" + "strings" + + "golang.org/x/crypto/bcrypt" + "golang.org/x/net/context" +) + +type compareFunc func(hashedPassword, password []byte) error + +var ( + errMismatchedHashAndPassword = errors.New("mismatched hash and password") + + compareFuncs = []struct { + prefix string + compare compareFunc + }{ + {"", compareMD5HashAndPassword}, // default compareFunc + {"{SHA}", compareShaHashAndPassword}, + // Bcrypt is complicated. According to crypt(3) from + // crypt_blowfish version 1.3 (fetched from + // http://www.openwall.com/crypt/crypt_blowfish-1.3.tar.gz), there + // are three different has prefixes: "$2a$", used by versions up + // to 1.0.4, and "$2x$" and "$2y$", used in all later + // versions. "$2a$" has a known bug, "$2x$" was added as a + // migration path for systems with "$2a$" prefix and still has a + // bug, and only "$2y$" should be used by modern systems. The bug + // has something to do with handling of 8-bit characters. Since + // both "$2a$" and "$2x$" are deprecated, we are handling them the + // same way as "$2y$", which will yield correct results for 7-bit + // character passwords, but is wrong for 8-bit character + // passwords. You have to upgrade to "$2y$" if you want sant 8-bit + // character password support with bcrypt. To add to the mess, + // OpenBSD 5.5. introduced "$2b$" prefix, which behaves exactly + // like "$2y$" according to the same source. + {"$2a$", bcrypt.CompareHashAndPassword}, + {"$2b$", bcrypt.CompareHashAndPassword}, + {"$2x$", bcrypt.CompareHashAndPassword}, + {"$2y$", bcrypt.CompareHashAndPassword}, + } +) + +type BasicAuth struct { + Realm string + Secrets SecretProvider + // Headers used by authenticator. Set to ProxyHeaders to use with + // proxy server. When nil, NormalHeaders are used. + Headers *Headers +} + +// check that BasicAuth implements AuthenticatorInterface +var _ = (AuthenticatorInterface)((*BasicAuth)(nil)) + +/* + Checks the username/password combination from the request. Returns + either an empty string (authentication failed) or the name of the + authenticated user. 
diff --git a/vendor/github.com/abbot/go-http-auth/basic.go b/vendor/github.com/abbot/go-http-auth/basic.go
new file mode 100644
index 00000000000..b03dd582349
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/basic.go
@@ -0,0 +1,163 @@
+package auth
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"crypto/subtle"
+	"encoding/base64"
+	"errors"
+	"net/http"
+	"strings"
+
+	"golang.org/x/crypto/bcrypt"
+	"golang.org/x/net/context"
+)
+
+type compareFunc func(hashedPassword, password []byte) error
+
+var (
+	errMismatchedHashAndPassword = errors.New("mismatched hash and password")
+
+	compareFuncs = []struct {
+		prefix  string
+		compare compareFunc
+	}{
+		{"", compareMD5HashAndPassword}, // default compareFunc
+		{"{SHA}", compareShaHashAndPassword},
+		// Bcrypt is complicated. According to crypt(3) from
+		// crypt_blowfish version 1.3 (fetched from
+		// http://www.openwall.com/crypt/crypt_blowfish-1.3.tar.gz), there
+		// are three different hash prefixes: "$2a$", used by versions up
+		// to 1.0.4, and "$2x$" and "$2y$", used in all later
+		// versions. "$2a$" has a known bug, "$2x$" was added as a
+		// migration path for systems with the "$2a$" prefix and still has
+		// a bug, and only "$2y$" should be used by modern systems. The
+		// bug has something to do with handling of 8-bit characters.
+		// Since both "$2a$" and "$2x$" are deprecated, we are handling
+		// them the same way as "$2y$", which will yield correct results
+		// for 7-bit character passwords, but is wrong for 8-bit character
+		// passwords. You have to upgrade to "$2y$" if you want sane 8-bit
+		// character password support with bcrypt. To add to the mess,
+		// OpenBSD 5.5 introduced the "$2b$" prefix, which behaves exactly
+		// like "$2y$" according to the same source.
+		{"$2a$", bcrypt.CompareHashAndPassword},
+		{"$2b$", bcrypt.CompareHashAndPassword},
+		{"$2x$", bcrypt.CompareHashAndPassword},
+		{"$2y$", bcrypt.CompareHashAndPassword},
+	}
+)
+
+type BasicAuth struct {
+	Realm   string
+	Secrets SecretProvider
+	// Headers used by authenticator. Set to ProxyHeaders to use with
+	// proxy server. When nil, NormalHeaders are used.
+	Headers *Headers
+}
+
+// check that BasicAuth implements AuthenticatorInterface
+var _ = (AuthenticatorInterface)((*BasicAuth)(nil))
+
+/*
+ CheckAuth checks the username/password combination from the
+ request. Returns either an empty string (authentication failed) or
+ the name of the authenticated user.
+
+ Supports MD5, SHA1 and bcrypt password entries.
+*/
+func (a *BasicAuth) CheckAuth(r *http.Request) string {
+	s := strings.SplitN(r.Header.Get(a.Headers.V().Authorization), " ", 2)
+	if len(s) != 2 || s[0] != "Basic" {
+		return ""
+	}
+
+	b, err := base64.StdEncoding.DecodeString(s[1])
+	if err != nil {
+		return ""
+	}
+	pair := strings.SplitN(string(b), ":", 2)
+	if len(pair) != 2 {
+		return ""
+	}
+	user, password := pair[0], pair[1]
+	secret := a.Secrets(user, a.Realm)
+	if secret == "" {
+		return ""
+	}
+	compare := compareFuncs[0].compare
+	for _, cmp := range compareFuncs[1:] {
+		if strings.HasPrefix(secret, cmp.prefix) {
+			compare = cmp.compare
+			break
+		}
+	}
+	if compare([]byte(secret), []byte(password)) != nil {
+		return ""
+	}
+	return user
+}
+
+func compareShaHashAndPassword(hashedPassword, password []byte) error {
+	d := sha1.New()
+	d.Write(password)
+	if subtle.ConstantTimeCompare(hashedPassword[5:], []byte(base64.StdEncoding.EncodeToString(d.Sum(nil)))) != 1 {
+		return errMismatchedHashAndPassword
+	}
+	return nil
+}
+
+func compareMD5HashAndPassword(hashedPassword, password []byte) error {
+	parts := bytes.SplitN(hashedPassword, []byte("$"), 4)
+	if len(parts) != 4 {
+		return errMismatchedHashAndPassword
+	}
+	magic := []byte("$" + string(parts[1]) + "$")
+	salt := parts[2]
+	if subtle.ConstantTimeCompare(hashedPassword, MD5Crypt(password, salt, magic)) != 1 {
+		return errMismatchedHashAndPassword
+	}
+	return nil
+}
+
+/*
+ RequireAuth is an http.Handler for BasicAuth which initiates the
+ authentication process (or requires reauthentication).
+*/
+func (a *BasicAuth) RequireAuth(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set(contentType, a.Headers.V().UnauthContentType)
+	w.Header().Set(a.Headers.V().Authenticate, `Basic realm="`+a.Realm+`"`)
+	w.WriteHeader(a.Headers.V().UnauthCode)
+	w.Write([]byte(a.Headers.V().UnauthResponse))
+}
+
+/*
+ Wrap returns an http.HandlerFunc which wraps an
+ AuthenticatedHandlerFunc. The returned handler checks the
+ authentication and either sends back the required authentication
+ headers, or calls the wrapped function with the authenticated
+ username in the AuthenticatedRequest.
+*/
+func (a *BasicAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if username := a.CheckAuth(r); username == "" {
+			a.RequireAuth(w, r)
+		} else {
+			ar := &AuthenticatedRequest{Request: *r, Username: username}
+			wrapped(w, ar)
+		}
+	}
+}
+
+// NewContext returns a context carrying authentication information for the request.
+func (a *BasicAuth) NewContext(ctx context.Context, r *http.Request) context.Context { + info := &Info{Username: a.CheckAuth(r), ResponseHeaders: make(http.Header)} + info.Authenticated = (info.Username != "") + if !info.Authenticated { + info.ResponseHeaders.Set(a.Headers.V().Authenticate, `Basic realm="`+a.Realm+`"`) + } + return context.WithValue(ctx, infoKey, info) +} + +func NewBasicAuthenticator(realm string, secrets SecretProvider) *BasicAuth { + return &BasicAuth{Realm: realm, Secrets: secrets} +} diff --git a/vendor/github.com/abbot/go-http-auth/digest.go b/vendor/github.com/abbot/go-http-auth/digest.go new file mode 100644 index 00000000000..21b09334cf4 --- /dev/null +++ b/vendor/github.com/abbot/go-http-auth/digest.go @@ -0,0 +1,274 @@ +package auth + +import ( + "crypto/subtle" + "fmt" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" +) + +type digest_client struct { + nc uint64 + last_seen int64 +} + +type DigestAuth struct { + Realm string + Opaque string + Secrets SecretProvider + PlainTextSecrets bool + IgnoreNonceCount bool + // Headers used by authenticator. Set to ProxyHeaders to use with + // proxy server. When nil, NormalHeaders are used. + Headers *Headers + + /* + Approximate size of Client's Cache. When actual number of + tracked client nonces exceeds + ClientCacheSize+ClientCacheTolerance, ClientCacheTolerance*2 + older entries are purged. + */ + ClientCacheSize int + ClientCacheTolerance int + + clients map[string]*digest_client + mutex sync.Mutex +} + +// check that DigestAuth implements AuthenticatorInterface +var _ = (AuthenticatorInterface)((*DigestAuth)(nil)) + +type digest_cache_entry struct { + nonce string + last_seen int64 +} + +type digest_cache []digest_cache_entry + +func (c digest_cache) Less(i, j int) bool { + return c[i].last_seen < c[j].last_seen +} + +func (c digest_cache) Len() int { + return len(c) +} + +func (c digest_cache) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +/* + Remove count oldest entries from DigestAuth.clients +*/ +func (a *DigestAuth) Purge(count int) { + entries := make([]digest_cache_entry, 0, len(a.clients)) + for nonce, client := range a.clients { + entries = append(entries, digest_cache_entry{nonce, client.last_seen}) + } + cache := digest_cache(entries) + sort.Sort(cache) + for _, client := range cache[:count] { + delete(a.clients, client.nonce) + } +} + +/* + http.Handler for DigestAuth which initiates the authentication process + (or requires reauthentication). +*/ +func (a *DigestAuth) RequireAuth(w http.ResponseWriter, r *http.Request) { + if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance { + a.Purge(a.ClientCacheTolerance * 2) + } + nonce := RandomKey() + a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()} + w.Header().Set(contentType, a.Headers.V().UnauthContentType) + w.Header().Set(a.Headers.V().Authenticate, + fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`, + a.Realm, nonce, a.Opaque)) + w.WriteHeader(a.Headers.V().UnauthCode) + w.Write([]byte(a.Headers.V().UnauthResponse)) +} + +/* + Parse Authorization header from the http.Request. Returns a map of + auth parameters or nil if the header is not a valid parsable Digest + auth header. 
+*/
+func DigestAuthParams(authorization string) map[string]string {
+	s := strings.SplitN(authorization, " ", 2)
+	if len(s) != 2 || s[0] != "Digest" {
+		return nil
+	}
+
+	return ParsePairs(s[1])
+}
+
+/*
+ CheckAuth checks whether the request contains valid authentication
+ data. Returns a pair of username, authinfo, where username is the
+ name of the authenticated user or an empty string, and authinfo is
+ the contents for the optional Authentication-Info response header.
+*/
+func (da *DigestAuth) CheckAuth(r *http.Request) (username string, authinfo *string) {
+	da.mutex.Lock()
+	defer da.mutex.Unlock()
+	username = ""
+	authinfo = nil
+	auth := DigestAuthParams(r.Header.Get(da.Headers.V().Authorization))
+	if auth == nil {
+		return "", nil
+	}
+	// RFC 2617 Section 3.2.1 specifies that an unset value of the
+	// algorithm directive in the WWW-Authenticate response header
+	// should be treated as "MD5". According to Section 3.2.2, the
+	// "algorithm" value in a subsequent request Authorization header
+	// must be set to whatever was supplied in the WWW-Authenticate
+	// response header. This implementation always returns an algorithm
+	// in the WWW-Authenticate header; however, there seem to be broken
+	// clients in the wild which do not set the algorithm. Assume an
+	// unset algorithm in the Authorization header to be equal to MD5.
+	if _, ok := auth["algorithm"]; !ok {
+		auth["algorithm"] = "MD5"
+	}
+	if da.Opaque != auth["opaque"] || auth["algorithm"] != "MD5" || auth["qop"] != "auth" {
+		return "", nil
+	}
+
+	// Check if the requested URI matches the auth header.
+	if r.RequestURI != auth["uri"] {
+		// We allow auth["uri"] to be a full path prefix of request-uri
+		// for some reason lost in history, which is probably wrong, but
+		// used to be like that for quite some time
+		// (https://tools.ietf.org/html/rfc2617#section-3.2.2 explicitly
+		// says that auth["uri"] is the request-uri).
+		//
+		// TODO: make an option to allow only strict checking.
+		switch u, err := url.Parse(auth["uri"]); {
+		case err != nil:
+			return "", nil
+		case r.URL == nil:
+			return "", nil
+		case len(u.Path) > len(r.URL.Path):
+			return "", nil
+		case !strings.HasPrefix(r.URL.Path, u.Path):
+			return "", nil
+		}
+	}
+
+	HA1 := da.Secrets(auth["username"], da.Realm)
+	if da.PlainTextSecrets {
+		HA1 = H(auth["username"] + ":" + da.Realm + ":" + HA1)
+	}
+	HA2 := H(r.Method + ":" + auth["uri"])
+	KD := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], HA2}, ":"))
+
+	if subtle.ConstantTimeCompare([]byte(KD), []byte(auth["response"])) != 1 {
+		return "", nil
+	}
+
+	// At this point crypto checks are completed and validated.
+	// Now check if the session is valid.
+
+	nc, err := strconv.ParseUint(auth["nc"], 16, 64)
+	if err != nil {
+		return "", nil
+	}
+
+	if client, ok := da.clients[auth["nonce"]]; !ok {
+		return "", nil
+	} else {
+		if client.nc != 0 && client.nc >= nc && !da.IgnoreNonceCount {
+			return "", nil
+		}
+		client.nc = nc
+		client.last_seen = time.Now().UnixNano()
+	}
+
+	resp_HA2 := H(":" + auth["uri"])
+	rspauth := H(strings.Join([]string{HA1, auth["nonce"], auth["nc"], auth["cnonce"], auth["qop"], resp_HA2}, ":"))
+
+	info := fmt.Sprintf(`qop="auth", rspauth="%s", cnonce="%s", nc="%s"`, rspauth, auth["cnonce"], auth["nc"])
+	return auth["username"], &info
+}
+
+/*
+ Default values for ClientCacheSize and ClientCacheTolerance for DigestAuth
+*/
+const DefaultClientCacheSize = 1000
+const DefaultClientCacheTolerance = 100
+
+/*
+ Wrap returns an http.HandlerFunc which wraps an
+ AuthenticatedHandlerFunc with HTTP Digest authentication checks. The
+ wrapped function is called only after the request has been
+ authenticated; otherwise the handler initiates the authentication
+ procedure.
+*/
+func (a *DigestAuth) Wrap(wrapped AuthenticatedHandlerFunc) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if username, authinfo := a.CheckAuth(r); username == "" {
+			a.RequireAuth(w, r)
+		} else {
+			ar := &AuthenticatedRequest{Request: *r, Username: username}
+			if authinfo != nil {
+				w.Header().Set(a.Headers.V().AuthInfo, *authinfo)
+			}
+			wrapped(w, ar)
+		}
+	}
+}
+
+/*
+ JustCheck returns a function which converts an http.HandlerFunc into
+ an http.HandlerFunc which requires authentication. The username is
+ passed as an extra X-Authenticated-Username header.
+*/
+func (a *DigestAuth) JustCheck(wrapped http.HandlerFunc) http.HandlerFunc {
+	return a.Wrap(func(w http.ResponseWriter, ar *AuthenticatedRequest) {
+		ar.Header.Set(AuthUsernameHeader, ar.Username)
+		wrapped(w, &ar.Request)
+	})
+}
+
+// NewContext returns a context carrying authentication information for the request.
+func (a *DigestAuth) NewContext(ctx context.Context, r *http.Request) context.Context {
+	username, authinfo := a.CheckAuth(r)
+	info := &Info{Username: username, ResponseHeaders: make(http.Header)}
+	if username != "" {
+		info.Authenticated = true
+		info.ResponseHeaders.Set(a.Headers.V().AuthInfo, *authinfo)
+	} else {
+		// Return back the digest WWW-Authenticate header.
+		if len(a.clients) > a.ClientCacheSize+a.ClientCacheTolerance {
+			a.Purge(a.ClientCacheTolerance * 2)
+		}
+		nonce := RandomKey()
+		a.clients[nonce] = &digest_client{nc: 0, last_seen: time.Now().UnixNano()}
+		info.ResponseHeaders.Set(a.Headers.V().Authenticate,
+			fmt.Sprintf(`Digest realm="%s", nonce="%s", opaque="%s", algorithm="MD5", qop="auth"`,
+				a.Realm, nonce, a.Opaque))
+	}
+	return context.WithValue(ctx, infoKey, info)
+}
+
+/*
+ NewDigestAuthenticator creates a DigestAuth for the given
+ authentication realm. The secrets SecretProvider must return HA1
+ digests for the same realm.
+*/
+func NewDigestAuthenticator(realm string, secrets SecretProvider) *DigestAuth {
+	da := &DigestAuth{
+		Opaque:               RandomKey(),
+		Realm:                realm,
+		Secrets:              secrets,
+		PlainTextSecrets:     false,
+		ClientCacheSize:      DefaultClientCacheSize,
+		ClientCacheTolerance: DefaultClientCacheTolerance,
+		clients:              map[string]*digest_client{}}
+	return da
+}
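Digest usage mirrors the basic flow. A minimal sketch with an htdigest-backed provider; the file path is an illustrative assumption, and the realm passed to NewDigestAuthenticator must match the realm stored in the htdigest file (the test.htdigest fixture in this package uses "example.com"):

    package main

    import (
        "fmt"
        "net/http"

        auth "github.com/abbot/go-http-auth"
    )

    func main() {
        // Illustrative path; point this at a real htdigest file.
        secrets := auth.HtdigestFileProvider("/tmp/test.htdigest")
        authenticator := auth.NewDigestAuthenticator("example.com", secrets)
        http.HandleFunc("/", authenticator.Wrap(func(w http.ResponseWriter, r *auth.AuthenticatedRequest) {
            fmt.Fprintf(w, "Hello, %s!", r.Username)
        }))
        http.ListenAndServe(":8080", nil)
    }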
diff --git a/vendor/github.com/abbot/go-http-auth/md5crypt.go b/vendor/github.com/abbot/go-http-auth/md5crypt.go
new file mode 100644
index 00000000000..a7a031c414c
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/md5crypt.go
@@ -0,0 +1,92 @@
+package auth
+
+import "crypto/md5"
+import "strings"
+
+const itoa64 = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+
+var md5_crypt_swaps = [16]int{12, 6, 0, 13, 7, 1, 14, 8, 2, 15, 9, 3, 5, 10, 4, 11}
+
+type MD5Entry struct {
+	Magic, Salt, Hash []byte
+}
+
+func NewMD5Entry(e string) *MD5Entry {
+	parts := strings.SplitN(e, "$", 4)
+	if len(parts) != 4 {
+		return nil
+	}
+	return &MD5Entry{
+		Magic: []byte("$" + parts[1] + "$"),
+		Salt:  []byte(parts[2]),
+		Hash:  []byte(parts[3]),
+	}
+}
+
+/*
+ MD5Crypt is an implementation of the MD5 password crypt algorithm.
+*/
+func MD5Crypt(password, salt, magic []byte) []byte {
+	d := md5.New()
+
+	d.Write(password)
+	d.Write(magic)
+	d.Write(salt)
+
+	d2 := md5.New()
+	d2.Write(password)
+	d2.Write(salt)
+	d2.Write(password)
+
+	for i, mixin := 0, d2.Sum(nil); i < len(password); i++ {
+		d.Write([]byte{mixin[i%16]})
+	}
+
+	for i := len(password); i != 0; i >>= 1 {
+		if i&1 == 0 {
+			d.Write([]byte{password[0]})
+		} else {
+			d.Write([]byte{0})
+		}
+	}
+
+	final := d.Sum(nil)
+
+	for i := 0; i < 1000; i++ {
+		d2 := md5.New()
+		if i&1 == 0 {
+			d2.Write(final)
+		} else {
+			d2.Write(password)
+		}
+
+		if i%3 != 0 {
+			d2.Write(salt)
+		}
+
+		if i%7 != 0 {
+			d2.Write(password)
+		}
+
+		if i&1 == 0 {
+			d2.Write(password)
+		} else {
+			d2.Write(final)
+		}
+		final = d2.Sum(nil)
+	}
+
+	result := make([]byte, 0, 22)
+	v := uint(0)
+	bits := uint(0)
+	for _, i := range md5_crypt_swaps {
+		v |= (uint(final[i]) << bits)
+		for bits = bits + 8; bits > 6; bits -= 6 {
+			result = append(result, itoa64[v&0x3f])
+			v >>= 6
+		}
+	}
+	result = append(result, itoa64[v&0x3f])
+
+	return append(append(append(magic, salt...), '$'), result...)
+}
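To see how these pieces fit together, a password can be verified against an existing MD5-crypt entry by recomputing the hash with the entry's own salt and magic and comparing in constant time, which is exactly what compareMD5HashAndPassword in basic.go does. A sketch; checkMD5 is a hypothetical helper name:

    import (
        "crypto/subtle"

        auth "github.com/abbot/go-http-auth"
    )

    // checkMD5 reports whether password matches an MD5-crypt secret such
    // as an "$apr1$..." htpasswd entry.
    func checkMD5(secret, password string) bool {
        e := auth.NewMD5Entry(secret)
        if e == nil {
            return false // malformed entry
        }
        computed := auth.MD5Crypt([]byte(password), e.Salt, e.Magic)
        return subtle.ConstantTimeCompare([]byte(secret), computed) == 1
    }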
diff --git a/vendor/github.com/abbot/go-http-auth/misc.go b/vendor/github.com/abbot/go-http-auth/misc.go
new file mode 100644
index 00000000000..4536ce67356
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/misc.go
@@ -0,0 +1,141 @@
+package auth
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"strings"
+)
+
+// RandomKey returns a random 16-character string drawn from the
+// base64 alphabet (12 random bytes, base64-encoded).
+func RandomKey() string {
+	k := make([]byte, 12)
+	for bytes := 0; bytes < len(k); {
+		n, err := rand.Read(k[bytes:])
+		if err != nil {
+			panic("rand.Read() failed")
+		}
+		bytes += n
+	}
+	return base64.StdEncoding.EncodeToString(k)
+}
+
+// H is the H function for the MD5 algorithm; it returns a lower-case
+// hex MD5 digest of data.
+func H(data string) string {
+	digest := md5.New()
+	digest.Write([]byte(data))
+	return fmt.Sprintf("%x", digest.Sum(nil))
+}
+
+// ParseList parses a comma-separated list of values as described by
+// RFC 2068 and returns the list elements.
+//
+// Lifted from https://code.google.com/p/gorilla/source/browse/http/parser/parser.go
+// which was ported from urllib2.parse_http_list, from the Python
+// standard library.
+func ParseList(value string) []string {
+	var list []string
+	var escape, quote bool
+	b := new(bytes.Buffer)
+	for _, r := range value {
+		switch {
+		case escape:
+			b.WriteRune(r)
+			escape = false
+		case quote:
+			if r == '\\' {
+				escape = true
+			} else {
+				if r == '"' {
+					quote = false
+				}
+				b.WriteRune(r)
+			}
+		case r == ',':
+			list = append(list, strings.TrimSpace(b.String()))
+			b.Reset()
+		case r == '"':
+			quote = true
+			b.WriteRune(r)
+		default:
+			b.WriteRune(r)
+		}
+	}
+	// Append last part.
+	if s := b.String(); s != "" {
+		list = append(list, strings.TrimSpace(s))
+	}
+	return list
+}
+
+// ParsePairs extracts key/value pairs from a comma-separated list of
+// values as described by RFC 2068 and returns a map[key]value. The
+// resulting values are unquoted. If a list element doesn't contain a
+// "=", the key is the element itself and the value is an empty
+// string.
+//
+// Lifted from https://code.google.com/p/gorilla/source/browse/http/parser/parser.go
+func ParsePairs(value string) map[string]string {
+	m := make(map[string]string)
+	for _, pair := range ParseList(strings.TrimSpace(value)) {
+		if i := strings.Index(pair, "="); i < 0 {
+			m[pair] = ""
+		} else {
+			v := pair[i+1:]
+			if v[0] == '"' && v[len(v)-1] == '"' {
+				// Unquote it.
+				v = v[1 : len(v)-1]
+			}
+			m[pair[:i]] = v
+		}
+	}
+	return m
+}
+
+// Headers contains header and error codes used by authenticator.
+type Headers struct {
+	Authenticate      string // WWW-Authenticate
+	Authorization     string // Authorization
+	AuthInfo          string // Authentication-Info
+	UnauthCode        int    // 401
+	UnauthContentType string // text/plain
+	UnauthResponse    string // Unauthorized.
+}
+
+// V returns NormalHeaders when h is nil, or h otherwise. This allows
+// uninitialized *Headers values to be used in structs.
+func (h *Headers) V() *Headers {
+	if h == nil {
+		return NormalHeaders
+	}
+	return h
+}
+
+var (
+	// NormalHeaders are the regular Headers used by an HTTP Server for
+	// request authentication.
+	NormalHeaders = &Headers{
+		Authenticate:      "WWW-Authenticate",
+		Authorization:     "Authorization",
+		AuthInfo:          "Authentication-Info",
+		UnauthCode:        http.StatusUnauthorized,
+		UnauthContentType: "text/plain",
+		UnauthResponse:    fmt.Sprintf("%d %s\n", http.StatusUnauthorized, http.StatusText(http.StatusUnauthorized)),
+	}
+
+	// ProxyHeaders are Headers used by an HTTP Proxy server for proxy
+	// access authentication.
+	ProxyHeaders = &Headers{
+		Authenticate:      "Proxy-Authenticate",
+		Authorization:     "Proxy-Authorization",
+		AuthInfo:          "Proxy-Authentication-Info",
+		UnauthCode:        http.StatusProxyAuthRequired,
+		UnauthContentType: "text/plain",
+		UnauthResponse:    fmt.Sprintf("%d %s\n", http.StatusProxyAuthRequired, http.StatusText(http.StatusProxyAuthRequired)),
+	}
+)
+
+const contentType = "Content-Type"
diff --git a/vendor/github.com/abbot/go-http-auth/test.htdigest b/vendor/github.com/abbot/go-http-auth/test.htdigest
new file mode 100644
index 00000000000..6c8c75b4e0d
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/test.htdigest
@@ -0,0 +1 @@
+test:example.com:aa78524fceb0e50fd8ca96dd818b8cf9
diff --git a/vendor/github.com/abbot/go-http-auth/test.htpasswd b/vendor/github.com/abbot/go-http-auth/test.htpasswd
new file mode 100644
index 00000000000..4844a3ccb5e
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/test.htpasswd
@@ -0,0 +1,4 @@
+test:{SHA}qvTGHdzF6KLavt4PO0gs2a6pQ00=
+test2:$apr1$a0j62R97$mYqFkloXH0/UOaUnAiV2b0
+test16:$apr1$JI4wh3am$AmhephVqLTUyAVpFQeHZC0
+test3:$2y$05$ih3C91zUBSTFcAh2mQnZYuob0UOZVEf16wl/ukgjDhjvj.xgM1WwS
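The Headers indirection above is what lets the same authenticators serve as proxy authenticators: pointing Headers at ProxyHeaders switches the exchange from 401/WWW-Authenticate to 407/Proxy-Authenticate. A sketch, with an illustrative realm and file path:

    // A basic authenticator that speaks the proxy-auth dialect.
    proxyAuth := &auth.BasicAuth{
        Realm:   "example.com",                                   // illustrative realm
        Secrets: auth.HtpasswdFileProvider("/tmp/test.htpasswd"), // illustrative path
        Headers: auth.ProxyHeaders,
    }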
diff --git a/vendor/github.com/abbot/go-http-auth/users.go b/vendor/github.com/abbot/go-http-auth/users.go
new file mode 100644
index 00000000000..377181243c8
--- /dev/null
+++ b/vendor/github.com/abbot/go-http-auth/users.go
@@ -0,0 +1,154 @@
+package auth
+
+import (
+	"encoding/csv"
+	"os"
+	"sync"
+)
+
+/*
+ SecretProvider is used by authenticators. It takes a user name and
+ realm as arguments and returns the secret required for
+ authentication (an HA1 digest for digest authentication, a properly
+ encrypted password for basic).
+
+ Returning an empty string means failing the authentication.
+*/
+type SecretProvider func(user, realm string) string
+
+/*
+ File holds the common state for file auto-reloading.
+*/
+type File struct {
+	Path string
+	Info os.FileInfo
+	/* must be set by embedding types during initialization */
+	Reload func()
+	mu     sync.Mutex
+}
+
+func (f *File) ReloadIfNeeded() {
+	info, err := os.Stat(f.Path)
+	if err != nil {
+		panic(err)
+	}
+	f.mu.Lock()
+	defer f.mu.Unlock()
+	if f.Info == nil || f.Info.ModTime() != info.ModTime() {
+		f.Info = info
+		f.Reload()
+	}
+}
+
+/*
+ HtdigestFile is the structure used for htdigest file
+ authentication. Users maps realms to maps of users to their HA1
+ digests.
+*/
+type HtdigestFile struct {
+	File
+	Users map[string]map[string]string
+	mu    sync.RWMutex
+}
+
+func reload_htdigest(hf *HtdigestFile) {
+	r, err := os.Open(hf.Path)
+	if err != nil {
+		panic(err)
+	}
+	csv_reader := csv.NewReader(r)
+	csv_reader.Comma = ':'
+	csv_reader.Comment = '#'
+	csv_reader.TrimLeadingSpace = true
+
+	records, err := csv_reader.ReadAll()
+	if err != nil {
+		panic(err)
+	}
+
+	hf.mu.Lock()
+	defer hf.mu.Unlock()
+	hf.Users = make(map[string]map[string]string)
+	for _, record := range records {
+		_, exists := hf.Users[record[1]]
+		if !exists {
+			hf.Users[record[1]] = make(map[string]string)
+		}
+		hf.Users[record[1]][record[0]] = record[2]
+	}
+}
+
+/*
+ HtdigestFileProvider is a SecretProvider implementation based on
+ htdigest-formatted files. It will reload the htdigest file on
+ changes and panic on syntax errors in htdigest files.
+*/
+func HtdigestFileProvider(filename string) SecretProvider {
+	hf := &HtdigestFile{File: File{Path: filename}}
+	hf.Reload = func() { reload_htdigest(hf) }
+	return func(user, realm string) string {
+		hf.ReloadIfNeeded()
+		hf.mu.RLock()
+		defer hf.mu.RUnlock()
+		_, exists := hf.Users[realm]
+		if !exists {
+			return ""
+		}
+		digest, exists := hf.Users[realm][user]
+		if !exists {
+			return ""
+		}
+		return digest
+	}
+}
+
+/*
+ HtpasswdFile is the structure used for htpasswd file
+ authentication. Users maps users to their salted, encrypted
+ passwords.
+*/
+type HtpasswdFile struct {
+	File
+	Users map[string]string
+	mu    sync.RWMutex
+}
+
+func reload_htpasswd(h *HtpasswdFile) {
+	r, err := os.Open(h.Path)
+	if err != nil {
+		panic(err)
+	}
+	csv_reader := csv.NewReader(r)
+	csv_reader.Comma = ':'
+	csv_reader.Comment = '#'
+	csv_reader.TrimLeadingSpace = true
+
+	records, err := csv_reader.ReadAll()
+	if err != nil {
+		panic(err)
+	}
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	h.Users = make(map[string]string)
+	for _, record := range records {
+		h.Users[record[0]] = record[1]
+	}
+}
+
+/*
+ HtpasswdFileProvider is a SecretProvider implementation based on
+ htpasswd-formatted files. It will reload the htpasswd file on
+ changes and panic on syntax errors in htpasswd files. The realm
+ argument of the SecretProvider is ignored.
+*/
+func HtpasswdFileProvider(filename string) SecretProvider {
+	h := &HtpasswdFile{File: File{Path: filename}}
+	h.Reload = func() { reload_htpasswd(h) }
+	return func(user, realm string) string {
+		h.ReloadIfNeeded()
+		h.mu.RLock()
+		password, exists := h.Users[user]
+		h.mu.RUnlock()
+		if !exists {
+			return ""
+		}
+		return password
+	}
+}
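Because SecretProvider is a plain function type, secrets need not come from files at all. A minimal in-memory provider might look like the following sketch; mapProvider is a hypothetical name, and the map values must already be hashed in a supported format ({SHA}, $apr1$ or bcrypt entries for basic auth, HA1 digests for digest auth):

    // mapProvider serves secrets from memory; returning "" rejects the user.
    func mapProvider(users map[string]string) auth.SecretProvider {
        return func(user, realm string) string {
            return users[user]
        }
    }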
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 12a33149970..639ba763097 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
 package aws
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.25.2"
+const goModuleVersion = "1.26.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
new file mode 100644
index 00000000000..8c78364105b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
@@ -0,0 +1,20 @@
+package ratelimit
+
+import "context"
+
+// None implements a no-op rate limiter which effectively disables client-side
+// rate limiting (also known as "retry quotas").
+//
+// GetToken does nothing and always returns a nil error. The returned
+// token-release function does nothing, and always returns a nil error.
+//
+// AddTokens does nothing and always returns a nil error.
+var None = &none{}
+
+type none struct{}
+
+func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) {
+	return func() error { return nil }, nil
+}
+
+func (*none) AddTokens(v uint) error { return nil }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
index 25abffc8128..d5ea93222ed 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -123,6 +123,17 @@ type StandardOptions struct {
 
 	// Provides the rate limiting strategy for rate limiting attempt retries
 	// across all attempts the retryer is being used with.
+	//
+	// A RateLimiter operates as a token bucket with a set capacity, where
+	// attempt failure events consume tokens. A retry attempt that attempts to
+	// consume more tokens than what's available results in operation failure.
+	// The default implementation is parameterized as follows:
+	//   - a capacity of 500 (DefaultRetryRateTokens)
+	//   - a retry caused by a timeout costs 10 tokens (DefaultRetryTimeoutCost)
+	//   - a retry caused by other errors costs 5 tokens (DefaultRetryCost)
+	//   - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement)
+	//
+	// You can disable rate limiting by setting this field to ratelimit.None.
 	RateLimiter RateLimiter
 
 	// The cost to deduct from the RateLimiter's token bucket per retry.
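The new ratelimit.None value plugs into StandardOptions.RateLimiter as the comment above describes. A hedged sketch of disabling client-side retry quotas when loading AWS configuration, using the standard config/retry wiring:

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go-v2/aws"
        "github.com/aws/aws-sdk-go-v2/aws/ratelimit"
        "github.com/aws/aws-sdk-go-v2/aws/retry"
        "github.com/aws/aws-sdk-go-v2/config"
    )

    func main() {
        // Disable client-side rate limiting ("retry quotas") while keeping
        // the standard retryer's backoff and max-attempts behavior.
        cfg, err := config.LoadDefaultConfig(context.TODO(),
            config.WithRetryer(func() aws.Retryer {
                return retry.NewStandard(func(o *retry.StandardOptions) {
                    o.RateLimiter = ratelimit.None
                })
            }),
        )
        if err != nil {
            log.Fatal(err)
        }
        _ = cfg // hand cfg to a service client as usual
    }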
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index d15e8210a9c..e5977ad8b51 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,20 @@
+# v1.27.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.27.6 (2024-03-05)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index df00d36af02..10ba4c03aeb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
 package config
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.27.6"
+const goModuleVersion = "1.27.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 983d17a4a6c..151fc48769b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,20 @@
+# v1.17.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.17.6 (2024-03-05)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index 5ce0a471016..40b4fdefbac 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
 package credentials
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.6"
+const goModuleVersion = "1.17.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/CHANGELOG.md
index 2aa53c74808..ee2dffed1d6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/CHANGELOG.md
@@ -1,3 +1,24 @@
+# v1.13.13 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2024-03-25)
+
+* **Bug Fix**: Removes some duplicated reflection-based calls in the marshaler.
+
+# v1.13.11 (2024-03-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.10 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.13.8 (2024-03-06)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/encode.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/encode.go
index f8b2246c894..f62000a68f0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/encode.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/encode.go
@@ -714,11 +714,6 @@ func (e *Encoder) encodeScalar(v reflect.Value, fieldTag tag) (types.AttributeVa
 }
 
 func (e *Encoder) encodeNumber(v reflect.Value) (types.AttributeValue, error) {
-	if av, err := tryMarshaler(v); err != nil {
-		return nil, err
-	} else if av != nil {
-		return av, nil
-	}
 
 	var out string
 	switch v.Kind() {
@@ -742,11 +737,6 @@ func (e *Encoder) encodeNumber(v reflect.Value) (types.AttributeValue, error) {
 }
 
 func (e *Encoder) encodeString(v reflect.Value) (types.AttributeValue, error) {
-	if av, err := tryMarshaler(v); err != nil {
-		return nil, err
-	} else if av != nil {
-		return av, nil
-	}
 
 	switch v.Kind() {
 	case reflect.String:
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/go_module_metadata.go
index 842156f9190..d066e1c2dc6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue/go_module_metadata.go
@@ -3,4 +3,4 @@
 package attributevalue
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.8"
+const goModuleVersion = "1.13.13"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index 2ba9a260bae..3807833dd43 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,20 @@
+# v1.16.1 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2024-03-21)
+
+* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls.
+
+# v1.15.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.15.2 (2024-02-23)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
index 46e144d9363..3f4a10e2c16 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
@@ -185,6 +185,10 @@ type Options struct {
 	// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
 	EnableFallback aws.Ternary
 
+	// By default, all IMDS client operations enforce a 5-second timeout. You
+	// can disable that behavior with this setting.
+	DisableDefaultTimeout bool
+
 	// provides the caching of API tokens used for operation calls. If unset,
 	// the API token will not be retrieved for the operation.
 	tokenProvider *tokenProvider
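A sketch of the new switch in use; imds.New, Options and GetMetadata are the package's existing API, and the 30-second deadline is an illustrative choice:

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        "github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
    )

    func main() {
        // Opt out of the built-in 5-second timeout and rely on a
        // caller-supplied context deadline instead.
        client := imds.New(imds.Options{DisableDefaultTimeout: true})

        ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
        defer cancel()

        out, err := client.GetMetadata(ctx, &imds.GetMetadataInput{Path: "instance-id"})
        if err != nil {
            log.Fatal(err)
        }
        defer out.Content.Close()
        fmt.Println("fetched instance-id metadata")
    }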
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
index bacdb5d21f2..d5765c36b17 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -3,8 +3,9 @@
 //
 // All Client operation calls have a default timeout. If the operation is not
 // completed before this timeout expires, the operation will be canceled. This
-// timeout can be overridden by providing Context with a timeout or deadline
-// with calling the client's operations.
+// timeout can be overridden through the following:
+//   - Set the options flag DisableDefaultTimeout
+//   - Provide a Context with a timeout or deadline when calling the client's operations.
 //
 // See the EC2 IMDS user guide for more information on using the API.
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 8010ded7b62..5642306f87b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.2" +const goModuleVersion = "1.16.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go index fc948c27d89..90cf4aeb3df 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go @@ -56,6 +56,7 @@ func addRequestMiddleware(stack *middleware.Stack, // Operation timeout err = stack.Initialize.Add(&operationTimeout{ + Disabled: options.DisableDefaultTimeout, DefaultTimeout: defaultOperationTimeout, }, middleware.Before) if err != nil { @@ -260,6 +261,7 @@ const ( // Otherwise the timeout cleanup will race the resource being consumed // upstream. type operationTimeout struct { + Disabled bool DefaultTimeout time.Duration } @@ -270,6 +272,10 @@ func (m *operationTimeout) HandleInitialize( ) ( output middleware.InitializeOutput, metadata middleware.Metadata, err error, ) { + if m.Disabled { + return next.HandleInitialize(ctx, input) + } + if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 { var cancelFn func() ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index b62d57cb504..72e196dd9ea 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.3.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.4 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.3 (2024-03-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.2 (2024-02-23) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index a99e10d8a96..faf71cac3be 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.2" +const goModuleVersion = "1.3.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index b95cd39f422..6f6dafa8d19 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,16 @@ +# v2.6.5 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.6.4 (2024-03-18) + +* 
**Dependency Update**: Updated to the latest SDK module versions + +# v2.6.3 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + # v2.6.2 (2024-02-23) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index 833b9115753..279816314e9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.6.2" +const goModuleVersion = "2.6.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md index 5fededf313b..0c2a6379356 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.31.1 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.31.0 (2024-03-20) + +* **Feature**: This release introduces 3 new APIs ('GetResourcePolicy', 'PutResourcePolicy' and 'DeleteResourcePolicy') and modifies the existing 'CreateTable' API for the resource-based policy support. It also modifies several APIs to accept a 'TableArn' for the 'TableName' parameter. + +# v1.30.5 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.30.4 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.30.3 (2024-03-06) * **Documentation**: Doc only updates for DynamoDB documentation diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go index cbc116bd7b7..bd39f8d3447 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go @@ -68,10 +68,10 @@ func (c *Client) BatchGetItem(ctx context.Context, params *BatchGetItemInput, op // Represents the input of a BatchGetItem operation. type BatchGetItemInput struct { - // A map of one or more table names and, for each table, a map that describes one - // or more items to retrieve from that table. Each table name can be used only once - // per BatchGetItem request. Each element in the map of items to retrieve consists - // of the following: + // A map of one or more table names or table ARNs and, for each table, a map that + // describes one or more items to retrieve from that table. Each table name or ARN + // can be used only once per BatchGetItem request. Each element in the map of + // items to retrieve consists of the following: // - ConsistentRead - If true , a strongly consistent read is used; if false (the // default), an eventually consistent read is used. // - ExpressionAttributeNames - One or more substitution tokens for attribute @@ -138,9 +138,9 @@ type BatchGetItemOutput struct { // - CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []types.ConsumedCapacity - // A map of table name to a list of items. 
Each object in Responses consists of a - // table name, along with a map of attribute data consisting of the data type and - // attribute value. + // A map of table name or table ARN to a list of items. Each object in Responses + // consists of a table name or ARN, along with a map of attribute data consisting + // of the data type and attribute value. Responses map[string][]map[string]types.AttributeValue // A map of tables and their respective keys that were not processed with the diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go index cbb7611fe53..cae206a0a91 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go @@ -86,9 +86,9 @@ func (c *Client) BatchWriteItem(ctx context.Context, params *BatchWriteItemInput // Represents the input of a BatchWriteItem operation. type BatchWriteItemInput struct { - // A map of one or more table names and, for each table, a list of operations to - // be performed ( DeleteRequest or PutRequest ). Each element in the map consists - // of the following: + // A map of one or more table names or table ARNs and, for each table, a list of + // operations to be performed ( DeleteRequest or PutRequest ). Each element in the + // map consists of the following: // - DeleteRequest - Perform a DeleteItem operation on the specified item. The // item to be deleted is identified by a Key subelement: // - Key - A map of primary key attribute values that uniquely identify the item. @@ -159,8 +159,8 @@ type BatchWriteItemOutput struct { // UnprocessedItems value is in the same form as RequestItems , so you can provide // this value directly to a subsequent BatchWriteItem operation. For more // information, see RequestItems in the Request Parameters section. Each - // UnprocessedItems entry consists of a table name and, for that table, a list of - // operations to perform ( DeleteRequest or PutRequest ). + // UnprocessedItems entry consists of a table name or table ARN and, for that + // table, a list of operations to perform ( DeleteRequest or PutRequest ). // - DeleteRequest - Perform a DeleteItem operation on the specified item. The // item to be deleted is identified by a Key subelement: // - Key - A map of primary key attribute values that uniquely identify the item. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go index 2700dd68211..19161558e0d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go @@ -52,7 +52,8 @@ type CreateBackupInput struct { // This member is required. BackupName *string - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. // // This member is required. 
TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go index ea32e472470..92975ec6d3a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go @@ -72,7 +72,8 @@ type CreateTableInput struct { // This member is required. KeySchema []types.KeySchemaElement - // The name of the table to create. + // The name of the table to create. You can also provide the Amazon Resource Name + // (ARN) of the table in this parameter. // // This member is required. TableName *string @@ -148,6 +149,17 @@ type CreateTableInput struct { // in the Amazon DynamoDB Developer Guide. ProvisionedThroughput *types.ProvisionedThroughput + // An Amazon Web Services resource-based policy document in JSON format that will + // be attached to the table. When you attach a resource-based policy while creating + // a table, the policy creation is strongly consistent. The maximum size supported + // for a resource-based policy document is 20 KB. DynamoDB counts whitespaces when + // calculating the size of a policy against this limit. You can’t request an + // increase for this limit. For a full list of all considerations that you should + // keep in mind while attaching a resource-based policy, see Resource-based policy + // considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) + // . + ResourcePolicy *string + // Represents the settings used to enable server-side encryption. SSESpecification *types.SSESpecification diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go index b78ba23027d..eab33851345 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go @@ -48,7 +48,8 @@ type DeleteItemInput struct { // This member is required. Key map[string]types.AttributeValue - // The name of the table from which to delete the item. + // The name of the table from which to delete the item. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go new file mode 100644 index 00000000000..cd6720b0861 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go @@ -0,0 +1,212 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Deletes the resource-based policy attached to the resource, which can be a +// table or stream. DeleteResourcePolicy is an idempotent operation; running it +// multiple times on the same resource doesn't result in an error response, unless +// you specify an ExpectedRevisionId , which will then return a +// PolicyNotFoundException . 
To make sure that you don't inadvertently lock +// yourself out of your own resources, the root principal in your Amazon Web +// Services account can perform DeleteResourcePolicy requests, even if your +// resource-based policy explicitly denies the root principal's access. +// DeleteResourcePolicy is an asynchronous operation. If you issue a +// GetResourcePolicy request immediately after running the DeleteResourcePolicy +// request, DynamoDB might still return the deleted policy. This is because the +// policy for your resource might not have been deleted yet. Wait for a few +// seconds, and then try the GetResourcePolicy request again. +func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error) { + if params == nil { + params = &DeleteResourcePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "DeleteResourcePolicy", params, optFns, c.addOperationDeleteResourcePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*DeleteResourcePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type DeleteResourcePolicyInput struct { + + // The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy + // will be removed. The resources you can specify include tables and streams. If + // you remove the policy of a table, it will also remove the permissions for the + // table's indexes defined in that policy document. This is because index + // permissions are defined in the table's policy. + // + // This member is required. + ResourceArn *string + + // A string value that you can use to conditionally delete your policy. When you + // provide an expected revision ID, if the revision ID of the existing policy on + // the resource doesn't match or if there's no policy attached to the resource, the + // request will fail and return a PolicyNotFoundException . + ExpectedRevisionId *string + + noSmithyDocumentSerde +} + +type DeleteResourcePolicyOutput struct { + + // A unique string that represents the revision ID of the policy. If you are + // comparing revision IDs, make sure to always use string comparison logic. This + // value will be empty if you make a request against a resource without a policy. + RevisionId *string + + // Metadata pertaining to the operation's result. 
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationDeleteResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteResourcePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteResourcePolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteResourcePolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpDeleteResourcePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteResourcePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func addOpDeleteResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpDeleteResourcePolicyDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpDeleteResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) 
(internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*DeleteResourcePolicyInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opDeleteResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "DeleteResourcePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go index f3219f40ec5..00d36f83d3c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go @@ -45,7 +45,8 @@ func (c *Client) DeleteTable(ctx context.Context, params *DeleteTableInput, optF // Represents the input of a DeleteTable operation. type DeleteTableInput struct { - // The name of the table to delete. + // The name of the table to delete. You can also provide the Amazon Resource Name + // (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go index 5813d3c9b49..2f60a65d3d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go @@ -39,7 +39,8 @@ func (c *Client) DescribeContinuousBackups(ctx context.Context, params *Describe type DescribeContinuousBackupsInput struct { // Name of the table for which the customer wants to check the continuous backups - // and point in time recovery settings. + // and point in time recovery settings. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go index 6e0f421b7fb..c983b639659 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go @@ -31,7 +31,8 @@ func (c *Client) DescribeContributorInsights(ctx context.Context, params *Descri type DescribeContributorInsightsInput struct { - // The name of the table to describe. + // The name of the table to describe. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // This member is required. 
TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go index 0e21756c50f..1c29302abdd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go @@ -30,7 +30,8 @@ func (c *Client) DescribeKinesisStreamingDestination(ctx context.Context, params type DescribeKinesisStreamingDestinationInput struct { - // The name of the table being described. + // The name of the table being described. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go index 36d723ab5a4..29861d9b443 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go @@ -43,7 +43,8 @@ func (c *Client) DescribeTable(ctx context.Context, params *DescribeTableInput, // Represents the input of a DescribeTable operation. type DescribeTableInput struct { - // The name of the table to describe. + // The name of the table to describe. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go index baca6aadae7..0008f9d1e83 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go @@ -31,7 +31,8 @@ func (c *Client) DescribeTableReplicaAutoScaling(ctx context.Context, params *De type DescribeTableReplicaAutoScalingInput struct { - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go index 3baa36ccccb..3845703de28 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go @@ -30,7 +30,8 @@ func (c *Client) DescribeTimeToLive(ctx context.Context, params *DescribeTimeToL type DescribeTimeToLiveInput struct { - // The name of the table to be described. + // The name of the table to be described. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // This member is required. 
TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go index 70d221bd0a8..1d72751c58f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go @@ -36,7 +36,8 @@ type DisableKinesisStreamingDestinationInput struct { // This member is required. StreamArn *string - // The name of the DynamoDB table. + // The name of the DynamoDB table. You can also provide the Amazon Resource Name + // (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go index ac0469cdf21..7d0ff0d25de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go @@ -38,7 +38,8 @@ type EnableKinesisStreamingDestinationInput struct { // This member is required. StreamArn *string - // The name of the DynamoDB table. + // The name of the DynamoDB table. You can also provide the Amazon Resource Name + // (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go index d6b520a2283..6d5ef8eca93 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go @@ -46,7 +46,8 @@ type GetItemInput struct { // This member is required. Key map[string]types.AttributeValue - // The name of the table containing the requested item. + // The name of the table containing the requested item. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go new file mode 100644 index 00000000000..baadeb90f2e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go @@ -0,0 +1,216 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Returns the resource-based policy document attached to the resource, which can +// be a table or stream, in JSON format. GetResourcePolicy follows an eventually +// consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html) +// model. 
The following list describes the outcomes when you issue the +// GetResourcePolicy request immediately after issuing another request: +// - If you issue a GetResourcePolicy request immediately after a +// PutResourcePolicy request, DynamoDB might return a PolicyNotFoundException . +// - If you issue a GetResourcePolicy request immediately after a +// DeleteResourcePolicy request, DynamoDB might return the policy that was +// present before the deletion request. +// - If you issue a GetResourcePolicy request immediately after a CreateTable +// request, which includes a resource-based policy, DynamoDB might return a +// ResourceNotFoundException or a PolicyNotFoundException . +// +// Because GetResourcePolicy uses an eventually consistent query, the metadata for +// your policy or table might not be available at that moment. Wait for a few +// seconds, and then retry the GetResourcePolicy request. After a GetResourcePolicy +// request returns a policy created using the PutResourcePolicy request, you can +// assume the policy will start getting applied in the authorization of requests to +// the resource. Because this process is eventually consistent, it will take some +// time to apply the policy to all requests to a resource. Policies that you attach +// while creating a table using the CreateTable request will always be applied to +// all requests for that table. +func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) { + if params == nil { + params = &GetResourcePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "GetResourcePolicy", params, optFns, c.addOperationGetResourcePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*GetResourcePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type GetResourcePolicyInput struct { + + // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy is + // attached. The resources you can specify include tables and streams. + // + // This member is required. + ResourceArn *string + + noSmithyDocumentSerde +} + +type GetResourcePolicyOutput struct { + + // The resource-based policy document attached to the resource, which can be a + // table or stream, in JSON format. + Policy *string + + // A unique string that represents the revision ID of the policy. If you are + // comparing revision IDs, make sure to always use string comparison logic. + RevisionId *string + + // Metadata pertaining to the operation's result. 
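Editorial aside, not part of the vendored patch, prompted by the read-after-write caveats documented above: a small retry loop that treats PolicyNotFoundException as the documented eventual-consistency case. The retry count and backoff are arbitrary assumptions.

package example

import (
	"context"
	"errors"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// getPolicyWithRetry retries GetResourcePolicy a few times, since a policy
// written moments ago may not be visible yet.
func getPolicyWithRetry(ctx context.Context, client *dynamodb.Client, arn string) (*dynamodb.GetResourcePolicyOutput, error) {
	var lastErr error
	for attempt := 1; attempt <= 5; attempt++ {
		out, err := client.GetResourcePolicy(ctx, &dynamodb.GetResourcePolicyInput{
			ResourceArn: aws.String(arn),
		})
		if err == nil {
			return out, nil
		}
		var pnf *types.PolicyNotFoundException
		if !errors.As(err, &pnf) {
			return nil, err // only the documented eventual-consistency error is retried
		}
		lastErr = err
		time.Sleep(time.Duration(attempt) * time.Second) // crude linear backoff
	}
	return nil, lastErr
}
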
+ ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationGetResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpGetResourcePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpGetResourcePolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "GetResourcePolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpGetResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpGetResourcePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetResourcePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func addOpGetResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpGetResourcePolicyDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpGetResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { 
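+ // Editorial note: this generated discovery hook mirrors the other DynamoDB
+ // operations. It builds a per-region cache key, returns a cached endpoint
+ // when one exists, and otherwise starts DescribeEndpoints asynchronously,
+ // returning a zero WeightedAddress so the request proceeds against the
+ // regional endpoint in the meantime.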
+ input := getOperationInput(ctx) + in, ok := input.(*GetResourcePolicyInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opGetResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "GetResourcePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go index c910c5886a9..50a5ca3f9d9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go @@ -58,7 +58,8 @@ type ListBackupsInput struct { // Maximum number of backups to return at once. Limit *int32 - // The backups from the table specified by TableName are listed. + // Lists the backups from the table specified in TableName . You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. TableName *string // Only backups created after this time are listed. TimeRangeLowerBound is diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go index 9e3e61770a7..da9825d50bc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go @@ -36,7 +36,8 @@ type ListContributorInsightsInput struct { // A token to for the desired page, if there is one. NextToken *string - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. TableName *string noSmithyDocumentSerde diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go index 598db5e9860..39336006bd7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go @@ -66,7 +66,8 @@ type PutItemInput struct { // This member is required. Item map[string]types.AttributeValue - // The name of the table to contain the item. + // The name of the table to contain the item. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // This member is required. 
TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go new file mode 100644 index 00000000000..064a2682052 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go @@ -0,0 +1,231 @@ +// Code generated by smithy-go-codegen DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" +) + +// Attaches a resource-based policy document to the resource, which can be a table +// or stream. When you attach a resource-based policy using this API, the policy +// application is eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html) +// . PutResourcePolicy is an idempotent operation; running it multiple times on +// the same resource using the same policy document will return the same revision +// ID. If you specify an ExpectedRevisionId which doesn't match the current +// policy's RevisionId , the PolicyNotFoundException will be returned. +// PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy +// request immediately after a PutResourcePolicy request, DynamoDB might return +// your previous policy, if there was one, or return the PolicyNotFoundException . +// This is because GetResourcePolicy uses an eventually consistent query, and the +// metadata for your policy or table might not be available at that moment. Wait +// for a few seconds, and then try the GetResourcePolicy request again. +func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) { + if params == nil { + params = &PutResourcePolicyInput{} + } + + result, metadata, err := c.invokeOperation(ctx, "PutResourcePolicy", params, optFns, c.addOperationPutResourcePolicyMiddlewares) + if err != nil { + return nil, err + } + + out := result.(*PutResourcePolicyOutput) + out.ResultMetadata = metadata + return out, nil +} + +type PutResourcePolicyInput struct { + + // An Amazon Web Services resource-based policy document in JSON format. The + // maximum size supported for a resource-based policy document is 20 KB. DynamoDB + // counts whitespaces when calculating the size of a policy against this limit. For + // a full list of all considerations that you should keep in mind while attaching a + // resource-based policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html) + // . + // + // This member is required. + Policy *string + + // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy + // will be attached. The resources you can specify include tables and streams. You + // can control index permissions using the base table's policy. To specify the same + // permission level for your table and its indexes, you can provide both the table + // and index Amazon Resource Name (ARN)s in the Resource field of a given Statement + // in your policy document. Alternatively, to specify different permissions for + // your table, indexes, or both, you can define multiple Statement fields in your + // policy document. 
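Editorial aside, not part of the vendored patch: a sketch of a conditional attach using the NO_POLICY sentinel described under ExpectedRevisionId just below. The ARN and policy JSON are placeholders.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

// attachPolicyOnce attaches policyJSON only if the resource has no policy yet;
// losing a race to a concurrent writer surfaces as PolicyNotFoundException.
func attachPolicyOnce(ctx context.Context, client *dynamodb.Client, arn, policyJSON string) (string, error) {
	out, err := client.PutResourcePolicy(ctx, &dynamodb.PutResourcePolicyInput{
		ResourceArn:        aws.String(arn),
		Policy:             aws.String(policyJSON),
		ExpectedRevisionId: aws.String("NO_POLICY"), // documented sentinel: put only when no policy exists
	})
	if err != nil {
		return "", err
	}
	return aws.ToString(out.RevisionId), nil
}
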
+ // + // This member is required. + ResourceArn *string + + // Set this parameter to true to confirm that you want to remove your permissions + // to change the policy of this resource in the future. + ConfirmRemoveSelfResourceAccess bool + + // A string value that you can use to conditionally update your policy. You can + // provide the revision ID of your existing policy to make mutating requests + // against that policy. When you provide an expected revision ID, if the revision + // ID of the existing policy on the resource doesn't match or if there's no policy + // attached to the resource, your request will be rejected with a + // PolicyNotFoundException . To conditionally put a policy when no policy exists + // for the resource, specify NO_POLICY for the revision ID. + ExpectedRevisionId *string + + noSmithyDocumentSerde +} + +type PutResourcePolicyOutput struct { + + // A unique string that represents the revision ID of the policy. If you are + // comparing revision IDs, make sure to always use string comparison logic. + RevisionId *string + + // Metadata pertaining to the operation's result. + ResultMetadata middleware.Metadata + + noSmithyDocumentSerde +} + +func (c *Client) addOperationPutResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) { + if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil { + return err + } + err = stack.Serialize.Add(&awsAwsjson10_serializeOpPutResourcePolicy{}, middleware.After) + if err != nil { + return err + } + err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpPutResourcePolicy{}, middleware.After) + if err != nil { + return err + } + if err := addProtocolFinalizerMiddlewares(stack, options, "PutResourcePolicy"); err != nil { + return fmt.Errorf("add protocol finalizers: %v", err) + } + + if err = addlegacyEndpointContextSetter(stack, options); err != nil { + return err + } + if err = addSetLoggerMiddleware(stack, options); err != nil { + return err + } + if err = addClientRequestID(stack); err != nil { + return err + } + if err = addComputeContentLength(stack); err != nil { + return err + } + if err = addResolveEndpointMiddleware(stack, options); err != nil { + return err + } + if err = addComputePayloadSHA256(stack); err != nil { + return err + } + if err = addRetry(stack, options); err != nil { + return err + } + if err = addRawResponseToMetadata(stack); err != nil { + return err + } + if err = addRecordResponseTiming(stack); err != nil { + return err + } + if err = addClientUserAgent(stack, options); err != nil { + return err + } + if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { + return err + } + if err = addOpPutResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil { + return err + } + if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil { + return err + } + if err = addOpPutResourcePolicyValidationMiddleware(stack); err != nil { + return err + } + if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutResourcePolicy(options.Region), middleware.Before); err != nil { + return err + } + if err = addRecursionDetection(stack); err != nil { + return err + } + if err = addRequestIDRetrieverMiddleware(stack); err != nil { + return err + } + if err = addResponseErrorMiddleware(stack); err != nil { + return err + } + if err = addValidateResponseChecksum(stack, options); err != nil { + return err + } + if err = 
addAcceptEncodingGzip(stack, options); err != nil { + return err + } + if err = addRequestResponseLogging(stack, options); err != nil { + return err + } + if err = addDisableHTTPSMiddleware(stack, options); err != nil { + return err + } + return nil +} + +func addOpPutResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error { + return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{ + Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){ + func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) { + opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS + opt.Logger = o.Logger + }, + }, + DiscoverOperation: c.fetchOpPutResourcePolicyDiscoverEndpoint, + EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery, + EndpointDiscoveryRequired: false, + Region: o.Region, + }, "ResolveEndpointV2", middleware.After) +} + +func (c *Client) fetchOpPutResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) { + input := getOperationInput(ctx) + in, ok := input.(*PutResourcePolicyInput) + if !ok { + return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input) + } + _ = in + + identifierMap := make(map[string]string, 0) + identifierMap["sdk#Region"] = region + + key := fmt.Sprintf("DynamoDB.%v", identifierMap) + + if v, ok := c.endpointCache.Get(key); ok { + return v, nil + } + + discoveryOperationInput := &DescribeEndpointsInput{} + + opt := internalEndpointDiscovery.DiscoverEndpointOptions{} + for _, fn := range optFns { + fn(&opt) + } + + go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt) + return internalEndpointDiscovery.WeightedAddress{}, nil +} + +func newServiceMetadataMiddleware_opPutResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata { + return &awsmiddleware.RegisterServiceMetadata{ + Region: region, + ServiceID: ServiceID, + OperationName: "PutResourcePolicy", + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go index 84be7e41468..83805f8d8f1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go @@ -67,7 +67,8 @@ func (c *Client) Query(ctx context.Context, params *QueryInput, optFns ...func(* // Represents the input of a Query operation. type QueryInput struct { - // The name of the table containing the requested items. + // The name of the table containing the requested items. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go index f675cf50f69..4a5387288e1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go @@ -63,8 +63,9 @@ func (c *Client) Scan(ctx context.Context, params *ScanInput, optFns ...func(*Op // Represents the input of a Scan operation. type ScanInput struct { - // The name of the table containing the requested items; or, if you provide - // IndexName , the name of the table to which that index belongs. 
+ // The name of the table containing the requested items or if you provide IndexName + // , the name of the table to which that index belongs. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go index ff25f810bed..8ced17d6b31 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go @@ -43,7 +43,8 @@ type UpdateContinuousBackupsInput struct { // This member is required. PointInTimeRecoverySpecification *types.PointInTimeRecoverySpecification - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go index dd85434816c..403303e7a94 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go @@ -40,7 +40,8 @@ type UpdateContributorInsightsInput struct { // This member is required. ContributorInsightsAction types.ContributorInsightsAction - // The name of the table. + // The name of the table. You can also provide the Amazon Resource Name (ARN) of + // the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go index 3a3552314e7..12b11218b34 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go @@ -46,7 +46,8 @@ type UpdateItemInput struct { // This member is required. Key map[string]types.AttributeValue - // The name of the table containing the item to update. + // The name of the table containing the item to update. You can also provide the + // Amazon Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go index 080df59c225..3d2272ad7f5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go @@ -30,12 +30,13 @@ func (c *Client) UpdateKinesisStreamingDestination(ctx context.Context, params * type UpdateKinesisStreamingDestinationInput struct { - // The ARN for the Kinesis stream input. + // The Amazon Resource Name (ARN) for the Kinesis stream input. // // This member is required. StreamArn *string - // The table name for the Kinesis streaming destination input. + // The table name for the Kinesis streaming destination input. You can also + // provide the ARN of the table in this parameter. // // This member is required. 
TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go index b0cc285bfb8..a51de71d5a2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go @@ -23,8 +23,8 @@ import ( // // UpdateTable is an asynchronous operation; while it's executing, the table // status changes from ACTIVE to UPDATING . While it's UPDATING , you can't issue -// another UpdateTable request on the base table nor any replicas. When the table -// returns to the ACTIVE state, the UpdateTable operation is complete. +// another UpdateTable request. When the table returns to the ACTIVE state, the +// UpdateTable operation is complete. func (c *Client) UpdateTable(ctx context.Context, params *UpdateTableInput, optFns ...func(*Options)) (*UpdateTableOutput, error) { if params == nil { params = &UpdateTableInput{} @@ -43,7 +43,8 @@ func (c *Client) UpdateTable(ctx context.Context, params *UpdateTableInput, optF // Represents the input of an UpdateTable operation. type UpdateTableInput struct { - // The name of the table to be updated. + // The name of the table to be updated. You can also provide the Amazon Resource + // Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go index e66c0543d36..ff84b122e75 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go @@ -31,7 +31,8 @@ func (c *Client) UpdateTableReplicaAutoScaling(ctx context.Context, params *Upda type UpdateTableReplicaAutoScalingInput struct { - // The name of the global table to be updated. + // The name of the global table to be updated. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go index 865d0c898f8..e6f79845416 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go @@ -48,7 +48,8 @@ func (c *Client) UpdateTimeToLive(ctx context.Context, params *UpdateTimeToLiveI // Represents the input of an UpdateTimeToLive operation. type UpdateTimeToLiveInput struct { - // The name of the table to be configured. + // The name of the table to be configured. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // This member is required. 
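Editorial aside, not part of the vendored patch, prompted by the UpdateTable doc change above: since UpdateTable is asynchronous, callers typically block until the table returns to ACTIVE. This sketch assumes the SDK's generated TableExists waiter (which polls DescribeTable); the billing-mode switch is just an example mutation.

package example

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
)

// updateAndWait issues an UpdateTable request, then blocks until the table
// status returns to ACTIVE or the wait times out.
func updateAndWait(ctx context.Context, client *dynamodb.Client, table string) error {
	if _, err := client.UpdateTable(ctx, &dynamodb.UpdateTableInput{
		TableName:   aws.String(table),
		BillingMode: types.BillingModePayPerRequest, // example change
	}); err != nil {
		return err
	}
	waiter := dynamodb.NewTableExistsWaiter(client)
	return waiter.Wait(ctx, &dynamodb.DescribeTableInput{TableName: aws.String(table)}, 5*time.Minute)
}
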
TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go index 6a995f00e82..be3f239b434 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go @@ -980,6 +980,128 @@ func awsAwsjson10_deserializeOpErrorDeleteItem(response *smithyhttp.Response, me } } +type awsAwsjson10_deserializeOpDeleteResourcePolicy struct { +} + +func (*awsAwsjson10_deserializeOpDeleteResourcePolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpDeleteResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorDeleteResourcePolicy(response, &metadata) + } + output := &DeleteResourcePolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentDeleteResourcePolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorDeleteResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return 
awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("PolicyNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson10_deserializeOpDeleteTable struct { } @@ -3276,6 +3398,122 @@ func awsAwsjson10_deserializeOpErrorGetItem(response *smithyhttp.Response, metad } } +type awsAwsjson10_deserializeOpGetResourcePolicy struct { +} + +func (*awsAwsjson10_deserializeOpGetResourcePolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpGetResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorGetResourcePolicy(response, &metadata) + } + output := &GetResourcePolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentGetResourcePolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorGetResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := 
json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("PolicyNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson10_deserializeOpImportTable struct { } @@ -4287,6 +4525,128 @@ func awsAwsjson10_deserializeOpErrorPutItem(response *smithyhttp.Response, metad } } +type awsAwsjson10_deserializeOpPutResourcePolicy struct { +} + +func (*awsAwsjson10_deserializeOpPutResourcePolicy) ID() string { + return "OperationDeserializer" +} + +func (m *awsAwsjson10_deserializeOpPutResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) ( + out middleware.DeserializeOutput, metadata middleware.Metadata, err error, +) { + out, metadata, err = next.HandleDeserialize(ctx, in) + if err != nil { + return out, metadata, err + } + + response, ok := out.RawResponse.(*smithyhttp.Response) + if !ok { + return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)} + } + + if response.StatusCode < 200 || response.StatusCode >= 300 { + return out, metadata, awsAwsjson10_deserializeOpErrorPutResourcePolicy(response, &metadata) + } + output := &PutResourcePolicyOutput{} + out.Result = output + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(response.Body, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + err = awsAwsjson10_deserializeOpDocumentPutResourcePolicyOutput(&output, shape) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return out, metadata, err + } + + return out, metadata, err +} + +func awsAwsjson10_deserializeOpErrorPutResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error { + var errorBuffer bytes.Buffer + if _, err := io.Copy(&errorBuffer, response.Body); err != nil { + return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error 
response body, %w", err)} + } + errorBody := bytes.NewReader(errorBuffer.Bytes()) + + errorCode := "UnknownError" + errorMessage := errorCode + + headerCode := response.Header.Get("X-Amzn-ErrorType") + + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + bodyInfo, err := getProtocolErrorInfo(decoder) + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok { + errorCode = restjson.SanitizeErrorCode(typ) + } + if len(bodyInfo.Message) != 0 { + errorMessage = bodyInfo.Message + } + switch { + case strings.EqualFold("InternalServerError", errorCode): + return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody) + + case strings.EqualFold("InvalidEndpointException", errorCode): + return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody) + + case strings.EqualFold("LimitExceededException", errorCode): + return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody) + + case strings.EqualFold("PolicyNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody) + + case strings.EqualFold("ResourceInUseException", errorCode): + return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody) + + case strings.EqualFold("ResourceNotFoundException", errorCode): + return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody) + + default: + genericError := &smithy.GenericAPIError{ + Code: errorCode, + Message: errorMessage, + } + return genericError + + } +} + type awsAwsjson10_deserializeOpQuery struct { } @@ -6996,6 +7356,41 @@ func awsAwsjson10_deserializeErrorPointInTimeRecoveryUnavailableException(respon return output } +func awsAwsjson10_deserializeErrorPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error { + var buff [1024]byte + ringBuffer := smithyio.NewRingBuffer(buff[:]) + + body := io.TeeReader(errorBody, ringBuffer) + decoder := json.NewDecoder(body) + decoder.UseNumber() + var shape interface{} + if err := decoder.Decode(&shape); err != nil && err != io.EOF { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + output := &types.PolicyNotFoundException{} + err := awsAwsjson10_deserializeDocumentPolicyNotFoundException(&output, shape) + + if err != nil { + var snapshot bytes.Buffer + io.Copy(&snapshot, ringBuffer) + err = &smithy.DeserializationError{ + Err: fmt.Errorf("failed to decode response body, %w", err), + Snapshot: snapshot.Bytes(), + } + return err + } + + errorBody.Seek(0, io.SeekStart) + return output +} + func awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error { var buff [1024]byte ringBuffer := smithyio.NewRingBuffer(buff[:]) @@ -9116,7 +9511,7 @@ func awsAwsjson10_deserializeDocumentConsumedCapacity(v **types.ConsumedCapacity if value != nil { jtv, ok := value.(string) if !ok { - return fmt.Errorf("expected TableName to be of type string, got %T instead", value) + 
return fmt.Errorf("expected TableArn to be of type string, got %T instead", value) } sv.TableName = ptr.String(jtv) } @@ -12737,6 +13132,46 @@ func awsAwsjson10_deserializeDocumentPointInTimeRecoveryUnavailableException(v * return nil } +func awsAwsjson10_deserializeDocumentPolicyNotFoundException(v **types.PolicyNotFoundException, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *types.PolicyNotFoundException + if *v == nil { + sv = &types.PolicyNotFoundException{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "message": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value) + } + sv.Message = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeDocumentProjection(v **types.Projection, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -15881,6 +16316,46 @@ func awsAwsjson10_deserializeOpDocumentDeleteItemOutput(v **DeleteItemOutput, va return nil } +func awsAwsjson10_deserializeOpDocumentDeleteResourcePolicyOutput(v **DeleteResourcePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *DeleteResourcePolicyOutput + if *v == nil { + sv = &DeleteResourcePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "RevisionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value) + } + sv.RevisionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeOpDocumentDeleteTableOutput(v **DeleteTableOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) @@ -16797,6 +17272,55 @@ func awsAwsjson10_deserializeOpDocumentGetItemOutput(v **GetItemOutput, value in return nil } +func awsAwsjson10_deserializeOpDocumentGetResourcePolicyOutput(v **GetResourcePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *GetResourcePolicyOutput + if *v == nil { + sv = &GetResourcePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "Policy": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected ResourcePolicy to be of type string, got %T instead", value) + } + sv.Policy = ptr.String(jtv) + } + + case "RevisionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value) + } + sv.RevisionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeOpDocumentImportTableOutput(v **ImportTableOutput, value interface{}) error { if v == nil { return 
fmt.Errorf("unexpected nil of type %T", v) @@ -17194,6 +17718,46 @@ func awsAwsjson10_deserializeOpDocumentPutItemOutput(v **PutItemOutput, value in return nil } +func awsAwsjson10_deserializeOpDocumentPutResourcePolicyOutput(v **PutResourcePolicyOutput, value interface{}) error { + if v == nil { + return fmt.Errorf("unexpected nil of type %T", v) + } + if value == nil { + return nil + } + + shape, ok := value.(map[string]interface{}) + if !ok { + return fmt.Errorf("unexpected JSON type %v", value) + } + + var sv *PutResourcePolicyOutput + if *v == nil { + sv = &PutResourcePolicyOutput{} + } else { + sv = *v + } + + for key, value := range shape { + switch key { + case "RevisionId": + if value != nil { + jtv, ok := value.(string) + if !ok { + return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value) + } + sv.RevisionId = ptr.String(jtv) + } + + default: + _, _ = key, value + + } + } + *v = sv + return nil +} + func awsAwsjson10_deserializeOpDocumentQueryOutput(v **QueryOutput, value interface{}) error { if v == nil { return fmt.Errorf("unexpected nil of type %T", v) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json index 0c57399cff3..529544017eb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json @@ -6,7 +6,6 @@ "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery": "v0.0.0-00010101000000-000000000000", "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4", "github.com/jmespath/go-jmespath": "v0.4.0" }, "files": [ @@ -20,6 +19,7 @@ "api_op_CreateTable.go", "api_op_DeleteBackup.go", "api_op_DeleteItem.go", + "api_op_DeleteResourcePolicy.go", "api_op_DeleteTable.go", "api_op_DescribeBackup.go", "api_op_DescribeContinuousBackups.go", @@ -40,6 +40,7 @@ "api_op_ExecuteTransaction.go", "api_op_ExportTableToPointInTime.go", "api_op_GetItem.go", + "api_op_GetResourcePolicy.go", "api_op_ImportTable.go", "api_op_ListBackups.go", "api_op_ListContributorInsights.go", @@ -49,6 +50,7 @@ "api_op_ListTables.go", "api_op_ListTagsOfResource.go", "api_op_PutItem.go", + "api_op_PutResourcePolicy.go", "api_op_Query.go", "api_op_RestoreTableFromBackup.go", "api_op_RestoreTableToPointInTime.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go index 842506f3841..b60959f52f7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go @@ -3,4 +3,4 @@ package dynamodb // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.30.3" +const goModuleVersion = "1.31.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go index 833253c9199..0a5c1e813c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go @@ -457,6 +457,61 @@ func (m *awsAwsjson10_serializeOpDeleteItem) HandleSerialize(ctx context.Context return next.HandleSerialize(ctx, in) } +type awsAwsjson10_serializeOpDeleteResourcePolicy struct { +} + +func 
(*awsAwsjson10_serializeOpDeleteResourcePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpDeleteResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*DeleteResourcePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteResourcePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentDeleteResourcePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson10_serializeOpDeleteTable struct { } @@ -1557,6 +1612,61 @@ func (m *awsAwsjson10_serializeOpGetItem) HandleSerialize(ctx context.Context, i return next.HandleSerialize(ctx, in) } +type awsAwsjson10_serializeOpGetResourcePolicy struct { +} + +func (*awsAwsjson10_serializeOpGetResourcePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpGetResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*GetResourcePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, 
&smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.GetResourcePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentGetResourcePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson10_serializeOpImportTable struct { } @@ -2052,6 +2162,61 @@ func (m *awsAwsjson10_serializeOpPutItem) HandleSerialize(ctx context.Context, i return next.HandleSerialize(ctx, in) } +type awsAwsjson10_serializeOpPutResourcePolicy struct { +} + +func (*awsAwsjson10_serializeOpPutResourcePolicy) ID() string { + return "OperationSerializer" +} + +func (m *awsAwsjson10_serializeOpPutResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( + out middleware.SerializeOutput, metadata middleware.Metadata, err error, +) { + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)} + } + + input, ok := in.Parameters.(*PutResourcePolicyInput) + _ = input + if !ok { + return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)} + } + + operationPath := "/" + if len(request.Request.URL.Path) == 0 { + request.Request.URL.Path = operationPath + } else { + request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath) + if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' { + request.Request.URL.Path += "/" + } + } + request.Request.Method = "POST" + httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header) + if err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0") + httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.PutResourcePolicy") + + jsonEncoder := smithyjson.NewEncoder() + if err := awsAwsjson10_serializeOpDocumentPutResourcePolicyInput(input, jsonEncoder.Value); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + + if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil { + return out, metadata, &smithy.SerializationError{Err: err} + } + in.Request = request + + return next.HandleSerialize(ctx, in) +} + type awsAwsjson10_serializeOpQuery struct { } @@ -5139,6 +5304,11 @@ func awsAwsjson10_serializeOpDocumentCreateTableInput(v *CreateTableInput, value } } + if v.ResourcePolicy != nil { + ok := object.Key("ResourcePolicy") + ok.String(*v.ResourcePolicy) + } + if v.SSESpecification != nil { ok := object.Key("SSESpecification") if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecification, ok); err != nil { @@ -5255,6 +5425,23 @@ func 
awsAwsjson10_serializeOpDocumentDeleteItemInput(v *DeleteItemInput, value s return nil } +func awsAwsjson10_serializeOpDocumentDeleteResourcePolicyInput(v *DeleteResourcePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ExpectedRevisionId != nil { + ok := object.Key("ExpectedRevisionId") + ok.String(*v.ExpectedRevisionId) + } + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + func awsAwsjson10_serializeOpDocumentDeleteTableInput(v *DeleteTableInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -5646,6 +5833,18 @@ func awsAwsjson10_serializeOpDocumentGetItemInput(v *GetItemInput, value smithyj return nil } +func awsAwsjson10_serializeOpDocumentGetResourcePolicyInput(v *GetResourcePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + func awsAwsjson10_serializeOpDocumentImportTableInput(v *ImportTableInput, value smithyjson.Value) error { object := value.Object() defer object.Close() @@ -5918,6 +6117,33 @@ func awsAwsjson10_serializeOpDocumentPutItemInput(v *PutItemInput, value smithyj return nil } +func awsAwsjson10_serializeOpDocumentPutResourcePolicyInput(v *PutResourcePolicyInput, value smithyjson.Value) error { + object := value.Object() + defer object.Close() + + if v.ConfirmRemoveSelfResourceAccess { + ok := object.Key("ConfirmRemoveSelfResourceAccess") + ok.Boolean(v.ConfirmRemoveSelfResourceAccess) + } + + if v.ExpectedRevisionId != nil { + ok := object.Key("ExpectedRevisionId") + ok.String(*v.ExpectedRevisionId) + } + + if v.Policy != nil { + ok := object.Key("Policy") + ok.String(*v.Policy) + } + + if v.ResourceArn != nil { + ok := object.Key("ResourceArn") + ok.String(*v.ResourceArn) + } + + return nil +} + func awsAwsjson10_serializeOpDocumentQueryInput(v *QueryInput, value smithyjson.Value) error { object := value.Object() defer object.Close() diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go index 4bbb047bfc2..d11b3b8b687 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go @@ -554,6 +554,34 @@ func (e *PointInTimeRecoveryUnavailableException) ErrorFault() smithy.ErrorFault return smithy.FaultClient } +// The operation tried to access a nonexistent resource-based policy. If you +// specified an ExpectedRevisionId , it's possible that a policy is present for the +// resource but its revision ID didn't match the expected value. +type PolicyNotFoundException struct { + Message *string + + ErrorCodeOverride *string + + noSmithyDocumentSerde +} + +func (e *PolicyNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage()) +} +func (e *PolicyNotFoundException) ErrorMessage() string { + if e.Message == nil { + return "" + } + return *e.Message +} +func (e *PolicyNotFoundException) ErrorCode() string { + if e == nil || e.ErrorCodeOverride == nil { + return "PolicyNotFoundException" + } + return *e.ErrorCodeOverride +} +func (e *PolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient } + // Your request rate is too high. 
The Amazon Web Services SDKs for DynamoDB // automatically retry requests that receive this exception. Your request is // eventually successful, unless your retry queue is too large to finish. Reduce diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go index 196842a1dde..437acc5dae9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go @@ -729,7 +729,8 @@ type ConditionCheck struct { // This member is required. Key map[string]AttributeValue - // Name of the table for the check item request. + // Name of the table for the check item request. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string @@ -775,7 +776,9 @@ type ConsumedCapacity struct { // The amount of throughput consumed on the table affected by the operation. Table *Capacity - // The name of the table that was affected by the operation. + // The name of the table that was affected by the operation. If you had specified + // the Amazon Resource Name (ARN) of a table in the input, you'll see the table ARN + // in the response. TableName *string // The total number of write capacity units consumed by the operation. @@ -907,7 +910,8 @@ type Delete struct { // This member is required. Key map[string]AttributeValue - // Name of the table in which the item to be deleted resides. + // Name of the table in which the item to be deleted resides. You can also provide + // the Amazon Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string @@ -1270,7 +1274,8 @@ type Get struct { // This member is required. Key map[string]AttributeValue - // The name of the table from which to retrieve the specified item. + // The name of the table from which to retrieve the specified item. You can also + // provide the Amazon Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string @@ -2054,7 +2059,8 @@ type Put struct { // This member is required. Item map[string]AttributeValue - // Name of the table in which to write the item. + // Name of the table in which to write the item. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // This member is required. TableName *string @@ -2983,7 +2989,8 @@ type Update struct { // This member is required. Key map[string]AttributeValue - // Name of the table for the UpdateItem request. + // Name of the table for the UpdateItem request. You can also provide the Amazon + // Resource Name (ARN) of the table in this parameter. // // This member is required. 
TableName *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go index 4762e130283..d1925ed8b4b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go @@ -170,6 +170,26 @@ func (m *validateOpDeleteItem) HandleInitialize(ctx context.Context, in middlewa return next.HandleInitialize(ctx, in) } +type validateOpDeleteResourcePolicy struct { +} + +func (*validateOpDeleteResourcePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpDeleteResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*DeleteResourcePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpDeleteResourcePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpDeleteTable struct { } @@ -530,6 +550,26 @@ func (m *validateOpGetItem) HandleInitialize(ctx context.Context, in middleware. return next.HandleInitialize(ctx, in) } +type validateOpGetResourcePolicy struct { +} + +func (*validateOpGetResourcePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpGetResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*GetResourcePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpGetResourcePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpImportTable struct { } @@ -590,6 +630,26 @@ func (m *validateOpPutItem) HandleInitialize(ctx context.Context, in middleware. 
return next.HandleInitialize(ctx, in) } +type validateOpPutResourcePolicy struct { +} + +func (*validateOpPutResourcePolicy) ID() string { + return "OperationInputValidation" +} + +func (m *validateOpPutResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) ( + out middleware.InitializeOutput, metadata middleware.Metadata, err error, +) { + input, ok := in.Parameters.(*PutResourcePolicyInput) + if !ok { + return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters) + } + if err := validateOpPutResourcePolicyInput(input); err != nil { + return out, metadata, err + } + return next.HandleInitialize(ctx, in) +} + type validateOpQuery struct { } @@ -962,6 +1022,10 @@ func addOpDeleteItemValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteItem{}, middleware.After) } +func addOpDeleteResourcePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpDeleteResourcePolicy{}, middleware.After) +} + func addOpDeleteTableValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpDeleteTable{}, middleware.After) } @@ -1034,6 +1098,10 @@ func addOpGetItemValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpGetItem{}, middleware.After) } +func addOpGetResourcePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpGetResourcePolicy{}, middleware.After) +} + func addOpImportTableValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpImportTable{}, middleware.After) } @@ -1046,6 +1114,10 @@ func addOpPutItemValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpPutItem{}, middleware.After) } +func addOpPutResourcePolicyValidationMiddleware(stack *middleware.Stack) error { + return stack.Initialize.Add(&validateOpPutResourcePolicy{}, middleware.After) +} + func addOpQueryValidationMiddleware(stack *middleware.Stack) error { return stack.Initialize.Add(&validateOpQuery{}, middleware.After) } @@ -2611,6 +2683,21 @@ func validateOpDeleteItemInput(v *DeleteItemInput) error { } } +func validateOpDeleteResourcePolicyInput(v *DeleteResourcePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "DeleteResourcePolicyInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpDeleteTableInput(v *DeleteTableInput) error { if v == nil { return nil @@ -2897,6 +2984,21 @@ func validateOpGetItemInput(v *GetItemInput) error { } } +func validateOpGetResourcePolicyInput(v *GetResourcePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "GetResourcePolicyInput"} + if v.ResourceArn == nil { + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpImportTableInput(v *ImportTableInput) error { if v == nil { return nil @@ -2959,6 +3061,24 @@ func validateOpPutItemInput(v *PutItemInput) error { } } +func validateOpPutResourcePolicyInput(v *PutResourcePolicyInput) error { + if v == nil { + return nil + } + invalidParams := smithy.InvalidParamsError{Context: "PutResourcePolicyInput"} + if v.ResourceArn == nil 
{ + invalidParams.Add(smithy.NewErrParamRequired("ResourceArn")) + } + if v.Policy == nil { + invalidParams.Add(smithy.NewErrParamRequired("Policy")) + } + if invalidParams.Len() > 0 { + return invalidParams + } else { + return nil + } +} + func validateOpQueryInput(v *QueryInput) error { if v == nil { return nil diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index cac6f926eb8..9cf6cf22b40 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.11.2 (2024-03-29) + +* No change notes available for this release. + # v1.11.1 (2024-02-21) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index c5ae0f8735d..6339b54191a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.1" +const goModuleVersion = "1.11.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md index bd35aba12dc..bb5a864a02f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.9.6 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.5 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.4 (2024-03-07) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.3 (2024-03-04) * **Bug Fix**: Fix misaligned struct member used in atomic operation. This fixes a panic caused by attempting to atomically access a struct member which is not 64-bit aligned when running on 32-bit arch, due to the smaller sync.Map struct. 
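
Reviewer note: the three validators added above run in the middleware Initialize step, so a missing required parameter is rejected client-side before the request is ever signed or sent. Below is a minimal usage sketch, not part of the vendored diff, showing how the new resource-policy operations surface through the generated DynamoDB client in service/dynamodb v1.31.1; the table ARN and policy document are placeholder values, and the method names mirror the operation names wired up by the serializers and validators in this diff.

// resource_policy_sketch.go — illustrative only.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := dynamodb.NewFromConfig(cfg)

	tableARN := "arn:aws:dynamodb:us-east-1:123456789012:table/example" // placeholder

	// Missing Policy: validateOpPutResourcePolicyInput returns an
	// InvalidParamsError locally, without contacting DynamoDB.
	_, err = client.PutResourcePolicy(context.TODO(), &dynamodb.PutResourcePolicyInput{
		ResourceArn: aws.String(tableARN),
	})
	fmt.Println("client-side validation error:", err)

	// Well-formed call: ResourceArn and Policy are both set, so the
	// request is serialized with X-Amz-Target DynamoDB_20120810.PutResourcePolicy.
	_, err = client.PutResourcePolicy(context.TODO(), &dynamodb.PutResourcePolicyInput{
		ResourceArn: aws.String(tableARN),
		Policy:      aws.String(`{"Version":"2012-10-17","Statement":[]}`), // illustrative document
	})
	if err != nil {
		log.Fatal(err)
	}
}

GetResourcePolicy and DeleteResourcePolicy follow the same shape, requiring only ResourceArn, with DeleteResourcePolicy optionally taking an ExpectedRevisionId for optimistic concurrency (a mismatch surfaces as the PolicyNotFoundException added in types/errors.go).
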
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go index 0449175c61d..d35a8bfcd2e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go @@ -3,4 +3,4 @@ package endpointdiscovery // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.3" +const goModuleVersion = "1.9.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 2f2c4a92b7d..35c7050dd1b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.11.7 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.6 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.5 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.4 (2024-03-05) * **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 8eb9e407a3b..daf77b5c38c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.4" +const goModuleVersion = "1.11.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index f0a4e60daae..a30600b098d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.20.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.3 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.20.1 (2024-02-23) * **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json index 62aba0d0552..936253d7cae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -3,8 +3,7 @@ "github.com/aws/aws-sdk-go-v2": "v1.4.0", "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 250762b75ee..280a6132520 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.20.1" +const goModuleVersion = "1.20.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index c8f7c09e46d..0a00b256e10 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -211,6 +211,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-3", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 13cffac4449..053f180bf68 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.23.4 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.3 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. +* **Dependency Update**: Updated to the latest SDK module versions + # v1.23.1 (2024-02-23) * **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future. 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json index 62007829b60..b2a52633ba6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -3,8 +3,7 @@ "github.com/aws/aws-sdk-go-v2": "v1.4.0", "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 8c5f455b146..cbc7e8415f9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.23.1" +const goModuleVersion = "1.23.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index cbd77fd291c..843edb07428 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -187,6 +187,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-south-1", }, }, + endpoints.EndpointKey{ + Region: "ap-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-south-2", + }, + }, endpoints.EndpointKey{ Region: "ap-southeast-1", }: endpoints.Endpoint{ @@ -211,6 +219,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "ap-southeast-3", }, }, + endpoints.EndpointKey{ + Region: "ap-southeast-4", + }: endpoints.Endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "ap-southeast-4", + }, + }, endpoints.EndpointKey{ Region: "ca-central-1", }: endpoints.Endpoint{ @@ -251,6 +267,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-south-1", }, }, + endpoints.EndpointKey{ + Region: "eu-south-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-south-2", + }, + }, endpoints.EndpointKey{ Region: "eu-west-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 2ca8c78fd57..2fd5d5a649b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,16 @@ +# v1.28.6 (2024-03-29) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.5 (2024-03-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.28.4 (2024-03-07) + +* **Bug Fix**: Remove dependency on go-cmp. 
+* **Dependency Update**: Updated to the latest SDK module versions + # v1.28.3 (2024-03-05) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index 54fac4bd5b7..6b6e839e6c9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -5,8 +5,7 @@ "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000", "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5", "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7", - "github.com/aws/smithy-go": "v1.4.0", - "github.com/google/go-cmp": "v0.5.4" + "github.com/aws/smithy-go": "v1.4.0" }, "files": [ "api_client.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index e6c712b1a32..6e0f31d271c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.28.3" +const goModuleVersion = "1.28.6" diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 00000000000..899129ecc46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 00000000000..1c496742903 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. 
+package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allows paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons(:). 
+func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go new file mode 100644 index 00000000000..dd950a286fb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go @@ -0,0 +1,50 @@ +package bearer + +import ( + "github.com/aws/aws-sdk-go/aws" + "time" +) + +// Token provides a type wrapping a bearer token and expiration metadata. +type Token struct { + Value string + + CanExpire bool + Expires time.Time +} + +// Expired returns if the token's Expires time is before or equal to the time +// provided. If CanExpire is false, Expired will always return false. +func (t Token) Expired(now time.Time) bool { + if !t.CanExpire { + return false + } + now = now.Round(0) + return now.Equal(t.Expires) || now.After(t.Expires) +} + +// TokenProvider provides interface for retrieving bearer tokens. +type TokenProvider interface { + RetrieveBearerToken(aws.Context) (Token, error) +} + +// TokenProviderFunc provides a helper utility to wrap a function as a type +// that implements the TokenProvider interface. +type TokenProviderFunc func(aws.Context) (Token, error) + +// RetrieveBearerToken calls the wrapped function, returning the Token or +// error. +func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) { + return fn(ctx) +} + +// StaticTokenProvider provides a utility for wrapping a static bearer token +// value within an implementation of a token provider. +type StaticTokenProvider struct { + Token Token +} + +// RetrieveBearerToken returns the static token specified. +func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) { + return s.Token, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 00000000000..99849c0e19c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,164 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. +// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. 
+ Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. 
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 00000000000..9cf7eaf4007 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,221 @@ +package awserr + +import ( + "encoding/hex" + "fmt" +) + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. +func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. 
+func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string + bytes []byte +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += e[i].Error() + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. 
We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 00000000000..1a3d106d5c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+ if src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 00000000000..142a7a01c52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type they are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go new file mode 100644 index 00000000000..a4eb6a7f43a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -0,0 +1,221 @@ +package awsutil + +import ( + "reflect" + "regexp" + "strconv" + "strings" + + "github.com/jmespath/go-jmespath" +) + +var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) + +// rValuesAtPath returns a slice of values found in value v. The values +// in v are explored recursively so all nested values are collected. 
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.EqualFold(name, c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+	result, err := jmespath.Search(path, i)
+	if err != nil {
+		return nil, err
+	}
+
+	v := reflect.ValueOf(result)
+	if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return nil, nil
+	}
+	if s, ok := result.([]interface{}); ok {
+		return s, err
+	}
+	if v.Kind() == reflect.Map && v.Len() == 0 {
+		return nil, nil
+	}
+	if v.Kind() == reflect.Slice {
+		out := make([]interface{}, v.Len())
+		for i := 0; i < v.Len(); i++ {
+			out[i] = v.Index(i).Interface()
+		}
+		return out, nil
+	}
+
+	return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+	rvals := rValuesAtPath(i, path, true, false, v == nil)
+	for _, rval := range rvals {
+		if rval.Kind() == reflect.Ptr && rval.IsNil() {
+			continue
+		}
+		setValue(rval, v)
+	}
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+	if dstVal.Kind() == reflect.Ptr {
+		dstVal = reflect.Indirect(dstVal)
+	}
+	srcVal := reflect.ValueOf(src)
+
+	if !srcVal.IsValid() { // src is literal nil
+		if dstVal.CanAddr() {
+			// Convert to pointer so that pointer's value can be nil'ed
+			// dstVal = dstVal.Addr()
+		}
+		dstVal.Set(reflect.Zero(dstVal.Type()))
+
+	} else if srcVal.Kind() == reflect.Ptr {
+		if srcVal.IsNil() {
+			srcVal = reflect.Zero(dstVal.Type())
+		} else {
+			srcVal = reflect.ValueOf(src).Elem()
+		}
+		dstVal.Set(srcVal)
+	} else {
+		dstVal.Set(srcVal)
+	}
+
+}
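
Since path_value.go backs the SDK's waiter and pagination plumbing, a small sketch may help. The `book`/`author` types are hypothetical; the sketch relies on the case-insensitive matching and pointer auto-creation implemented above:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// Hypothetical nested structure for demonstration only.
type author struct {
	Name string
}

type book struct {
	Title  string
	Author *author
}

func main() {
	var b book

	// SetValueAtPath allocates intermediate pointers as needed (the nil
	// *author is created) and matches field names case-insensitively.
	awsutil.SetValueAtPath(&b, "Author.Name", "Iain Banks")

	// ValuesAtPath evaluates a JMESPath expression against the value.
	vals, err := awsutil.ValuesAtPath(b, "Author.Name")
	if err != nil {
		panic(err)
	}
	fmt.Println(vals) // [Iain Banks]
}
```

diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 00000000000..11d4240d614
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,123 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+	var buf bytes.Buffer
+	prettify(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.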
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		strtype := v.Type().String()
+		if strtype == "time.Time" {
+			fmt.Fprintf(buf, "%s", v.Interface())
+			break
+		} else if strings.HasPrefix(strtype, "io.") {
+			buf.WriteString("<buffer>")
+			break
+		}
+
+		buf.WriteString("{\n")
+
+		names := []string{}
+		for i := 0; i < v.Type().NumField(); i++ {
+			name := v.Type().Field(i).Name
+			f := v.Field(i)
+			if name[0:1] == strings.ToLower(name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+				continue // ignore unset fields
+			}
+			names = append(names, name)
+		}
+
+		for i, n := range names {
+			val := v.FieldByName(n)
+			ft, ok := v.Type().FieldByName(n)
+			if !ok {
+				panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type()))
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(n + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				prettify(val, indent+2, buf)
+			}
+
+			if i < len(names)-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		strtype := v.Type().String()
+		if strtype == "[]uint8" {
+			fmt.Fprintf(buf, "<binary> len %d", v.Len())
+			break
+		}
+
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			prettify(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			prettify(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		if !v.IsValid() {
+			fmt.Fprint(buf, "<invalid value>")
+			return
+		}
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		case io.ReadSeeker, io.Reader:
+			format = "buffer(%p)"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
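
A minimal sketch of what Prettify produces, including the `sensitive:"true"` redaction restored above (the `loginInput` type is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

// Hypothetical input type; the `sensitive:"true"` tag is what prettify
// checks before redacting a field as <sensitive>.
type loginInput struct {
	User     *string
	Password *string `sensitive:"true"`
	count    int     // unexported: skipped entirely
}

func main() {
	user, pass := "alice", "hunter2"
	in := loginInput{User: &user, Password: &pass}

	// Prints:
	// {
	//   User: "alice",
	//   Password: <sensitive>
	// }
	fmt.Println(awsutil.Prettify(in))
}
```

diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 00000000000..3f7cffd9579
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,90 @@
+package awsutil
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// StringValue returns the string representation of a value.
+//
+// Deprecated: Use Prettify instead.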
+func StringValue(i interface{}) string {
+	var buf bytes.Buffer
+	stringValue(reflect.ValueOf(i), 0, &buf)
+	return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+	for v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	switch v.Kind() {
+	case reflect.Struct:
+		buf.WriteString("{\n")
+
+		for i := 0; i < v.Type().NumField(); i++ {
+			ft := v.Type().Field(i)
+			fv := v.Field(i)
+
+			if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) {
+				continue // ignore unexported fields
+			}
+			if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() {
+				continue // ignore unset fields
+			}
+
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(ft.Name + ": ")
+
+			if tag := ft.Tag.Get("sensitive"); tag == "true" {
+				buf.WriteString("<sensitive>")
+			} else {
+				stringValue(fv, indent+2, buf)
+			}
+
+			buf.WriteString(",\n")
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	case reflect.Slice:
+		nl, id, id2 := "", "", ""
+		if v.Len() > 3 {
+			nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+		}
+		buf.WriteString("[" + nl)
+		for i := 0; i < v.Len(); i++ {
+			buf.WriteString(id2)
+			stringValue(v.Index(i), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString("," + nl)
+			}
+		}
+
+		buf.WriteString(nl + id + "]")
+	case reflect.Map:
+		buf.WriteString("{\n")
+
+		for i, k := range v.MapKeys() {
+			buf.WriteString(strings.Repeat(" ", indent+2))
+			buf.WriteString(k.String() + ": ")
+			stringValue(v.MapIndex(k), indent+2, buf)
+
+			if i < v.Len()-1 {
+				buf.WriteString(",\n")
+			}
+		}
+
+		buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+	default:
+		format := "%v"
+		switch v.Interface().(type) {
+		case string:
+			format = "%q"
+		}
+		fmt.Fprintf(buf, format, v.Interface())
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 00000000000..b147f103ce1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,94 @@
+package client
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+	Config         *aws.Config
+	Handlers       request.Handlers
+	PartitionID    string
+	Endpoint       string
+	SigningRegion  string
+	SigningName    string
+	ResolvedRegion string
+
+	// States that the signing name did not come from a modeled source but
+	// was derived based on other data. Used by service client constructors
+	// to determine if the signin name can be overridden based on metadata the
+	// service has.
+	SigningNameDerived bool
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+	ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
+// resolve the endpoint automatically. The service client's endpoint must be
+// provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+	ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+	request.Retryer
+	metadata.ClientInfo
+
+	Config   aws.Config
+	Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+	svc := &Client{
+		Config:     cfg,
+		ClientInfo: info,
+		Handlers:   handlers.Copy(),
+	}
+
+	switch retryer, ok := cfg.Retryer.(request.Retryer); {
+	case ok:
+		svc.Retryer = retryer
+	case cfg.Retryer != nil && cfg.Logger != nil:
+		s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+		cfg.Logger.Log(s)
+		fallthrough
+	default:
+		maxRetries := aws.IntValue(cfg.MaxRetries)
+		if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+			maxRetries = DefaultRetryerMaxNumRetries
+		}
+		svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+	}
+
+	svc.AddDebugHandlers()
+
+	for _, option := range options {
+		option(svc)
+	}
+
+	return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+	return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+	c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
+	c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
+}
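
To illustrate the retryer resolution in client.New above: a config that carries only MaxRetries falls through to the default case and seeds a DefaultRetryer (or 3 retries when MaxRetries is unset). A minimal sketch; the "demo" service name and empty handlers are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/client/metadata"
	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	// No Retryer is set, so New falls through to the default case and
	// builds DefaultRetryer{NumMaxRetries: 7} from cfg.MaxRetries.
	c := client.New(
		*aws.NewConfig().WithMaxRetries(7),
		metadata.ClientInfo{ServiceName: "demo"},
		request.Handlers{},
	)
	fmt.Println(c.MaxRetries()) // 7
}
```

diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 00000000000..9f6af19dd45
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,177 @@
+package client
+
+import (
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, you can implement the
+// request.Retryer interface.
+//
+type DefaultRetryer struct {
+	// Num max Retries is the number of max retries that will be performed.
+	// By default, this is zero.
+	NumMaxRetries int
+
+	// MinRetryDelay is the minimum retry delay after which retry will be performed.
+	// If not set, the value is 0ns.
+	MinRetryDelay time.Duration
+
+	// MinThrottleRetryDelay is the minimum retry delay when throttled.
+	// If not set, the value is 0ns.
+	MinThrottleDelay time.Duration
+
+	// MaxRetryDelay is the maximum retry delay before which retry must be performed.
+	// If not set, the value is 0ns.
+	MaxRetryDelay time.Duration
+
+	// MaxThrottleDelay is the maximum retry delay when throttled.
+	// If not set, the value is 0ns.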
+	MaxThrottleDelay time.Duration
+}
+
+const (
+	// DefaultRetryerMaxNumRetries sets maximum number of retries
+	DefaultRetryerMaxNumRetries = 3
+
+	// DefaultRetryerMinRetryDelay sets minimum retry delay
+	DefaultRetryerMinRetryDelay = 30 * time.Millisecond
+
+	// DefaultRetryerMinThrottleDelay sets minimum delay when throttled
+	DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
+
+	// DefaultRetryerMaxRetryDelay sets maximum retry delay
+	DefaultRetryerMaxRetryDelay = 300 * time.Second
+
+	// DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
+	DefaultRetryerMaxThrottleDelay = 300 * time.Second
+)
+
+// MaxRetries returns the number of maximum returns the service will use to make
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+	return d.NumMaxRetries
+}
+
+// setRetryerDefaults sets the default values of the retryer if not set
+func (d *DefaultRetryer) setRetryerDefaults() {
+	if d.MinRetryDelay == 0 {
+		d.MinRetryDelay = DefaultRetryerMinRetryDelay
+	}
+	if d.MaxRetryDelay == 0 {
+		d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
+	}
+	if d.MinThrottleDelay == 0 {
+		d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
+	}
+	if d.MaxThrottleDelay == 0 {
+		d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
+	}
+}
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+
+	// if number of max retries is zero, no retries will be performed.
+	if d.NumMaxRetries == 0 {
+		return 0
+	}
+
+	// Sets default value for retryer members
+	d.setRetryerDefaults()
+
+	// minDelay is the minimum retryer delay
+	minDelay := d.MinRetryDelay
+
+	var initialDelay time.Duration
+
+	isThrottle := r.IsErrorThrottle()
+	if isThrottle {
+		if delay, ok := getRetryAfterDelay(r); ok {
+			initialDelay = delay
+		}
+		minDelay = d.MinThrottleDelay
+	}
+
+	retryCount := r.RetryCount
+
+	// maxDelay the maximum retryer delay
+	maxDelay := d.MaxRetryDelay
+
+	if isThrottle {
+		maxDelay = d.MaxThrottleDelay
+	}
+
+	var delay time.Duration
+
+	// Logic to cap the retry count based on the minDelay provided
+	actualRetryCount := int(math.Log2(float64(minDelay))) + 1
+	if actualRetryCount < 63-retryCount {
+		delay = time.Duration(1<<uint64(retryCount)) * getJitterDelay(minDelay)
+		if delay > maxDelay {
+			delay = getJitterDelay(maxDelay / 2)
+		}
+	} else {
+		delay = getJitterDelay(maxDelay / 2)
+	}
+	return delay + initialDelay
+}
+
+// getJitterDelay returns a jittered delay for retry
+func getJitterDelay(duration time.Duration) time.Duration {
+	return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
+}
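
With the shift expression restored, RetryRules computes roughly delay = (1 << retryCount) * jitter(minDelay), capped at the configured max delay. A sketch of wiring a tuned DefaultRetryer into a session so that all clients built from it share the backoff policy (the delay values are illustrative, not recommendations):

```go
package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// DefaultRetryer satisfies request.Retryer, so client.New will use it
	// verbatim instead of constructing its own from MaxRetries.
	retryer := client.DefaultRetryer{
		NumMaxRetries:    5,
		MinRetryDelay:    50 * time.Millisecond,
		MinThrottleDelay: 500 * time.Millisecond,
		MaxRetryDelay:    10 * time.Second,
		MaxThrottleDelay: 30 * time.Second,
	}

	sess := session.Must(session.NewSession(&aws.Config{
		Retryer: retryer,
	}))
	_ = sess // pass sess to service client constructors, e.g. dynamodb.New(sess)
}
```

+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+
+	// ShouldRetry returns false if number of max retries is 0.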
+ if d.NumMaxRetries == 0 { + return false + } + + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + return r.IsErrorRetryable() || r.IsErrorThrottle() +} + +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryAfterDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true +} + +// Will look at the status code to see if the retry header pertains to +// the status code. +func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 00000000000..5ac5c24a1b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,206 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the + // r.HTTPRequest's Body as a NoOpCloser and will not be reset after + // read by the HTTP client reader. 
+ if err := r.Error; err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. +var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 00000000000..a7530ebb389 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,15 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + ServiceID string + APIVersion string + PartitionID string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string + ResolvedRegion string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go new file mode 100644 index 00000000000..881d575f010 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go @@ -0,0 +1,28 @@ +package client + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// NoOpRetryer provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type NoOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d NoOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration { + return 0 +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 00000000000..c483e0cb8e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,670 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig structure. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. 
+// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `nil` or the value to `""` to use the default generated endpoint. + // + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the client.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. 
+ DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disable 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + + // Set this to `true` to have the S3 service client to use the region specified + // in the ARN, when an ARN is provided as an argument to a bucket parameter. + S3UseARNRegion *bool + + // Set this to `true` to enable the SDK to unmarshal API response header maps to + // normalized lower case map keys. + // + // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case + // Metadata member's map keys. The value of the header in the map is unaffected. + // + // The AWS SDK for Go v2, uses lower case header maps by default. The v1 + // SDK provides this opt-in for this option, for backwards compatibility. + LowerCaseHeaderMaps *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. 
+ // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDisableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1. + // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility. + // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata + // client will return any errors encountered from attempting to fetch a token instead of silently + // using the insecure data flow of IMDSv1. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataEnableFallback(false))) + // + // svc := s3.New(sess) + // + // See [configuring IMDS] for more information. + // + // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html + EC2MetadataEnableFallback *bool + + // Instructs the endpoint to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requests. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + // + // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility. + // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients + // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher + // precedence then this option. + UseDualStack *bool + + // Sets the resolver to resolve a dual-stack endpoint for the service. + UseDualStackEndpoint endpoints.DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint endpoints.FIPSEndpointState + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. 
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // To use EndpointDiscovery, Endpoint should be unset or set to an empty string. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. + DisableEndpointHostPrefix *bool + + // STSRegionalEndpoint will enable regional or legacy endpoint resolving + STSRegionalEndpoint endpoints.STSRegionalEndpoint + + // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving + S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetries configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. 
+func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + +} + +// WithS3UseARNRegion sets a config S3UseARNRegion value and +// returning a Config pointer for chaining +func (c *Config) WithS3UseARNRegion(enable bool) *Config { + c.S3UseARNRegion = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config +// pointer for chaining. +func (c *Config) WithUseFIPSEndpoint(enable bool) *Config { + if enable { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + } else { + c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled + } + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config { + c.EC2MetadataEnableFallback = &v + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. 
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + +// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config { + c.STSRegionalEndpoint = sre + return c +} + +// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag +// when resolving the endpoint for a service +func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config { + c.S3UsEast1RegionalEndpoint = sre + return c +} + +// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value +// returning a Config pointer for chaining. +func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config { + c.LowerCaseHeaderMaps = &t + return c +} + +// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value +// returning a Config pointer for chaining. +func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config { + c.DisableRestProtocolURICleaning = &t + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + + if other.S3UseARNRegion != nil { + dst.S3UseARNRegion = other.S3UseARNRegion + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + dst.UseDualStackEndpoint = 
other.UseDualStackEndpoint
+	}
+
+	if other.EC2MetadataDisableTimeoutOverride != nil {
+		dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+	}
+
+	if other.EC2MetadataEnableFallback != nil {
+		dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback
+	}
+
+	if other.SleepDelay != nil {
+		dst.SleepDelay = other.SleepDelay
+	}
+
+	if other.DisableRestProtocolURICleaning != nil {
+		dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+	}
+
+	if other.EnforceShouldRetryCheck != nil {
+		dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+	}
+
+	if other.EnableEndpointDiscovery != nil {
+		dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
+	}
+
+	if other.DisableEndpointHostPrefix != nil {
+		dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
+	}
+
+	if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
+		dst.STSRegionalEndpoint = other.STSRegionalEndpoint
+	}
+
+	if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
+		dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
+	}
+
+	if other.LowerCaseHeaderMaps != nil {
+		dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
+	}
+
+	if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
+		dst.UseDualStackEndpoint = other.UseDualStackEndpoint
+	}
+
+	if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset {
+		dst.UseFIPSEndpoint = other.UseFIPSEndpoint
+	}
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+	dst := &Config{}
+	dst.MergeIn(c)
+
+	for _, cfg := range cfgs {
+		dst.MergeIn(cfg)
+	}
+
+	return dst
+}
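
MergeIn and Copy give Config its layering semantics: later configs win field by field, and unset (nil or zero-state) fields never clobber earlier settings. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().
		WithRegion("us-east-1").
		WithMaxRetries(3)

	// Copy returns a new Config; the override only replaces the fields it
	// actually sets, so MaxRetries survives while Region is swapped.
	override := aws.NewConfig().WithRegion("eu-west-1")
	merged := base.Copy(override)

	fmt.Println(aws.StringValue(merged.Region))  // eu-west-1
	fmt.Println(aws.IntValue(merged.MaxRetries)) // 3
}
```

diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
new file mode 100644
index 00000000000..89aad2c6771
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
@@ -0,0 +1,38 @@
+//go:build !go1.9
+// +build !go1.9
+
+package aws
+
+import "time"
+
+// Context is an copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as a SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.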
+ // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 00000000000..6ee9ddd18bb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,12 @@ +//go:build go1.9 +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 00000000000..31321819048 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,23 @@ +//go:build !go1.7 +// +build !go1.7 + +package aws + +import ( + "github.com/aws/aws-sdk-go/internal/context" +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.BackgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 00000000000..9975d561bb2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,21 @@ +//go:build go1.7 +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 00000000000..304fd156120 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait for the timer duration to expire, or the context +// is canceled. Which ever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. 
+func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 00000000000..4e076c1837a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,918 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+	dst := make([]*int, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+	dst := make([]int, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+	dst := make(map[string]*int)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+	dst := make(map[string]int)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Uint returns a pointer to the uint value passed in.
+func Uint(v uint) *uint {
+	return &v
+}
+
+// UintValue returns the value of the uint pointer passed in or
+// 0 if the pointer is nil.
+func UintValue(v *uint) uint {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// UintSlice converts a slice of uint values into a slice of
+// uint pointers
+func UintSlice(src []uint) []*uint {
+	dst := make([]*uint, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// UintValueSlice converts a slice of uint pointers into a slice of
+// uint values
+func UintValueSlice(src []*uint) []uint {
+	dst := make([]uint, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// UintMap converts a string map of uint values into a string
+// map of uint pointers
+func UintMap(src map[string]uint) map[string]*uint {
+	dst := make(map[string]*uint)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// UintValueMap converts a string map of uint pointers into a string
+// map of uint values
+func UintValueMap(src map[string]*uint) map[string]uint {
+	dst := make(map[string]uint)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Int8 returns a pointer to the int8 value passed in.
+func Int8(v int8) *int8 {
+	return &v
+}
+
+// Int8Value returns the value of the int8 pointer passed in or
+// 0 if the pointer is nil.
+func Int8Value(v *int8) int8 { + if v != nil { + return *v + } + return 0 +} + +// Int8Slice converts a slice of int8 values into a slice of +// int8 pointers +func Int8Slice(src []int8) []*int8 { + dst := make([]*int8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int8ValueSlice converts a slice of int8 pointers into a slice of +// int8 values +func Int8ValueSlice(src []*int8) []int8 { + dst := make([]int8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int8Map converts a string map of int8 values into a string +// map of int8 pointers +func Int8Map(src map[string]int8) map[string]*int8 { + dst := make(map[string]*int8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int8ValueMap converts a string map of int8 pointers into a string +// map of int8 values +func Int8ValueMap(src map[string]*int8) map[string]int8 { + dst := make(map[string]int8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int16 returns a pointer to the int16 value passed in. +func Int16(v int16) *int16 { + return &v +} + +// Int16Value returns the value of the int16 pointer passed in or +// 0 if the pointer is nil. +func Int16Value(v *int16) int16 { + if v != nil { + return *v + } + return 0 +} + +// Int16Slice converts a slice of int16 values into a slice of +// int16 pointers +func Int16Slice(src []int16) []*int16 { + dst := make([]*int16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int16ValueSlice converts a slice of int16 pointers into a slice of +// int16 values +func Int16ValueSlice(src []*int16) []int16 { + dst := make([]int16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int16Map converts a string map of int16 values into a string +// map of int16 pointers +func Int16Map(src map[string]int16) map[string]*int16 { + dst := make(map[string]*int16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int16ValueMap converts a string map of int16 pointers into a string +// map of int16 values +func Int16ValueMap(src map[string]*int16) map[string]int16 { + dst := make(map[string]int16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int32 returns a pointer to the int32 value passed in. +func Int32(v int32) *int32 { + return &v +} + +// Int32Value returns the value of the int32 pointer passed in or +// 0 if the pointer is nil. 
+func Int32Value(v *int32) int32 { + if v != nil { + return *v + } + return 0 +} + +// Int32Slice converts a slice of int32 values into a slice of +// int32 pointers +func Int32Slice(src []int32) []*int32 { + dst := make([]*int32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int32ValueSlice converts a slice of int32 pointers into a slice of +// int32 values +func Int32ValueSlice(src []*int32) []int32 { + dst := make([]int32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int32Map converts a string map of int32 values into a string +// map of int32 pointers +func Int32Map(src map[string]int32) map[string]*int32 { + dst := make(map[string]*int32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int32ValueMap converts a string map of int32 pointers into a string +// map of int32 values +func Int32ValueMap(src map[string]*int32) map[string]int32 { + dst := make(map[string]int32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint8 returns a pointer to the uint8 value passed in. +func Uint8(v uint8) *uint8 { + return &v +} + +// Uint8Value returns the value of the uint8 pointer passed in or +// 0 if the pointer is nil. 
+func Uint8Value(v *uint8) uint8 { + if v != nil { + return *v + } + return 0 +} + +// Uint8Slice converts a slice of uint8 values into a slice of +// uint8 pointers +func Uint8Slice(src []uint8) []*uint8 { + dst := make([]*uint8, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint8ValueSlice converts a slice of uint8 pointers into a slice of +// uint8 values +func Uint8ValueSlice(src []*uint8) []uint8 { + dst := make([]uint8, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint8Map converts a string map of uint8 values into a string +// map of uint8 pointers +func Uint8Map(src map[string]uint8) map[string]*uint8 { + dst := make(map[string]*uint8) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint8ValueMap converts a string map of uint8 pointers into a string +// map of uint8 values +func Uint8ValueMap(src map[string]*uint8) map[string]uint8 { + dst := make(map[string]uint8) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint16 returns a pointer to the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint32 returns a pointer to the uint32 value passed in. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. 
+func Uint32Value(v *uint32) uint32 { + if v != nil { + return *v + } + return 0 +} + +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers +func Uint32Slice(src []uint32) []*uint32 { + dst := make([]*uint32, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of +// uint32 values +func Uint32ValueSlice(src []*uint32) []uint32 { + dst := make([]uint32, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers +func Uint32Map(src map[string]uint32) map[string]*uint32 { + dst := make(map[string]*uint32) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint32ValueMap converts a string map of uint32 pointers into a string +// map of uint32 values +func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { + dst := make(map[string]uint32) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Uint64 returns a pointer to the uint64 value passed in. +func Uint64(v uint64) *uint64 { + return &v +} + +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. +func Uint64Value(v *uint64) uint64 { + if v != nil { + return *v + } + return 0 +} + +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers +func Uint64Slice(src []uint64) []*uint64 { + dst := make([]*uint64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of +// uint64 values +func Uint64ValueSlice(src []*uint64) []uint64 { + dst := make([]uint64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers +func Uint64Map(src map[string]uint64) map[string]*uint64 { + dst := make(map[string]*uint64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Uint64ValueMap converts a string map of uint64 pointers into a string +// map of uint64 values +func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { + dst := make(map[string]uint64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float32 returns a pointer to the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. 
+func Float32Value(v *float32) float32 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float32Slice converts a slice of float32 values into a slice of
+// float32 pointers
+func Float32Slice(src []float32) []*float32 {
+	dst := make([]*float32, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float32ValueSlice converts a slice of float32 pointers into a slice of
+// float32 values
+func Float32ValueSlice(src []*float32) []float32 {
+	dst := make([]float32, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float32Map converts a string map of float32 values into a string
+// map of float32 pointers
+func Float32Map(src map[string]float32) map[string]*float32 {
+	dst := make(map[string]*float32)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float32ValueMap converts a string map of float32 pointers into a string
+// map of float32 values
+func Float32ValueMap(src map[string]*float32) map[string]float32 {
+	dst := make(map[string]float32)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+	if v != nil {
+		return *v
+	}
+	return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+	dst := make([]*float64, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+	dst := make([]float64, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+	dst := make(map[string]*float64)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+	dst := make(map[string]float64)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+	return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+	if v != nil {
+		return *v
+	}
+	return time.Time{}
+}
+
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+// Note: the implementation divides the input by 1000 before conversion,
+// so callers effectively pass milliseconds since Epoch.
+func SecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix((*v / 1000), 0)
+	}
+	return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds since Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+	if v != nil {
+		return time.Unix(0, (*v * 1000000))
+	}
+	return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs which require
+// their unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+	return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+	dst := make([]*time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		dst[i] = &(src[i])
+	}
+	return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+	dst := make([]time.Time, len(src))
+	for i := 0; i < len(src); i++ {
+		if src[i] != nil {
+			dst[i] = *(src[i])
+		}
+	}
+	return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+	dst := make(map[string]*time.Time)
+	for k, val := range src {
+		v := val
+		dst[k] = &v
+	}
+	return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+	dst := make(map[string]time.Time)
+	for k, val := range src {
+		if val != nil {
+			dst[k] = *val
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
new file mode 100644
index 00000000000..140242dd1b8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
@@ -0,0 +1,4 @@
+// DO NOT EDIT
+package corehandlers
+
+const isAwsInternal = ""
\ No newline at end of file
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 00000000000..36a915efea8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,232 @@
+package corehandlers
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+	Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
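+//
+// Illustrative effect on a hypothetical request r with a seekable buffer body
+// (a sketch of the observable behavior, not part of the handler's contract):
+//
+//	r.SetBufferBody([]byte("abc"))
+//	// After the handler runs: r.HTTPRequest.ContentLength == 3 and the
+//	// Content-Length header is set to "3".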
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 5 minutes to allow for some clock skew/delays in transmission. + // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(5 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. 
+ if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all request errors, and let the default retrier determine + // if the error is retryable. + r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err) + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", r.Error) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. +var AfterRetryHandler = request.NamedHandler{ + Name: "core.AfterRetryHandler", + Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } + }} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + // Was any endpoint provided by the user, or one was derived by the + // SDK's endpoint resolver? 
+		r.Error = aws.ErrMissingEndpoint
+	}
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 00000000000..7d50b1557cc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+	if !r.ParamsFilled() {
+		return
+	}
+
+	if v, ok := r.Params.(request.Validator); ok {
+		if err := v.Validate(); err != nil {
+			r.Error = err
+		}
+	}
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
new file mode 100644
index 00000000000..ac842c55d89
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
@@ -0,0 +1,47 @@
+package corehandlers
+
+import (
+	"os"
+	"runtime"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
+// to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+	Name: "core.SDKVersionUserAgentHandler",
+	Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+		runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+const execEnvVar = `AWS_EXECUTION_ENV`
+const execEnvUAKey = `exec-env`
+
+// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
+// execution environment to the user agent.
+//
+// If the environment variable AWS_EXECUTION_ENV is set, its value will be
+// appended to the user agent string.
+var AddHostExecEnvUserAgentHander = request.NamedHandler{
+	Name: "core.AddHostExecEnvUserAgentHander",
+	Fn: func(r *request.Request) {
+		v := os.Getenv(execEnvVar)
+		if len(v) == 0 {
+			return
+		}
+
+		request.AddToUserAgent(r, execEnvUAKey+"/"+v)
+	},
+}
+
+// AddAwsInternal is a request handler that appends the isAwsInternal marker
+// to the user agent when it has been set for an internal build.
+var AddAwsInternal = request.NamedHandler{
+	Name: "core.AddAwsInternal",
+	Fn: func(r *request.Request) {
+		if len(isAwsInternal) == 0 {
+			return
+		}
+		request.AddToUserAgent(r, isAwsInternal)
+	},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 00000000000..3ad1e798df8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+	// ErrNoValidProvidersFoundInChain is returned when there are no valid
+	// providers in the ChainProvider.
+	//
+	// This has been deprecated. For verbose error messaging set
+	// aws.Config.CredentialsChainVerboseErrors to true.
+	ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+		`no valid providers in chain. Deprecated.
+	For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+		nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain.
+//
+//	creds := credentials.NewChainCredentials(
+//		[]credentials.Provider{
+//			&credentials.EnvProvider{},
+//			&ec2rolecreds.EC2RoleProvider{
+//				Client: ec2metadata.New(sess),
+//			},
+//		})
+//
+//	// Usage of ChainCredentials with aws.Config
+//	svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: creds,
+//	})))
+//
+type ChainProvider struct {
+	Providers     []Provider
+	curr          Provider
+	VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+	return NewCredentials(&ChainProvider{
+		Providers: append([]Provider{}, providers...),
+	})
+}
+
+// Retrieve returns the first provider's credentials value, or an error if no
+// provider returned a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+	var errs []error
+	for _, p := range c.Providers {
+		creds, err := p.Retrieve()
+		if err == nil {
+			c.curr = p
+			return creds, nil
+		}
+		errs = append(errs, err)
+	}
+	c.curr = nil
+
+	var err error
+	err = ErrNoValidProvidersFoundInChain
+	if c.VerboseErrors {
+		err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+	}
+	return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+	if c.curr != nil {
+		return c.curr.IsExpired()
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
new file mode 100644
index 00000000000..6e3406b1f76
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
@@ -0,0 +1,23 @@
+//go:build !go1.7
+// +build !go1.7
+
+package credentials
+
+import (
+	"github.com/aws/aws-sdk-go/internal/context"
+)
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.BackgroundCtx
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
new file mode 100644
index 00000000000..a68df0ee73f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
@@ -0,0 +1,21 @@
+//go:build go1.7
+// +build go1.7
+
+package credentials
+
+import "context"
+
+// backgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func backgroundContext() Context {
+	return context.Background()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
new file mode 100644
index 00000000000..0345fab2d97
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
@@ -0,0 +1,40 @@
+//go:build !go1.9
+// +build !go1.9
+
+package credentials
+
+import "time"
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key returns the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	Value(key interface{}) interface{}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
new file mode 100644
index 00000000000..79018aba738
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
@@ -0,0 +1,14 @@
+//go:build go1.9
+// +build go1.9
+
+package credentials
+
+import "context"
+
+// Context is an alias of the Go stdlib's context.Context interface.
+// It can be used within the SDK's API operation "WithContext" methods.
+//
+// This type, aws.Context, and context.Context are equivalent.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 00000000000..a880a3de8fe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,383 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials is the primary method of getting access to and managing
+// credentials Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new
+// credential Value.
+//
+// The Provider is responsible for determining when the credentials Value has
+// expired. It is also important to note that Credentials will always call
+// Retrieve the first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//	creds := credentials.NewEnvCredentials()
+//
+//	// Retrieve the credentials value
+//	credValue, err := creds.Get()
+//	if err != nil {
+//		// handle error
+//	}
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//	creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//	creds.Expire()
+//	credsValue, err := creds.Get()
+//	// New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//	type MyProvider struct{}
+//	func (m *MyProvider) Retrieve() (Value, error) {...}
+//	func (m *MyProvider) IsExpired() bool {...}
+//
+//	creds := credentials.NewCredentials(&MyProvider{})
+//	credValue, err := creds.Get()
+//
+package credentials

+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/internal/sync/singleflight"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials object can be used to configure a service to not sign
+// requests when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//	svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//		Credentials: credentials.AnonymousCredentials,
+//	})))
+//	// Access public S3 buckets.
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
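+//
+// For illustration, a Value is typically obtained through a Credentials
+// object rather than constructed directly (sketch; key strings are
+// placeholders):
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "TOKEN")
+//	v, err := creds.Get() // v.AccessKeyID == "AKID" when err == nil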
+type Value struct {
+	// AWS Access key ID
+	AccessKeyID string
+
+	// AWS Secret Access Key
+	SecretAccessKey string
+
+	// AWS Session Token
+	SessionToken string
+
+	// Provider used to get credentials
+	ProviderName string
+}
+
+// HasKeys returns if the credentials Value has both AccessKeyID and
+// SecretAccessKey value set.
+func (v Value) HasKeys() bool {
+	return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+	// Retrieve returns nil if it successfully retrieved the value.
+	// Error is returned if the value were not obtainable, or empty.
+	Retrieve() (Value, error)
+
+	// IsExpired returns if the credentials are no longer valid, and need
+	// to be retrieved.
+	IsExpired() bool
+}
+
+// ProviderWithContext is a Provider that can retrieve credentials with a Context
+type ProviderWithContext interface {
+	Provider
+
+	RetrieveWithContext(Context) (Value, error)
+}
+
+// An Expirer is an interface that Providers can implement to expose the expiration
+// time, if known. If the Provider cannot accurately provide this info,
+// it should not implement this interface.
+type Expirer interface {
+	// The time at which the credentials are no longer valid
+	ExpiresAt() time.Time
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+	// The error to be returned from Retrieve
+	Err error
+
+	// The provider name to set on the Value returned by Retrieve
+	ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+	return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+	return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//
+//	type EC2RoleProvider struct {
+//		Expiry
+//		...
+//	}
+type Expiry struct {
+	// The date/time at which the credentials expire
+	expiration time.Time
+
+	// If set will be used by IsExpired to determine the current time.
+	// Defaults to time.Now if CurrentTime is not set. Available for testing
+	// to be able to mock out the current time.
+	CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+	// Passed in expirations should have the monotonic clock values stripped.
+	// This ensures time comparisons will be based on wall-time.
+	e.expiration = expiration.Round(0)
+	if window > 0 {
+		e.expiration = e.expiration.Add(-window)
+	}
+}
+
+// IsExpired returns if the credentials are expired.
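+//
+// A sketch of the interaction with SetExpiration (durations are hypothetical):
+//
+//	var e credentials.Expiry
+//	e.SetExpiration(time.Now().Add(15*time.Minute), 5*time.Minute)
+//	e.IsExpired() // false now; true once the window-reduced 10 minutes elapse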
+func (e *Expiry) IsExpired() bool {
+	curTime := e.CurrentTime
+	if curTime == nil {
+		curTime = time.Now
+	}
+	return e.expiration.Before(curTime())
+}
+
+// ExpiresAt returns the expiration time of the credential
+func (e *Expiry) ExpiresAt() time.Time {
+	return e.expiration
+}
+
+// A Credentials provides concurrency safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronous state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+	sf singleflight.Group
+
+	m        sync.RWMutex
+	creds    Value
+	provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+	c := &Credentials{
+		provider: provider,
+	}
+	return c
+}
+
+// GetWithContext returns the credentials value, or error if the credentials
+// Value failed to be retrieved. Will return early if the passed in context is
+// canceled.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+//
+// Passed in Context is equivalent to aws.Context, and context.Context.
+func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
+	// Check if credentials are cached, and not expired.
+	select {
+	case curCreds, ok := <-c.asyncIsExpired():
+		// ok will only be true if the credentials were not expired. ok will
+		// be false and have no value if the credentials are expired.
+		if ok {
+			return curCreds, nil
+		}
+	case <-ctx.Done():
+		return Value{}, awserr.New("RequestCanceled",
+			"request context canceled", ctx.Err())
+	}
+
+	// Cannot pass context down to the actual retrieve, because the first
+	// context would cancel the whole group when there is no direct
+	// association of items in the group.
+	resCh := c.sf.DoChan("", func() (interface{}, error) {
+		return c.singleRetrieve(&suppressedContext{ctx})
+	})
+	select {
+	case res := <-resCh:
+		return res.Val.(Value), res.Err
+	case <-ctx.Done():
+		return Value{}, awserr.New("RequestCanceled",
+			"request context canceled", ctx.Err())
+	}
+}
+
+func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+		return curCreds, nil
+	}
+
+	var creds Value
+	var err error
+	if p, ok := c.provider.(ProviderWithContext); ok {
+		creds, err = p.RetrieveWithContext(ctx)
+	} else {
+		creds, err = c.provider.Retrieve()
+	}
+	if err == nil {
+		c.creds = creds
+	}
+
+	return creds, err
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
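+//
+// Illustrative call sequence (a sketch of the caching behavior):
+//
+//	v1, _ := creds.Get() // first call: Provider.Retrieve() populates the cache
+//	v2, _ := creds.Get() // served from cache until IsExpired() reports true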
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+	return c.GetWithContext(backgroundContext())
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+	c.m.Lock()
+	defer c.m.Unlock()
+
+	c.creds = Value{}
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+	c.m.RLock()
+	defer c.m.RUnlock()
+
+	return c.isExpiredLocked(c.creds)
+}
+
+// asyncIsExpired returns a channel that yields the cached credentials Value
+// if it has not expired. If the credentials are expired the channel is
+// closed without a value being sent.
+func (c *Credentials) asyncIsExpired() <-chan Value {
+	ch := make(chan Value, 1)
+	go func() {
+		c.m.RLock()
+		defer c.m.RUnlock()
+
+		if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
+			ch <- curCreds
+		}
+
+		close(ch)
+	}()
+
+	return ch
+}
+
+// isExpiredLocked helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpiredLocked(creds interface{}) bool {
+	return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
+}
+
+// ExpiresAt provides access to the functionality of the Expirer interface of
+// the underlying Provider, if it supports that interface. Otherwise, it returns
+// an error.
+func (c *Credentials) ExpiresAt() (time.Time, error) {
+	c.m.RLock()
+	defer c.m.RUnlock()
+
+	expirer, ok := c.provider.(Expirer)
+	if !ok {
+		return time.Time{}, awserr.New("ProviderNotExpirer",
+			fmt.Sprintf("provider %s does not support ExpiresAt()",
+				c.creds.ProviderName),
+			nil)
+	}
+	if c.creds == (Value{}) {
+		// set expiration time to the distant past
+		return time.Time{}, nil
+	}
+	return expirer.ExpiresAt(), nil
+}
+
+type suppressedContext struct {
+	Context
+}
+
+func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
+	return time.Time{}, false
+}
+
+func (s *suppressedContext) Done() <-chan struct{} {
+	return nil
+}
+
+func (s *suppressedContext) Err() error {
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 00000000000..92af5b7250a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,188 @@
+package ec2rolecreds
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// ProviderName provides a name of EC2Role provider
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http.Client,
+// Endpoint, or ExpiryWindow:
+//
+//	p := &ec2rolecreds.EC2RoleProvider{
+//		// Pass in a custom timeout to be used when requesting
+//		// IAM EC2 Role credentials.
+//		Client: ec2metadata.New(sess, aws.Config{
+//			HTTPClient: &http.Client{Timeout: 10 * time.Second},
+//		}),
+//
+//		// Do not use early expiry of credentials. If a non-zero value is
+//		// specified the credentials will be expired early
+//		ExpiryWindow: 0,
+//	}
+type EC2RoleProvider struct {
+	credentials.Expiry
+
+	// Required EC2Metadata client to use when connecting to EC2 metadata service.
+	Client *ec2metadata.EC2Metadata
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause request to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: ec2metadata.New(c),
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+	p := &EC2RoleProvider{
+		Client: client,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or the desired credentials
+// cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+	return m.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or the desired credentials
+// cannot be extracted.
+func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	credsList, err := requestCredList(ctx, m.Client)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	if len(credsList) == 0 {
+		return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+	}
+	credsName := credsList[0]
+
+	roleCreds, err := requestCred(ctx, m.Client, credsName)
+	if err != nil {
+		return credentials.Value{ProviderName: ProviderName}, err
+	}
+
+	m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     roleCreds.AccessKeyID,
+		SecretAccessKey: roleCreds.SecretAccessKey,
+		SessionToken:    roleCreds.Token,
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
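+//
+// The instance metadata document this unmarshals from looks roughly like the
+// following (field values are placeholders, not real credentials):
+//
+//	{
+//	    "Code" : "Success",
+//	    "AccessKeyId" : "AKID",
+//	    "SecretAccessKey" : "SECRET",
+//	    "Token" : "TOKEN",
+//	    "Expiration" : "2024-01-01T06:00:00Z"
+//	}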
+type ec2RoleCredRespBody struct {
+	// Success State
+	Expiration      time.Time
+	AccessKeyID     string
+	SecretAccessKey string
+	Token           string
+
+	// Error state
+	Code    string
+	Message string
+}
+
+const iamSecurityCredsPath = "iam/security-credentials/"
+
+// requestCredList requests a list of credentials from the EC2 service. If
+// there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
+	resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
+	if err != nil {
+		return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+	}
+
+	credsList := []string{}
+	s := bufio.NewScanner(strings.NewReader(resp))
+	for s.Scan() {
+		credsList = append(credsList, s.Text())
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, awserr.New(request.ErrCodeSerialization,
+			"failed to read EC2 instance role from metadata service", err)
+	}
+
+	return credsList, nil
+}
+
+// requestCred requests the credentials for the named role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the
+// response, an error will be returned.
+func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+	resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName))
+	if err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New("EC2RoleRequestError",
+				fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	respCreds := ec2RoleCredRespBody{}
+	if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+		return ec2RoleCredRespBody{},
+			awserr.New(request.ErrCodeSerialization,
+				fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+				err)
+	}
+
+	if respCreds.Code != "Success" {
+		// If an error code was returned something failed requesting the role.
+		return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+	}
+
+	return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644
index 00000000000..329f788a38a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
@@ -0,0 +1,255 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om...."
+//	}
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+//
+//	{
+//	    "AccessKeyId" : "MUA...",
+//	    "SecretAccessKey" : "/7PC5om....",
+//	    "Token" : "AQoDY....=",
+//	    "Expiration" : "2016-02-25T06:03:31Z"
+//	}
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//
+//	{
+//	    "code": "ErrorCode",
+//	    "message": "Helpful error message."
+//	}
+package endpointcreds
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+	staticCreds bool
+	credentials.Expiry
+
+	// Requires an AWS Client to make HTTP requests to the endpoint with.
+	// The Endpoint the request will be made to is provided by the aws.Config's
+	// Endpoint value.
+	Client *client.Client
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// Optional authorization token value if set will be used as the value of
+	// the Authorization header of the endpoint credential request.
+	//
+	// When constructed from environment, the provider will use the value of the
+	// AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token.
+	//
+	// Will be overridden if AuthorizationTokenProvider is configured.
+	AuthorizationToken string
+
+	// Optional auth provider func to dynamically load the auth token from a file
+	// every time a credential is retrieved.
+	//
+	// When constructed from environment, the provider will read and use the content
+	// of the file pointed to by the AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment
+	// variable as the auth token every time credentials are retrieved.
+	//
+	// Will override AuthorizationToken if configured.
+	AuthorizationTokenProvider AuthTokenProvider
+}
+
+// AuthTokenProvider defines an interface to dynamically load a value to be passed
+// for the Authorization header of a credentials request.
+type AuthTokenProvider interface {
+	GetToken() (string, error)
+}
+
+// TokenProviderFunc is a func type implementing the AuthTokenProvider interface,
+// and enables customizing token provider behavior.
+type TokenProviderFunc func() (string, error)
+
+// GetToken retrieves an auth token according to the TokenProviderFunc implementation.
+func (p TokenProviderFunc) GetToken() (string, error) {
+	return p()
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
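+//
+// A minimal sketch of constructing the provider from a session (the endpoint
+// URL below is a placeholder, not part of this package):
+//
+//	sess := session.Must(session.NewSession())
+//	provider := endpointcreds.NewProviderClient(*sess.Config, sess.Handlers,
+//		"http://127.0.0.1:8080/credentials")
+//	creds := credentials.NewCredentials(provider)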
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a pointer to a new Credentials object +// wrapping the endpoint credentials Provider. +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + resp, err := p.getCredentials(ctx) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.SetContext(ctx) + req.HTTPRequest.Header.Set("Accept", "application/json") + + authToken := p.AuthorizationToken + var err error + if p.AuthorizationTokenProvider != nil { + authToken, err = p.AuthorizationTokenProvider.GetToken() + if err != nil { + return nil, fmt.Errorf("get authorization token: %v", err) + } + } + + if strings.ContainsAny(authToken, "\r\n") { + return nil, fmt.Errorf("authorization token contains invalid newline sequence") + } + if len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { 
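+		// Note: a decode failure is reported as a serialization error so
+		// callers can distinguish a malformed credentials payload from a
+		// transport-level failure.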
+		r.Error = awserr.New(request.ErrCodeSerialization,
+			"failed to decode endpoint credentials",
+			err,
+		)
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var errOut errorOutput
+	err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to decode error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error.
+	r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 00000000000..54c5cf7333f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,74 @@
+package credentials
+
+import (
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName provides a name of Env provider
+const EnvProviderName = "EnvProvider"
+
+var (
+	// ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+	// found in the process's environment.
+	ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+	// ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+	// can't be found in the process's environment.
+	ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+	retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+	return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+	e.retrieved = false
+
+	id := os.Getenv("AWS_ACCESS_KEY_ID")
+	if id == "" {
+		id = os.Getenv("AWS_ACCESS_KEY")
+	}
+
+	secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+	if secret == "" {
+		secret = os.Getenv("AWS_SECRET_KEY")
+	}
+
+	if id == "" {
+		return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+	}
+
+	if secret == "" {
+		return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+	}
+
+	e.retrieved = true
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+		ProviderName:    EnvProviderName,
+	}, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
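+// Environment credentials are static, so this reports true only until
+// Retrieve has successfully read the variables from the environment.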
+func (e *EnvProvider) IsExpired() bool {
+	return !e.retrieved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 00000000000..7fc91d9d204
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
new file mode 100644
index 00000000000..18694f07f7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
@@ -0,0 +1,438 @@
+/*
+Package processcreds is a credential Provider to retrieve `credential_process`
+credentials.
+
+WARNING: The following describes a method of sourcing credentials from an external
+process. This can potentially be dangerous, so proceed with caution. Other
+credential providers should be preferred if at all possible. If using this
+option, you should make sure that the config file is as locked down as possible
+using security best practices for your operating system.
+
+You can use credentials from a `credential_process` in a variety of ways.
+
+One way is to set up your shared config file, located in the default
+location, with the `credential_process` key and the command you want to be
+called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
+(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
+
+	[default]
+	credential_process = /command/to/call
+
+Creating a new session will use the credential process to retrieve credentials.
+NOTE: If there are credentials in the profile you are using, the credential
+process will not be used.
+
+	// Initialize a session to load credentials.
+	sess, _ := session.NewSession(&aws.Config{
+		Region: aws.String("us-east-1")},
+	)
+
+	// Create S3 service client to use the credentials.
+	svc := s3.New(sess)
+
+Another way to use the `credential_process` method is by using
+`credentials.NewCredentials()` and providing a command to be executed to
+retrieve credentials:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentials("/path/to/command")
+
+	// Create service client value configured for credentials.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+You can set a non-default timeout for the `credential_process` with another
+constructor, `processcreds.NewCredentialsTimeout()`, providing the timeout. To
+set a 500 millisecond timeout:
+
+	// Create credentials using the ProcessProvider.
+	creds := processcreds.NewCredentialsTimeout(
+		"/path/to/command",
+		time.Duration(500) * time.Millisecond)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+ + creds := processcreds.NewCredentials( + "/path/to/command", + func(opt *ProcessProvider) { + opt.Timeout = time.Duration(2) * time.Minute + opt.Duration = time.Duration(60) * time.Minute + opt.MaxBufSize = 2048 + }) + +You can also use your own `exec.Cmd`: + + // Create an exec.Cmd + myCommand := exec.Command("/path/to/command") + + // Create credentials using your exec.Cmd and custom timeout + creds := processcreds.NewCredentialsCommand( + myCommand, + func(opt *processcreds.ProcessProvider) { + opt.Timeout = time.Duration(1) * time.Second + }) +*/ +package processcreds + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ProviderName is the name this credentials provider will label any + // returned credentials Value with. + ProviderName = `ProcessProvider` + + // ErrCodeProcessProviderParse error parsing process output + ErrCodeProcessProviderParse = "ProcessProviderParseError" + + // ErrCodeProcessProviderVersion version error in output + ErrCodeProcessProviderVersion = "ProcessProviderVersionError" + + // ErrCodeProcessProviderRequired required attribute missing in output + ErrCodeProcessProviderRequired = "ProcessProviderRequiredError" + + // ErrCodeProcessProviderExecution execution of command failed + ErrCodeProcessProviderExecution = "ProcessProviderExecutionError" + + // errMsgProcessProviderTimeout process took longer than allowed + errMsgProcessProviderTimeout = "credential process timed out" + + // errMsgProcessProviderProcess process error + errMsgProcessProviderProcess = "error in credential_process" + + // errMsgProcessProviderParse problem parsing output + errMsgProcessProviderParse = "parse failed of credential_process output" + + // errMsgProcessProviderVersion version error in output + errMsgProcessProviderVersion = "wrong version in process output (not 1)" + + // errMsgProcessProviderMissKey missing access key id in output + errMsgProcessProviderMissKey = "missing AccessKeyId in process output" + + // errMsgProcessProviderMissSecret missing secret acess key in output + errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output" + + // errMsgProcessProviderPrepareCmd prepare of command failed + errMsgProcessProviderPrepareCmd = "failed to prepare command" + + // errMsgProcessProviderEmptyCmd command must not be empty + errMsgProcessProviderEmptyCmd = "command must not be empty" + + // errMsgProcessProviderPipe failed to initialize pipe + errMsgProcessProviderPipe = "failed to initialize pipe" + + // DefaultDuration is the default amount of time in minutes that the + // credentials will be valid for. + DefaultDuration = time.Duration(15) * time.Minute + + // DefaultBufSize limits buffer size from growing to an enormous + // amount due to a faulty process. + DefaultBufSize = int(8 * sdkio.KibiByte) + + // DefaultTimeout default limit on time a process can run. + DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. 
+	Duration time.Duration
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// The OS command to execute that should return JSON with credential
+	// information on its stdout.
+	command *exec.Cmd
+
+	// MaxBufSize limits memory usage from growing to an enormous
+	// amount due to a faulty process.
+	MaxBufSize int
+
+	// Timeout limits the time a process can run.
+	Timeout time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// ProcessProvider. The credentials will expire every 15 minutes by default.
+func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    exec.Command(command),
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsTimeout returns a pointer to a new Credentials object with
+// the specified command and timeout, and default duration and max buffer size.
+func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
+	p := NewCredentials(command, func(opt *ProcessProvider) {
+		opt.Timeout = timeout
+	})
+
+	return p
+}
+
+// NewCredentialsCommand returns a pointer to a new Credentials object with
+// the specified command, and default timeout, duration and max buffer size.
+func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
+	p := &ProcessProvider{
+		command:    command,
+		Duration:   DefaultDuration,
+		Timeout:    DefaultTimeout,
+		MaxBufSize: DefaultBufSize,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+	// As of this writing, the Version key must be set to 1. This might
+	// increment over time as the structure evolves.
+	Version int
+
+	// The access key ID that identifies the temporary security credentials.
+	AccessKeyID string `json:"AccessKeyId"`
+
+	// The secret access key that can be used to sign requests.
+	SecretAccessKey string
+
+	// The token that users must pass to the service API to use the temporary credentials.
+	SessionToken string
+
+	// The date on which the current credentials expire.
+	Expiration *time.Time
+}
+
+// Retrieve executes the 'credential_process' and returns the credentials.
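+//
+// As an illustration (values are placeholders), the process is expected to
+// print a JSON document of the CredentialProcessResponse shape on stdout:
+//
+//	{
+//		"Version": 1,
+//		"AccessKeyId": "AKID...",
+//		"SecretAccessKey": "SECRET...",
+//		"SessionToken": "TOKEN...",
+//		"Expiration": "2019-05-29T00:21:43Z"
+//	}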
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &CredentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. +func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
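+//
+// The process's stdout is drained through an os.Pipe wrapped in an
+// io.LimitReader capped at MaxBufSize, and the run is abandoned with a
+// timeout error if it exceeds Timeout.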
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 00000000000..22b5c5d9f32 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves access key pair (access key ID, +// secret access key, and session token if present) credentials from the current +// user's home directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. 
If the
+	// env value is empty will default to current user's home directory.
+	// Linux/OSX: "$HOME/.aws/credentials"
+	// Windows:   "%USERPROFILE%\.aws\credentials"
+	Filename string
+
+	// AWS Profile to extract credentials from the shared credentials file. If empty
+	// will default to environment variable "AWS_PROFILE" or "default" if
+	// environment variable is also not set.
+	Profile string
+
+	// retrieved states if the credentials have been successfully retrieved.
+	retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+	return NewCredentials(&SharedCredentialsProvider{
+		Filename: filename,
+		Profile:  profile,
+	})
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+	p.retrieved = false
+
+	filename, err := p.filename()
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	creds, err := loadProfile(filename, p.profile())
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, err
+	}
+
+	p.retrieved = true
+	return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+	return !p.retrieved
+}
+
+// loadProfile loads from the file pointed to by the shared credentials filename
+// for the given profile. The credentials retrieved from the profile will be
+// returned, or an error. An error will be returned if it fails to read from
+// the file, or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+	config, err := ini.OpenFile(filename)
+	if err != nil {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+	}
+
+	iniProfile, ok := config.GetSection(profile)
+	if !ok {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
+	}
+
+	id := iniProfile.String("aws_access_key_id")
+	if len(id) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+			nil)
+	}
+
+	secret := iniProfile.String("aws_secret_access_key")
+	if len(secret) == 0 {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+			nil)
+	}
+
+	// Default to empty string if not found
+	token := iniProfile.String("aws_session_token")
+
+	return Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+		ProviderName:    SharedCredsProviderName,
+	}, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose; failure when opening the file
+		// would have been a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
+	return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+	if p.Profile == "" {
+		p.Profile = os.Getenv("AWS_PROFILE")
+	}
+	if p.Profile == "" {
+		p.Profile = "default"
+	}
+
+	return p.Profile
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
new file mode 100644
index 00000000000..18c940ab3c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
@@ -0,0 +1,60 @@
+// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
+//
+// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
+// expects that you have already performed the SSO login flow using the AWS CLI using the "aws sso login" command, or by
+// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
+// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed, an error will be returned.
+//
+// Loading AWS SSO credentials with the AWS shared configuration file
+//
+// You can configure AWS SSO credentials from the AWS shared configuration file by
+// specifying the required keys in the profile:
+//
+//	sso_account_id
+//	sso_region
+//	sso_role_name
+//	sso_start_url
+//
+// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that define the target
+// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
+// provided, or an error will be returned.
+//
+//	[profile devsso]
+//	sso_start_url = https://my-sso-portal.awsapps.com/start
+//	sso_role_name = SSOReadOnlyRole
+//	sso_region = us-east-1
+//	sso_account_id = 123456789012
+//
+// Using the session package, you can load the AWS SDK shared configuration, and specify that this profile be used to
+// retrieve credentials. For example:
+//
+//	sess, err := session.NewSessionWithOptions(session.Options{
+//		SharedConfigState: session.SharedConfigEnable,
+//		Profile:           "devsso",
+//	})
+//	if err != nil {
+//		return err
+//	}
+//
+// Programmatically loading AWS SSO credentials directly
+//
+// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
+// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
+// +// svc := sso.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region +// }) +// +// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start") +// +// credentials, err := provider.Get() +// if err != nil { +// return err +// } +// +// Additional Resources +// +// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +// +// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html +package ssocreds diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go new file mode 100644 index 00000000000..d4df39a7a22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go @@ -0,0 +1,10 @@ +//go:build !windows +// +build !windows + +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go new file mode 100644 index 00000000000..eb48f61e5bc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go @@ -0,0 +1,7 @@ +package ssocreds + +import "os" + +func getHomeDirectory() string { + return os.Getenv("USERPROFILE") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go new file mode 100644 index 00000000000..4138e725dde --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go @@ -0,0 +1,187 @@ +package ssocreds + +import ( + "crypto/sha1" + "encoding/hex" + "encoding/json" + "io/ioutil" + "path/filepath" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/auth/bearer" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sso" + "github.com/aws/aws-sdk-go/service/sso/ssoiface" +) + +// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid. +// To refresh the SSO session run aws sso login with the corresponding profile. +const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken" + +const invalidTokenMessage = "the SSO session has expired or is invalid" + +func init() { + nowTime = time.Now + defaultCacheLocation = defaultCacheLocationImpl +} + +var nowTime func() time.Time + +// ProviderName is the name of the provider used to specify the source of credentials. +const ProviderName = "SSOProvider" + +var defaultCacheLocation func() string + +func defaultCacheLocationImpl() string { + return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache") +} + +// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token. +type Provider struct { + credentials.Expiry + + // The Client which is configured for the AWS Region where the AWS SSO user portal is located. + Client ssoiface.SSOAPI + + // The AWS account that is assigned to the user. + AccountID string + + // The role name that is assigned to the user. + RoleName string + + // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal. 
+	StartURL string
+
+	// The filepath the cached token will be retrieved from. If unset, Provider
+	// will use the StartURL to determine the filepath at:
+	//
+	//	~/.aws/sso/cache/<sha1 hash of StartURL>.json
+	//
+	// If a custom cached token filepath is used, the Provider's StartURL
+	// parameter will be ignored.
+	CachedTokenFilepath string
+
+	// Used by the SSOCredentialProvider if a token configuration
+	// profile is used in the shared config.
+	TokenProvider bearer.TokenProvider
+}
+
+// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
+	return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...)
+}
+
+// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
+// for the AWS Region where the AWS SSO user portal is located.
+func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
+	p := &Provider{
+		Client:    client,
+		AccountID: accountID,
+		RoleName:  roleName,
+		StartURL:  startURL,
+	}
+
+	for _, fn := range optFns {
+		fn(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
+// by exchanging the accessToken present in ~/.aws/sso/cache.
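+//
+// If a TokenProvider is configured it is consulted for the access token
+// first; otherwise the token is read from CachedTokenFilepath, which is
+// derived from StartURL when unset.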
+func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	var accessToken *string
+	if p.TokenProvider != nil {
+		token, err := p.TokenProvider.RetrieveBearerToken(ctx)
+		if err != nil {
+			return credentials.Value{}, err
+		}
+		accessToken = &token.Value
+	} else {
+		if p.CachedTokenFilepath == "" {
+			cachedTokenFilePath, err := getCachedFilePath(p.StartURL)
+			if err != nil {
+				return credentials.Value{}, err
+			}
+			p.CachedTokenFilepath = cachedTokenFilePath
+		}
+
+		tokenFile, err := loadTokenFile(p.CachedTokenFilepath)
+		if err != nil {
+			return credentials.Value{}, err
+		}
+		accessToken = &tokenFile.AccessToken
+	}
+
+	output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
+		AccessToken: accessToken,
+		AccountId:   &p.AccountID,
+		RoleName:    &p.RoleName,
+	})
+	if err != nil {
+		return credentials.Value{}, err
+	}
+
+	expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC()
+	p.SetExpiration(expireTime, 0)
+
+	return credentials.Value{
+		AccessKeyID:     aws.StringValue(output.RoleCredentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(output.RoleCredentials.SessionToken),
+		ProviderName:    ProviderName,
+	}, nil
+}
+
+func getCachedFilePath(startUrl string) (string, error) {
+	hash := sha1.New()
+	_, err := hash.Write([]byte(startUrl))
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil
+}
+
+type token struct {
+	AccessToken string  `json:"accessToken"`
+	ExpiresAt   rfc3339 `json:"expiresAt"`
+	Region      string  `json:"region,omitempty"`
+	StartURL    string  `json:"startUrl,omitempty"`
+}
+
+func (t token) Expired() bool {
+	return nowTime().Round(0).After(time.Time(t.ExpiresAt))
+}
+
+func loadTokenFile(cachedTokenPath string) (t token, err error) {
+	fileBytes, err := ioutil.ReadFile(cachedTokenPath)
+	if err != nil {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+	}
+
+	if err := json.Unmarshal(fileBytes, &t); err != nil {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
+	}
+
+	if len(t.AccessToken) == 0 {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+	}
+
+	if t.Expired() {
+		return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
+	}
+
+	return t, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
new file mode 100644
index 00000000000..f6fa88451af
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
@@ -0,0 +1,237 @@
+package ssocreds
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+var resolvedOsUserHomeDir = shareddefaults.UserHomeDir
+
+// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or
+// an error if unable to derive the path. The key is used to compute a SHA1
+// value that is hex encoded.
+//
+// Derives the filepath using the key as:
+//
+//	~/.aws/sso/cache/<hex-encoded SHA1 of key>.json
+func StandardCachedTokenFilepath(key string) (string, error) {
+	homeDir := resolvedOsUserHomeDir()
+	if len(homeDir) == 0 {
+		return "", fmt.Errorf("unable to get USER's home directory for cached token")
+	}
+	hash := sha1.New()
+	if _, err := hash.Write([]byte(key)); err != nil {
+		return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err)
+	}
+
+	cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
+
+	return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
+}
+
+type tokenKnownFields struct {
+	AccessToken string   `json:"accessToken,omitempty"`
+	ExpiresAt   *rfc3339 `json:"expiresAt,omitempty"`
+
+	RefreshToken string `json:"refreshToken,omitempty"`
+	ClientID     string `json:"clientId,omitempty"`
+	ClientSecret string `json:"clientSecret,omitempty"`
+}
+
+type cachedToken struct {
+	tokenKnownFields
+	UnknownFields map[string]interface{} `json:"-"`
+}
+
+// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields
+// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal
+// This function adds some extra validation to the known fields and captures unknown fields.
+func (t cachedToken) MarshalJSON() ([]byte, error) {
+	fields := map[string]interface{}{}
+
+	setTokenFieldString(fields, "accessToken", t.AccessToken)
+	setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
+
+	setTokenFieldString(fields, "refreshToken", t.RefreshToken)
+	setTokenFieldString(fields, "clientId", t.ClientID)
+	setTokenFieldString(fields, "clientSecret", t.ClientSecret)
+
+	for k, v := range t.UnknownFields {
+		if _, ok := fields[k]; ok {
+			return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
+		}
+		fields[k] = v
+	}
+
+	return json.Marshal(fields)
+}
+
+func setTokenFieldString(fields map[string]interface{}, key, value string) {
+	if value == "" {
+		return
+	}
+	fields[key] = value
+}
+func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
+	if value == nil {
+		return
+	}
+	fields[key] = value
+}
+
+// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified
+// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal
+// This function adds some extra validation to the known fields and captures unknown fields.
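+//
+// For illustration only (values are placeholders), a cached token file as
+// written by the AWS CLI can look like:
+//
+//	{
+//		"accessToken": "TOKEN...",
+//		"expiresAt": "2024-01-02T15:04:05Z",
+//		"refreshToken": "REFRESH...",
+//		"clientId": "CLIENTID...",
+//		"clientSecret": "SECRET..."
+//	}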
+func (t *cachedToken) UnmarshalJSON(b []byte) error { + var fields map[string]interface{} + if err := json.Unmarshal(b, &fields); err != nil { + return nil + } + + t.UnknownFields = map[string]interface{}{} + + for k, v := range fields { + var err error + switch k { + case "accessToken": + err = getTokenFieldString(v, &t.AccessToken) + case "expiresAt": + err = getTokenFieldRFC3339(v, &t.ExpiresAt) + case "refreshToken": + err = getTokenFieldString(v, &t.RefreshToken) + case "clientId": + err = getTokenFieldString(v, &t.ClientID) + case "clientSecret": + err = getTokenFieldString(v, &t.ClientSecret) + default: + t.UnknownFields[k] = v + } + + if err != nil { + return fmt.Errorf("field %q, %v", k, err) + } + } + + return nil +} + +func getTokenFieldString(v interface{}, value *string) error { + var ok bool + *value, ok = v.(string) + if !ok { + return fmt.Errorf("expect value to be string, got %T", v) + } + return nil +} + +func getTokenFieldRFC3339(v interface{}, value **rfc3339) error { + var stringValue string + if err := getTokenFieldString(v, &stringValue); err != nil { + return err + } + + timeValue, err := parseRFC3339(stringValue) + if err != nil { + return err + } + + *value = &timeValue + return nil +} + +func loadCachedToken(filename string) (cachedToken, error) { + fileBytes, err := ioutil.ReadFile(filename) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err) + } + + var t cachedToken + if err := json.Unmarshal(fileBytes, &t); err != nil { + return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err) + } + + if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() { + return cachedToken{}, fmt.Errorf( + "cached SSO token must contain accessToken and expiresAt fields") + } + + return t, nil +} + +func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) { + tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10) + if err := writeCacheFile(tmpFilename, fileMode, t); err != nil { + return err + } + + if err := os.Rename(tmpFilename, filename); err != nil { + return fmt.Errorf("failed to replace old cached SSO token file, %v", err) + } + + return nil +} + +func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) { + var f *os.File + f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode) + if err != nil { + return fmt.Errorf("failed to create cached SSO token file %v", err) + } + + defer func() { + closeErr := f.Close() + if err == nil && closeErr != nil { + err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr) + } + }() + + encoder := json.NewEncoder(f) + + if err = encoder.Encode(t); err != nil { + return fmt.Errorf("failed to serialize cached SSO token, %v", err) + } + + return nil +} + +type rfc3339 time.Time + +// UnmarshalJSON decode rfc3339 from JSON format +func (r *rfc3339) UnmarshalJSON(bytes []byte) error { + var value string + var err error + + if err = json.Unmarshal(bytes, &value); err != nil { + return err + } + + *r, err = parseRFC3339(value) + return err +} + +func parseRFC3339(v string) (rfc3339, error) { + parsed, err := time.Parse(time.RFC3339, v) + if err != nil { + return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err) + } + + return rfc3339(parsed), nil +} + +// MarshalJSON encode rfc3339 to JSON format time +func (r *rfc3339) MarshalJSON() ([]byte, error) { + value := time.Time(*r).Format(time.RFC3339) + + // Use JSON unmarshal 
to unescape the quoted value making use of JSON's
+	// quoting rules.
+	return json.Marshal(value)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
new file mode 100644
index 00000000000..3388b78b4f4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
@@ -0,0 +1,148 @@
+package ssocreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/auth/bearer"
+	"github.com/aws/aws-sdk-go/service/ssooidc"
+)
+
+// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
+// client for calling the CreateToken operation to refresh the SSO token.
+type CreateTokenAPIClient interface {
+	CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error)
+}
+
+// SSOTokenProviderOptions provides the options for configuring the
+// SSOTokenProvider.
+type SSOTokenProviderOptions struct {
+	// Client that can be overridden
+	Client CreateTokenAPIClient
+
+	// The path of the file containing the cached SSO token to be read from.
+	// Initialized by the NewSSOTokenProvider's cachedTokenFilepath parameter.
+	CachedTokenFilepath string
+}
+
+// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
+// Bearer Authentication. The SSOTokenProvider can only be used to refresh
+// already cached SSO Tokens. This utility cannot perform the initial SSO
+// create token.
+//
+// The initial SSO create token should be performed with the AWS CLI before the
+// Go application using the SSOTokenProvider will need to retrieve the SSO
+// token. If the AWS CLI has not created the token cache file, this provider
+// will return an error when attempting to retrieve the cached token.
+//
+// This provider will attempt to refresh the cached SSO token periodically if
+// needed when RetrieveBearerToken is called.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
+// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
+type SSOTokenProvider struct {
+	options SSOTokenProviderOptions
+}
+
+// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
+// periodically refresh the SSO token cache stored in the cachedTokenFilepath.
+// The cachedTokenFilepath file's content will be rewritten by the token
+// provider when the token is refreshed.
+//
+// The client must be configured for the AWS region the SSO token was created for.
+func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
+	options := SSOTokenProviderOptions{
+		Client:              client,
+		CachedTokenFilepath: cachedTokenFilepath,
+	}
+	for _, fn := range optFns {
+		fn(&options)
+	}
+
+	provider := &SSOTokenProvider{
+		options: options,
+	}
+
+	return provider
+}
+
+// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
+// the SSOTokenProvider was created with. If the token has expired
+// RetrieveBearerToken will attempt to refresh it. If the token cannot be
+// refreshed or is not present an error will be returned.
+//
+// A utility such as the AWS CLI must be used to initially create the SSO
+// session and cached token file.
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html +func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) { + cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath) + if err != nil { + return bearer.Token{}, err + } + + if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) { + cachedToken, err = p.refreshToken(cachedToken) + if err != nil { + return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err) + } + } + + expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt)) + return bearer.Token{ + Value: cachedToken.AccessToken, + CanExpire: !expiresAt.IsZero(), + Expires: expiresAt, + }, nil +} + +func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) { + if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" { + return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed") + } + + createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{ + ClientId: &token.ClientID, + ClientSecret: &token.ClientSecret, + RefreshToken: &token.RefreshToken, + GrantType: aws.String("refresh_token"), + }) + if err != nil { + return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err) + } + if createResult.ExpiresIn == nil { + return cachedToken{}, fmt.Errorf("missing required field ExpiresIn") + } + if createResult.AccessToken == nil { + return cachedToken{}, fmt.Errorf("missing required field AccessToken") + } + if createResult.RefreshToken == nil { + return cachedToken{}, fmt.Errorf("missing required field RefreshToken") + } + + expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second) + + token.AccessToken = *createResult.AccessToken + token.ExpiresAt = (*rfc3339)(&expiresAt) + token.RefreshToken = *createResult.RefreshToken + + fileInfo, err := os.Stat(p.options.CachedTokenFilepath) + if err != nil { + return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file %v", err) + } + + if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil { + return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err) + } + + return token, nil +} + +func toTime(p *time.Time) (v time.Time) { + if p == nil { + return v + } + + return *p +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 00000000000..cbba1e3d560 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. Token is only required +// for temporary security credentials retrieved via STS, otherwise an empty +// string can be passed for this parameter. 
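+//
+// A minimal sketch (placeholder values only; sess is an existing
+// session.Session):
+//
+//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
+//	svc := s3.New(sess, &aws.Config{Credentials: creds})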
+func NewStaticCredentials(id, secret, token string) *Credentials {
+	return NewCredentials(&StaticProvider{Value: Value{
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
+	}})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+	return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+	if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+		return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+	}
+
+	if len(s.Value.ProviderName) == 0 {
+		s.Value.ProviderName = StaticProviderName
+	}
+	return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644
index 00000000000..86db488defa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -0,0 +1,371 @@
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+# Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+	// Initial credentials loaded from SDK's default credential chain. Such as
+	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+	// Role. These credentials will be used to make the STS Assume Role API.
+	sess := session.Must(session.NewSession())
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN.
+	creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+# Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN using the MFA token code provided.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenCode = aws.String("00000000")
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+# Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+	// Create the credentials from AssumeRoleProvider to assume the role
+	// referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+	creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+		p.SerialNumber = aws.String("myTokenSerialNumber")
+		p.TokenProvider = stscreds.StdinTokenProvider
+	})
+
+	// Create service client value configured for credentials
+	// from assumed role.
+	svc := s3.New(sess, &aws.Config{Credentials: creds})
+*/
+package stscreds
+
+import (
+	"fmt"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+	var v string
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
+	_, err := fmt.Scanln(&v)
+
+	return v, err
+}
+
+// ProviderName provides a name of AssumeRole provider
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+	AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+type assumeRolerWithContext interface {
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time in minutes that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDKs default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configures an assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+	credentials.Expiry
+
+	// STS client to make assume role request with.
+	Client AssumeRoler
+
+	// Role to be assumed.
+	RoleARN string
+
+	// Session name, if you wish to reuse the credentials elsewhere.
+	RoleSessionName string
+
+	// Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
+	Tags []*sts.Tag
+
+	// A list of keys for session tags that you want to set as transitive.
+	// If you set a tag key as transitive, the corresponding key and value pass to subsequent sessions in a role chain.
+	TransitiveTagKeys []*string
+
+	// Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+	Duration time.Duration
+
+	// Optional ExternalID to pass along, defaults to nil if not set.
+	ExternalID *string
+
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string
+
+	// The ARNs of IAM managed policies you want to use as managed session policies.
+	// The policies must exist in the same account as the role.
+	//
+	// This parameter is optional. You can provide up to 10 managed policy ARNs.
+	// However, the plain text that you use for both inline and managed session
+	// policies can't exceed 2,048 characters.
+	//
+	// An AWS conversion compresses the passed session policies and session tags
+	// into a packed binary format that has a separate limit. Your request can fail
+	// for this limit even if your plain text meets the other requirements. The
+	// PackedPolicySize response element indicates by percentage how close the policies
+	// and tags for your request are to the upper size limit.
+	//
+	// Passing policies to this operation returns new temporary credentials. The
+	// resulting session's permissions are the intersection of the role's identity-based
+	// policy and the session policies. You can use the role's temporary credentials
+	// in subsequent AWS API calls to access resources in the account that owns
+	// the role. You cannot use session policies to grant more permissions than
+	// those allowed by the identity-based policy of the role that is being assumed.
+	// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+	// in the IAM User Guide.
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// The identification number of the MFA device that is associated with the user
+	// who is making the AssumeRole call. Specify this value if the trust policy
+	// of the role being assumed includes a condition that requires MFA authentication.
+	// The value is either the serial number for a hardware device (such as GAHT12345678)
+	// or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+	SerialNumber *string
+
+	// The SourceIdentity, which is used to identify a persistent identity through the whole session.
+	// For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+	SourceIdentity *string
+
+	// The value provided by the MFA device, if the trust policy of the role being
+	// assumed requires MFA (that is, if the policy includes a condition that tests
+	// for MFA). If the role being assumed requires MFA and if the TokenCode value
+	// is missing or expired, the AssumeRole call returns an "access denied" error.
+	//
+	// If SerialNumber is set and neither TokenCode nor TokenProvider is also
+	// set, an error will be returned.
+	TokenCode *string
+
+	// Async method of providing MFA token code for assuming an IAM role with MFA.
+	// The value returned by the function will be used as the TokenCode in the Retrieve
+	// call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed when SerialNumber is also set and
+	// TokenCode is not set.
+	//
+	// If both TokenCode and TokenProvider are set, TokenProvider will be used and
+	// TokenCode is ignored.
+	TokenProvider func() (string, error)
+
+	// ExpiryWindow will allow the credentials to trigger refreshing prior to
+	// the credentials actually expiring. This is beneficial so race conditions
+	// with expiring credentials do not cause requests to fail unexpectedly
+	// due to ExpiredTokenException exceptions.
+	//
+	// So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+	// 10 seconds before the credentials are actually expired.
+	//
+	// If ExpiryWindow is 0 or less it will be ignored.
+	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected behavior.
+	// With a MaxJitterFrac value of 0 (the default), no jitter will be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+	//
+	// MaxJitterFrac should not be negative.
+	MaxJitterFrac float64
+}
+
+// NewCredentials returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role will be named after a nanosecond timestamp of this operation. The
+// Credentials value will attempt to refresh the credentials using the provider
+// when Credentials.Get is called, if the cached credentials are expiring.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+	p := &AssumeRoleProvider{
+		Client:   sts.New(c),
+		RoleARN:  roleARN,
+		Duration: DefaultDuration,
+	}
+
+	for _, option := range options {
+		option(p)
+	}
+
+	return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
+// AssumeRoleProvider.
The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. The +// Credentials value will attempt to refresh the credentials using the provider +// when Credentials.Get is called, if the cached credentials are expiring. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + return p.RetrieveWithContext(aws.BackgroundContext()) +} + +// RetrieveWithContext generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) { + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration)) + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + Tags: p.Tags, + PolicyArns: p.PolicyArns, + TransitiveTagKeys: p.TransitiveTagKeys, + SourceIdentity: p.SourceIdentity, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + var roleOutput *sts.AssumeRoleOutput + var err error + + if c, ok := p.Client.(assumeRolerWithContext); ok { + roleOutput, err = c.AssumeRoleWithContext(ctx, input) + } else { + roleOutput, err = p.Client.AssumeRole(input) + } + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
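+	// SetExpiration records the STS expiry time; the ExpiryWindow offset makes
+	// IsExpired report true early, so a refresh happens before hard expiry.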
+	p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+	return credentials.Value{
+		AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+		SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+		SessionToken:    *roleOutput.Credentials.SessionToken,
+		ProviderName:    ProviderName,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644
index 00000000000..19ad619aa3d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
@@ -0,0 +1,182 @@
+package stscreds
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+	// ErrCodeWebIdentity will be used as an error code when constructing
+	// a new error to be returned during session creation or retrieval.
+	ErrCodeWebIdentity = "WebIdentityErr"
+
+	// WebIdentityProviderName is the web identity provider name
+	WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. This can be overridden to easily test and
+// compare against fixed time values.
+var now = time.Now
+
+// TokenFetcher should return WebIdentity token bytes or an error
+type TokenFetcher interface {
+	FetchToken(credentials.Context) ([]byte, error)
+}
+
+// FetchTokenPath is a path to a WebIdentity token file
+type FetchTokenPath string
+
+// FetchToken returns a token by reading from the filesystem
+func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
+	data, err := ioutil.ReadFile(string(f))
+	if err != nil {
+		errMsg := fmt.Sprintf("unable to read file at %s", f)
+		return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
+	}
+	return data, nil
+}
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+	credentials.Expiry
+
+	// The policy ARNs to use with the web identity assumed role.
+	PolicyArns []*sts.PolicyDescriptorType
+
+	// Duration the STS credentials will be valid for. Truncated to seconds.
+	// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
+	// expiry duration. See
+	// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
+	// for more information.
+	Duration time.Duration
+
+	// The amount of time before expiry that the credentials will be refreshed.
+	// This is useful to refresh credentials before they expire, reducing the
+	// risk of using credentials as they expire. If unset, will default to no
+	// expiry window.
+	ExpiryWindow time.Duration
+
+	client stsiface.STSAPI
+
+	tokenFetcher    TokenFetcher
+	roleARN         string
+	roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options, and wrap with the credentials.NewCredentials helper.
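+//
+// A minimal sketch of the replacement pattern (the session value, role ARN,
+// session name, and token path are assumed placeholder values):
+//
+//	svc := sts.New(sess)
+//	p := stscreds.NewWebIdentityRoleProviderWithOptions(
+//		svc, "myRoleARN", "my-session", stscreds.FetchTokenPath("/path/to/token"))
+//	creds := credentials.NewCredentials(p)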
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+	svc := sts.New(c)
+	p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+	return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI.
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options.
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path))
+}
+
+// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI and a TokenFetcher.
+//
+// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
+// functional options.
+func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
+	return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher)
+}
+
+// NewWebIdentityRoleProviderWithOptions will return an initialized
+// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a
+// TokenFetcher. Additional options can be provided as functional options.
+//
+// TokenFetcher is the implementation that will retrieve the JWT token used to
+// assume the role. Use the provided FetchTokenPath implementation to
+// retrieve the JWT token using a file system path.
+func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider {
+	p := WebIdentityRoleProvider{
+		client:          svc,
+		tokenFetcher:    tokenFetcher,
+		roleARN:         roleARN,
+		roleSessionName: roleSessionName,
+	}
+
+	for _, fn := range optFns {
+		fn(&p)
+	}
+
+	return &p
+}
+
+// Retrieve will attempt to assume a role using a token fetched by the
+// provider's TokenFetcher; if the token cannot be fetched an error will be
+// returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+	return p.RetrieveWithContext(aws.BackgroundContext())
+}
+
+// RetrieveWithContext will attempt to assume a role using a token fetched by
+// the provider's TokenFetcher; if the token cannot be fetched an error will
+// be returned.
+func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
+	b, err := p.tokenFetcher.FetchToken(ctx)
+	if err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
+	}
+
+	sessionName := p.roleSessionName
+	if len(sessionName) == 0 {
+		// session name is used to uniquely identify a session. This simply
+		// uses unix time in nanoseconds to uniquely identify sessions.
+		sessionName = strconv.FormatInt(now().UnixNano(), 10)
+	}
+
+	var duration *int64
+	if p.Duration != 0 {
+		duration = aws.Int64(int64(p.Duration / time.Second))
+	}
+
+	req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
+		PolicyArns:       p.PolicyArns,
+		RoleArn:          &p.roleARN,
+		RoleSessionName:  &sessionName,
+		WebIdentityToken: aws.String(string(b)),
+		DurationSeconds:  duration,
+	})
+
+	req.SetContext(ctx)
+
+	// InvalidIdentityToken error is a temporary error that can occur
+	// when assuming an IAM role with a JWT web identity token.
+	req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
+	if err := req.Send(); err != nil {
+		return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+	}
+
+	p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+	value := credentials.Value{
+		AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+		SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+		SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+		ProviderName:    WebIdentityProviderName,
+	}
+	return value, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
new file mode 100644
index 00000000000..25a66d1dda2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
@@ -0,0 +1,69 @@
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via a UDP connection to the CSM agent. This package provides
+// control options and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
+//
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//	* AWS_CSM_PORT=<port number>
+//		The port number the CSM agent will receive metrics on.
+//
+//	* AWS_CSM_HOST=<hostname or ip>
+//		The hostname, or IP address the CSM agent will receive metrics on.
+//		Without port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with a different ClientID or address it will panic.
+//
+//	r, err := csm.Start("clientID", ":31000")
+//	if err != nil {
+//		panic(fmt.Errorf("failed starting CSM: %v", err))
+//	}
+//
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
+//	sess, err := session.NewSession(&aws.Config{})
+//	if err != nil {
+//		panic(fmt.Errorf("failed loading session: %v", err))
+//	}
+//
+//	// Add CSM client's metric publishing request handlers to the SDK's
+//	// Session Configuration.
+//	r.InjectHandlers(&sess.Handlers)
+//
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled with the
+// Start function or via the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//	// Get the CSM client Reporter.
+//	r := csm.Get()
+//
+//	// Will pause monitoring
+//	r.Pause()
+//	resp, err = client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//	})
+//
+//	// Resume monitoring
+//	r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 00000000000..4b19e2800e3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+const (
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only IP6 host can contain a colon
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once and will panic if a different
+// client ID or port is passed in.
+//
+//	r, err := csm.Start("clientID", "127.0.0.1:31000")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//	sess := session.NewSession()
+//	r.InjectHandlers(sess.Handlers)
+//
+//	svc := s3.New(sess)
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key:    aws.String("key"),
+//	})
+func Start(clientID string, url string) (*Reporter, error) {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if sender == nil {
+		sender = newReporter(clientID, url)
+	} else {
+		if sender.clientID != clientID {
+			panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
+		}
+
+		if sender.url != url {
+			panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
+		}
+	}
+
+	if err := connect(url); err != nil {
+		sender = nil
+		return nil, err
+	}
+
+	return sender, nil
+}
+
+// Get will return a reporter if one exists; if one does not exist, nil will
+// be returned.
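+//
+// For example (a sketch):
+//
+//	if r := csm.Get(); r != nil {
+//		r.Pause()
+//	}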
+func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 00000000000..5bacc791a1e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int `json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) 
+		m.AWSExceptionMessage = aws.String(te.message)
+	case sdkException:
+		m.SDKException = aws.String(te.exception)
+		m.SDKExceptionMessage = aws.String(te.message)
+	}
+}
+
+func (m *metric) SetFinalException(e metricException) {
+	switch te := e.(type) {
+	case awsException:
+		m.FinalAWSException = aws.String(te.exception)
+		m.FinalAWSExceptionMessage = aws.String(te.message)
+	case sdkException:
+		m.FinalSDKException = aws.String(te.exception)
+		m.FinalSDKExceptionMessage = aws.String(te.message)
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
new file mode 100644
index 00000000000..82a3e345e93
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
@@ -0,0 +1,55 @@
+package csm
+
+import (
+	"sync/atomic"
+)
+
+const (
+	runningEnum = iota
+	pausedEnum
+)
+
+var (
+	// MetricsChannelSize is the maximum number of metrics to hold in the
+	// channel's buffer.
+	MetricsChannelSize = 100
+)
+
+type metricChan struct {
+	ch     chan metric
+	paused *int64
+}
+
+func newMetricChan(size int) metricChan {
+	return metricChan{
+		ch:     make(chan metric, size),
+		paused: new(int64),
+	}
+}
+
+func (ch *metricChan) Pause() {
+	atomic.StoreInt64(ch.paused, pausedEnum)
+}
+
+func (ch *metricChan) Continue() {
+	atomic.StoreInt64(ch.paused, runningEnum)
+}
+
+func (ch *metricChan) IsPaused() bool {
+	v := atomic.LoadInt64(ch.paused)
+	return v == pausedEnum
+}
+
+// Push will push metrics to the metric channel if the channel
+// is not paused.
+func (ch *metricChan) Push(m metric) bool {
+	if ch.IsPaused() {
+		return false
+	}
+
+	select {
+	case ch.ch <- m:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
new file mode 100644
index 00000000000..54a99280ce9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
@@ -0,0 +1,26 @@
+package csm
+
+type metricException interface {
+	Exception() string
+	Message() string
+}
+
+type requestException struct {
+	exception string
+	message   string
+}
+
+func (e requestException) Exception() string {
+	return e.exception
+}
+func (e requestException) Message() string {
+	return e.message
+}
+
+type awsException struct {
+	requestException
+}
+
+type sdkException struct {
+	requestException
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
new file mode 100644
index 00000000000..835bcd49cba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
@@ -0,0 +1,264 @@
+package csm
+
+import (
+	"encoding/json"
+	"net"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Reporter will gather metrics of API requests made and
+// send those metrics to the CSM endpoint.
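+//
+// Metrics are serialized to JSON and written over a UDP connection to the
+// configured CSM agent address.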
+type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case request.ErrCodeRequestError, + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + conn, err := net.Dial(network, url) + if err != nil { + return awserr.New("UDPError", "Could not connect", err) + } + + rep.conn = conn + + return nil +} + +func (rep *Reporter) close() { + if rep.done != nil { + close(rep.done) + } + + rep.metricsCh.Pause() +} + +func (rep *Reporter) start() { + defer func() { + rep.metricsCh.Pause() + }() + + for { + select { + case <-rep.done: + rep.done = nil + return + case m := <-rep.metricsCh.ch: + // TODO: What to do with this error? 
Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue it can lead to unexpected state.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause it can lead to unexpected state.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior, (e.g. duplicate metrics).
+//
+//	// Start must be called in order to inject the correct handlers
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//
+//	sess := session.NewSession()
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	// create a new service client with our client side metric session
+//	svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue returns 1 for true and 0 for false.
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 00000000000..1ba80b57609
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,252 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+	"github.com/aws/aws-sdk-go/aws/ec2metadata"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
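+//
+// For example (a sketch):
+//
+//	d := defaults.Get()
+//	cfg := d.Config.Copy().WithRegion("us-west-2")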
+type Defaults struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+	cfg := Config()
+	handlers := Handlers()
+	cfg.Credentials = CredChain(cfg, handlers)
+
+	return Defaults{
+		Config:   cfg,
+		Handlers: handlers,
+	}
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+	return aws.NewConfig().
+		WithCredentials(credentials.AnonymousCredentials).
+		WithRegion(os.Getenv("AWS_REGION")).
+		WithHTTPClient(http.DefaultClient).
+		WithMaxRetries(aws.UseServiceDefaultRetries).
+		WithLogger(aws.NewDefaultLogger()).
+		WithLogLevel(aws.LogOff).
+		WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+	var handlers request.Handlers
+
+	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddAwsInternal)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
+	handlers.Build.AfterEachFn = request.HandlerListStopOnError
+	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+	handlers.Send.PushBackNamed(corehandlers.SendHandler)
+	handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+	handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+	return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it is
+// available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+	return credentials.NewCredentials(&credentials.ChainProvider{
+		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+		Providers:     CredProviders(cfg, handlers),
+	})
+}
+
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// For applications that need to use some other provider (for example, use
+// different environment variables for legacy reasons) but still fall back
+// on the default chain of providers, this allows the default chain to be
+// automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderAuthFileEnvVar      = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+	169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+	169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+	0xFD, 0, 0xE, 0xC2,
+	0, 0, 0, 0,
+	0, 0, 0, 0,
+	0, 0, 0, 0x23,
+}
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+		return localHTTPCredProvider(cfg, handlers, u)
+	}
+
+	if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
+		u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
+		return httpCredProvider(cfg, handlers, u)
+	}
+
+	return ec2RoleProvider(cfg, handlers)
+}
+
+var lookupHostFn = net.LookupHost
+
+// isAllowedHost allows host to be loopback or known ECS/EKS container IPs.
+//
+// host can either be an IP address OR an unresolved hostname; resolution will
+// be automatically performed in the latter case.
+func isAllowedHost(host string) (bool, error) {
+	if ip := net.ParseIP(host); ip != nil {
+		return isIPAllowed(ip), nil
+	}
+
+	addrs, err := lookupHostFn(host)
+	if err != nil {
+		return false, err
+	}
+
+	for _, addr := range addrs {
+		if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func isIPAllowed(ip net.IP) bool {
+	return ip.IsLoopback() ||
+		ip.Equal(ecsContainerIPv4) ||
+		ip.Equal(eksContainerIPv4) ||
+		ip.Equal(eksContainerIPv6)
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	var errMsg string
+
+	parsed, err := url.Parse(u)
+	if err != nil {
+		errMsg = fmt.Sprintf("invalid URL, %v", err)
+	} else {
+		host := aws.URLHostname(parsed)
+		if len(host) == 0 {
+			errMsg = "unable to parse host from local HTTP cred provider URL"
+		} else if parsed.Scheme == "http" {
+			if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+				errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr)
+			} else if !isAllowedHost {
+				errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host)
+			}
+		}
+	}
+
+	if len(errMsg) > 0 {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+		}
+		return credentials.ErrorProvider{
+			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
+			ProviderName: endpointcreds.ProviderName,
+		}
+	}
+
+	return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	return endpointcreds.NewProviderClient(cfg, handlers, u,
+		func(p *endpointcreds.Provider) {
+			p.ExpiryWindow = 5 * time.Minute
+			p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
+			if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+				p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+					if contents, err := ioutil.ReadFile(authFilePath); err != nil {
+						return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+					} else {
+						return string(contents), nil
+					}
+				})
+			}
+		},
+	)
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	resolver := cfg.EndpointResolver
+	if resolver == nil {
+		resolver = endpoints.DefaultResolver()
+	}
+
+	e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+	return &ec2rolecreds.EC2RoleProvider{
+		Client:       ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+		ExpiryWindow: 5 * time.Minute,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 00000000000..ca0ee1dcc78
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/credentials
+//   - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+	return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+//   - Linux/Unix: $HOME/.aws/config
+//   - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 00000000000..4fcb6161848
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operation parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer to a scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value form will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The Value to Pointer functions will be named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.StringValue(strPtr)
+//
+// In addition to scalars, the aws package also includes conversion utilities for
+// maps and slices of commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 00000000000..69fa63dc08f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,250 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkuri"
+)
+
+// getToken uses the duration to return a token for the EC2 metadata service,
+// or an error if the request failed.
+func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) {
+	op := &request.Operation{
+		Name:       "GetToken",
+		HTTPMethod: "PUT",
+		HTTPPath:   "/latest/api/token",
+	}
+
+	var output tokenOutput
+	req := c.NewRequest(op, nil, &output)
+	req.SetContext(ctx)
+
+	// remove the fetch token handler from the request handlers to avoid infinite recursion
+	req.Handlers.Sign.RemoveByName(fetchTokenHandlerName)
+
+	// Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request.
+	req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler)
+
+	ttl := strconv.FormatInt(int64(duration/time.Second), 10)
+	req.HTTPRequest.Header.Set(ttlHeader, ttl)
+
+	err := req.Send()
+
+	// Errors with bad request status should be returned.
+	if err != nil {
+		err = awserr.NewRequestFailure(
+			awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err),
+			req.HTTPResponse.StatusCode, req.RequestID)
+	}
+
+	return output, err
+}
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	return c.GetMetadataWithContext(aws.BackgroundContext(), p)
+}
+
+// GetMetadataWithContext uses the path provided to request information from the EC2
+// instance metadata service.
The content will be returned as a string, or +// error if the request failed. +func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetMetadata", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/meta-data", p), + } + output := &metadataOutput{} + + req := c.NewRequest(op, nil, output) + + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + return c.GetUserDataWithContext(aws.BackgroundContext()) +} + +// GetUserDataWithContext returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: "/latest/user-data", + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetDynamicData uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicData(p string) (string, error) { + return c.GetDynamicDataWithContext(aws.BackgroundContext(), p) +} + +// GetDynamicDataWithContext uses the path provided to request information from the EC2 +// instance metadata service for dynamic data. The content will be returned +// as a string, or error if the request failed. +func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) { + op := &request.Operation{ + Name: "GetDynamicData", + HTTPMethod: "GET", + HTTPPath: sdkuri.PathJoin("/latest/dynamic", p), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.SetContext(ctx) + + err := req.Send() + return output.Content, err +} + +// GetInstanceIdentityDocument retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. +func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext()) +} + +// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an +// instance. Error is returned if the request fails or is unable to parse +// the response. 
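+//
+// For example (a sketch; client is an *EC2Metadata value):
+//
+//	doc, err := client.GetInstanceIdentityDocumentWithContext(ctx)
+//	if err == nil {
+//		fmt.Println(doc.Region, doc.InstanceID)
+//	}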
+func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + return c.IAMInfoWithContext(aws.BackgroundContext()) +} + +// IAMInfoWithContext retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) { + resp, err := c.GetMetadataWithContext(ctx, "iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New(request.ErrCodeSerialization, + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + return c.RegionWithContext(aws.BackgroundContext()) +} + +// RegionWithContext returns the region the instance is running in. +func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) { + ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx) + if err != nil { + return "", err + } + // extract region from the ec2InstanceIdentityDocument + region := ec2InstanceIdentityDocument.Region + if len(region) == 0 { + return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil) + } + // returns region + return region, nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + return c.AvailableWithContext(aws.BackgroundContext()) +} + +// AvailableWithContext returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. 
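+//
+// For example (a sketch):
+//
+//	if client.AvailableWithContext(ctx) {
+//		region, _ := client.RegionWithContext(ctx)
+//		fmt.Println("running on EC2 in", region)
+//	}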
+func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool { + if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + MarketplaceProductCodes []string `json:"marketplaceProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 00000000000..f4cc8751d04 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,245 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true, (case insensitive). +// +// The endpoint of the EC2 IMDS client can be configured via the environment +// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a +// Session. See aws/session#Options.EC2IMDSEndpoint for more details. +package ec2metadata + +import ( + "bytes" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + // ServiceName is the name of the service. + ServiceName = "ec2metadata" + disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" + + // Headers for Token and TTL + ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds" + tokenHeader = "x-aws-ec2-metadata-token" + + // Named Handler constants + fetchTokenHandlerName = "FetchTokenHandler" + unmarshalMetadataHandlerName = "unmarshalMetadataHandler" + unmarshalTokenHandlerName = "unmarshalTokenHandler" + enableTokenProviderHandlerName = "enableTokenProviderHandler" + + // TTL constants + defaultTTL = 21600 * time.Second + ttlExpirationWindow = 30 * time.Second +) + +// A EC2Metadata is an EC2 Metadata service Client. +type EC2Metadata struct { + *client.Client +} + +// New creates a new instance of the EC2Metadata client with a session. +// This client is safe to use across multiple goroutines. +// +// Example: +// +// // Create a EC2Metadata client from just a session. 
+//	svc := ec2metadata.New(mySession)
+//
+//	// Create an EC2Metadata client with additional configuration
+//	svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+    c := p.ClientConfig(ServiceName, cfgs...)
+    return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS
+// client is able to communicate with the EC2 IMDS API.
+//
+// If the stdlib default HTTP client is provided unmodified, or no client is
+// provided, the EC2Metadata HTTP client's timeout will be shortened. To disable
+// this behavior set Config.EC2MetadataDisableTimeoutOverride to true; the
+// override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+    if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+        // If the http client is unmodified and this feature is not disabled
+        // set custom timeouts for EC2Metadata requests.
+        cfg.HTTPClient = &http.Client{
+            // use a shorter timeout than default because the metadata
+            // service is local if it is running, and to fail faster
+            // if not running on an ec2 instance.
+            Timeout: 1 * time.Second,
+        }
+        // max number of retries on the client operation
+        cfg.MaxRetries = aws.Int(2)
+    }
+
+    if u, err := url.Parse(endpoint); err == nil {
+        // Remove path from the endpoint since it will be added by requests.
+        // This is an artifact of the SDK adding `/latest` to the endpoint for
+        // EC2 IMDS, but this is now moved to the operation definition.
+        u.Path = ""
+        u.RawPath = ""
+        endpoint = u.String()
+    }
+
+    svc := &EC2Metadata{
+        Client: client.New(
+            cfg,
+            metadata.ClientInfo{
+                ServiceName: ServiceName,
+                ServiceID:   ServiceName,
+                Endpoint:    endpoint,
+                APIVersion:  "latest",
+            },
+            handlers,
+        ),
+    }
+
+    // token provider instance
+    tp := newTokenProvider(svc, defaultTTL)
+
+    // NamedHandler for fetching token
+    svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
+        Name: fetchTokenHandlerName,
+        Fn:   tp.fetchTokenHandler,
+    })
+    // NamedHandler for enabling token provider
+    svc.Handlers.Complete.PushBackNamed(request.NamedHandler{
+        Name: enableTokenProviderHandlerName,
+        Fn:   tp.enableTokenProviderHandler,
+    })
+
+    svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler)
+    svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+    svc.Handlers.Validate.Clear()
+    svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+    // Disable the EC2 Metadata service if the environment variable is set.
+    // This short-circuits the service's functionality to always fail to send
+    // requests.
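+    // For example, a process started with AWS_EC2_METADATA_DISABLED=true
+    // receives a request.CanceledErrorCode error from every operation on this
+    // client immediately, instead of waiting out a network timeout.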
+    if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
+        svc.Handlers.Send.SwapNamed(request.NamedHandler{
+            Name: corehandlers.SendHandler.Name,
+            Fn: func(r *request.Request) {
+                r.HTTPResponse = &http.Response{
+                    Header: http.Header{},
+                }
+                r.Error = awserr.New(
+                    request.CanceledErrorCode,
+                    "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
+                    nil)
+            },
+        })
+    }
+
+    // Add additional options to the service config
+    for _, option := range opts {
+        option(svc.Client)
+    }
+    return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+    return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+    Content string
+}
+
+type tokenOutput struct {
+    Token string
+    TTL   time.Duration
+}
+
+// unmarshal token handler is used to parse the response of a getToken operation
+var unmarshalTokenHandler = request.NamedHandler{
+    Name: unmarshalTokenHandlerName,
+    Fn: func(r *request.Request) {
+        defer r.HTTPResponse.Body.Close()
+        var b bytes.Buffer
+        if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
+            r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
+                "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
+            return
+        }
+
+        v := r.HTTPResponse.Header.Get(ttlHeader)
+        data, ok := r.Data.(*tokenOutput)
+        if !ok {
+            return
+        }
+
+        data.Token = b.String()
+        // TTL is in seconds
+        i, err := strconv.ParseInt(v, 10, 64)
+        if err != nil {
+            r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode,
+                "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID)
+            return
+        }
+        t := time.Duration(i) * time.Second
+        data.TTL = t
+    },
+}
+
+var unmarshalHandler = request.NamedHandler{
+    Name: unmarshalMetadataHandlerName,
+    Fn: func(r *request.Request) {
+        defer r.HTTPResponse.Body.Close()
+        var b bytes.Buffer
+        if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
+            r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
+                "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
+            return
+        }
+
+        if data, ok := r.Data.(*metadataOutput); ok {
+            data.Content = b.String()
+        }
+    },
+}
+
+func unmarshalError(r *request.Request) {
+    defer r.HTTPResponse.Body.Close()
+    var b bytes.Buffer
+
+    if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
+        r.Error = awserr.NewRequestFailure(
+            awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err),
+            r.HTTPResponse.StatusCode, r.RequestID)
+        return
+    }
+
+    // Response body format is not consistent between metadata endpoints.
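+    // (There is no single error document shape that can be deserialized.)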
+    // Grab the error message as a string and include that as the source error
+    r.Error = awserr.NewRequestFailure(
+        awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil),
+        r.HTTPResponse.StatusCode, r.RequestID)
+}
+
+func validateEndpointHandler(r *request.Request) {
+    if r.ClientInfo.Endpoint == "" {
+        r.Error = aws.ErrMissingEndpoint
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
new file mode 100644
index 00000000000..f1f9ba4ec58
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -0,0 +1,99 @@
+package ec2metadata
+
+import (
+    "fmt"
+    "net/http"
+    "sync/atomic"
+    "time"
+
+    "github.com/aws/aws-sdk-go/aws"
+    "github.com/aws/aws-sdk-go/aws/awserr"
+    "github.com/aws/aws-sdk-go/aws/credentials"
+    "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A tokenProvider provides access to the EC2Metadata client, an atomic
+// instance of a token, and the configuredTTL for it.
+// tokenProvider also provides an atomic flag to disable the
+// fetch token operation.
+// The disabled member will use 0 as false, and 1 as true.
+type tokenProvider struct {
+    client        *EC2Metadata
+    token         atomic.Value
+    configuredTTL time.Duration
+    disabled      uint32
+}
+
+// An ec2Token struct helps use of the token in EC2 Metadata service ops
+type ec2Token struct {
+    token string
+    credentials.Expiry
+}
+
+// newTokenProvider provides a pointer to a tokenProvider instance
+func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
+    return &tokenProvider{client: c, configuredTTL: duration}
+}
+
+// check if fallback is enabled
+func (t *tokenProvider) fallbackEnabled() bool {
+    return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback
+}
+
+// fetchTokenHandler fetches a token for the EC2Metadata service client by default.
+func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
+    // short-circuits to insecure data flow if tokenProvider is disabled.
+    if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() {
+        return
+    }
+
+    if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() {
+        r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
+        return
+    }
+
+    output, err := t.client.getToken(r.Context(), t.configuredTTL)
+
+    if err != nil {
+        // only attempt fallback to insecure data flow if IMDSv1 is enabled
+        if !t.fallbackEnabled() {
+            r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err)
+            return
+        }
+
+        // change the disabled flag on token provider to true and fallback
+        if requestFailureError, ok := err.(awserr.RequestFailure); ok {
+            switch requestFailureError.StatusCode() {
+            case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
+                atomic.StoreUint32(&t.disabled, 1)
+                if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) {
+                    t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+                }
+            case http.StatusBadRequest:
+                r.Error = requestFailureError
+            }
+        }
+        return
+    }
+
+    newToken := ec2Token{
+        token: output.Token,
+    }
+    newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow)
+    t.token.Store(newToken)
+
+    // Inject token header to the request.
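+    // The token just stored is re-read from the atomic value, mirroring the
+    // fast path above, so concurrent handlers observe a consistent ec2Token.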
+    if ec2Token, ok := t.token.Load().(ec2Token); ok {
+        r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
+    }
+}
+
+// enableTokenProviderHandler enables the token provider
+func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
+    // If the request failed with a 401 status, reset the stored token and
+    // re-enable the token provider.
+    if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
+        e.StatusCode() == http.StatusUnauthorized {
+        t.token.Store(ec2Token{})
+        atomic.StoreUint32(&t.disabled, 0)
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 00000000000..cad3b9a4883
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,193 @@
+package endpoints
+
+import (
+    "encoding/json"
+    "fmt"
+    "io"
+
+    "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// A DecodeModelOptions provides the options for how the endpoints model
+// definition is decoded.
+type DecodeModelOptions struct {
+    SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+    for _, fn := range optFns {
+        fn(d)
+    }
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+//	resolver, err := endpoints.DecodeModel(reader)
+//
+//	partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//	for _, p := range partitions {
+//	    // ... inspect partitions
+//	}
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+    var opts DecodeModelOptions
+    opts.Set(optFns...)
+
+    // Get the version of the partition file to determine what
+    // unmarshaling model to use.
+    modelDef := modelDefinition{}
+    if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+        return nil, newDecodeModelError("failed to decode endpoints model", err)
+    }
+
+    var version string
+    if b, ok := modelDef["version"]; ok {
+        version = string(b)
+    } else {
+        return nil, newDecodeModelError("endpoints version not found in model", nil)
+    }
+
+    if version == "3" {
+        return decodeV3Endpoints(modelDef, opts)
+    }
+
+    return nil, newDecodeModelError(
+        fmt.Sprintf("endpoints version %s, not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+    b, ok := modelDef["partitions"]
+    if !ok {
+        return nil, newDecodeModelError("endpoints model missing partitions", nil)
+    }
+
+    ps := partitions{}
+    if err := json.Unmarshal(b, &ps); err != nil {
+        return nil, newDecodeModelError("failed to decode endpoints model", err)
+    }
+
+    if opts.SkipCustomizations {
+        return ps, nil
+    }
+
+    // Customization
+    for i := 0; i < len(ps); i++ {
+        p := &ps[i]
+        custRegionalS3(p)
+        custRmIotDataService(p)
+        custFixAppAutoscalingChina(p)
+        custFixAppAutoscalingUsGov(p)
+    }
+
+    return ps, nil
+}
+
+func custRegionalS3(p *partition) {
+    if p.ID != "aws" {
+        return
+    }
+
+    service, ok := p.Services["s3"]
+    if !ok {
+        return
+    }
+
+    const awsGlobal = "aws-global"
+    const usEast1 = "us-east-1"
+
+    // If the global endpoint already exists, no customization is needed.
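+    // Otherwise aws-global is injected as S3's partition endpoint, backed by
+    // the legacy s3.amazonaws.com hostname with a us-east-1 credential scope.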
+    if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok {
+        return
+    }
+
+    service.PartitionEndpoint = awsGlobal
+    if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok {
+        service.Endpoints[endpointKey{Region: usEast1}] = endpoint{}
+    }
+    service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{
+        Hostname: "s3.amazonaws.com",
+        CredentialScope: credentialScope{
+            Region: usEast1,
+        },
+    }
+
+    p.Services["s3"] = service
+}
+
+func custRmIotDataService(p *partition) {
+    delete(p.Services, "data.iot")
+}
+
+func custFixAppAutoscalingChina(p *partition) {
+    if p.ID != "aws-cn" {
+        return
+    }
+
+    const serviceName = "application-autoscaling"
+    s, ok := p.Services[serviceName]
+    if !ok {
+        return
+    }
+
+    const expectHostname = `autoscaling.{region}.amazonaws.com`
+    serviceDefault := s.Defaults[defaultKey{}]
+    if e, a := expectHostname, serviceDefault.Hostname; e != a {
+        fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
+        return
+    }
+    serviceDefault.Hostname = expectHostname + ".cn"
+    s.Defaults[defaultKey{}] = serviceDefault
+    p.Services[serviceName] = s
+}
+
+func custFixAppAutoscalingUsGov(p *partition) {
+    if p.ID != "aws-us-gov" {
+        return
+    }
+
+    const serviceName = "application-autoscaling"
+    s, ok := p.Services[serviceName]
+    if !ok {
+        return
+    }
+
+    serviceDefault := s.Defaults[defaultKey{}]
+    if a := serviceDefault.CredentialScope.Service; a != "" {
+        fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
+        return
+    }
+
+    if a := serviceDefault.Hostname; a != "" {
+        fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
+        return
+    }
+
+    serviceDefault.CredentialScope.Service = "application-autoscaling"
+    serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com"
+
+    if s.Defaults == nil {
+        s.Defaults = make(endpointDefaults)
+    }
+
+    s.Defaults[defaultKey{}] = serviceDefault
+
+    p.Services[serviceName] = s
+}
+
+type decodeModelError struct {
+    awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+    return decodeModelError{
+        awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+    }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644
index 00000000000..ece2e0dd4d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -0,0 +1,46097 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+    "regexp"
+)
+
+// Partition identifiers
+const (
+    AwsPartitionID      = "aws"        // AWS Standard partition.
+    AwsCnPartitionID    = "aws-cn"     // AWS China partition.
+    AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+    AwsIsoPartitionID   = "aws-iso"    // AWS ISO (US) partition.
+    AwsIsoBPartitionID  = "aws-iso-b"  // AWS ISOB (US) partition.
+    AwsIsoEPartitionID  = "aws-iso-e"  // AWS ISOE (Europe) partition.
+    AwsIsoFPartitionID  = "aws-iso-f"  // AWS ISOF partition.
+)
+
+// AWS Standard partition's regions.
+const (
+    AfSouth1RegionID     = "af-south-1"     // Africa (Cape Town).
+    ApEast1RegionID      = "ap-east-1"      // Asia Pacific (Hong Kong).
+    ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+    ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+    ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka).
+    ApSouth1RegionID     = "ap-south-1"     // Asia Pacific (Mumbai).
+    ApSouth2RegionID     = "ap-south-2"     // Asia Pacific (Hyderabad).
+    ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+    ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+    ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
+    ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
+    CaCentral1RegionID   = "ca-central-1"   // Canada (Central).
+    CaWest1RegionID      = "ca-west-1"      // Canada West (Calgary).
+    EuCentral1RegionID   = "eu-central-1"   // Europe (Frankfurt).
+    EuCentral2RegionID   = "eu-central-2"   // Europe (Zurich).
+    EuNorth1RegionID     = "eu-north-1"     // Europe (Stockholm).
+    EuSouth1RegionID     = "eu-south-1"     // Europe (Milan).
+    EuSouth2RegionID     = "eu-south-2"     // Europe (Spain).
+    EuWest1RegionID      = "eu-west-1"      // Europe (Ireland).
+    EuWest2RegionID      = "eu-west-2"      // Europe (London).
+    EuWest3RegionID      = "eu-west-3"      // Europe (Paris).
+    IlCentral1RegionID   = "il-central-1"   // Israel (Tel Aviv).
+    MeCentral1RegionID   = "me-central-1"   // Middle East (UAE).
+    MeSouth1RegionID     = "me-south-1"     // Middle East (Bahrain).
+    SaEast1RegionID      = "sa-east-1"      // South America (Sao Paulo).
+    UsEast1RegionID      = "us-east-1"      // US East (N. Virginia).
+    UsEast2RegionID      = "us-east-2"      // US East (Ohio).
+    UsWest1RegionID      = "us-west-1"      // US West (N. California).
+    UsWest2RegionID      = "us-west-2"      // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+    CnNorth1RegionID     = "cn-north-1"     // China (Beijing).
+    CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+    UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
+    UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West).
+)
+
+// AWS ISO (US) partition's regions.
+const (
+    UsIsoEast1RegionID = "us-iso-east-1" // US ISO East.
+    UsIsoWest1RegionID = "us-iso-west-1" // US ISO West.
+)
+
+// AWS ISOB (US) partition's regions.
+const (
+    UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
+)
+
+// AWS ISOE (Europe) partition's regions.
+const ()
+
+// AWS ISOF partition's regions.
+const ()
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US),
+// AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
+//
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+    return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US),
+// AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
+//
+//	partitions := endpoints.DefaultPartitions()
+//	for _, p := range partitions {
+//	    // ... inspect partitions
+//	}
+func DefaultPartitions() []Partition {
+    return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+    awsPartition,
+    awscnPartition,
+    awsusgovPartition,
+    awsisoPartition,
+    awsisobPartition,
+    awsisoePartition,
+    awsisofPartition,
+}
+
+// AwsPartition returns the Resolver for AWS Standard.
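+//
+// A minimal resolution sketch (illustrative only; the service and region
+// values are examples):
+//
+//	p := endpoints.AwsPartition()
+//	ep, err := p.EndpointFor("ec2", "eu-west-1")
+//	if err == nil {
+//	    // ep.URL holds the resolved endpoint URL
+//	}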
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "af-south-1": region{ + Description: "Africa (Cape Town)", + }, + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-northeast-3": region{ + Description: "Asia Pacific (Osaka)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-south-2": region{ + Description: "Asia Pacific (Hyderabad)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ap-southeast-3": region{ + Description: "Asia Pacific (Jakarta)", + }, + "ap-southeast-4": region{ + Description: "Asia Pacific (Melbourne)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "ca-west-1": region{ + Description: "Canada West (Calgary)", + }, + "eu-central-1": region{ + Description: "Europe (Frankfurt)", + }, + "eu-central-2": region{ + Description: "Europe (Zurich)", + }, + "eu-north-1": region{ + Description: "Europe (Stockholm)", + }, + "eu-south-1": region{ + Description: "Europe (Milan)", + }, + "eu-south-2": region{ + Description: "Europe (Spain)", + }, + "eu-west-1": region{ + Description: "Europe (Ireland)", + }, + "eu-west-2": region{ + Description: "Europe (London)", + }, + "eu-west-3": region{ + Description: "Europe (Paris)", + }, + "il-central-1": region{ + Description: "Israel (Tel Aviv)", + }, + "me-central-1": region{ + Description: "Middle East (UAE)", + }, + "me-south-1": region{ + Description: "Middle East (Bahrain)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "a4b": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "account": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "account.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "acm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "acm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "acm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"acm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "acm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca-fips.us-west-2.amazonaws.com", + }, + }, + }, + "agreement-marketplace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "airflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplify": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifybackend": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + 
endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "amplifyuibuilder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "aoss": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "api.detective": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.ecr": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "api.ecr.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "api.ecr.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + 
Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "api.ecr.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "api.ecr.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, + endpointKey{ + Region: "dkr-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "api.ecr.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "api.ecr.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "api.ecr.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, 
+ }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-dkr-us-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-east-2", + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-west-2", + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "api.ecr.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "api.ecr.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "api.ecr.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + 
endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.ecr-public": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.elastic-inference": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.elastic-inference.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.elastic-inference.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "api.elastic-inference.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.elastic-inference.us-west-2.amazonaws.com", + }, + }, + }, + "api.fleethub.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "api.iotdeviceadvisor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "api.iotwireless.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "api.mediatailor": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"api-fips.sagemaker.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.us-west-2.amazonaws.com", + }, + }, + }, + "app-integrations": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", 
+ }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appflow": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ 
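+ // The defaultKey{} entry above lists both "http" and "https" for
+ // application-autoscaling; when multiple protocols are listed, the
+ // resolver generally prefers HTTPS unless the client disables SSL.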
+ Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "appmesh.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + 
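+ // Variant is a bit set: dualStackVariant keys point at the dual-stack
+ // .api.aws namespace, and fipsVariant | dualStackVariant combines both
+ // (e.g. appmesh-fips.<region>.api.aws).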
Hostname: "appmesh.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appmesh-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "apprunner": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: 
"apprunner-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apprunner-fips.us-west-2.amazonaws.com", + }, + }, + }, + "appstream2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "aps": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "athena.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + 
Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + }, + 
endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-west-2.api.aws", + }, + }, + }, + "auditmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "auditmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, 
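+ // Deprecated appears to be a boxed (tri-state) bool in this package's
+ // model; boxedTrue marks legacy pseudo-region keys (like auditmanager's
+ // "<region>-fips" entries above) kept for backward compatibility with
+ // older FIPS region names.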
+ endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.us-west-2.amazonaws.com", + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "backupstorage": service{ + Endpoints: 
[Vendored generated code, collapsed for readability: the aws-sdk-go endpoints definitions (github.com/aws/aws-sdk-go, aws/endpoints/defaults.go). This stretch of the hunk finishes the regional entry list for the preceding service and then adds or refreshes aws-partition endpoint tables for: batch, bedrock, billingconductor, braket, budgets, cases, cassandra, catalog.marketplace, ce, chime, cleanrooms, cloud9, cloudcontrolapi, clouddirectory, cloudformation, cloudfront, cloudhsm, cloudhsmv2, cloudsearch, cloudtrail, cloudtrail-data, codeartifact, codebuild, codecatalyst, codecommit, codedeploy, codeguru-reviewer, codepipeline, codestar, codestar-connections, codestar-notifications, cognito-identity, cognito-idp, cognito-sync, comprehend, comprehendmedical, compute-optimizer, config, connect, connect-campaigns, contact-lens, controltower, cost-optimization-hub, cur, and data-ats.iot (that last table continues beyond this stretch). The entries follow a few recurring shapes: plain per-region keys (endpointKey{Region: ...}: endpoint{}); fipsVariant keys plus deprecated "fips-<region>" or "<region>-fips" alias keys that pin a hostname such as cloudtrail-fips.us-east-1.amazonaws.com (Batch uses the fips.batch.{region}.{dnsSuffix} pattern) together with a credentialScope and Deprecated: boxedTrue; non-regionalized global services (billingconductor, budgets, ce, chime, cloudfront, codecatalyst) declared with PartitionEndpoint: "aws-global" and IsRegionalized: boxedFalse; credentialScope overrides of the signing service name (cloudhsmv2 signs as "cloudhsm", data-ats.iot as "iotdata"); and per-region hostname pins with matching credential scopes for bedrock, bedrock-runtime, and compute-optimizer. The newer ca-west-1 and il-central-1 regions appear in several of these tables.]
"data.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "data.mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ 
+ Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dataexchange": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "datasync-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-west-2.amazonaws.com", + }, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "datazone.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "datazone.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "datazone.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "datazone.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "datazone.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "datazone.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "datazone.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "datazone.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "datazone.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "datazone.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "datazone.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "datazone.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "datazone.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "datazone.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "datazone.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "datazone.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "datazone.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "datazone.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "datazone.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "datazone.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "datazone.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "datazone.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "datazone.me-central-1.api.aws", + 
}, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "datazone.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "datazone.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "datazone.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "datazone.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "datazone.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "datazone.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devicefarm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "devops-guru": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "devops-guru-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: 
endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "devops-guru-fips.us-west-2.amazonaws.com", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "discovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + 
}, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: 
"drs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-west-2.amazonaws.com", + }, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: 
"ebs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + 
Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ec2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ec2-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "edge.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + 
endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.eks.us-west-2.amazonaws.com", + }, + }, + }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "eks-auth.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "eks-auth.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-3.api.aws", + 
}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "eks-auth.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "eks-auth.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "eks-auth.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "eks-auth.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "eks-auth.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "eks-auth.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "eks-auth.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "eks-auth.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "eks-auth.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "eks-auth.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "eks-auth.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "eks-auth.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "eks-auth.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "eks-auth.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "eks-auth.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "eks-auth.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "eks-auth.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "eks-auth.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "eks-auth.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "eks-auth.us-west-2.api.aws", + }, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "elasticache-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: 
"elasticbeanstalk-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + 
Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ 
+ Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: 
"elasticfilesystem-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com", + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"elasticmapreduce-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com", + }, + }, + }, + "elastictranscoder": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "email": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"email-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-2.amazonaws.com", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "emr-containers-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-containers-fips.us-west-2.amazonaws.com", + }, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "emr-serverless-fips.us-west-2.amazonaws.com", + }, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "aos.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: 
endpoint{ + Hostname: "aos.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "es-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "es-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + 
Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events-fips.us-west-2.amazonaws.com", + }, + }, + }, + "evidently": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "evidently.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "evidently.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "evidently.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "evidently.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "evidently.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "evidently.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "evidently.us-east-1.amazonaws.com", + }, + endpointKey{ 
+ Region: "us-east-2", + }: endpoint{ + Hostname: "evidently.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "evidently.us-west-2.amazonaws.com", + }, + }, + }, + "finspace": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "finspace-api": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-west-2.amazonaws.com", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"fms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "fms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "fms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "fms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "fms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "fms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "fms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "fms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-east-2", + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-west-2.amazonaws.com", + }, + }, + }, + "forecast": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecast-fips.us-west-2.amazonaws.com", + }, + }, + }, + "forecastquery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "forecastquery-fips.us-west-2.amazonaws.com", + }, + }, + }, + "frauddetector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-prod-ca-central-1", + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "prod-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "prod-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-west-2.amazonaws.com", + }, + }, + }, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "glacier-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier-fips.us-west-2.amazonaws.com", + }, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: 
endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-west-2.amazonaws.com", + }, + }, + }, + "grafana": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "grafana.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "grafana.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "grafana.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "grafana.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "grafana.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "grafana.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "grafana.eu-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "grafana.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "grafana.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "grafana.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "greengrass-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass-fips.us-west-2.amazonaws.com", + }, + }, + }, + "groundstation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-east-2", + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "groundstation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "guardduty-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "health": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "healthlake": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "honeycode": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-fips", + }: endpoint{ + Hostname: "iam-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identity-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "identity-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "ingest-fips-us-east-1", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-fips-us-east-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-fips-us-west-2", + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ingest-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-west-2.amazonaws.com", + }, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "internetmonitor.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "internetmonitor.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "internetmonitor.ap-south-1.api.aws", + }, + endpointKey{ + Region: 
"ap-south-2", + }: endpoint{ + Hostname: "internetmonitor.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "internetmonitor.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "internetmonitor.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "internetmonitor.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "internetmonitor.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "internetmonitor.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "internetmonitor.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "internetmonitor.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "internetmonitor.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "internetmonitor.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "internetmonitor.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "internetmonitor.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "internetmonitor.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "internetmonitor.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "internetmonitor.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "internetmonitor.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "internetmonitor.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "internetmonitor.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iot-fips.ca-central-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotevents-fips.ca-central-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "data.iotevents.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "data.iotevents.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "data.iotevents.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "data.iotevents.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "data.iotevents.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "data.iotevents.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: 
"data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "data.iotevents.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "data.iotevents.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "data.iotevents.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "iotfleetwise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotthingsgraph": 
service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "api-ap-northeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "api-ap-northeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "api-ap-south-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "api-ap-southeast-2", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "api-eu-central-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "api-eu-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "data-ap-northeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "data-ap-northeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "data-ap-south-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "data-ap-southeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-southeast-2", + }, + }, + endpointKey{ + Region: "data-eu-central-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "data-eu-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-api-us-east-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-api-us-west-2", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-data-us-east-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-data-us-west-2", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com", + }, + }, + }, + "iotwireless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.iotwireless.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "ivs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivschat": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ivsrealtime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "kafka-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kafkaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + 
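// (editorial aside, not part of the vendored diff) The kendra table above follows the
// pattern used throughout this file: each plain region key gets a paired fipsVariant
// row, while the older "fips-us-east-1"-style pseudo-region keys are kept only as
// Deprecated aliases. A minimal sketch, assuming the public endpoints API of the
// aws-sdk-go v1 version being vendored here (the service/region pair is taken from
// the table above; everything else is illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Keep the real region and request the FIPS variant, rather than
	// passing a deprecated "fips-..." pseudo-region key.
	resolved, err := endpoints.DefaultResolver().EndpointFor(
		"kendra", "us-east-1",
		func(o *endpoints.Options) {
			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
		},
	)
	if err != nil {
		panic(err)
	}
	// Expected to print the fipsVariant row from the table above:
	// https://kendra-fips.us-east-1.amazonaws.com us-east-1
	fmt.Println(resolved.URL, resolved.SigningRegion)
}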
endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "kendra-ranking.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "kendra-ranking.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "kendra-ranking.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "kendra-ranking.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "kendra-ranking.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "kendra-ranking.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "kendra-ranking.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "kendra-ranking.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "kendra-ranking.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"kendra-ranking-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "kendra-ranking.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-ranking-fips.us-west-2.api.aws", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis-fips.us-west-2.amazonaws.com", + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "af-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-east-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"kms-fips.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4-fips", + }: endpoint{ + Hostname: "kms-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "kms-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3-fips", + }: endpoint{ + Hostname: "kms-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "il-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1-fips", + }: endpoint{ + Hostname: "kms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "sa-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + 
}, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "kms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"lambda.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-west-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "license-manager-user-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lightsail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"logs.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.eu-west-3.api.aws", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "logs-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "logs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "logs.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "lookoutequipment": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: 
endpoint{}, + }, + }, + "lookoutmetrics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "lookoutvision": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "machinelearning": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "macie2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "macie2-fips.us-west-2.amazonaws.com", + }, + }, + }, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "managedblockchain-query": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "media-pipelines-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "mediaconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert-fips.us-west-2.amazonaws.com", + }, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: 
"us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "medialive-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackage-vod": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediapackagev2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + 
endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mediastore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "memory-db-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "messaging-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "messaging-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"mgn-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-west-2.amazonaws.com", + }, + }, + }, + "migrationhub-orchestrator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "migrationhub-strategy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "mobileanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "monitoring": service{ + 
Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + 
}: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-west-2.amazonaws.com", + }, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "sandbox", + }: endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "rds.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "rds.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "rds.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "rds.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "rds.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-west-2.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "networkmanager.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "networkmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "nimble": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "oidc.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "oidc.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "oidc.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "oidc.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "oidc.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "oidc.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "oidc.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "oidc.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "oidc.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "oidc.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "oidc.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "oidc.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "oidc.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "oidc.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "oidc.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "oidc.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "oidc.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "oidc.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "oidc.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "oidc.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "oidc.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "oidc.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "oidc.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "oidc.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "oidc.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "oidc.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "omics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "omics.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + 
}: endpoint{ + Hostname: "omics.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "omics.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "omics.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "omics.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "omics.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "omics.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "omics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "opsworks-cm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "organizations-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "osis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "outposts-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect-fips.us-west-2.amazonaws.com", + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: 
endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "pinpoint.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pinpoint-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "pinpoint.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "pinpoint.us-east-2.amazonaws.com", + 
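// Illustrative sketch (reviewer note, public API only): the "pinpoint"
// service defaults above remap the signing name via CredentialScope.Service,
// so requests are signed for "mobiletargeting" even though the hostname is
// pinpoint.<region>.amazonaws.com:
//
//	resolved, _ := endpoints.DefaultResolver().EndpointFor("pinpoint", "eu-west-1")
//	fmt.Println(resolved.URL)         // https://pinpoint.eu-west-1.amazonaws.com
//	fmt.Println(resolved.SigningName) // mobiletargeting, from the service defaults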
CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "pinpoint.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-west-2.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "portal.sso.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "portal.sso.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "portal.sso.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "portal.sso.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "portal.sso.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "portal.sso.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "portal.sso.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "portal.sso.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "portal.sso.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "portal.sso.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "portal.sso.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "portal.sso.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "portal.sso.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "portal.sso.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "portal.sso.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "portal.sso.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "portal.sso.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "portal.sso.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "private-networks": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "profile": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "profile-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + 
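// Illustrative sketch (reviewer note, public API only): services such as
// "profile" here carry both a deprecated "fips-<region>" pseudo-region and a
// fipsVariant key on the real region. New code selects the FIPS hostname
// through resolver options rather than the pseudo-region:
//
//	resolved, err := endpoints.DefaultResolver().EndpointFor(
//		"profile", "us-east-1",
//		func(o *endpoints.Options) {
//			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
//		},
//	)
//	// on success: resolved.URL == "https://profile-fips.us-east-1.amazonaws.com"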
CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "profile-fips.us-west-2.amazonaws.com", + }, + }, + }, + "projects.iot1click": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "proton": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "qbusiness.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "qbusiness.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "qbusiness.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "qbusiness.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "qbusiness.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "qbusiness.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "qbusiness.ca-west-1.api.aws", + }, + endpointKey{ + Region: 
"eu-central-1", + }: endpoint{ + Hostname: "qbusiness.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "qbusiness.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "qbusiness.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "qbusiness.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "qbusiness.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "qbusiness.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "qbusiness.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "qbusiness.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "qbusiness.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "qbusiness.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "qbusiness.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "qbusiness.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "qbusiness.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "qbusiness.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "qbusiness.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "qbusiness.us-west-2.api.aws", + }, + }, + }, + "qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "qldb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: 
"us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ram-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "rbin-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "rds-fips.ca-central-1", + }: endpoint{ + Hostname: 
"rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.ca-west-1", + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-1", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-east-2", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-1", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-west-2", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: 
"{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + SSLCommonName: "{service}.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "rds-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "rds-data": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rds-data-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"rds-data-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-data-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, 
+ }: endpoint{ + Hostname: "redshift-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-west-2.amazonaws.com", + }, + }, + }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com", + }, + }, + }, + "rekognition": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + 
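// Illustrative sketch (reviewer note, public API only): the "fips-<region>"
// and "<service>.<region>" keys marked Deprecated: boxedTrue above are kept
// for backward compatibility (endpoints.Options also exposes a LogDeprecated
// toggle to flag them at resolve time). Resolving through such a pseudo-region
// still yields the FIPS hostname, with the signing region taken from
// CredentialScope, e.g. for "redshift" above:
//
//	resolved, _ := endpoints.DefaultResolver().EndpointFor("redshift", "fips-us-east-1")
//	fmt.Println(resolved.URL)           // https://redshift-fips.us-east-1.amazonaws.com
//	fmt.Println(resolved.SigningRegion) // us-east-1, from CredentialScope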
}: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "rekognition-fips.ca-central-1", + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-east-1", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-east-2", + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition-fips.us-west-2", + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + 
Region: "rekognition.us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "resource-explorer-2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: 
"ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "resource-groups-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"resource-groups-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups-fips.us-west-2.amazonaws.com", + }, + }, + }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + }, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "route53-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "route53-recovery-control-config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "route53domains": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "rum": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "runtime-fips.lex.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "runtime-fips.lex.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "runtime.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ 
+ Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.af-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-east-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + 
endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-south-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com", + }, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-2", + }: 
endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-central-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-north-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-south-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-south-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "s3-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "s3-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "s3-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "s3-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "s3-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.il-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.me-central-1.amazonaws.com", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.me-south-1.amazonaws.com", + }, + endpointKey{ + Region: "s3-external-1", + }: endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: 
"s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "s3.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"ap-northeast-1", + }: endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "s3-control.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-northeast-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + 
CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: 
credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: 
"s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: 
endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + }, + }, + "sagemaker-geospatial": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "savingsplans": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "savingsplans.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "scheduler": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + 
endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "schemas": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sdb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "sdb.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + }, + }, + "securityhub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + 
}, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-west-2.amazonaws.com", + }, + }, + }, + "securitylake": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "securitylake-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: 
"us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securitylake-fips.us-west-2.amazonaws.com", + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"servicecatalog-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog-appregistry": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com", + }, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + 
Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.api.aws", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + 
}: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "session.qldb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "session.qldb-fips.us-west-2.amazonaws.com", + }, + }, + }, + "shield": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: 
"shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "shield.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-aws-global", + }: endpoint{ + Hostname: "shield-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "fips-verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: 
"us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "signer-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "verification-af-south-1", + }: endpoint{ + Hostname: "verification.signer.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-east-1", + }: endpoint{ + Hostname: "verification.signer.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-northeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "verification-ap-south-1", + }: endpoint{ + Hostname: "verification.signer.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-1", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "verification-ap-southeast-2", + }: endpoint{ + Hostname: "verification.signer.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "verification-ca-central-1", + }: endpoint{ + Hostname: "verification.signer.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-central-1", + }: endpoint{ + Hostname: "verification.signer.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "verification-eu-north-1", + }: endpoint{ + Hostname: "verification.signer.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "verification-eu-south-1", + }: endpoint{ + Hostname: "verification.signer.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-1", + }: endpoint{ + Hostname: "verification.signer.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "verification-eu-west-2", + }: endpoint{ + Hostname: "verification.signer.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "verification-eu-west-3", + }: endpoint{ + Hostname: "verification.signer.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "verification-me-south-1", + }: endpoint{ + Hostname: "verification.signer.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "verification-sa-east-1", + }: endpoint{ + 
Hostname: "verification.signer.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-1", + }: endpoint{ + Hostname: "verification.signer.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "verification-us-east-2", + }: endpoint{ + Hostname: "verification.signer.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "verification-us-west-1", + }: endpoint{ + Hostname: "verification.signer.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "verification-us-west-2", + }: endpoint{ + Hostname: "verification.signer.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-west-2.amazonaws.com", + }, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-north-1", + }: 
endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "snowball-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "snowball-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "snowball-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "snowball-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "snowball-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "snowball-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "snowball-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "snowball-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.sa-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + SSLCommonName: "queue.{dnsSuffix}", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-contacts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-incidents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com", + }, + }, + }, + "ssm-sap": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"ssm-sap-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-sap-fips.us-west-2.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + 
endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-west-2.amazonaws.com", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", 
+ }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", 
+ }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "local", + }: endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "sts-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "supportapp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf-fips.us-west-2.amazonaws.com", + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ 
+ Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-west-2.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "textract-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + 
Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.amazonaws.com", + }, + }, + }, + "thinclient": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "tnb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "fips.transcribe.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-west-2.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-ca-central-1", + }: endpoint{ + Hostname: 
"transcribestreaming-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-1", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-east-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-fips-us-west-2", + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "transcribestreaming-us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + 
Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "transfer-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-west-2.amazonaws.com", + }, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + }, + 
endpointKey{ + Region: "us-east-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "verifiedpermissions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + 
endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com", + }, + }, + }, + "voice-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2-fips", + }: endpoint{ + Hostname: "voice-chime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + }, + }, + "voiceid": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "voiceid-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "voiceid-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + 
Hostname: "voiceid-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "voiceid-fips.us-west-2.amazonaws.com", + }, + }, + }, + "vpc-lattice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "aws-global-fips", + }: endpoint{ + Hostname: "waf-fips.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "waf-regional.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "waf-regional.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + 
endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "waf-regional.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "waf-regional.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "waf-regional.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "waf-regional.eu-central-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "waf-regional.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "waf-regional.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "waf-regional.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "waf-regional.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "waf-regional.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "waf-regional.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "waf-regional.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, 
+ Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: 
endpoint{ + Hostname: "waf-regional-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "waf-regional.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "waf-regional.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "waf-regional.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "waf-regional.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "waf-regional.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "waf-regional.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "waf-regional.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "waf-regional.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "wafv2.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "af-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "wafv2.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "wafv2.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "wafv2.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "wafv2.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "wafv2.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-1", + Variant: 
fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "wafv2.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "wafv2.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "wafv2.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "wafv2.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "wafv2.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "wafv2.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "wafv2.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "wafv2.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-central-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "wafv2.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "eu-north-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "wafv2.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "wafv2.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-south-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "wafv2.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "wafv2.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "wafv2.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "eu-west-3", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + endpointKey{ + Region: "fips-af-south-1", + }: endpoint{ + Hostname: "wafv2-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-east-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-northeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-south-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-1", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + Deprecated: 
boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-2", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-3", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ap-southeast-4", + }: endpoint{ + Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-4", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "wafv2-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-central-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-north-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-south-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-south-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-1", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-2", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-eu-west-3", + }: endpoint{ + Hostname: "wafv2-fips.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-il-central-1", + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-central-1", + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-me-south-1", + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-sa-east-1", + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "wafv2.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "il-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "wafv2.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "wafv2.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "me-south-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "wafv2.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "sa-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "wafv2.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "wafv2.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "wafv2.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "wafv2.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, 
+ }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "wisdom": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "ui-ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "ui-eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "ui-us-east-1", + }: endpoint{}, + endpointKey{ + Region: "ui-us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "workdocs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + 
Variant: fipsVariant, + }: endpoint{ + Hostname: "workdocs-fips.us-west-2.amazonaws.com", + }, + }, + }, + "workmail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-west-2.amazonaws.com", + }, + }, + }, + "workspaces-web": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, + 
endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + }, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+	return awscnPartition.Partition()
+}
+
+var awscnPartition = partition{
+	ID: "aws-cn",
+	Name: "AWS China",
+	DNSSuffix: "amazonaws.com.cn",
+	RegionRegex: regionRegex{
+		Regexp: func() *regexp.Regexp {
+			reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+			return reg
+		}(),
+	},
+	Defaults: endpointDefaults{
+		defaultKey{}: endpoint{
+			Hostname: "{service}.{region}.{dnsSuffix}",
+			Protocols: []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+		defaultKey{
+			Variant: dualStackVariant,
+		}: endpoint{
+			Hostname: "{service}.{region}.{dnsSuffix}",
+			DNSSuffix: "api.amazonwebservices.com.cn",
+			Protocols: []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+		defaultKey{
+			Variant: fipsVariant,
+		}: endpoint{
+			Hostname: "{service}-fips.{region}.{dnsSuffix}",
+			DNSSuffix: "amazonaws.com.cn",
+			Protocols: []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+		defaultKey{
+			Variant: fipsVariant | dualStackVariant,
+		}: endpoint{
+			Hostname: "{service}-fips.{region}.{dnsSuffix}",
+			DNSSuffix: "api.amazonwebservices.com.cn",
+			Protocols: []string{"https"},
+			SignatureVersions: []string{"v4"},
+		},
+	},
+	Regions: regions{
+		"cn-north-1": region{
+			Description: "China (Beijing)",
+		},
+		"cn-northwest-1": region{
+			Description: "China (Ningxia)",
+		},
+	},
+	Services: services{
+		"access-analyzer": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"account": service{
+			PartitionEndpoint: "aws-cn-global",
+			IsRegionalized: boxedFalse,
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "aws-cn-global",
+				}: endpoint{
+					Hostname: "account.cn-northwest-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-northwest-1",
+					},
+				},
+			},
+		},
+		"acm": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"airflow": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"api.ecr": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{
+					Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{
+					Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-northwest-1",
+					},
+				},
+			},
+		},
+		"api.pricing": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					CredentialScope: credentialScope{
+						Service: "pricing",
+					},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"api.sagemaker": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"api.tunneling.iot": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"apigateway": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
+		"appconfig": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+ endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "appmesh": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "appsync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "batch": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "budgets.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cassandra": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "ce.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "cur": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-northwest-1", + }: endpoint{}, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "dax": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + 
DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-northwest-1", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "gamelift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "health": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "global.health.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", 
+ DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-cn-north-1", + }: endpoint{ + Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "data-cn-north-1", + }: endpoint{ + Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "kinesisvideo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lakeformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + 
Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "memory-db": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "rds.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "oam": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", + CredentialScope: 
credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "personalize": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "redshift-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "route53.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "route53resolver": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: 
"cn-northwest-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn", + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com.cn", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "savingsplans": service{ + IsRegionalized: boxedTrue, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "savingsplans.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "schemas": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{}, + }, + }, + "securityhub": service{ + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "verification-cn-north-1", + }: endpoint{ + Hostname: "verification.signer.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "verification-cn-northwest-1", + }: endpoint{ + Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + 
endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-cn-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-cn-global", + }: endpoint{ + Hostname: "support.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: 
"waf-regional.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "wafv2.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-north-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + endpointKey{ + Region: "fips-cn-north-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-cn-northwest-1", + }: endpoint{ + Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
+func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US-West)", + }, + }, + Services: services{ + "access-analyzer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "access-analyzer.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "acm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "acm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "acm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"fips-us-gov-east-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-pca.us-gov-west-1.amazonaws.com", + }, + }, + }, + "api.detective": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.ecr": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dkr-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dkr-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-dkr-us-gov-west-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: 
"ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecr-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "api.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-fips-secondary", + }: endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-secondary", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1-secondary", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "api.tunneling.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfig.us-gov-west-1.amazonaws.com", + }, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appconfigdata.us-gov-west-1.amazonaws.com", + }, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + + Deprecated: boxedTrue, + }, + }, + }, + "applicationinsights": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + 
endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "applicationinsights.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "appstream2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "athena.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "athena-fips.us-gov-west-1.api.aws", + }, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", 
"https"}, + }, + }, + }, + "autoscaling-plans": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "backup": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "backup-gateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "backupstorage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "batch": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "batch.us-gov-west-1.amazonaws.com", + }, + }, + }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "bedrock-runtime-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "bedrock-us-gov-west-1", + }: endpoint{ + Hostname: "bedrock.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cassandra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + 
}, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cassandra.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "clouddirectory": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "clouddirectory.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "cloudformation.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "cloudhsm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cloudhsmv2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"cloudtrail.us-gov-west-1.amazonaws.com", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudtrail.us-gov-west-1.amazonaws.com", + }, + }, + }, + "codebuild": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codecommit": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: 
endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "codepipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + }, + }, + "cognito-identity": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "cognito-idp": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "comprehendmedical": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "compute-optimizer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"us-gov-east-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "config": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + }, + }, + }, + "connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "connect.us-gov-west-1.amazonaws.com", + }, + }, + }, + "controltower": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "data-ats.iot": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "iotdata", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Service: "iotdata", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "data.jobs.iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "databrew": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "databrew.us-gov-west-1.amazonaws.com", + }, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "datazone.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "datazone.us-gov-west-1.api.aws", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "directconnect.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "directconnect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dlm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dlm.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: 
"dlm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dms.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dms.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "docdb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "drs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "drs-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ds-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ec2.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "ec2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-east-1.api.aws", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "ec2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "ec2.us-gov-west-1.api.aws", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ecs-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.{region}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"fips-us-gov-east-1", + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "eks.us-gov-west-1.amazonaws.com", + }, + }, + }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-west-1.api.aws", + }, + }, + }, + "elasticache": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticache.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticbeanstalk": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "elasticfilesystem": service{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com", 
+ Protocols: []string{"https"}, + }, + }, + }, + "email": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "emr-containers": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "emr-serverless": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "events.us-gov-west-1.amazonaws.com", + }, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", 
+ }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "firehose-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "fms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fms-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "fsx": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-prod-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-prod-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "prod-us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "geo": service{ + 
Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glacier.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", + }, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dataplane-us-gov-east-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "dataplane-us-gov-west-1", + }: endpoint{ + Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: 
"greengrass.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "greengrass.us-gov-west-1.amazonaws.com", + }, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "guardduty.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "health": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "health.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "global.health.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "health-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + 
Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "iam-govcloud-fips", + }: endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "identitystore": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "identitystore.us-gov-west-1.amazonaws.com", + }, + }, + }, + "ingest.timestream": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "inspector": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "inspector2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "internetmonitor": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "internetmonitor.us-gov-west-1.api.aws", + }, + }, + }, + "iot": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotevents": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "ioteventsdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "data.iotevents.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "iotsecuredtunneling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iotsitewise": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "iottwinmaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-api-us-gov-west-1", + }: endpoint{ + Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-data-us-gov-west-1", + }: endpoint{ + Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "kafka": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kafka.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + 
}, + "kendra": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kendra-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "kendra-ranking": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kendra-ranking.us-gov-west-1.api.aws", + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesis.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesisanalytics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lakeformation": service{ 
+ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.api.aws", + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "lambda-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"license-manager-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "license-manager-linux-subscriptions": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "logs.us-gov-west-1.amazonaws.com", + }, + }, + }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "managedblockchain": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "mediaconvert": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mediaconvert.us-gov-west-1.amazonaws.com", + }, + }, + }, + "meetings-chime": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + 
"metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "mgn": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mgn-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "models-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "monitoring": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "monitoring.us-gov-west-1.amazonaws.com", + }, + }, + }, + "mq": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "mq-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "neptune": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "network-firewall": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "networkmanager": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "networkmanager.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "oidc.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "oidc.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: 
"organizations.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "outposts.us-gov-west-1.amazonaws.com", + }, + }, + }, + "participant.connect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "participant.connect.us-gov-west-1.amazonaws.com", + }, + }, + }, + "pi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "pinpoint.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "polly": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "polly-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "portal.sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "qbusiness": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + 
defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "qbusiness.us-gov-west-1.api.aws", + }, + }, + }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "api", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "ram.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "rds": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds.us-gov-east-1", + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-gov-west-1", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: 
endpoint{ + Hostname: "rds.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "rds.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "redshift.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "redshift.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "rekognition": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rekognition-fips.us-gov-west-1", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rekognition.us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "resource-groups": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-us-gov-west-1", + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resource-groups.us-gov-west-1.amazonaws.com", + }, + }, + }, + "robomaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "aws-us-gov-global", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-aws-us-gov-global", + }: endpoint{ + Hostname: "route53.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-east-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "route53resolver.us-gov-west-1.amazonaws.com", + + Deprecated: boxedTrue, + }, + }, + }, + "runtime-v2-lex": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.{region}.{dnsSuffix}", + 
CredentialScope: credentialScope{ + Service: "lex", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "runtime.sagemaker": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: "{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "s3.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"{service}.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + defaultKey{ + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}", + DNSSuffix: "amazonaws.com", + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, + }, + }, + "securityhub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"servicecatalog-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicecatalog-appregistry": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "servicediscovery": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "servicediscovery", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "servicediscovery-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "servicequotas": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + 
}, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "servicequotas.us-gov-west-1.amazonaws.com", + }, + }, + }, + "signer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "simspaceweaver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sms-voice": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "snowball-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sqs.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sqs.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sqs.us-gov-west-1.amazonaws.com", + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "ssm": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm.us-gov-west-1.amazonaws.com", + }, + }, + }, + "sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ 
+ Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sso.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "states.us-gov-west-1.amazonaws.com", + }, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.{region}.{dnsSuffix}", + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"streams.dynamodb.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "sts": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "sts.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "sts.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-us-gov-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-us-gov-global", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "support.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + 
CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "swf.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com", + }, + }, + }, + "transcribestreaming": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: 
endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "transfer": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "transfer-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "waf-regional": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "waf-regional.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wafv2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "wafv2.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + 
Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "wafv2.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "wellarchitected": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + "xray": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, + }, +} + +// AwsIsoPartition returns the Resolver for AWS ISO (US). 
+func AwsIsoPartition() Partition { + return awsisoPartition.Partition() +} + +var awsisoPartition = partition{ + ID: "aws-iso", + Name: "AWS ISO (US)", + DNSSuffix: "c2s.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "c2s.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-iso-east-1": region{ + Description: "US ISO East", + }, + "us-iso-west-1": region{ + Description: "US ISO WEST", + }, + }, + Services: services{ + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{ + Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + }, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "athena": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "autoscaling": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + 
}: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "comprehend": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "datapipeline": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "datasync": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "directconnect": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "dms.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + 
"ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "elasticloadbalancing": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "elasticmapreduce": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "es": 
service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "firehose": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "glue": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "health": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "iam.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: 
"us-iso-east-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-iso-east-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds-fips.us-iso-west-1", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + 
Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "route53.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", 
"https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, + }, + }, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "sns": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{ + Protocols: []string{"http", "https"}, + }, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-global", + }: endpoint{ + Hostname: "support.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + "textract": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "transcribe": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "transcribestreaming": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "translate": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsIsoBPartition returns the Resolver for AWS ISOB (US). +func AwsIsoBPartition() Partition { + return awsisobPartition.Partition() +} + +var awsisobPartition = partition{ + ID: "aws-iso-b", + Name: "AWS ISOB (US)", + DNSSuffix: "sc2s.sgov.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "sc2s.sgov.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{ + "us-isob-east-1": region{ + Description: "US ISOB East (Ohio)", + }, + }, + Services: services{ + "api.ecr": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "appconfig": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "appconfigdata": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "arc-zonal-shift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudcontrolapi": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudformation": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "cloudtrail": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "codedeploy": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "config": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "directconnect": 
service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dlm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dms": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{}, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.{region}.{dnsSuffix}", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "dms", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "dms-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "dms.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "ds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ebs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ecs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "eks": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticache": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "elasticfilesystem": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "elasticloadbalancing": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "elasticmapreduce": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + 
endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "es": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "events": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "glacier": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "health": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "iam.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "kinesis": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "kms": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ProdFips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "lambda": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "license-manager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "logs": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "medialive": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "mediapackage": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "metrics.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "monitoring": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ram": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", 
+ }, + }, + }, + "rbin": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "rds": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "rds-fips.us-isob-east-1", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + }: endpoint{ + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "rds.us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "rds-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "redshift": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + }, + }, + }, + "resource-groups": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-iso-b-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "route53.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "route53resolver": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "runtime.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "s3": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, + }, + 
}, + "s3-outposts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, + "secretsmanager": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "snowball": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sns": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "ssm": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "storagegateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1-fips", + }: endpoint{ + Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "sts": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "support": service{ + PartitionEndpoint: "aws-iso-b-global", + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-iso-b-global", + }: endpoint{ + Hostname: "support.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + }, + }, + }, + "swf": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "synthetics": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "tagging": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, + }, +} + +// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe). 
+func AwsIsoEPartition() Partition { + return awsisoePartition.Partition() +} + +var awsisoePartition = partition{ + ID: "aws-iso-e", + Name: "AWS ISOE (Europe)", + DNSSuffix: "cloud.adc-e.uk", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "cloud.adc-e.uk", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} + +// AwsIsoFPartition returns the Resolver for AWS ISOF. +func AwsIsoFPartition() Partition { + return awsisofPartition.Partition() +} + +var awsisofPartition = partition{ + ID: "aws-iso-f", + Name: "AWS ISOF", + DNSSuffix: "csp.hci.ic.gov", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "csp.hci.ic.gov", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + }, + Regions: regions{}, + Services: services{}, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 00000000000..ca8fc828e15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. 
+ CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. 
+ RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 00000000000..66dec6bebf0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,65 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns an endpoint Resolver that will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// +// # Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.ID()) +// for id := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.ID()) +// for id := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// # Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic for how +// endpoints are resolved.
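To ground the generated model added in this diff, here is a minimal sketch of resolving against one of the new partitions; it assumes only the vendored endpoints package, and the service and region strings come from the aws-iso-b data earlier in this file:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Resolve DynamoDB against the ISO-B partition defined above. With the
		// "{service}.{region}.{dnsSuffix}" default this should yield
		// https://dynamodb.us-isob-east-1.sc2s.sgov.gov.
		ep, err := endpoints.AwsIsoBPartition().EndpointFor("dynamodb", "us-isob-east-1")
		if err != nil {
			panic(err)
		}
		fmt.Println(ep.URL)
	}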
This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. To use this with the SDK's Session and Config set the value +// of the type to the EndpointResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition, the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 00000000000..a686a48fa27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,708 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// A Logger is a minimalistic interface for the SDK to log messages to. +type Logger interface { + Log(...interface{}) +} + +// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution +// behavior. +type DualStackEndpointState uint + +const ( + // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint + // resolution. + DualStackEndpointStateUnset DualStackEndpointState = iota + + // DualStackEndpointStateEnabled enables dual-stack endpoint resolution for endpoints. + DualStackEndpointStateEnabled + + // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints. + DualStackEndpointStateDisabled +) + +// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior. +type FIPSEndpointState uint + +const ( + // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution. + FIPSEndpointStateUnset FIPSEndpointState = iota + + // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints. + FIPSEndpointStateEnabled + + // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints. + FIPSEndpointStateDisabled +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled, a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled, only services that are known to support dualstack will return + // dualstack endpoints. + // + // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
+ // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients + // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher + // precedence than this option. + UseDualStack bool + + // Sets the resolver to resolve a dual-stack endpoint for the service. + UseDualStackEndpoint DualStackEndpointState + + // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint. + UseFIPSEndpoint FIPSEndpointState + + // Enables strict matching of services and regions when resolving endpoints. + // If the partition doesn't enumerate the exact service and region, an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list, the provided region will + // be used to determine which partition's domain name pattern to use with the + // service endpoint ID. If both the service and region are unknown and resolving + // the endpoint on the partition list, an UnknownEndpointError will be returned. + // + // If resolving an endpoint on a partition-specific resolver, that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition, the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool + + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6). + EC2MetadataEndpointMode EC2IMDSEndpointModeState + + // STS Regional Endpoint flag helps with resolving the STS endpoint. + STSRegionalEndpoint STSRegionalEndpoint + + // S3 Regional Endpoint flag helps with resolving the S3 endpoint. + S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint + + // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority + // over the region name passed to the ResolveEndpoint call. + ResolvedRegion string + + // Logger is the logger that will be used to log messages. + Logger Logger + + // Determines whether logging of deprecated endpoints usage is enabled. + LogDeprecated bool +} + +func (o Options) getEndpointVariant(service string) (v endpointVariant) { + const s3 = "s3" + const s3Control = "s3-control" + + if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) || + ((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) { + v |= dualStackVariant + } + if o.UseFIPSEndpoint == FIPSEndpointStateEnabled { + v |= fipsVariant + } + return v +} + +// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode. +type EC2IMDSEndpointModeState uint + +// Enumeration values for EC2IMDSEndpointModeState +const ( + EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota + EC2IMDSEndpointModeStateIPv4 + EC2IMDSEndpointModeStateIPv6 +) + +// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value.
Unknown values will default to EC2IMDSEndpointModeStateUnset +func (e *EC2IMDSEndpointModeState) SetFromString(v string) error { + v = strings.TrimSpace(v) + + switch { + case len(v) == 0: + *e = EC2IMDSEndpointModeStateUnset + case strings.EqualFold(v, "IPv6"): + *e = EC2IMDSEndpointModeStateIPv6 + case strings.EqualFold(v, "IPv4"): + *e = EC2IMDSEndpointModeStateIPv4 + default: + return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4") + } + return nil +} + +// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint +// options. +type STSRegionalEndpoint int + +func (e STSRegionalEndpoint) String() string { + switch e { + case LegacySTSEndpoint: + return "legacy" + case RegionalSTSEndpoint: + return "regional" + case UnsetSTSEndpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified. + UnsetSTSEndpoint STSRegionalEndpoint = iota + + // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified + // to use legacy endpoints. + LegacySTSEndpoint + + // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified + // to use regional endpoints. + RegionalSTSEndpoint +) + +// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the STS regional Endpoint flag. +func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacySTSEndpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalSTSEndpoint, nil + default: + return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s) + } +} + +// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1 +// Regional Endpoint options. +type S3UsEast1RegionalEndpoint int + +func (e S3UsEast1RegionalEndpoint) String() string { + switch e { + case LegacyS3UsEast1Endpoint: + return "legacy" + case RegionalS3UsEast1Endpoint: + return "regional" + case UnsetS3UsEast1Endpoint: + return "" + default: + return "unknown" + } +} + +const ( + + // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not + // specified. + UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota + + // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use legacy endpoints. + LegacyS3UsEast1Endpoint + + // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is + // specified to use regional endpoints. + RegionalS3UsEast1Endpoint +) + +// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based +// on the input string provided in env config or shared config by the user. +// +// `legacy`, `regional` are the only case-insensitive valid strings for +// resolving the S3 regional Endpoint flag. +func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) { + switch { + case strings.EqualFold(s, "legacy"): + return LegacyS3UsEast1Endpoint, nil + case strings.EqualFold(s, "regional"): + return RegionalS3UsEast1Endpoint, nil + default: + return UnsetS3UsEast1Endpoint, + fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s) + } +} + +// Set combines all of the option functions together. 
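A small sketch of the parsing helpers above; the accepted inputs mirror the doc comments (legacy/regional for the endpoint flags, IPv4/IPv6 for the IMDS mode), and matching is case-insensitive:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// Any value other than "legacy" or "regional" returns an error.
		sts, err := endpoints.GetSTSRegionalEndpoint("Regional")
		if err != nil {
			panic(err)
		}
		fmt.Println(sts) // prints "regional" via the String method above

		// An empty string leaves the IMDS mode unset; "IPv4"/"IPv6" select one.
		var mode endpoints.EC2IMDSEndpointModeState
		if err := mode.SetFromString("IPv6"); err != nil {
			panic(err)
		}
	}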
+func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL option. Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +// +// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint. +// When DualStackEndpointState is set to a non-zero value it takes higher precedence than this option. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional +// option when resolving endpoints. +func UseDualStackEndpointOption(o *Options) { + o.UseDualStackEndpoint = DualStackEndpointStateEnabled +} + +// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional +// option when resolving endpoints. +func UseFIPSEndpointOption(o *Options) { + o.UseFIPSEndpoint = FIPSEndpointStateEnabled +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve +// STS endpoints to their regional endpoints, instead of the global endpoint. +func STSRegionalEndpointOption(o *Options) { + o.STSRegionalEndpoint = RegionalSTSEndpoint +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The built-in Partition and DefaultResolver return values satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no +// scheme. If disableSSL is true the scheme will be set to HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions.
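The functional options compose when passed to EndpointFor; a sketch pinned to the aws-iso-b data in this diff (sqs models both http and https there, so DisableSSLOption resolves an http URL), plus the AddScheme behavior:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		// StrictMatching fails instead of guessing unknown service/region pairs;
		// DisableSSL downgrades the scheme, giving
		// http://sqs.us-isob-east-1.sc2s.sgov.gov here.
		ep, err := endpoints.AwsIsoBPartition().EndpointFor(
			"sqs", "us-isob-east-1",
			endpoints.StrictMatchingOption,
			endpoints.DisableSSLOption,
		)
		if err != nil {
			panic(err)
		}
		fmt.Println(ep.URL)

		// AddScheme only prepends a scheme when one is missing.
		fmt.Println(endpoints.AddScheme("example.com", false))        // https://example.com
		fmt.Println(endpoints.AddScheme("http://example.com", false)) // unchanged
	}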
+type EnumPartitions interface { + Partitions() []Partition +} + +// RegionsForService returns a map of regions for the partition and service. +// If either the partition or service does not exist, false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. +// +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id, dnsSuffix string + p *partition +} + +// DNSSuffix returns the base domain name of the partition. +func (p Partition) DNSSuffix() string { return p.dnsSuffix } + +// ID returns the identifier of the partition. +func (p Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. +// +// If the service cannot be found in the metadata the UnknownServiceError +// error will be returned. This validation will occur regardless of whether +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve an endpoint based on +// the service endpoint ID provided. +// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved, an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. Without +// StrictMatching enabled the endpoint returned may look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new region and service expansions. +// +// Errors that can be returned: +// - UnknownServiceError +// - UnknownEndpointError +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition.
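Both package-level helpers can be driven from DefaultPartitions, as the doc comment's own example does; a sketch using the DynamodbServiceID constant added earlier in this diff, with the region lookup landing on the aws-iso-b partition defined above:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		ps := endpoints.DefaultPartitions()

		// Regions where DynamoDB is modeled in the standard AWS partition.
		if rs, ok := endpoints.RegionsForService(ps, endpoints.AwsPartitionID, endpoints.DynamodbServiceID); ok {
			fmt.Println(len(rs), "regions")
		}

		// Map a region string back to its owning partition.
		if p, ok := endpoints.PartitionForRegion(ps, "us-isob-east-1"); ok {
			fmt.Println(p.ID()) // "aws-iso-b"
		}
	}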
+func (p Partition) Regions() map[string]Region { + rs := make(map[string]Region, len(p.p.Regions)) + for id, r := range p.p.Regions { + rs[id] = Region{ + id: id, + desc: r.Description, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Services indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p Partition) Services() map[string]Service { + ss := make(map[string]Service, len(p.p.Services)) + + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + // Since we have removed the customization that injected this into the model + // we still need to pretend that this is a modeled service. + if _, ok := ss[Ec2metadataServiceID]; !ok { + ss[Ec2metadataServiceID] = Service{ + id: Ec2metadataServiceID, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and the ability to resolve an +// endpoint from the context of a region, given a service. +type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r Region) ID() string { return r.id } + +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) +} + +// Services returns a list of all services that are known to be in this region. +func (r Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and the ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region. See Partition.EndpointFor for usage and errors that can be returned. +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in, whereas an Endpoint is +// a URL that can be resolved to an instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + + service, ok := s.p.Services[s.id] + + // Since ec2metadata customization has been removed we need to check + // if it was defined in a non-standard endpoints.json file. If it's not, + // then we can return the empty map as there are no regional endpoints for IMDS. + // Otherwise, we need to iterate the non-standard model.
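The Region and Service accessors expose the same model from either direction; a sketch enumerating the single aws-iso-b region from this diff and resolving a service from the region's point of view:

	package main

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws/endpoints"
	)

	func main() {
		for id, r := range endpoints.AwsIsoBPartition().Regions() {
			fmt.Println(id, "-", r.Description())
			// kinesis is modeled for us-isob-east-1 above.
			if ep, err := r.ResolveEndpoint("kinesis"); err == nil {
				fmt.Println("  kinesis:", ep.URL)
			}
		}
	}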
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+	rs := map[string]Region{}
+
+	service, ok := s.p.Services[s.id]
+
+	// Since the ec2metadata customization has been removed we need to check
+	// if it was defined in a non-standard endpoints.json file. If it's not,
+	// then we can return the empty map as there are no regional endpoints for IMDS.
+	// Otherwise, we need to iterate the non-standard model.
+	if s.id == Ec2metadataServiceID && !ok {
+		return rs
+	}
+
+	for id := range service.Endpoints {
+		if id.Variant != 0 {
+			continue
+		}
+		if r, ok := s.p.Regions[id.Region]; ok {
+			rs[id.Region] = Region{
+				id:   id.Region,
+				desc: r.Description,
+				p:    s.p,
+			}
+		}
+	}
+
+	return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in. Whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+	es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
+	for id := range s.p.Services[s.id].Endpoints {
+		if id.Variant != 0 {
+			continue
+		}
+		es[id.Region] = Endpoint{
+			id:        id.Region,
+			serviceID: s.id,
+			p:         s.p,
+		}
+	}
+
+	return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+	id        string
+	serviceID string
+	p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+	return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+	// The endpoint URL
+	URL string
+
+	// The endpoint partition
+	PartitionID string
+
+	// The region that should be used for signing requests.
+	SigningRegion string
+
+	// The service name that should be used for signing requests.
+	SigningName string
+
+	// States that the signing name for this endpoint was derived from metadata
+	// passed in, but was not explicitly modeled.
+	SigningNameDerived bool
+
+	// The signing method that should be used for signing requests.
+	SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+	awsError
+	Partition string
+	Service   string
+	Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+	return UnknownServiceError{
+		awsError: awserr.New("UnknownServiceError",
+			"could not resolve endpoint for unknown service", nil),
+		Partition: p,
+		Service:   s,
+		Known:     known,
+	}
+}
+
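+// Example (editorial sketch, not part of the vendored source): detecting an
+// unknown service by asserting on the concrete error type returned by
+// EndpointFor.
+//
+//	_, err := endpoints.AwsPartition().EndpointFor("not-a-service", "us-east-1")
+//	if e, ok := err.(endpoints.UnknownServiceError); ok {
+//		fmt.Println("known services:", e.Known)
+//	}
+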
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q",
+		e.Partition, e.Service)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+	return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+	awsError
+	Partition string
+	Service   string
+	Region    string
+	Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+	return UnknownEndpointError{
+		awsError: awserr.New("UnknownEndpointError",
+			"could not resolve endpoint", nil),
+		Partition: p,
+		Service:   s,
+		Region:    r,
+		Known:     known,
+	}
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+	extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+		e.Partition, e.Service, e.Region)
+	if len(e.Known) > 0 {
+		extra += fmt.Sprintf(", known: %v", e.Known)
+	}
+	return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+	return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
new file mode 100644
index 00000000000..df75e899adb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
@@ -0,0 +1,24 @@
+package endpoints
+
+var legacyGlobalRegions = map[string]map[string]struct{}{
+	"sts": {
+		"ap-northeast-1": {},
+		"ap-south-1":     {},
+		"ap-southeast-1": {},
+		"ap-southeast-2": {},
+		"ca-central-1":   {},
+		"eu-central-1":   {},
+		"eu-north-1":     {},
+		"eu-west-1":      {},
+		"eu-west-2":      {},
+		"eu-west-3":      {},
+		"sa-east-1":      {},
+		"us-east-1":      {},
+		"us-east-2":      {},
+		"us-west-1":      {},
+		"us-west-2":      {},
+	},
+	"s3": {
+		"us-east-1": {},
+	},
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 00000000000..89f6627dc62
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,594 @@
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+const (
+	ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest"
+	ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest"
+)
+
+const dnsSuffixTemplateKey = "{dnsSuffix}"
+
+// defaultKey is a compound map key of a variant and other values.
+type defaultKey struct {
+	Variant        endpointVariant
+	ServiceVariant serviceVariant
+}
+
+// endpointKey is a compound map key of a region and associated variant value.
+type endpointKey struct {
+	Region  string
+	Variant endpointVariant
+}
+
+// endpointVariant is a bit field to describe the endpoint's attributes.
+type endpointVariant uint64
+
+// serviceVariant is a bit field to describe the service endpoint's attributes.
+type serviceVariant uint64
+
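+// Example (editorial sketch, not part of the vendored source): FIPS and
+// dual-stack endpoints are modeled as variants of a region key rather than as
+// separate regions. Assuming svc is a service entry from the decoded model, a
+// FIPS lookup uses a compound key:
+//
+//	key := endpointKey{Region: "us-east-1", Variant: fipsVariant}
+//	ep, ok := svc.Endpoints[key]
+
+const (
+	// fipsVariant indicates that the endpoint is FIPS capable.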
+ fipsVariant endpointVariant = 1 << (64 - 1 - iota) + + // dualStackVariant indicates that the endpoint is DualStack capable. + dualStackVariant +) + +var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. +func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type endpointWithVariants struct { + endpoint + Variants []endpointWithTags `json:"variants"` +} + +type endpointWithTags struct { + endpoint + Tags []string `json:"tags"` +} + +type endpointDefaults map[defaultKey]endpoint + +func (p *endpointDefaults) UnmarshalJSON(data []byte) error { + if *p == nil { + *p = make(endpointDefaults) + } + + var e endpointWithVariants + if err := json.Unmarshal(data, &e); err != nil { + return err + } + + (*p)[defaultKey{Variant: 0}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*p)[defaultKey{Variant: endpointVariant}] = ve + } + + return nil +} + +func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) { + if len(tags) == 0 { + unknown = true + return + } + + for _, tag := range tags { + switch { + case strings.EqualFold("fips", tag): + ev |= fipsVariant + case strings.EqualFold("dualstack", tag): + ev |= dualStackVariant + default: + unknown = true + } + } + return ev, unknown +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpointDefaults `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + dnsSuffix: p.DNSSuffix, + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, options Options) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[endpointKey{ + Region: region, + Variant: options.getEndpointVariant(service), + }] + + if hasEndpoint && hasService { + return true + } + + if options.StrictMatching { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func allowLegacyEmptyRegion(service string) bool { + legacy := map[string]struct{}{ + "budgets": {}, + "ce": {}, + "chime": {}, + "cloudfront": {}, + "ec2metadata": {}, + "iam": {}, + "importexport": {}, + "organizations": {}, + "route53": {}, 
+ "sts": {}, + "support": {}, + "waf": {}, + } + + _, allowed := legacy[service] + return allowed +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + if len(opt.ResolvedRegion) > 0 { + region = opt.ResolvedRegion + } + + s, hasService := p.Services[service] + + if service == Ec2metadataServiceID && !hasService { + endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode) + return endpoint, nil + } + + if len(service) == 0 || !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 { + region = s.PartitionEndpoint + } + + if r, ok := isLegacyGlobalRegion(service, region, opt); ok { + region = r + } + + variant := opt.getEndpointVariant(service) + + endpoints := s.Endpoints + + serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}] + // If we searched for a variant which may have no explicit service defaults, + // then we need to inherit the standard service defaults except the hostname and dnsSuffix + if variant != 0 && !hasServiceDefault { + serviceDefaults = s.Defaults[defaultKey{}] + serviceDefaults.Hostname = "" + serviceDefaults.DNSSuffix = "" + } + + partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}] + + var dnsSuffix string + if len(serviceDefaults.DNSSuffix) > 0 { + dnsSuffix = serviceDefaults.DNSSuffix + } else if variant == 0 { + // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for + // a non-variant endpoint then we need to set the dnsSuffix. 
+ dnsSuffix = p.DNSSuffix + } + + noDefaults := !hasServiceDefault && !hasPartitionDefault + + e, hasEndpoint := s.endpointForRegion(region, endpoints, variant) + if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant)) + } + + defs := []endpoint{partitionDefaults, serviceDefaults} + + return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt) +} + +func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint { + switch mode { + case EC2IMDSEndpointModeStateIPv6: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv6, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + case EC2IMDSEndpointModeStateIPv4: + fallthrough + default: + return ResolvedEndpoint{ + URL: ec2MetadataEndpointIPv4, + PartitionID: partitionID, + SigningRegion: "aws-global", + SigningName: service, + SigningNameDerived: true, + SigningMethod: "v4", + } + } +} + +func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) { + if opt.getEndpointVariant(service) != 0 { + return "", false + } + + const ( + sts = "sts" + s3 = "s3" + awsGlobal = "aws-global" + ) + + switch { + case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint: + return region, false + case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint: + return region, false + default: + if _, ok := legacyGlobalRegions[service][region]; ok { + return awsGlobal, true + } + } + + return region, false +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es serviceEndpoints, variant endpointVariant) []string { + list := make([]string, 0, len(es)) + for k := range es { + if k.Variant != variant { + continue + } + list = append(list, k.Region) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpointDefaults `json:"defaults"` + Endpoints serviceEndpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) { + if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok { + return e, true + } + + if s.IsRegionalized == boxedFalse { + return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
+ return endpoint{}, false +} + +type serviceEndpoints map[endpointKey]endpoint + +func (s *serviceEndpoints) UnmarshalJSON(data []byte) error { + if *s == nil { + *s = make(serviceEndpoints) + } + + var regionToEndpoint map[string]endpointWithVariants + + if err := json.Unmarshal(data, ®ionToEndpoint); err != nil { + return err + } + + for region, e := range regionToEndpoint { + (*s)[endpointKey{Region: region}] = e.endpoint + + e.Hostname = "" + e.DNSSuffix = "" + + for _, variant := range e.Variants { + endpointVariant, unknown := parseVariantTags(variant.Tags) + if unknown { + continue + } + + var ve endpoint + ve.mergeIn(e.endpoint) + ve.mergeIn(variant.endpoint) + + (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve + } + } + + return nil +} + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + DNSSuffix string `json:"dnsSuffix"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` + + Deprecated boxedBool `json:"deprecated"` +} + +// isZero returns whether the endpoint structure is an empty (zero) value. +func (e endpoint) isZero() bool { + switch { + case len(e.Hostname) != 0: + return false + case len(e.Protocols) != 0: + return false + case e.CredentialScope != (credentialScope{}): + return false + case len(e.SignatureVersions) != 0: + return false + case len(e.SSLCommonName) != 0: + return false + } + return true +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + + signingName := e.CredentialScope.Service + var signingNameDerived bool + if len(signingName) == 0 { + signingName = service + signingNameDerived = true + } + + hostname := e.Hostname + + if !validateInputRegion(region) { + return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided") + } + + if len(merged.DNSSuffix) > 0 { + dnsSuffix = merged.DNSSuffix + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil { + opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u)) + } + + return ResolvedEndpoint{ + URL: u, + PartitionID: partitionID, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + }, nil +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if 
disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if len(other.DNSSuffix) > 0 { + e.DNSSuffix = other.DNSSuffix + } + if other.Deprecated != boxedBoolUnset { + e.Deprecated = other.Deprecated + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) + +func validateInputRegion(region string) bool { + return regionValidationRegex.MatchString(region) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 00000000000..84922bca8a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,412 @@ +//go:build codegen +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) 
+ + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +func endpointVariantSetter(variant endpointVariant) (string, error) { + if variant == 0 { + return "0", nil + } + + if variant > (fipsVariant | dualStackVariant) { + return "", fmt.Errorf("unknown endpoint variant") + } + + var symbols []string + if variant&fipsVariant != 0 { + symbols = append(symbols, "fipsVariant") + } + if variant&dualStackVariant != 0 { + symbols = append(symbols, "dualStackVariant") + } + v := strings.Join(symbols, "|") + + return v, nil +} + +func endpointKeySetter(e endpointKey) (string, error) { + var sb strings.Builder + sb.WriteString("endpointKey{\n") + sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region)) + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + +func defaultKeySetter(e defaultKey) (string, error) { + var sb strings.Builder + sb.WriteString("defaultKey{\n") + if e.Variant != 0 { + variantSetter, err := endpointVariantSetter(e.Variant) + if err != nil { + return "", err + } + sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter)) + } + sb.WriteString("}") + return sb.String(), nil +} + +var funcMap = 
template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, + "EndpointVariantSetter": endpointVariantSetter, + "EndpointKeySetter": endpointKeySetter, + "DefaultKeySetter": defaultKeySetter, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" $.Resolver }} + + {{ range $_, $partition := $.Resolver }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} + + {{ template "endpoint resolvers" $.Resolver }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . 
-}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . -}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if (gt (len .Defaults) 0) -}} + Defaults: {{ template "gocode Defaults" .Defaults -}}, + {{ end -}} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Defaults" -}} +endpointDefaults{ + {{ range $id, $endpoint := . -}} + {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +serviceEndpoints{ + {{ range $id, $endpoint := . -}} + {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}} +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 00000000000..fa06f7a8f8b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,13 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. 
+	ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 00000000000..91a6f277a7e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+//
+//	values := aws.JSONValue{
+//		"Foo": "Bar",
+//	}
+//	values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 00000000000..49674cc79eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,121 @@
+package aws
+
+import (
+	"log"
+	"os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns the pointer to a LogLevel. Should be used to workaround
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+	return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+	if l != nil {
+		return *l
+	}
+	return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+	c := l.Value()
+	return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+	c := l.Value()
+	return c >= v
+}
+
+const (
+	// LogOff states that no logging should be performed by the SDK. This is the
+	// default state of the SDK, and should be used to disable all logging.
+	LogOff LogLevelType = iota * 0x1000
+
+	// LogDebug states that debug output should be logged by the SDK. This should
+	// be used to inspect requests made and responses received.
+	LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+	// LogDebugWithSigning states that the SDK should log request signing and
+	// presigning events. This should be used to log the signing details of
+	// requests for debugging. Will also enable LogDebug.
+	LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+	// LogDebugWithHTTPBody states the SDK should log HTTP request and response
+	// HTTP bodies in addition to the headers and path. This should be used to
+	// see the body content of requests and responses made while using the SDK.
+	// Will also enable LogDebug.
+	LogDebugWithHTTPBody
+
+	// LogDebugWithRequestRetries states the SDK should log when service requests
+	// are being retried. Will also enable LogDebug.
+	LogDebugWithRequestRetries
+
+	// LogDebugWithRequestErrors states the SDK should log when service requests fail
+	// to build, send, validate, or unmarshal.
+	LogDebugWithRequestErrors
+
+	// LogDebugWithEventStreamBody states the SDK should log EventStream
+	// request and response bodies. This should be used to log the EventStream
+	// wire unmarshaled message content of requests and responses made while
+	// using the SDK. Will also enable LogDebug.
+	LogDebugWithEventStreamBody
+
+	// LogDebugWithDeprecated states the SDK should log details about deprecated functionality.
+	LogDebugWithDeprecated
+)
+
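+// Example (editorial sketch, not part of the vendored source): the sub levels
+// are bit flags, so a handler can check whether body logging was requested
+// with Matches; aws.NewConfig and WithLogLevel are assumed from this package's
+// Config API.
+//
+//	cfg := aws.NewConfig().WithLogLevel(aws.LogDebugWithHTTPBody)
+//	if cfg.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
+//		// dump request and response bodies
+//	}
+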
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+	Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//
+//	s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//		fmt.Fprintln(os.Stdout, args...)
+//	})})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided.
+func (f LoggerFunc) Log(args ...interface{}) {
+	f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting runes as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+	return &defaultLogger{
+		logger: log.New(os.Stdout, "", log.LstdFlags),
+	}
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+	logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+	l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 00000000000..2ba3c56c11f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+package request
+
+import (
+	"strings"
+)
+
+func isErrConnectionReset(err error) bool {
+	// Resets observed on the read side are deliberately not treated as
+	// retryable; only the connection-level cases below are.
+	if strings.Contains(err.Error(), "read: connection reset") {
+		return false
+	}
+
+	if strings.Contains(err.Error(), "use of closed network connection") ||
+		strings.Contains(err.Error(), "connection reset") ||
+		strings.Contains(err.Error(), "broken pipe") {
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 00000000000..9556332b65e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,346 @@
+package request
+
+import (
+	"fmt"
+	"strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+	Validate         HandlerList
+	Build            HandlerList
+	BuildStream      HandlerList
+	Sign             HandlerList
+	Send             HandlerList
+	ValidateResponse HandlerList
+	Unmarshal        HandlerList
+	UnmarshalStream  HandlerList
+	UnmarshalMeta    HandlerList
+	UnmarshalError   HandlerList
+	Retry            HandlerList
+	AfterRetry       HandlerList
+	CompleteAttempt  HandlerList
+	Complete         HandlerList
+}
+
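+// Example (editorial sketch, not part of the vendored source): attaching a
+// named handler to one phase of the request lifecycle; NamedHandler and
+// PushBackNamed are defined later in this file.
+//
+//	handlers.Send.PushBackNamed(request.NamedHandler{
+//		Name: "example.LogSend",
+//		Fn: func(r *request.Request) {
+//			fmt.Println("sending:", r.Operation.Name)
+//		},
+//	})
+
+// Copy returns a copy of this handler's lists.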
+func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + BuildStream: h.BuildStream.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers. +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.BuildStream.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalStream.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// IsEmpty returns if there are no handlers in any of the handlerlists. +func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.BuildStream.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. 
+func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. 
+func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. +func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} + +// WithSetRequestHeaders updates the operation request's HTTP header to contain +// the header key value pairs provided. If the header key already exists in the +// request's HTTP header set, the existing value(s) will be replaced. +// +// Header keys added will be added as canonical format with title casing +// applied via http.Header.Set method. +func WithSetRequestHeaders(h map[string]string) Option { + return withRequestHeader(h).SetRequestHeaders +} + +type withRequestHeader map[string]string + +func (h withRequestHeader) SetRequestHeaders(r *Request) { + for k, v := range h { + r.HTTPRequest.Header.Set(k, v) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 00000000000..79f79602b03 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 00000000000..9370fa50c38 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,65 @@ +package request + +import ( + "io" + "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) { + reader := &offsetReader{} + _, err := buf.Seek(offset, sdkio.SeekStart) + if err != nil { + return nil, err + } + + reader.buf = buf + return reader, nil +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. 
+func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) { + if err := o.Close(); err != nil { + return nil, err + } + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 00000000000..636d9ec943b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,722 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" + + // ErrCodeRequestError is an error preventing the SDK from continuing to + // process the request. + ErrCodeRequestError = "RequestError" +) + +// A Request is the service request to be made. +type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + streamingBody io.ReadCloser + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // Additional API error codes that should be retried. IsErrorRetryable + // will consider these codes in addition to its built in cases. + RetryErrorCodes []string + + // Additional API error codes that should be retried with throttle backoff + // delay. IsErrorThrottle will consider these codes in addition to its + // built in cases. + ThrottleErrorCodes []string + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. 
+ ExpireTime time.Duration + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API operation and +// parameters. +// +// A Retryer should be provided to direct how the request is retried. If +// Retryer is nil, a default no retry value will be used. You can use +// NoOpRetryer in the Client package to disable retry behavior directly. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + if retryer == nil { + retryer = noOpRetryer{} + } + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + if len(operation.HTTPPath) != 0 { + opHTTPPath := operation.HTTPPath + var opQueryString string + if idx := strings.Index(opHTTPPath, "?"); idx >= 0 { + opQueryString = opHTTPPath[idx+1:] + opHTTPPath = opHTTPPath[:idx] + } + + if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") { + opHTTPPath = opHTTPPath[1:] + } + httpReq.URL.Path += opHTTPPath + httpReq.URL.RawQuery = opQueryString + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. 
+//
+//	var headers http.Header
+//	svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//	svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If the Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+	if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
+		return false
+	}
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+func fmtAttemptCount(retryCount, maxRetries int) string {
+	return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
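+// Example (editorial sketch, not part of the vendored source): combining a
+// context, options, and a response-header capture on one request; svc and
+// params are a hypothetical service client and input.
+//
+//	req, out := svc.GetItemRequest(params)
+//	req.SetContext(ctx)
+//	var reqID string
+//	req.ApplyOptions(request.WithGetResponseHeader("x-amzn-requestid", &reqID))
+//	err := req.Send()
+
+// SetReaderBody will set the request's body reader.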
+func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + + if aws.IsReaderSeekable(reader) { + var err error + // Get the Bodies current offset so retries will start from the same + // initial position. + r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent) + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to determine start of request body", err) + return + } + } + r.ResetBody() +} + +// SetStreamingBody set the reader to be used for the request that will stream +// bytes to the server. Request's Body must not be set to any reader. +func (r *Request) SetStreamingBody(reader io.ReadCloser) { + r.streamingBody = reader + r.SetReaderBody(aws.ReadSeekCloser(reader)) +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. The expire parameter is only used for presigned Amazon +// S3 API requests. All other AWS services will use a fixed expiration +// time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +func (r *Request) Presign(expire time.Duration) (string, error) { + r = r.copy() + + // Presign requires all headers be hoisted. There is no way to retrieve + // the signed headers not hoisted without this. Making the presigned URL + // useless. + r.NotHoist = false + + u, _, err := getPresignedURL(r, expire) + return u, err +} + +// PresignRequest behaves just like presign, with the addition of returning a +// set of headers that were signed. The expire parameter is only used for +// presigned Amazon S3 API requests. All other AWS services will use a fixed +// expiration time of 15 minutes. +// +// It is invalid to create a presigned URL with a expire duration 0 or less. An +// error is returned if expire duration is 0 or less. +// +// Returns the URL string for the API operation with signature in the query string, +// and the HTTP headers that were included in the signature. These headers must +// be included in any HTTP request made with the presigned URL. +// +// To prevent hoisting any headers to the query string set NotHoist to true on +// this Request value prior to calling PresignRequest. +func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) { + r = r.copy() + return getPresignedURL(r, expire) +} + +// IsPresigned returns true if the request represents a presigned API url. +func (r *Request) IsPresigned() bool { + return r.ExpireTime != 0 +} + +func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) { + if expire <= 0 { + return "", nil, awserr.New( + ErrCodeInvalidPresignExpire, + "presigned URL requires an expire duration greater than 0", + nil, + ) + } + + r.ExpireTime = expire + + if r.Operation.BeforePresignFn != nil { + if err := r.Operation.BeforePresignFn(r); err != nil { + return "", nil, err + } + } + + if err := r.Sign(); err != nil { + return "", nil, err + } + + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +const ( + notRetrying = "not retrying" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. 
+// Any additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", notRetrying, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request, returning error if errors are encountered. +// +// Sign will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", notRetrying, r.Error) + return r.Error + } + + SanitizeHostForHeader(r.HTTPRequest) + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) { + if r.streamingBody != nil { + return r.streamingBody, nil + } + + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody, err = newOffsetReader(r.Body, r.BodyStart) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to get next request body reader", err) + } + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := aws.SeekerLen(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, + "failed to compute request body size", err) + } + + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request, returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. 
If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Ensure a non-nil HTTPResponse parameter is set to ensure handlers + // checking for HTTPResponse values, don't fail. + if r.HTTPResponse == nil { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + Body: ioutil.NopCloser(&bytes.Buffer{}), + } + } + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } + + for { + r.Error = nil + r.AttemptTime = time.Now() + + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err + } + + if err := r.sendRequest(); err == nil { + return nil + } + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + + if r.Error != nil || !aws.BoolValue(r.Retryable) { + return r.Error + } + + if err := r.prepareRetry(); err != nil { + r.Error = err + return err + } + } +} + +func (r *Request) prepareRetry() error { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + if err := r.Error; err != nil { + return awserr.New(ErrCodeSerialization, + "failed to prepare body for retry", err) + + } + + // Closing response body to ensure that no response body is leaked + // between retry attempts. + if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + + return nil +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. 
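+// For example, a caller-defined Option (hypothetical, not part of this
+// package) can append a product token to a single operation:
+//
+//	withTool := request.Option(func(r *request.Request) {
+//		request.AddToUserAgent(r, "my-tool/1.2.3")
+//	})
+//	svc.PutObjectWithContext(ctx, params, withTool)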
+func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + if r.URL == nil { + return "" + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. +// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 00000000000..5921b8ff2ab --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,40 @@ +//go:build !go1.8 +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. 
SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 00000000000..ea643c9c44b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,37 @@ +//go:build go1.8 +// +build go1.8 + +package request + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// NoBody is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = awserr.New(ErrCodeSerialization, + "failed to reset request body", err) + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 00000000000..d8c5053025c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,15 @@ +//go:build go1.7 +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 00000000000..49a243ef2d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,15 @@ +//go:build !go1.7 +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. 
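+// Callers normally reach this through Request.SetContext. A sketch of typical
+// use on Go 1.7+, with hypothetical svc and params:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	req, _ := svc.GetItemRequest(params)
+//	req.SetContext(ctx)
+//	err := req.Send() // canceled automatically if the deadline passes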
+func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 00000000000..64784e16f3d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,266 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// for p.Next() { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// // ... +// // break out of loop to stop fetching additional pages +// } +// +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool + + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
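+// Putting it together, a sketch of driving a Pagination by hand, with
+// hypothetical svc and params (the generated "Pages" methods wrap this):
+//
+//	p := request.Pagination{
+//		NewRequest: func() (*request.Request, error) {
+//			req, _ := svc.ListTablesRequest(params)
+//			return req, nil
+//		},
+//	}
+//	for p.Next() {
+//		page := p.Page().(*dynamodb.ListTablesOutput)
+//		// process page.TableNames ...
+//	}
+//	return p.Err()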
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if !v { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { + tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } + } + + tokenAdded = true + tokens = append(tokens, v) + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. 
Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 00000000000..3f0001f9181 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,309 @@ +package request + +import ( + "net" + "net/url" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer provides the interface drive the SDK's request retry behavior. The +// Retryer implementation is responsible for implementing exponential backoff, +// and determine if a request API error should be retried. +// +// client.DefaultRetryer is the SDK's default implementation of the Retryer. It +// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to +// determine if the request is retried. +type Retryer interface { + // RetryRules return the retry delay that should be used by the SDK before + // making another request attempt for the failed request. + RetryRules(*Request) time.Duration + + // ShouldRetry returns if the failed request is retryable. + // + // Implementations may consider request attempt count when determining if a + // request is retryable, but the SDK will use MaxRetries to limit the + // number of attempts a request are made. + ShouldRetry(*Request) bool + + // MaxRetries is the number of times a request may be retried before + // failing. + MaxRetries() int +} + +// WithRetryer sets a Retryer value to the given Config returning the Config +// value for chaining. The value must not be nil. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + if retryer == nil { + if cfg.Logger != nil { + cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. 
Replacing with retry disabled Retryer.") + } + retryer = noOpRetryer{} + } + cfg.Retryer = retryer + return cfg + +} + +// noOpRetryer is a internal no op retryer used when a request is created +// without a retryer. +// +// Provides a retryer that performs no retries. +// It should be used when we do not want retries to be performed. +type noOpRetryer struct{} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API; For NoOpRetryer the MaxRetries will always be zero. +func (d noOpRetryer) MaxRetries() int { + return 0 +} + +// ShouldRetry will always return false for NoOpRetryer, as it should never retry. +func (d noOpRetryer) ShouldRetry(_ *Request) bool { + return false +} + +// RetryRules returns the delay duration before retrying this request again; +// since NoOpRetryer does not retry, RetryRules always returns 0. +func (d noOpRetryer) RetryRules(_ *Request) time.Duration { + return 0 +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + ErrCodeRequestError: {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "RequestThrottledException": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, + "EC2ThrottledException": {}, // EC2 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporary); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. 
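+// A caller-supplied Retryer can build on this classification. A minimal
+// sketch of the Retryer interface above, using a hypothetical type name:
+//
+//	type linearRetryer struct{ max int }
+//
+//	func (r linearRetryer) MaxRetries() int { return r.max }
+//	func (r linearRetryer) ShouldRetry(req *request.Request) bool {
+//		return request.IsErrorRetryable(req.Error)
+//	}
+//	func (r linearRetryer) RetryRules(req *request.Request) time.Duration {
+//		return time.Duration(req.RetryCount+1) * 100 * time.Millisecond
+//	}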
+func IsErrorRetryable(err error) bool { + if err == nil { + return false + } + return shouldRetryError(err) +} + +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + if isNestedErrorRetryable(err) { + return true + } + + origErr := err.OrigErr() + var shouldRetry bool + if origErr != nil { + shouldRetry = shouldRetryError(origErr) + if err.Code() == ErrCodeRequestError && !shouldRetry { + return false + } + } + if isCodeRetryable(err.Code()) { + return true + } + return shouldRetry + + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true + } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true + } +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeThrottle(aerr.Code()) + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry +// error. Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + return isCodeExpiredCreds(aerr.Code()) + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + if isErrCode(r.Error, r.RetryErrorCodes) { + return true + } + + // HTTP response status code 501 should not be retried. + // 501 represents Not Implemented which means the request method is not + // supported by the server and cannot be handled. + if r.HTTPResponse != nil { + // HTTP response status code 500 represents internal server error and + // should be retried without any throttle. + if r.HTTPResponse.StatusCode == 500 { + return true + } + } + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its +// code. Returns false if the request has no Error set. 
+// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + if isErrCode(r.Error, r.ThrottleErrorCodes) { + return true + } + + if r.HTTPResponse != nil { + switch r.HTTPResponse.StatusCode { + case + 429, // error caused due to too many requests + 502, // Bad Gateway error should be throttled + 503, // caused when service is unavailable + 504: // error occurred due to gateway timeout + return true + } + } + + return IsErrorThrottle(r.Error) +} + +func isErrCode(err error, codes []string) bool { + if aerr, ok := err.(awserr.Error); ok && aerr != nil { + for _, code := range codes { + if code == aerr.Code() { + return true + } + } + } + + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 00000000000..09a44eb987a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. 
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 00000000000..8630683f317 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,286 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" + // ParamMaxLenErrCode is the error code for value being too long. + ParamMaxLenErrCode = "ParamMaxLenError" + + // ParamFormatErrCode is the error code for a field with invalid + // format or characters. + ParamFormatErrCode = "ParamFormatInvalidError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. 
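+// A typical Validate method on an API input accumulates these errors; a
+// sketch with a hypothetical input type, mirroring what the code generator
+// emits:
+//
+//	func (s *PutWidgetInput) Validate() error {
+//		invalidParams := request.ErrInvalidParams{Context: "PutWidgetInput"}
+//		if s.Name == nil {
+//			invalidParams.Add(request.NewErrParamRequired("Name"))
+//		}
+//		if invalidParams.Len() > 0 {
+//			return invalidParams
+//		}
+//		return nil
+//	}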
+func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. 
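+// For example (sketch), inside a Validate method like the one sketched
+// earlier, a minimum-length constraint is reported with this error type:
+//
+//	if s.TableName != nil && len(*s.TableName) < 3 {
+//		invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+//	}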
+type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinLenErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} + +// An ErrParamMaxLen represents a maximum length parameter error. +type ErrParamMaxLen struct { + errInvalidParam + max int +} + +// NewErrParamMaxLen creates a new maximum length parameter error. +func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen { + return &ErrParamMaxLen{ + errInvalidParam: errInvalidParam{ + code: ParamMaxLenErrCode, + field: field, + msg: fmt.Sprintf("maximum size of %v, %v", max, value), + }, + max: max, + } +} + +// MaxLen returns the field's required minimum length. +func (e *ErrParamMaxLen) MaxLen() int { + return e.max +} + +// An ErrParamFormat represents a invalid format parameter error. +type ErrParamFormat struct { + errInvalidParam + format string +} + +// NewErrParamFormat creates a new invalid format parameter error. +func NewErrParamFormat(field string, format, value string) *ErrParamFormat { + return &ErrParamFormat{ + errInvalidParam: errInvalidParam{ + code: ParamFormatErrCode, + field: field, + msg: fmt.Sprintf("format %v, %v", format, value), + }, + format: format, + } +} + +// Format returns the field's required format. +func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 00000000000..4601f883cc5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. 
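+// For example (sketch), the generated WaitUntil*WithContext helpers accept
+// these options:
+//
+//	err := svc.WaitUntilTableExistsWithContext(ctx,
+//		&dynamodb.DescribeTableInput{TableName: aws.String("Widgets")},
+//		request.WithWaiterMaxAttempts(10),
+//		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
+//	)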
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. 
+// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000000..ea8e3537658 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,333 @@ +package session + +import ( + "fmt" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/ssocreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" + "github.com/aws/aws-sdk-go/service/ssooidc" + "github.com/aws/aws-sdk-go/service/sts" +) + +// CredentialsProviderOptions specifies additional options for configuring +// credentials providers. +type CredentialsProviderOptions struct { + // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider, + // such as setting its ExpiryWindow. + WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider) + + // ProcessProviderOptions configures a ProcessProvider, + // such as setting its Timeout. + ProcessProviderOptions func(*processcreds.ProcessProvider) +} + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + switch { + case len(sessOpts.Profile) != 0: + // User explicitly provided a Profile in the session's configuration + // so load that profile from shared config first. + // Github(aws/aws-sdk-go#2727) + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + + case envCfg.Creds.HasKeys(): + // Environment credentials + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + + case len(envCfg.WebIdentityTokenFilePath) != 0: + // Web identity token from environment, RoleARN required to also be + // set. + return assumeWebIdentity(cfg, handlers, + envCfg.WebIdentityTokenFilePath, + envCfg.RoleARN, + envCfg.RoleSessionName, + sessOpts.CredentialsProviderOptions, + ) + + default: + // Fallback to the "default" credential resolution chain. + return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) + } +} + +// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but +// 'AWS_ROLE_ARN' was not set. +var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil) + +// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but +// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set. 
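+// Both variables must be set together for the web identity path above to be
+// taken; for example (values are placeholders):
+//
+//	AWS_ROLE_ARN=arn:aws:iam::123456789012:role/pod-role
+//	AWS_WEB_IDENTITY_TOKEN_FILE=/var/run/secrets/eks.amazonaws.com/serviceaccount/token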
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+	filepath string,
+	roleARN, sessionName string,
+	credOptions *CredentialsProviderOptions,
+) (*credentials.Credentials, error) {
+
+	if len(filepath) == 0 {
+		return nil, WebIdentityEmptyTokenFilePathErr
+	}
+
+	if len(roleARN) == 0 {
+		return nil, WebIdentityEmptyRoleARNErr
+	}
+
+	svc := sts.New(&Session{
+		Config:   cfg,
+		Handlers: handlers.Copy(),
+	})
+
+	var optFns []func(*stscreds.WebIdentityRoleProvider)
+	if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil {
+		optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions)
+	}
+
+	p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...)
+	return credentials.NewCredentials(p), nil
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch {
+	case sharedCfg.SourceProfile != nil:
+		// Assume IAM role with credentials source from a different profile.
+		creds, err = resolveCredsFromProfile(cfg, envCfg,
+			*sharedCfg.SourceProfile, handlers, sessOpts,
+		)
+
+	case sharedCfg.Creds.HasKeys():
+		// Static Credentials from Shared Config/Credentials file.
+		creds = credentials.NewStaticCredentialsFromCreds(
+			sharedCfg.Creds,
+		)
+
+	case len(sharedCfg.CredentialSource) != 0:
+		creds, err = resolveCredsFromSource(cfg, envCfg,
+			sharedCfg, handlers, sessOpts,
+		)
+
+	case len(sharedCfg.WebIdentityTokenFile) != 0:
+		// Credentials from Assume Web Identity token require an IAM Role, and
+		// that role will be assumed. May be wrapped with another assume role
+		// via SourceProfile.
+		return assumeWebIdentity(cfg, handlers,
+			sharedCfg.WebIdentityTokenFile,
+			sharedCfg.RoleARN,
+			sharedCfg.RoleSessionName,
+			sessOpts.CredentialsProviderOptions,
+		)
+
+	case sharedCfg.hasSSOConfiguration():
+		creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
+
+	case len(sharedCfg.CredentialProcess) != 0:
+		// Get credentials from CredentialProcess
+		var optFns []func(*processcreds.ProcessProvider)
+		if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil {
+			optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions)
+		}
+		creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...)
+
+	default:
+		// Fallback to the default credentials provider, include mock errors for
+		// the credential chain so the user can identify why credentials failed to
+		// be retrieved.
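+		// The chain below mirrors the default resolution order: environment
+		// credentials and the shared credentials file are represented by
+		// explanatory error stubs (both were already found to be absent),
+		// followed by the remote provider (EC2 instance metadata, or the ECS
+		// container endpoint when AWS_CONTAINER_CREDENTIALS_RELATIVE_URI is
+		// set).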
+		creds = credentials.NewCredentials(&credentials.ChainProvider{
+			VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+			Providers: []credentials.Provider{
+				&credProviderError{
+					Err: awserr.New("EnvAccessKeyNotFound",
+						"failed to find credentials in the environment.", nil),
+				},
+				&credProviderError{
+					Err: awserr.New("SharedCredsLoad",
+						fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+				},
+				defaults.RemoteCredProvider(*cfg, handlers),
+			},
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if len(sharedCfg.RoleARN) > 0 {
+		cfgCp := *cfg
+		cfgCp.Credentials = creds
+		return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+	}
+
+	return creds, nil
+}
+
+func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) {
+	if err := sharedCfg.validateSSOConfiguration(); err != nil {
+		return nil, err
+	}
+
+	var optFns []func(provider *ssocreds.Provider)
+	cfgCopy := cfg.Copy()
+
+	if sharedCfg.SSOSession != nil {
+		cfgCopy.Region = &sharedCfg.SSOSession.SSORegion
+		cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name)
+		if err != nil {
+			return nil, err
+		}
+		// create oidcClient with AnonymousCredentials to avoid recursively resolving credentials
+		mySession := Must(NewSession(&aws.Config{
+			Credentials: credentials.AnonymousCredentials,
+		}))
+		oidcClient := ssooidc.New(mySession, cfgCopy)
+		tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath)
+		optFns = append(optFns, func(p *ssocreds.Provider) {
+			p.TokenProvider = tokenProvider
+			p.CachedTokenFilepath = cachedPath
+		})
+	} else {
+		cfgCopy.Region = &sharedCfg.SSORegion
+	}
+
+	return ssocreds.NewCredentials(
+		&Session{
+			Config:   cfgCopy,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.SSOAccountID,
+		sharedCfg.SSORoleName,
+		sharedCfg.SSOStartURL,
+		optFns...,
+	), nil
+}
+
+// valid credential source values
+const (
+	credSourceEc2Metadata  = "Ec2InstanceMetadata"
+	credSourceEnvironment  = "Environment"
+	credSourceECSContainer = "EcsContainer"
+)
+
+func resolveCredsFromSource(cfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+	switch sharedCfg.CredentialSource {
+	case credSourceEc2Metadata:
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	case credSourceEnvironment:
+		creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+	case credSourceECSContainer:
+		if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+			return nil, ErrSharedConfigECSContainerEnvVarEmpty
+		}
+
+		p := defaults.RemoteCredProvider(*cfg, handlers)
+		creds = credentials.NewCredentials(p)
+
+	default:
+		return nil, ErrSharedConfigInvalidCredSource
+	}
+
+	return creds, nil
+}
+
+func credsFromAssumeRole(cfg aws.Config,
+	handlers request.Handlers,
+	sharedCfg sharedConfig,
+	sessOpts Options,
+) (*credentials.Credentials, error) {
+
+	if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
+		// AssumeRole Token provider is required if doing Assume Role
+		// with MFA.
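+		// Callers typically satisfy this by setting
+		// Options.AssumeRoleTokenProvider, e.g. stscreds.StdinTokenProvider.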
+		return nil, AssumeRoleTokenProviderNotSetError{}
+	}
+
+	return stscreds.NewCredentials(
+		&Session{
+			Config:   &cfg,
+			Handlers: handlers.Copy(),
+		},
+		sharedCfg.RoleARN,
+		func(opt *stscreds.AssumeRoleProvider) {
+			opt.RoleSessionName = sharedCfg.RoleSessionName
+
+			if sessOpts.AssumeRoleDuration == 0 &&
+				sharedCfg.AssumeRoleDuration != nil &&
+				*sharedCfg.AssumeRoleDuration/time.Minute > 15 {
+				opt.Duration = *sharedCfg.AssumeRoleDuration
+			} else if sessOpts.AssumeRoleDuration != 0 {
+				opt.Duration = sessOpts.AssumeRoleDuration
+			}
+
+			// Assume role with external ID
+			if len(sharedCfg.ExternalID) > 0 {
+				opt.ExternalID = aws.String(sharedCfg.ExternalID)
+			}
+
+			// Assume role with MFA
+			if len(sharedCfg.MFASerial) > 0 {
+				opt.SerialNumber = aws.String(sharedCfg.MFASerial)
+				opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+			}
+		},
+	), nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session where the MFAToken option was not set, but shared config is
+// configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+	return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+	return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+	Err error
+}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+	return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
new file mode 100644
index 00000000000..4390ad52f49
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
@@ -0,0 +1,28 @@
+//go:build go1.13
+// +build go1.13
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		ForceAttemptHTTP2:     true,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
new file mode 100644
index 00000000000..668565bea0c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
@@ -0,0 +1,27 @@
+//go:build !go1.13 && go1.7
+// +build !go1.13,go1.7
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
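+// This pre-Go 1.13 variant omits ForceAttemptHTTP2, which was added to
+// net/http's Transport in Go 1.13.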
+func getCustomTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
new file mode 100644
index 00000000000..e101aa6b6c0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
@@ -0,0 +1,23 @@
+//go:build !go1.6 && go1.5
+// +build !go1.6,go1.5
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
new file mode 100644
index 00000000000..b5fcbe0d1e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
@@ -0,0 +1,24 @@
+//go:build !go1.7 && go1.6
+// +build !go1.7,go1.6
+
+package session
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// Transport that should be used when a custom CA bundle is specified with the
+// SDK.
+func getCustomTransport() *http.Transport {
+	return &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644
index 00000000000..ff3cc012ae3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -0,0 +1,367 @@
+/*
+Package session provides configuration for the SDK's service clients. Sessions
+can be shared across service clients that share the same base configuration.
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. Sessions should be cached when possible, because creating a new
+Session will load all configuration values from the environment and config
+files each time the Session is created. Sharing the Session value across all of
+your service clients will ensure the configuration is loaded the fewest number
+of times possible.
+
+Session options from Shared Config
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. Using the NewSessionWithOptions with
+SharedConfigState set to SharedConfigEnable will create the session as if the
+AWS_SDK_LOAD_CONFIG environment variable was set.
+
+Credential and config loading order
+
+The Session will attempt to load configuration and credentials from the
+environment, configuration files, and other credential sources. The order
+configuration is loaded in is:
+
+	* Environment Variables
+	* Shared Credentials file
+	* Shared Configuration file (if SharedConfig is enabled)
+	* EC2 Instance Metadata (credentials only)
+
+The Environment variables for credentials will have precedence over shared
+config even if SharedConfig is enabled. To override this behavior, and use
+shared config credentials instead, specify session.Options.Profile (e.g.
+when using credential_source=Environment to assume a role).
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Profile: "myProfile",
+	})
+
+Creating Sessions
+
+Creating a Session without additional options will load the credentials,
+region, and profile from the environment and shared config automatically. See
+the "Environment Variables" section for information on environment variables
+used by Session.
+
+	// Create Session
+	sess, err := session.NewSession()
+
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded, config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+	// Create a Session with a custom region
+	sess, err := session.NewSession(&aws.Config{
+		Region: aws.String("us-west-2"),
+	})
+
+Use NewSessionWithOptions to provide additional configuration driving how the
+Session's configuration will be loaded, such as specifying the shared config
+profile, or overriding the shared config state (AWS_SDK_LOAD_CONFIG).
+
+	// Equivalent to session.NewSession()
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Options
+	})
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		// Specify profile to load for the session's config
+		Profile: "profile_name",
+
+		// Provide SDK Config options, such as Region.
+		Config: aws.Config{
+			Region: aws.String("us-west-2"),
+		},
+
+		// Force enable Shared Config support
+		SharedConfigState: session.SharedConfigEnable,
+	})
+
+Adding Handlers
+
+You can add handlers to a session to decorate API operations (e.g. adding HTTP
+headers). All clients that use the Session receive a copy of the Session's
+handlers. For example, the following request handler added to the Session logs
+every request made.
+
+	// Create a session, and add additional handlers for all service
+	// clients created with the Session to inherit. Adds logging handler.
+	sess := session.Must(session.NewSession())
+
+	sess.Handlers.Send.PushFront(func(r *request.Request) {
+		// Log every request made and its payload
+		logger.Printf("Request: %s/%s, Params: %s",
+			r.ClientInfo.ServiceName, r.Operation, r.Params)
+	})
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's
+(~/.aws/credentials) credentials values, and all other config is provided by
+the environment variables, SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
+option is used to create the Session the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file
+(~/.aws/config).
+
+Credentials are the values the SDK uses to authenticate requests with AWS
+Services. When specified in a file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. They will be ignored if both are not present.
+aws_session_token is an optional field that can be provided in addition to the
+other two fields.
+
+	aws_access_key_id = AKID
+	aws_secret_access_key = SECRET
+	aws_session_token = TOKEN
+
+	; region only supported if SharedConfigEnabled.
+	region = us-east-1
+
+Assume Role configuration
+
+The role_arn field allows you to configure the SDK to assume an IAM role using
+a set of credentials from another source, such as when paired with static
+credentials or the "source_profile", "credential_process", or
+"credential_source" fields. If "role_arn" is provided, a source of credentials
+must also be specified, such as "source_profile", "credential_source", or
+"credential_process".
+
+	role_arn = arn:aws:iam:::role/
+	source_profile = profile_with_creds
+	external_id = 1234
+	mfa_serial = 
+	role_session_name = session_name
+
+
+The SDK supports assuming a role with an MFA token. If "mfa_serial" is set, you
+must also set the Session Option.AssumeRoleTokenProvider. The Session will fail
+to load if the AssumeRoleTokenProvider is not specified.
+
+	sess := session.Must(session.NewSessionWithOptions(session.Options{
+		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+	}))
+
+To set up Assume Role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set both Access Key ID and Secret Access
+Key must be provided. A Session Token can optionally also be provided, but is
+not required.
+
+	# Access Key ID
+	AWS_ACCESS_KEY_ID=AKID
+	AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+	# Secret Access Key
+	AWS_SECRET_ACCESS_KEY=SECRET
+	AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+	# Session Token
+	AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+	AWS_REGION=us-east-1
+
+	# AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_REGION is not also set.
+	AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+	AWS_PROFILE=my_profile
+
+	# AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	# and AWS_PROFILE is not also set.
+	AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded so the shared
+credentials will have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+	AWS_SDK_LOAD_CONFIG=1
+
+Custom Shared Config and Credential Files
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+	AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+	AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Custom CA Bundle
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+	AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to also enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and a custom HTTP client, the HTTP client needs to be provided
+when creating the session, not the service client.
+
+Custom Client TLS Certificate
+
+The SDK supports the environment and session option being configured with
+Client TLS certificates that are sent as a part of the client's TLS handshake
+for client authentication. If used, both Cert and Key values are required. If
+one is missing, or the contents of either file fail to load, an error will be
+returned.
+
+HTTP Client's Transport concrete implementation must be a http.Transport
+or creating the session will fail.
+
+	AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		ClientTLSCert: myCertFile,
+		ClientTLSKey:  myKeyFile,
+	})
+
+Custom EC2 IMDS Endpoint
+
+The endpoint of the EC2 IMDS client can be configured via the environment
+variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
+Session. See Options.EC2IMDSEndpoint for more details.
+
+	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+
+If using a URL with an IPv6 address literal, the IPv6 address
+component must be enclosed in square brackets.
+
+	AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+
+The custom EC2 IMDS endpoint can also be specified via the Session options.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		EC2IMDSEndpoint: "http://[::1]",
+	})
+
+FIPS and DualStack Endpoints
+
+The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack.
+
+You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a FIPS endpoint, set the AWS_USE_FIPS_ENDPOINT environment variable to true or false to enable
+or disable FIPS endpoint resolution.
+
+	AWS_USE_FIPS_ENDPOINT=true
+
+To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable
+or disable FIPS endpoint resolution.
+
+	[profile myprofile]
+	region=us-west-2
+	use_fips_endpoint=true
+
+To configure a FIPS endpoint programmatically
+
+	// Option 1: Configure it on a session for all clients
+	sess, err := session.NewSessionWithOptions(session.Options{
+		UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess)
+
+	// Option 2: Configure it per client
+	sess, err := session.NewSession()
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess, &aws.Config{
+		UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
+	})
+
+You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config),
+or programmatically.
+
+To configure a DualStack endpoint, set the AWS_USE_DUALSTACK_ENDPOINT environment variable to true or false to
+enable or disable DualStack endpoint resolution.
+
+	AWS_USE_DUALSTACK_ENDPOINT=true
+
+To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable
+or disable DualStack endpoint resolution.
+
+	[profile myprofile]
+	region=us-west-2
+	use_dualstack_endpoint=true
+
+To configure a DualStack endpoint programmatically
+
+	// Option 1: Configure it on a session for all clients
+	sess, err := session.NewSessionWithOptions(session.Options{
+		UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
+	})
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess)
+
+	// Option 2: Configure it per client
+	sess, err := session.NewSession()
+	if err != nil {
+		// handle error
+	}
+
+	client := s3.New(sess, &aws.Config{
+		UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
+	})
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644
index 00000000000..93bb5de6470
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -0,0 +1,499 @@
+package session
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
+// envConfig is a collection of environment values the SDK will read its
+// setup config from. All environment values are optional. But some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+	// Environment configuration values. If set both Access Key ID and Secret Access
+	// Key must be provided. A Session Token can optionally also be provided, but is
+	// not required.
+	//
+	// # Access Key ID
+	// AWS_ACCESS_KEY_ID=AKID
+	// AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+	//
+	// # Secret Access Key
+	// AWS_SECRET_ACCESS_KEY=SECRET
+	// AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+	//
+	// # Session Token
+	// AWS_SESSION_TOKEN=TOKEN
+	Creds credentials.Value
+
+	// Region value will instruct the SDK where to make service API requests to. If it
+	// is not provided in the environment the region must be provided before a service
+	// client request is made.
+	//
+	// AWS_REGION=us-east-1
+	//
+	// # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_REGION is not also set.
+	// AWS_DEFAULT_REGION=us-east-1
+	Region string
+
+	// Profile name the SDK should use when loading shared configuration from the
+	// shared configuration files. If not provided "default" will be used as the
+	// profile name.
+	//
+	// AWS_PROFILE=my_profile
+	//
+	// # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+	// # and AWS_PROFILE is not also set.
+	// AWS_DEFAULT_PROFILE=my_profile
+	Profile string
+
+	// SDK load config instructs the SDK to load the shared config in addition to
+	// shared credentials. This also expands the configuration loaded from the shared
+	// credentials to have parity with the shared config file. This also enables
+	// Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+	// env values as well.
+	//
+	// AWS_SDK_LOAD_CONFIG=1
+	EnableSharedConfig bool
+
+	// Shared credentials file path can be set to instruct the SDK to use an alternate
+	// file for the shared credentials. If not set the file will be loaded from
+	// $HOME/.aws/credentials on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\credentials on Windows.
+	//
+	// AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+	SharedCredentialsFile string
+
+	// Shared config file path can be set to instruct the SDK to use an alternate
+	// file for the shared config. If not set the file will be loaded from
+	// $HOME/.aws/config on Linux/Unix based systems, and
+	// %USERPROFILE%\.aws\config on Windows.
+	//
+	// AWS_CONFIG_FILE=$HOME/my_shared_config
+	SharedConfigFile string
+
+	// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
+	// that the SDK will use instead of the system's root CA bundle.
+	// Only use this if you want to configure the SDK to use a custom set
+	// of CAs.
+	//
+	// Enabling this option will attempt to merge the Transport
+	// into the SDK's HTTP client. If the client's Transport is
+	// not a http.Transport an error will be returned. If the
+	// Transport's TLS config is set this option will cause the
+	// SDK to overwrite the Transport's TLS config's RootCAs value.
+	//
+	// Setting a custom HTTPClient in the aws.Config options will override this setting.
+	// To use this option and a custom HTTP client, the HTTP client needs to be provided
+	// when creating the session, not the service client.
+	//
+	// AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+	CustomCABundle string
+
+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+	// when making requests. The certificate must be paired with a TLS client key file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+	// when making requests. The key must be paired with a TLS client certificate file.
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey string
+
+	csmEnabled  string
+	CSMEnabled  *bool
+	CSMPort     string
+	CSMHost     string
+	CSMClientID string
+
+	// Enables endpoint discovery via environment variables.
+	//
+	// AWS_ENABLE_ENDPOINT_DISCOVERY=true
+	EnableEndpointDiscovery *bool
+	enableEndpointDiscovery string
+
+	// Specifies the WebIdentity token the SDK should use to assume a role
+	// with.
+	//
+	// AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+	WebIdentityTokenFilePath string
+
+	// Specifies the IAM role arn to use when assuming a role.
+	//
+	// AWS_ROLE_ARN=role_arn
+	RoleARN string
+
+	// Specifies the IAM role session name to use when assuming a role.
+	//
+	// AWS_ROLE_SESSION_NAME=session_name
+	RoleSessionName string
+
+	// Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint
+	// for a service.
+	//
+	// AWS_STS_REGIONAL_ENDPOINTS=regional
+	// Valid values are `regional` or `legacy`.
+	STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+	// Specifies the S3 Regional Endpoint flag for the SDK to resolve the
+	// endpoint for a service.
+	//
+	// AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
+	// Valid values are `regional` or `legacy`.
+	S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	// AWS_S3_USE_ARN_REGION=true
+	S3UseARNRegion bool
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	EC2IMDSEndpoint string
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+	EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+	// Specifies that IMDS clients should not fall back to IMDSv1 if token
+	// requests fail.
+	//
+	// AWS_EC2_METADATA_V1_DISABLED=true
+	EC2IMDSv1Disabled *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	// AWS_USE_DUALSTACK_ENDPOINT=true
+	UseDualStackEndpoint endpoints.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	// AWS_USE_FIPS_ENDPOINT=true
+	UseFIPSEndpoint endpoints.FIPSEndpointState
+}
+
+var (
+	csmEnabledEnvKey = []string{
+		"AWS_CSM_ENABLED",
+	}
+	csmHostEnvKey = []string{
+		"AWS_CSM_HOST",
+	}
+	csmPortEnvKey = []string{
+		"AWS_CSM_PORT",
+	}
+	csmClientIDEnvKey = []string{
+		"AWS_CSM_CLIENT_ID",
+	}
+	credAccessEnvKey = []string{
+		"AWS_ACCESS_KEY_ID",
+		"AWS_ACCESS_KEY",
+	}
+	credSecretEnvKey = []string{
+		"AWS_SECRET_ACCESS_KEY",
+		"AWS_SECRET_KEY",
+	}
+	credSessionEnvKey = []string{
+		"AWS_SESSION_TOKEN",
+	}
+
+	enableEndpointDiscoveryEnvKey = []string{
+		"AWS_ENABLE_ENDPOINT_DISCOVERY",
+	}
+
+	regionEnvKeys = []string{
+		"AWS_REGION",
+		"AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	profileEnvKeys = []string{
+		"AWS_PROFILE",
+		"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+	}
+	sharedCredsFileEnvKey = []string{
+		"AWS_SHARED_CREDENTIALS_FILE",
+	}
+	sharedConfigFileEnvKey = []string{
+		"AWS_CONFIG_FILE",
+	}
+	webIdentityTokenFilePathEnvKey = []string{
+		"AWS_WEB_IDENTITY_TOKEN_FILE",
+	}
+	roleARNEnvKey = []string{
+		"AWS_ROLE_ARN",
+	}
+	roleSessionNameEnvKey = []string{
+		"AWS_ROLE_SESSION_NAME",
+	}
+	stsRegionalEndpointKey = []string{
+		"AWS_STS_REGIONAL_ENDPOINTS",
+	}
+	s3UsEast1RegionalEndpoint = []string{
+		"AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
+	}
+	s3UseARNRegionEnvKey = []string{
+		"AWS_S3_USE_ARN_REGION",
+	}
+	ec2IMDSEndpointEnvKey = []string{
+		"AWS_EC2_METADATA_SERVICE_ENDPOINT",
+	}
+	ec2IMDSEndpointModeEnvKey = []string{
+		"AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE",
+	}
+	ec2MetadataV1DisabledEnvKey = []string{
+		"AWS_EC2_METADATA_V1_DISABLED",
+	}
+	useCABundleKey = []string{
+		"AWS_CA_BUNDLE",
+	}
+	useClientTLSCert = []string{
+		"AWS_SDK_GO_CLIENT_TLS_CERT",
+	}
+	useClientTLSKey = []string{
+		"AWS_SDK_GO_CLIENT_TLS_KEY",
+	}
+	awsUseDualStackEndpoint = []string{
+		"AWS_USE_DUALSTACK_ENDPOINT",
+	}
+	awsUseFIPSEndpoint = []string{
+		"AWS_USE_FIPS_ENDPOINT",
+	}
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() (envConfig, error) {
+	enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+	return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
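+//
+// NewSessionWithOptions uses this variant when Options.SharedConfigState is
+// set to SharedConfigEnable.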
+func loadSharedEnvConfig() (envConfig, error) {
+	return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
+	cfg := envConfig{}
+
+	cfg.EnableSharedConfig = enableSharedConfig
+
+	// Static environment credentials
+	var creds credentials.Value
+	setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
+	setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
+	setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
+	if creds.HasKeys() {
+		// Require logical grouping of credentials
+		creds.ProviderName = EnvProviderName
+		cfg.Creds = creds
+	}
+
+	// Role Metadata
+	setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
+	setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
+
+	// Web identity environment variables
+	setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
+
+	// CSM environment variables
+	setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+	setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
+	setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
+	setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
+
+	if len(cfg.csmEnabled) != 0 {
+		v, _ := strconv.ParseBool(cfg.csmEnabled)
+		cfg.CSMEnabled = &v
+	}
+
+	regionKeys := regionEnvKeys
+	profileKeys := profileEnvKeys
+	if !cfg.EnableSharedConfig {
+		regionKeys = regionKeys[:1]
+		profileKeys = profileKeys[:1]
+	}
+
+	setFromEnvVal(&cfg.Region, regionKeys)
+	setFromEnvVal(&cfg.Profile, profileKeys)
+
+	// Endpoint discovery is considered enabled unless the value is exactly "false".
+	setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
+	if len(cfg.enableEndpointDiscovery) > 0 {
+		cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
+	}
+
+	setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+	setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+	if len(cfg.SharedCredentialsFile) == 0 {
+		cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+	}
+	if len(cfg.SharedConfigFile) == 0 {
+		cfg.SharedConfigFile = defaults.SharedConfigFilename()
+	}
+
+	setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
+	setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
+	setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)
+
+	var err error
+	// STS Regional Endpoint variable
+	for _, k := range stsRegionalEndpointKey {
+		if v := os.Getenv(k); len(v) != 0 {
+			cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v)
+			if err != nil {
+				return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
+			}
+		}
+	}
+
+	// S3 Regional Endpoint variable
+	for _, k := range s3UsEast1RegionalEndpoint {
+		if v := os.Getenv(k); len(v) != 0 {
+			cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v)
+			if err != nil {
+				return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
+			}
+		}
+	}
+
+	var s3UseARNRegion string
+	setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey)
+	if len(s3UseARNRegion) != 0 {
+		switch {
+		case strings.EqualFold(s3UseARNRegion, "false"):
+			cfg.S3UseARNRegion = false
+		case strings.EqualFold(s3UseARNRegion, "true"):
+			cfg.S3UseARNRegion = true
+		default:
+			return envConfig{}, fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true or false",
+				s3UseARNRegionEnvKey[0], s3UseARNRegion)
+		}
+	}
+
+	setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey)
+	if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil {
+		return envConfig{}, err
+	}
+	setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey)
+
+	if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil {
+		return cfg, err
+	}
+
+	if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil {
+		return cfg, err
+	}
+
+	return cfg, nil
+}
+
+func setFromEnvVal(dst *string, keys []string) {
+	for _, k := range keys {
+		if v := os.Getenv(k); len(v) != 0 {
+			*dst = v
+			break
+		}
+	}
+}
+
+func setBoolPtrFromEnvVal(dst **bool, keys []string) {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+
+		switch {
+		case strings.EqualFold(value, "false"):
+			*dst = new(bool)
+			**dst = false
+		case strings.EqualFold(value, "true"):
+			*dst = new(bool)
+			**dst = true
+		}
+	}
+}
+
+func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue
+		}
+		if err := mode.SetFromString(value); err != nil {
+			return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
+		}
+		return nil
+	}
+	return nil
+}
+
+func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, "true"):
+			*dst = endpoints.DualStackEndpointStateEnabled
+		case strings.EqualFold(value, "false"):
+			*dst = endpoints.DualStackEndpointStateDisabled
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true, false",
+				k, value)
+		}
+	}
+	return nil
+}
+
+func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error {
+	for _, k := range keys {
+		value := os.Getenv(k)
+		if len(value) == 0 {
+			continue // skip if empty
+		}
+
+		switch {
+		case strings.EqualFold(value, "true"):
+			*dst = endpoints.FIPSEndpointStateEnabled
+		case strings.EqualFold(value, "false"):
+			*dst = endpoints.FIPSEndpointStateDisabled
+		default:
+			return fmt.Errorf(
+				"invalid value for environment variable, %s=%s, need true, false",
+				k, value)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 00000000000..3c88dee526d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,1005 @@
+package session
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/csm"
+	"github.com/aws/aws-sdk-go/aws/defaults"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const (
+	// ErrCodeSharedConfig represents an error that occurs in the shared
+	// configuration logic
+	ErrCodeSharedConfig = "SharedConfigErr"
+
+	// ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle.
+	ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+	// ErrCodeLoadClientTLSCert error code for unable to load client TLS
+	// certificate or key
+	ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
+// variables are empty and Environment was set as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
+type Session struct {
+	Config   *aws.Config
+	Handlers request.Handlers
+
+	options Options
+}
+
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
+// the New method could now encounter an error when loading the configuration.
+// When the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+	// load initial config from environment
+	envCfg, envErr := loadEnvConfig()
+
+	if envCfg.EnableSharedConfig {
+		var cfg aws.Config
+		cfg.MergeIn(cfgs...)
+		s, err := NewSessionWithOptions(Options{
+			Config:            cfg,
+			SharedConfigState: SharedConfigEnable,
+		})
+		if err != nil {
+			// Old session.New expected all errors to be discovered when
+			// a request is made, and would report the errors then. This
+			// needs to be replicated if an error occurs while creating
+			// the session.
+			msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+				"Use session.NewSession to handle errors occurring during session creation."
+
+			// Session creation failed, need to report the error and prevent
+			// any requests from succeeding.
+			s = &Session{Config: defaults.Config()}
+			s.logDeprecatedNewSessionError(msg, err, cfgs)
+		}
+
+		return s
+	}
+
+	s := deprecatedNewSession(envCfg, cfgs...)
+	if envErr != nil {
+		msg := "failed to load env config"
+		s.logDeprecatedNewSessionError(msg, envErr, cfgs)
+	}
+
+	if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
+		if l := s.Config.Logger; l != nil {
+			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+		}
+	} else if csmCfg.Enabled {
+		err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+		if err != nil {
+			msg := "failed to enable CSM"
+			s.logDeprecatedNewSessionError(msg, err, cfgs)
+		}
+	}
+
+	return s
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created, such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+	opts := Options{}
+	opts.Config.MergeIn(cfgs...)
+
+	return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+	// SharedConfigStateFromEnv does not override any state of the
+	// AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+	// SharedConfigState type.
+	SharedConfigStateFromEnv SharedConfigState = iota
+
+	// SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and disables the shared config functionality.
+	SharedConfigDisable
+
+	// SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+	// and enables the shared config functionality.
+	SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+	// Provides config values for the SDK to use when creating service clients
+	// and making API requests to services. Any value set in this field
+	// will override the associated value provided by the SDK defaults,
+	// environment or config files where relevant.
+	//
+	// If not set, configuration values from SDK defaults, environment, and
+	// config files will be used.
+	Config aws.Config
+
+	// Overrides the config profile the Session should be created from. If not
+	// set the value of the environment variable will be loaded (AWS_PROFILE,
+	// or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+	//
+	// If not set and environment variables are not set the "default"
+	// (DefaultSharedConfigProfile) will be used as the profile to load the
+	// session config from.
+	Profile string
+
+	// Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+	// environment variable. By default a Session will be created using the
+	// value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+	//
+	// Setting this value to SharedConfigEnable or SharedConfigDisable
+	// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+	// and enable or disable the shared config functionality.
+	SharedConfigState SharedConfigState
+
+	// Ordered list of files the session will load configuration from.
+	// It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE.
+	SharedConfigFiles []string
+
+	// When the SDK's shared config is configured to assume a role with MFA
+	// this option is required in order to provide the mechanism that will
+	// retrieve the MFA token. There is no default value for this field. If
+	// it is not set an error will be returned when creating the session.
+	//
+	// This token provider will be called whenever the assumed role's
+	// credentials need to be refreshed. Within the context of service clients
+	// all sharing the same session the SDK will ensure calls to the token
+	// provider are atomic. When sharing a token provider across multiple
+	// sessions additional synchronization logic is needed to ensure the
+	// token providers do not introduce race conditions. It is recommended to
+	// share the session where possible.
+	//
+	// stscreds.StdinTokenProvider is a basic implementation that will prompt
+	// from stdin for the MFA token code.
+	//
+	// This field is only used if the shared configuration is enabled, and
+	// the config enables assume role with MFA via the mfa_serial field.
+	AssumeRoleTokenProvider func() (string, error)
+
+	// When the SDK's shared config is configured to assume a role this option
+	// may be provided to set the expiry duration of the STS credentials.
+	// Defaults to 15 minutes if not set as documented in the
+	// stscreds.AssumeRoleProvider.
+	AssumeRoleDuration time.Duration
+
+	// Reader for a custom Certificate Authority (CA) bundle in PEM format that
+	// the SDK will use instead of the default system's root CA bundle. Use this
+	// only if you want to replace the CA bundle the SDK uses for TLS requests.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// If the Transport's TLS config is set this option will cause the SDK
+	// to overwrite the Transport's TLS config's RootCAs value. If the CA
+	// bundle reader contains multiple certificates all of them will be loaded.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_CA_BUNDLE=$HOME/ca_bundle
+	//
+	// Can also be specified via the shared config field:
+	//
+	// ca_bundle = $HOME/ca_bundle
+	CustomCABundle io.Reader
+
+	// Reader for the TLS client certificate that should be used by the SDK's
+	// HTTP transport when making requests. The certificate must be paired with
+	// a TLS client key file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert io.Reader
+
+	// Reader for the TLS client key that should be used by the SDK's HTTP
+	// transport when making requests. The key must be paired with a TLS client
+	// certificate file. Will be ignored if both are not provided.
+	//
+	// HTTP Client's Transport concrete implementation must be a http.Transport
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	// AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey io.Reader
+
+	// The handlers that the session and all API clients will be created with.
+	// This must be a complete set of handlers. Use the defaults.Handlers()
+	// function to initialize this value before changing the handlers to be
+	// used by the SDK.
+	Handlers request.Handlers
+
+	// Allows specifying a custom endpoint to be used by the EC2 IMDS client
+	// when making requests to the EC2 IMDS API. The endpoint value should
+	// include the URI scheme. If the scheme is not present it will be defaulted to http.
+	//
+	// If unset, the EC2 IMDS client will use its default endpoint.
+	//
+	// Can also be specified via the environment variable,
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
+	//
+	// If using a URL with an IPv6 address literal, the IPv6 address
+	// component must be enclosed in square brackets.
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
+	EC2IMDSEndpoint string
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
+	//
+	// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
+	EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+	// Specifies options for creating credential providers.
+	// These are only used if the aws.Config does not already
+	// include credentials.
+	CredentialsProviderOptions *CredentialsProviderOptions
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file. Enabling the Shared Config will also allow the Session
+// to be built with retrieving credentials with AssumeRole set in the config.
+//
+//	// Equivalent to session.New
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+//	// Specify profile to load for the session's config
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		Profile: "profile_name",
+//	}))
+//
+//	// Specify profile for config and region for requests
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		Config:  aws.Config{Region: aws.String("us-east-1")},
+//		Profile: "profile_name",
+//	}))
+//
+//	// Force enable Shared Config support
+//	sess := session.Must(session.NewSessionWithOptions(session.Options{
+//		SharedConfigState: session.SharedConfigEnable,
+//	}))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+	var envCfg envConfig
+	var err error
+	if opts.SharedConfigState == SharedConfigEnable {
+		envCfg, err = loadSharedEnvConfig()
+		if err != nil {
+			return nil, fmt.Errorf("failed to load shared config, %v", err)
+		}
+	} else {
+		envCfg, err = loadEnvConfig()
+		if err != nil {
+			return nil, fmt.Errorf("failed to load environment config, %v", err)
+		}
+	}
+
+	if len(opts.Profile) != 0 {
+		envCfg.Profile = opts.Profile
+	}
+
+	switch opts.SharedConfigState {
+	case SharedConfigDisable:
+		envCfg.EnableSharedConfig = false
+	case SharedConfigEnable:
+		envCfg.EnableSharedConfig = true
+	}
+
+	return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+//	var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+	if err != nil {
+		panic(err)
+	}
+
+	return sess
+}
+
+// Wraps the endpoint resolver with a resolver that will return a custom
+// endpoint for EC2 IMDS.
+func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver {
+	return endpoints.ResolverFunc(
+		func(service, region string, opts ...func(*endpoints.Options)) (
+			endpoints.ResolvedEndpoint, error,
+		) {
+			if service == ec2MetadataServiceID && len(endpoint) > 0 {
+				return endpoints.ResolvedEndpoint{
+					URL:           endpoint,
+					SigningName:   ec2MetadataServiceID,
+					SigningRegion: region,
+				}, nil
+			} else if service == ec2MetadataServiceID {
+				opts = append(opts, func(o *endpoints.Options) {
+					o.EC2MetadataEndpointMode = mode
+				})
+			}
+			return resolver.EndpointFor(service, region, opts...)
+		})
+}
+
+func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session {
+	cfg := defaults.Config()
+	handlers := defaults.Handlers()
+
+	// Apply the passed in configs so the configuration can be applied to the
+	// default credential chain
+	cfg.MergeIn(cfgs...)
+	if cfg.EndpointResolver == nil {
+		// An endpoint resolver is required for a session to be able to provide
+		// endpoints for service client configurations.
+		cfg.EndpointResolver = endpoints.DefaultResolver()
+	}
+
+	if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) {
+		cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode)
+	}
+
+	cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+	// Reapply any passed in configs to override credentials if set
+	cfg.MergeIn(cfgs...)
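+	// The second MergeIn lets a caller-supplied Credentials value take
+	// precedence over the default credential chain installed just above.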
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+		options: Options{
+			EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
+		},
+	}
+
+	initHandlers(s)
+	return s
+}
+
+func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
+	if logger != nil {
+		logger.Log("Enabling CSM")
+	}
+
+	r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
+	if err != nil {
+		return err
+	}
+	r.InjectHandlers(handlers)
+
+	return nil
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+	cfg := defaults.Config()
+
+	handlers := opts.Handlers
+	if handlers.IsEmpty() {
+		handlers = defaults.Handlers()
+	}
+
+	// Get a merged version of the user provided config to determine if
+	// credentials were provided.
+	userCfg := &aws.Config{}
+	userCfg.MergeIn(cfgs...)
+	cfg.MergeIn(userCfg)
+
+	// Ordered config files will be loaded, with later files overwriting
+	// previous config file values.
+	var cfgFiles []string
+	if opts.SharedConfigFiles != nil {
+		cfgFiles = opts.SharedConfigFiles
+	} else {
+		cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+		if !envCfg.EnableSharedConfig {
+			// The shared config file (~/.aws/config) is only loaded if instructed
+			// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+			cfgFiles = cfgFiles[1:]
+		}
+	}
+
+	// Load additional config from file(s)
+	sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
+	if err != nil {
+		if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
+			// Special case where the user has not explicitly specified an AWS_PROFILE,
+			// or session.Options.Profile, shared config is not enabled, and the
+			// environment has credentials. Allow the shared config file to fail to
+			// load since the user has already provided credentials, and nothing else
+			// is required to be read from file.
+			// Github(aws/aws-sdk-go#2455)
+		} else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+			return nil, err
+		}
+	}
+
+	if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+		return nil, err
+	}
+
+	if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+		return nil, err
+	}
+
+	s := &Session{
+		Config:   cfg,
+		Handlers: handlers,
+		options:  opts,
+	}
+
+	initHandlers(s)
+
+	if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
+		if l := s.Config.Logger; l != nil {
+			l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
+		}
+	} else if csmCfg.Enabled {
+		err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return s, nil
+}
+
+type csmConfig struct {
+	Enabled  bool
+	Host     string
+	Port     string
+	ClientID string
+}
+
+var csmProfileName = "aws_csm"
+
+func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
+	if envCfg.CSMEnabled != nil {
+		if *envCfg.CSMEnabled {
+			return csmConfig{
+				Enabled:  true,
+				ClientID: envCfg.CSMClientID,
+				Host:     envCfg.CSMHost,
+				Port:     envCfg.CSMPort,
+			}, nil
+		}
+		return csmConfig{}, nil
+	}
+
+	sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
+	if err != nil {
+		if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+			return csmConfig{}, err
+		}
+	}
+	if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled {
+		return csmConfig{
+			Enabled:  true,
+			ClientID: sharedCfg.CSMClientID,
+			Host:     sharedCfg.CSMHost,
+			Port:     sharedCfg.CSMPort,
+		}, nil
+	}
+
+	return csmConfig{}, nil
+}
+
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+	// The CA bundle can be specified in both the environment variable and the
+	// shared config file.
+	var caBundleFilename = envCfg.CustomCABundle
+	if len(caBundleFilename) == 0 {
+		caBundleFilename = sharedCfg.CustomCABundle
+	}
+
+	// Only use the environment or shared config value if the session option
+	// is not provided.
+	customTLSOptions := map[string]struct {
+		filename string
+		field    *io.Reader
+		errCode  string
+	}{
+		"custom CA bundle PEM":   {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+		"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+		"custom client TLS key":  {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+	}
+	for name, v := range customTLSOptions {
+		if len(v.filename) != 0 && *v.field == nil {
+			f, err := os.Open(v.filename)
+			if err != nil {
+				return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+			}
+			defer f.Close()
+			*v.field = f
+		}
+	}
+
+	// Setup HTTP client with custom cert bundle if enabled
+	if opts.CustomCABundle != nil {
+		if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+			return err
+		}
+	}
+
+	// Setup HTTP client TLS certificate and key for client TLS authentication.
+	if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+		if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+			return err
+		}
+	} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+		// Do nothing if neither value is available.
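+		// Client TLS authentication is optional; the trailing else rejects a
+		// cert provided without a key, or a key without a cert.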
+
+	} else {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+				opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+	}
+
+	return nil
+}
+
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
+	var t *http.Transport
+	switch v := client.Transport.(type) {
+	case *http.Transport:
+		t = v
+	default:
+		if client.Transport != nil {
+			return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
+		}
+	}
+	if t == nil {
+		// A nil transport implies `http.DefaultTransport` should be used.
+		// Since the SDK cannot modify nor copy the `DefaultTransport`, a new
+		// transport specifying the same values is the next closest behavior.
+		t = getCustomTransport()
+	}
+
+	return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadCustomCABundle,
+			"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
+	}
+
+	p, err := loadCertPool(bundle)
+	if err != nil {
+		return err
+	}
+	if t.TLSClientConfig == nil {
+		t.TLSClientConfig = &tls.Config{}
+	}
+	t.TLSClientConfig.RootCAs = p
+
+	client.Transport = t
+
+	return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
+			"failed to read custom CA bundle PEM file", err)
+	}
+
+	p := x509.NewCertPool()
+	if !p.AppendCertsFromPEM(b) {
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
+			"failed to load custom CA bundle PEM file", nil)
+	}
+
+	return p, nil
+}
+
+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to get usable HTTP transport from client", err)
+	}
+
+	cert, err := ioutil.ReadAll(certFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS cert file", err)
+	}
+
+	key, err := ioutil.ReadAll(keyFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS key file", err)
+	}
+
+	clientCert, err := tls.X509KeyPair(cert, key)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to load x509 key pair from client cert", err)
+	}
+
+	tlsCfg := t.TLSClientConfig
+	if tlsCfg == nil {
+		tlsCfg = &tls.Config{}
+	}
+
+	tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+	t.TLSClientConfig = tlsCfg
+	client.Transport = t
+
+	return nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+	envCfg envConfig, sharedCfg sharedConfig,
+	handlers request.Handlers,
+	sessOpts Options,
+) error {
+
+	// Region if not already set by user
+	if len(aws.StringValue(cfg.Region)) == 0 {
+		if len(envCfg.Region) > 0 {
+			cfg.WithRegion(envCfg.Region)
+		} else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+			cfg.WithRegion(sharedCfg.Region)
+		}
+	}
+
+	if cfg.EnableEndpointDiscovery == nil {
+		if envCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
+		} else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
+			cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
+		}
+	}
+
+	// Regional Endpoint flag for STS endpoint resolving
+	mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
+		userCfg.STSRegionalEndpoint,
+		envCfg.STSRegionalEndpoint,
+		sharedCfg.STSRegionalEndpoint,
endpoints.LegacySTSEndpoint, + }) + + // Regional Endpoint flag for S3 endpoint resolving + mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{ + userCfg.S3UsEast1RegionalEndpoint, + envCfg.S3UsEast1RegionalEndpoint, + sharedCfg.S3UsEast1RegionalEndpoint, + endpoints.LegacyS3UsEast1Endpoint, + }) + + var ec2IMDSEndpoint string + for _, v := range []string{ + sessOpts.EC2IMDSEndpoint, + envCfg.EC2IMDSEndpoint, + sharedCfg.EC2IMDSEndpoint, + } { + if len(v) != 0 { + ec2IMDSEndpoint = v + break + } + } + + var endpointMode endpoints.EC2IMDSEndpointModeState + for _, v := range []endpoints.EC2IMDSEndpointModeState{ + sessOpts.EC2IMDSEndpointMode, + envCfg.EC2IMDSEndpointMode, + sharedCfg.EC2IMDSEndpointMode, + } { + if v != endpoints.EC2IMDSEndpointModeStateUnset { + endpointMode = v + break + } + } + + if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset { + cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) + } + + cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback + if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled) + } + if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled) + } + + cfg.S3UseARNRegion = userCfg.S3UseARNRegion + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &envCfg.S3UseARNRegion + } + if cfg.S3UseARNRegion == nil { + cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion + } + + for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} { + if v != endpoints.DualStackEndpointStateUnset { + cfg.UseDualStackEndpoint = v + break + } + } + + for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} { + if v != endpoints.FIPSEndpointStateUnset { + cfg.UseFIPSEndpoint = v + break + } + } + + // Configure credentials if not already set by the user when creating the Session. + // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers. + // ticket: P83606045 + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } + + return nil +} + +func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetSTSEndpoint { + cfg.STSRegionalEndpoint = v + break + } + } +} + +func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) { + for _, v := range values { + if v != endpoints.UnsetS3UsEast1Endpoint { + cfg.S3UsEast1RegionalEndpoint = v + break + } + } +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, copying the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. 
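+//
+// The copy's handlers are re-initialized via initHandlers, so parameter
+// validation reflects the copied config's DisableParamValidation value.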
+// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + options: s.options, + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) + + resolvedRegion := normalizeRegion(s.Config) + + region := aws.StringValue(s.Config.Region) + resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config) + if err != nil { + s.Handlers.Validate.PushBack(func(r *request.Request) { + if len(r.ClientInfo.Endpoint) != 0 { + // Error occurred while resolving endpoint, but the request + // being invoked has had an endpoint specified after the client + // was created. + return + } + r.Error = err + }) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + PartitionID: resolved.PartitionID, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, + ResolvedRegion: resolvedRegion, + } +} + +const ec2MetadataServiceID = "ec2metadata" + +func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) { + + if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 { + return endpoints.ResolvedEndpoint{ + URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)), + SigningRegion: region, + }, nil + } + + resolved, err := cfg.EndpointResolver.EndpointFor(service, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(cfg.DisableSSL) + + opt.UseDualStack = aws.BoolValue(cfg.UseDualStack) + opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint + + opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint + + // Support for STSRegionalEndpoint where the STSRegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint + + // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is + // provided in envConfig or sharedConfig with envConfig getting + // precedence. + opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + + opt.ResolvedRegion = resolvedRegion + + opt.Logger = cfg.Logger + opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated) + }, + ) + if err != nil { + return endpoints.ResolvedEndpoint{}, err + } + + return resolved, nil +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
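+
+	// Only an endpoint explicitly set on the aws.Config is used; the
+	// session's EndpointResolver is deliberately skipped here.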
+	resolvedRegion := normalizeRegion(s.Config)
+
+	var resolved endpoints.ResolvedEndpoint
+	if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+		resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+		resolved.SigningRegion = aws.StringValue(s.Config.Region)
+	}
+
+	return client.Config{
+		Config:             s.Config,
+		Handlers:           s.Handlers,
+		Endpoint:           resolved.URL,
+		SigningRegion:      resolved.SigningRegion,
+		SigningNameDerived: resolved.SigningNameDerived,
+		SigningName:        resolved.SigningName,
+		ResolvedRegion:     resolvedRegion,
+	}
+}
+
+// logDeprecatedNewSessionError logs the Session creation error and installs a
+// handler that fails any request made with the Session.
+func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
+	// Session creation failed, need to report the error and prevent
+	// any requests from succeeding.
+	s.Config.MergeIn(cfgs...)
+	s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+	s.Handlers.Validate.PushBack(func(r *request.Request) {
+		r.Error = err
+	})
+}
+
+// normalizeRegion resolves / normalizes the configured region (converts pseudo fips regions), and modifies the provided
+// config to have the equivalent options for resolution and returns the resolved region name.
+func normalizeRegion(cfg *aws.Config) (resolved string) {
+	const fipsInfix = "-fips-"
+	const fipsPrefix = "fips-"
+	const fipsSuffix = "-fips"
+
+	region := aws.StringValue(cfg.Region)
+
+	if strings.Contains(region, fipsInfix) ||
+		strings.Contains(region, fipsPrefix) ||
+		strings.Contains(region, fipsSuffix) {
+		resolved = strings.Replace(strings.Replace(strings.Replace(
+			region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1)
+		cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+	}
+
+	return resolved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644
index 00000000000..f3ce8183dd9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -0,0 +1,856 @@
+package session
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/internal/ini"
+)
+
+const (
+	// Static Credentials group
+	accessKeyIDKey  = `aws_access_key_id`     // group required
+	secretAccessKey = `aws_secret_access_key` // group required
+	sessionTokenKey = `aws_session_token`     // optional
+
+	// Assume Role Credentials group
+	roleArnKey             = `role_arn`          // group required
+	sourceProfileKey       = `source_profile`    // group required (or credential_source)
+	credentialSourceKey    = `credential_source` // group required (or source_profile)
+	externalIDKey          = `external_id`       // optional
+	mfaSerialKey           = `mfa_serial`        // optional
+	roleSessionNameKey     = `role_session_name` // optional
+	roleDurationSecondsKey = "duration_seconds"  // optional
+
+	// Prefix to be used for SSO sections. These are supposed to only exist in
+	// the shared config file, not the credentials file.
+	ssoSectionPrefix = `sso-session `
+
+	// AWS Single Sign-On (AWS SSO) group
+	ssoSessionNameKey = "sso_session"
+
+	// AWS Single Sign-On (AWS SSO) group
+	ssoAccountIDKey = "sso_account_id"
+	ssoRegionKey    = "sso_region"
+	ssoRoleNameKey  = "sso_role_name"
+	ssoStartURL     = "sso_start_url"
+
+	// CSM options
+	csmEnabledKey  = `csm_enabled`
+	csmHostKey     = `csm_host`
+	csmPortKey     = `csm_port`
+	csmClientIDKey = `csm_client_id`
+
+	// Additional Config fields
+	regionKey = `region`
+
+	// custom CA Bundle filename
+	customCABundleKey = `ca_bundle`
+
+	// endpoint discovery group
+	enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
+	// External Credential Process
+	credentialProcessKey = `credential_process` // optional
+
+	// Web Identity Token File
+	webIdentityTokenFileKey = `web_identity_token_file` // optional
+
+	// Additional config fields for regional or legacy endpoints
+	stsRegionalEndpointSharedKey = `sts_regional_endpoints`
+
+	// Additional config fields for regional or legacy endpoints
+	s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint`
+
+	// DefaultSharedConfigProfile is the default profile to be used when
+	// loading configuration from the config files if another profile name
+	// is not provided.
+	DefaultSharedConfigProfile = `default`
+
+	// S3 ARN Region Usage
+	s3UseARNRegionKey = "s3_use_arn_region"
+
+	// EC2 IMDS Endpoint Mode
+	ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
+
+	// EC2 IMDS Endpoint
+	ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+
+	// EC2 IMDSv1 disable fallback
+	ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
+	// Use DualStack Endpoint Resolution
+	useDualStackEndpoint = "use_dualstack_endpoint"
+
+	// Use FIPS Endpoint Resolution
+	useFIPSEndpointKey = "use_fips_endpoint"
+)
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+	Profile string
+
+	// Credentials values from the config file. Both aws_access_key_id and
+	// aws_secret_access_key must be provided together in the same file to be
+	// considered valid. The values will be ignored if not a complete group.
+	// aws_session_token is an optional field that can be provided if both of
+	// the other two fields are also provided.
+	//
+	//	aws_access_key_id
+	//	aws_secret_access_key
+	//	aws_session_token
+	Creds credentials.Value
+
+	CredentialSource     string
+	CredentialProcess    string
+	WebIdentityTokenFile string
+
+	// SSO session options
+	SSOSessionName string
+	SSOSession     *ssoSession
+
+	SSOAccountID string
+	SSORegion    string
+	SSORoleName  string
+	SSOStartURL  string
+
+	RoleARN            string
+	RoleSessionName    string
+	ExternalID         string
+	MFASerial          string
+	AssumeRoleDuration *time.Duration
+
+	SourceProfileName string
+	SourceProfile     *sharedConfig
+
+	// Region is the region the SDK should use for looking up AWS service
+	// endpoints and signing requests.
+	//
+	//	region
+	Region string
+
+	// CustomCABundle is the file path to a PEM file the SDK will read and
+	// use to configure the HTTP transport with additional CA certs that are
+	// not present in the platform's default CA store.
+	//
+	// This value will be ignored if the file does not exist.
+	//
+	//	ca_bundle
+	CustomCABundle string
+
+	// EnableEndpointDiscovery can be enabled in the shared config by setting
+	// endpoint_discovery_enabled to true
+	//
+	//	endpoint_discovery_enabled = true
+	EnableEndpointDiscovery *bool
+
+	// CSM Options
+	CSMEnabled  *bool
+	CSMHost     string
+	CSMPort     string
+	CSMClientID string
+
+	// Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+	//
+	//	sts_regional_endpoints = regional
+	//
+	// Valid values are `regional` or `legacy`, mapping to
+	// RegionalSTSEndpoint and LegacySTSEndpoint respectively.
+	STSRegionalEndpoint endpoints.STSRegionalEndpoint
+
+	// Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
+	//
+	//	s3_us_east_1_regional_endpoint = regional
+	//
+	// Valid values are `regional` or `legacy`, mapping to
+	// RegionalS3UsEast1Endpoint and LegacyS3UsEast1Endpoint respectively.
+	S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
+
+	// Specifies if the S3 service should allow ARNs to direct the region
+	// the client's requests are sent to.
+	//
+	//	s3_use_arn_region=true
+	S3UseARNRegion bool
+
+	// Specifies the EC2 Instance Metadata Service default endpoint selection
+	// mode (IPv4 or IPv6).
+	//
+	//	ec2_metadata_service_endpoint_mode=IPv6
+	EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
+
+	// Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
+	//
+	//	ec2_metadata_service_endpoint=http://fd00:ec2::254
+	EC2IMDSEndpoint string
+
+	// Specifies that IMDS clients should not fall back to IMDSv1 if token
+	// requests fail.
+	//
+	//	ec2_metadata_v1_disabled=true
+	EC2IMDSv1Disabled *bool
+
+	// Specifies that SDK clients must resolve a dual-stack endpoint for
+	// services.
+	//
+	//	use_dualstack_endpoint=true
+	UseDualStackEndpoint endpoints.DualStackEndpointState
+
+	// Specifies that SDK clients must resolve a FIPS endpoint for
+	// services.
+	//
+	//	use_fips_endpoint=true
+	UseFIPSEndpoint endpoints.FIPSEndpointState
+}
+
+type sharedConfigFile struct {
+	Filename string
+	IniData  ini.Sections
+}
+
+// ssoSession provides the shared configuration parameters of the sso-session
+// section.
+type ssoSession struct {
+	Name        string
+	SSORegion   string
+	SSOStartURL string
+}
+
+func (s *ssoSession) setFromIniSection(section ini.Section) {
+	updateString(&s.Name, section, ssoSessionNameKey)
+	updateString(&s.SSORegion, section, ssoRegionKey)
+	updateString(&s.SSOStartURL, section, ssoStartURL)
+}
+
+// loadSharedConfig retrieves the configuration from the list of files using
+// the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, both defining credentials: if the
+// order of the files is A then B, B's credential values will be used instead
+// of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
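+//
+// For example (an illustrative call; the profile name and file paths are
+// assumptions):
+//
+//	cfg, err := loadSharedConfig("prod",
+//		[]string{"/home/user/.aws/credentials", "/home/user/.aws/config"},
+//		true)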
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
+	if len(profile) == 0 {
+		profile = DefaultSharedConfigProfile
+	}
+
+	files, err := loadSharedConfigIniFiles(filenames)
+	if err != nil {
+		return sharedConfig{}, err
+	}
+
+	cfg := sharedConfig{}
+	profiles := map[string]struct{}{}
+	if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
+		return sharedConfig{}, err
+	}
+
+	return cfg, nil
+}
+
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+	files := make([]sharedConfigFile, 0, len(filenames))
+
+	for _, filename := range filenames {
+		sections, err := ini.OpenFile(filename)
+		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
+			// Skip files which can't be opened and read for whatever reason
+			continue
+		} else if err != nil {
+			return nil, SharedConfigLoadError{Filename: filename, Err: err}
+		}
+
+		files = append(files, sharedConfigFile{
+			Filename: filename, IniData: sections,
+		})
+	}
+
+	return files, nil
+}
+
+func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+	cfg.Profile = profile
+
+	// Track how many files were skipped because the profile was not found in
+	// them.
+	var skippedFiles int
+	var profileNotFoundErr error
+	for _, f := range files {
+		if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				// Ignore profiles not defined in individual files.
+				profileNotFoundErr = err
+				skippedFiles++
+				continue
+			}
+			return err
+		}
+	}
+	if skippedFiles == len(files) {
+		// If all files were skipped because the profile is not found, return
+		// the original profile not found error.
+		return profileNotFoundErr
+	}
+
+	if _, ok := profiles[profile]; ok {
+		// If this is the second instance of the profile the Assume Role
+		// options must be cleared because they are only valid for the
+		// first reference of a profile. The self-linked instance of the
+		// profile only has credential provider options.
+		cfg.clearAssumeRoleOptions()
+	} else {
+		// First time a profile has been seen. Assert that if the credential
+		// type requires a role ARN, the ARN is also set.
+		if err := cfg.validateCredentialsConfig(profile); err != nil {
+			return err
+		}
+	}
+
+	profiles[profile] = struct{}{}
+
+	if err := cfg.validateCredentialType(); err != nil {
+		return err
+	}
+
+	// Link source profiles for assume roles
+	if len(cfg.SourceProfileName) != 0 {
+		// A profile linked via source_profile ignores credential provider
+		// options; the source profile must provide the credentials.
+		cfg.clearCredentialOptions()
+
+		srcCfg := &sharedConfig{}
+		err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+		if err != nil {
+			// A SourceProfile that doesn't exist is an error in configuration.
+			if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+				err = SharedConfigAssumeRoleError{
+					RoleARN:       cfg.RoleARN,
+					SourceProfile: cfg.SourceProfileName,
+				}
+			}
+			return err
+		}
+
+		if !srcCfg.hasCredentials() {
+			return SharedConfigAssumeRoleError{
+				RoleARN:       cfg.RoleARN,
+				SourceProfile: cfg.SourceProfileName,
+			}
+		}
+
+		cfg.SourceProfile = srcCfg
+	}
+
+	// If the profile contains an SSO session parameter, the session MUST exist
+	// as a section in the config file. Load the SSO session using the name
+	// provided. If the session section is not found or incomplete an error
+	// will be returned.
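+	//
+	// For example, an sso-session based profile might look like the
+	// following in the shared config file (an illustrative sketch; the
+	// names, account, and URL are assumptions):
+	//
+	//	[profile dev]
+	//	sso_session = my-sso
+	//	sso_account_id = 123456789012
+	//	sso_role_name = ReadOnly
+	//
+	//	[sso-session my-sso]
+	//	sso_region = us-east-1
+	//	sso_start_url = https://example.awsapps.com/start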
+	if cfg.hasSSOTokenProviderConfiguration() {
+		skippedFiles = 0
+		for _, f := range files {
+			section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))
+			if ok {
+				var ssoSession ssoSession
+				ssoSession.setFromIniSection(section)
+				ssoSession.Name = cfg.SSOSessionName
+				cfg.SSOSession = &ssoSession
+				break
+			}
+			skippedFiles++
+		}
+		if skippedFiles == len(files) {
+			// If all files were skipped because the sso-session section was
+			// not found, return the section not found error.
+			return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName)
+		}
+	}
+
+	return nil
+}
+
+// setFromIniFile loads the configuration from the file using the profile
+// provided. A sharedConfig pointer type value is used so that multiple config
+// file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config. Such as credentials. For
+// example if a config file only includes aws_access_key_id but no
+// aws_secret_access_key the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
+	section, ok := file.IniData.GetSection(profile)
+	if !ok {
+		// Fall back to the alternate profile section name: "profile <name>"
+		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if !ok {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
+		}
+	}
+
+	if exOpts {
+		// Assume Role Parameters
+		updateString(&cfg.RoleARN, section, roleArnKey)
+		updateString(&cfg.ExternalID, section, externalIDKey)
+		updateString(&cfg.MFASerial, section, mfaSerialKey)
+		updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
+		updateString(&cfg.SourceProfileName, section, sourceProfileKey)
+		updateString(&cfg.CredentialSource, section, credentialSourceKey)
+		updateString(&cfg.Region, section, regionKey)
+		updateString(&cfg.CustomCABundle, section, customCABundleKey)
+
+		// We're retaining a behavioral quirk with this field that existed before
+		// the removal of literal parsing for (aws-sdk-go-v2/#2276):
+		//   - if the key is missing, the config field will not be set
+		//   - if the key is set to a non-numeric, the config field will be set to 0
+		if section.Has(roleDurationSecondsKey) {
+			var d time.Duration
+			if v, ok := section.Int(roleDurationSecondsKey); ok {
+				d = time.Duration(v) * time.Second
+			}
+			cfg.AssumeRoleDuration = &d
+		}
+
+		if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
+			sre, err := endpoints.GetSTSRegionalEndpoint(v)
+			if err != nil {
+				return fmt.Errorf("failed to load %s from shared config, %s, %v",
+					stsRegionalEndpointSharedKey, file.Filename, err)
+			}
+			cfg.STSRegionalEndpoint = sre
+		}
+
+		if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
+			sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
+			if err != nil {
+				return fmt.Errorf("failed to load %s from shared config, %s, %v",
+					s3UsEast1RegionalSharedKey, file.Filename, err)
+			}
+			cfg.S3UsEast1RegionalEndpoint = sre
+		}
+
+		// AWS Single Sign-On (AWS SSO)
+		// SSO session options
+		updateString(&cfg.SSOSessionName, section, ssoSessionNameKey)
+
+		// AWS Single Sign-On (AWS SSO)
+		updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
+		updateString(&cfg.SSORegion, section, ssoRegionKey)
+		updateString(&cfg.SSORoleName, section, ssoRoleNameKey)
+		updateString(&cfg.SSOStartURL, section, ssoStartURL)
+
+		if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
+			return fmt.Errorf("failed to load %s from shared config, %s, %v",
+				ec2MetadataServiceEndpointModeKey, file.Filename, err)
+		}
+		updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+		updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
+
+		updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint)
+
+		updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey)
+	}
+
+	updateString(&cfg.CredentialProcess, section, credentialProcessKey)
+	updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey)
+
+	// Shared Credentials
+	creds := credentials.Value{
+		AccessKeyID:     section.String(accessKeyIDKey),
+		SecretAccessKey: section.String(secretAccessKey),
+		SessionToken:    section.String(sessionTokenKey),
+		ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+	}
+	if creds.HasKeys() {
+		cfg.Creds = creds
+	}
+
+	// Endpoint discovery
+	updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
+
+	// CSM options
+	updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey)
+	updateString(&cfg.CSMHost, section, csmHostKey)
+	updateString(&cfg.CSMPort, section, csmPortKey)
+	updateString(&cfg.CSMClientID, section, csmClientIDKey)
+
+	updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey)
+
+	return nil
+}
+
+func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error {
+	if !section.Has(key) {
+		return nil
+	}
+	value := section.String(key)
+	return endpointMode.SetFromString(value)
+}
+
+func (cfg *sharedConfig) validateCredentialsConfig(profile string) error {
+	if err := cfg.validateCredentialsRequireARN(profile); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
+	var credSource string
+
+	switch {
+	case len(cfg.SourceProfileName) != 0:
+		credSource = sourceProfileKey
+	case len(cfg.CredentialSource) != 0:
+		credSource = credentialSourceKey
+	case len(cfg.WebIdentityTokenFile) != 0:
+		credSource = webIdentityTokenFileKey
+	}
+
+	if len(credSource) != 0 && len(cfg.RoleARN) == 0 {
+		return CredentialRequiresARNError{
+			Type:    credSource,
+			Profile: profile,
+		}
+	}
+
+	return nil
+}
+
+func (cfg *sharedConfig) validateCredentialType() error {
+	// Only one or no credential type can be defined.
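+	// source_profile, credential_source, credential_process, and
+	// web_identity_token_file are mutually exclusive within a profile.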
+ if !oneOrNone( + len(cfg.SourceProfileName) != 0, + len(cfg.CredentialSource) != 0, + len(cfg.CredentialProcess) != 0, + len(cfg.WebIdentityTokenFile) != 0, + ) { + return ErrSharedConfigSourceCollision + } + + return nil +} + +func (cfg *sharedConfig) validateSSOConfiguration() error { + if cfg.hasSSOTokenProviderConfiguration() { + err := cfg.validateSSOTokenProviderConfiguration() + if err != nil { + return err + } + return nil + } + + if cfg.hasLegacySSOConfiguration() { + err := cfg.validateLegacySSOConfiguration() + if err != nil { + return err + } + } + return nil +} + +func (cfg *sharedConfig) hasCredentials() bool { + switch { + case len(cfg.SourceProfileName) != 0: + case len(cfg.CredentialSource) != 0: + case len(cfg.CredentialProcess) != 0: + case len(cfg.WebIdentityTokenFile) != 0: + case cfg.hasSSOConfiguration(): + case cfg.Creds.HasKeys(): + default: + return false + } + + return true +} + +func (cfg *sharedConfig) clearCredentialOptions() { + cfg.CredentialSource = "" + cfg.CredentialProcess = "" + cfg.WebIdentityTokenFile = "" + cfg.Creds = credentials.Value{} + cfg.SSOAccountID = "" + cfg.SSORegion = "" + cfg.SSORoleName = "" + cfg.SSOStartURL = "" +} + +func (cfg *sharedConfig) clearAssumeRoleOptions() { + cfg.RoleARN = "" + cfg.ExternalID = "" + cfg.MFASerial = "" + cfg.RoleSessionName = "" + cfg.SourceProfileName = "" +} + +func (cfg *sharedConfig) hasSSOConfiguration() bool { + return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration() +} + +func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool { + return len(c.SSOSessionName) > 0 +} + +func (c *sharedConfig) hasLegacySSOConfiguration() bool { + return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0 +} + +func (c *sharedConfig) validateSSOTokenProviderConfiguration() error { + var missing []string + + if len(c.SSOSessionName) == 0 { + missing = append(missing, ssoSessionNameKey) + } + + if c.SSOSession == nil { + missing = append(missing, ssoSectionPrefix) + } else { + if len(c.SSOSession.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOSession.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + + if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix) + } + + if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL { + return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix) + } + + return nil +} + +func (c *sharedConfig) validateLegacySSOConfiguration() error { + var missing []string + + if len(c.SSORegion) == 0 { + missing = append(missing, ssoRegionKey) + } + + if len(c.SSOStartURL) == 0 { + missing = append(missing, ssoStartURL) + } + + if len(c.SSOAccountID) == 0 { + missing = append(missing, ssoAccountIDKey) + } + + if len(c.SSORoleName) == 0 { + missing = append(missing, ssoRoleNameKey) + } + + if len(missing) > 0 { + return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s", + c.Profile, strings.Join(missing, ", ")) + } + return nil +} + +func oneOrNone(bs ...bool) bool { + var count int + + for _, b := range bs { + if b { + count++ + if count > 1 { + return false + } + } 
+	}
+
+	return true
+}
+
+// updateString will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+	*dst = section.String(key)
+}
+
+// updateBool will only update the dst with the value in the section key, if
+// the key is present in the section.
+func updateBool(dst *bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = v
+}
+
+// updateBoolPtr will only update the dst with the value in the section key,
+// if the key is present in the section.
+func updateBoolPtr(dst **bool, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
+	v, _ := section.Bool(key)
+	*dst = new(bool)
+	**dst = v
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to
+// load.
+type SharedConfigLoadError struct {
+	Filename string
+	Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+	return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+	return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+	Profile string
+	Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+	return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+	return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+	return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+	RoleARN       string
+	SourceProfile string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+	return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+	return fmt.Sprintf(
+		"failed to load assume role for %s, source profile %s has no shared credentials",
+		e.RoleARN, e.SourceProfile,
+	)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+	// type of credentials that were configured.
+	Type string
+
+	// Profile name the credentials were in.
+	Profile string
+}
+
+// Code is the short id of the error.
+func (e CredentialRequiresARNError) Code() string {
+	return "CredentialRequiresARNError"
+}
+
+// Message is the description of the error
+func (e CredentialRequiresARNError) Message() string {
+	return fmt.Sprintf(
+		"credential type %s requires role_arn, profile %s",
+		e.Type, e.Profile,
+	)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e CredentialRequiresARNError) OrigErr() error {
+	return nil
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+	return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+// updateUseDualStackEndpoint will only update the dst with the value in the
+// section, if a valid key and corresponding DualStackEndpointState is found.
+func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = endpoints.DualStackEndpointStateEnabled
+	} else {
+		*dst = endpoints.DualStackEndpointStateDisabled
+	}
+
+	return
+}
+
+// updateUseFIPSEndpoint will only update the dst with the value in the
+// section, if a valid key and corresponding FIPSEndpointState is found.
+func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) {
+	if !section.Has(key) {
+		return
+	}
+
+	// retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
+	if v, _ := section.Bool(key); v {
+		*dst = endpoints.FIPSEndpointStateEnabled
+	} else {
+		*dst = endpoints.FIPSEndpointStateDisabled
+	}
+
+	return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644
index 00000000000..9937538317a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
@@ -0,0 +1,81 @@
+package v4
+
+import (
+	"github.com/aws/aws-sdk-go/internal/strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+	IsValid(value string) bool
+}
+
+// IsValid will iterate through all rules and see if any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+	for _, rule := range r {
+		if rule.IsValid(value) {
+			return true
+		}
+	}
+	return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule reports whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+	_, ok := m[value]
+	return ok
+}
+
+// allowList is a generic rule for allow listing
+type allowList struct {
+	rule
+}
+
+// IsValid for allow list checks if the value is within the allow list
+func (w allowList) IsValid(value string) bool {
+	return w.rule.IsValid(value)
+}
+
+// excludeList is a generic rule for exclude listing
+type excludeList struct {
+	rule
+}
+
+// IsValid for exclude list checks if the value is within the exclude list
+func (b excludeList) IsValid(value string) bool {
+	return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns if a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+	for _, pattern := range p {
+		if strings.HasPrefixFold(value, pattern) {
+			return true
+		}
+	}
+	return false
+}
+
+// inclusiveRules rules allow for rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+	for _, rule := range r {
+		if !rule.IsValid(value) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 00000000000..6aa2ed241bb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload sets the signer's UnsignedPayload field to true.
+func WithUnsignedPayload(v4 *Signer) {
+	v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
new file mode 100644
index 00000000000..cf672b6ac46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
@@ -0,0 +1,14 @@
+//go:build !go1.7
+// +build !go1.7
+
+package v4
+
+import (
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+func requestContext(r *http.Request) aws.Context {
+	return aws.BackgroundContext()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
new file mode 100644
index 00000000000..21fe74e6fac
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
@@ -0,0 +1,14 @@
+//go:build go1.7
+// +build go1.7
+
+package v4
+
+import (
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+func requestContext(r *http.Request) aws.Context {
+	return r.Context()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go
new file mode 100644
index 00000000000..02cbd97e234
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go
@@ -0,0 +1,63 @@
+package v4
+
+import (
+	"encoding/hex"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+type credentialValueProvider interface {
+	Get() (credentials.Value, error)
+}
+
+// StreamSigner implements signing of event stream encoded payloads
+type StreamSigner struct {
+	region  string
+	service string
+
+	credentials credentialValueProvider
+
+	prevSig []byte
+}
+
+// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages
+func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner {
+	return &StreamSigner{
+		region:      region,
+		service:     service,
+		credentials: credentials,
+		prevSig:     seedSignature,
+	}
+}
+
+// GetSignature takes event stream encoded headers and a payload and returns a signature
+func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
+	credValue, err := s.credentials.Get()
+	if err != nil {
+		return nil, err
+	}
+
+	sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date)
+
+	keyPath := buildSigningScope(s.region, s.service, date)
+
+	stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date)
+
+	signature := hmacSHA256(sigKey, []byte(stringToSign))
+	s.prevSig = signature
+
+	return signature, nil
+}
+
+func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string {
+	return strings.Join([]string{
+		"AWS4-HMAC-SHA256-PAYLOAD",
+		formatTime(date),
+		scope,
+		hex.EncodeToString(prevSig),
+		hex.EncodeToString(hashSHA256(headers)),
+		hex.EncodeToString(hashSHA256(payload)),
+	}, "\n")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644
index 00000000000..7711ec7377f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
@@ -0,0 +1,25 @@
+//go:build go1.5
+// +build go1.5
+
+package v4
+
+import (
+	"net/url"
+	"strings"
+)
+
+func getURIPath(u *url.URL) string {
+	var uri string
+
+	if len(u.Opaque) > 0 {
+		uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+	} else {
+		uri = u.EscapedPath()
+	}
+
+	if len(uri) == 0 {
+		uri = "/"
+	}
+
+	return uri
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644
index 00000000000..b542df93156
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -0,0 +1,857 @@
+// Package v4 implements signing for AWS V4 signer
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// # Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use the URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//	"//<hostname>/<path>"
+//
+//	// e.g.
+//	"//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request prior to signing; this helps prevent
+// signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using an absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
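+//
+// A minimal standalone use might look like the following (an illustrative
+// sketch; the credentials, service, and region values are assumptions):
+//
+//	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
+//	req, _ := http.NewRequest("GET", "https://dynamodb.us-west-2.amazonaws.com/", nil)
+//	_, err := signer.Sign(req, nil, "dynamodb", "us-west-2", time.Now())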
+package v4
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+	"github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+	authorizationHeader     = "Authorization"
+	authHeaderSignatureElem = "Signature="
+	signatureQueryKey       = "X-Amz-Signature"
+
+	authHeaderPrefix = "AWS4-HMAC-SHA256"
+	timeFormat       = "20060102T150405Z"
+	shortTimeFormat  = "20060102"
+	awsV4Request     = "aws4_request"
+
+	// emptyStringSHA256 is a SHA256 of an empty string
+	emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+	excludeList{
+		mapRule{
+			authorizationHeader: struct{}{},
+			"User-Agent":        struct{}{},
+			"X-Amzn-Trace-Id":   struct{}{},
+		},
+	},
+}
+
+// requiredSignedHeaders is an allow list for building canonical headers.
+var requiredSignedHeaders = rules{
+	allowList{
+		mapRule{
+			"Cache-Control":                         struct{}{},
+			"Content-Disposition":                   struct{}{},
+			"Content-Encoding":                      struct{}{},
+			"Content-Language":                      struct{}{},
+			"Content-Md5":                           struct{}{},
+			"Content-Type":                          struct{}{},
+			"Expires":                               struct{}{},
+			"If-Match":                              struct{}{},
+			"If-Modified-Since":                     struct{}{},
+			"If-None-Match":                         struct{}{},
+			"If-Unmodified-Since":                   struct{}{},
+			"Range":                                 struct{}{},
+			"X-Amz-Acl":                             struct{}{},
+			"X-Amz-Copy-Source":                     struct{}{},
+			"X-Amz-Copy-Source-If-Match":            struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since":   struct{}{},
+			"X-Amz-Copy-Source-If-None-Match":       struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range":               struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Expected-Bucket-Owner":                     struct{}{},
+			"X-Amz-Grant-Full-control":                        struct{}{},
+			"X-Amz-Grant-Read":                                struct{}{},
+			"X-Amz-Grant-Read-Acp":                            struct{}{},
+			"X-Amz-Grant-Write":                               struct{}{},
+			"X-Amz-Grant-Write-Acp":                           struct{}{},
+			"X-Amz-Metadata-Directive":                        struct{}{},
+			"X-Amz-Mfa":                                       struct{}{},
+			"X-Amz-Request-Payer":                             struct{}{},
+			"X-Amz-Server-Side-Encryption":                    struct{}{},
+			"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":     struct{}{},
+			"X-Amz-Server-Side-Encryption-Context":            struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key":       struct{}{},
+			"X-Amz-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+			"X-Amz-Storage-Class":                             struct{}{},
+			"X-Amz-Tagging":                                   struct{}{},
+			"X-Amz-Website-Redirect-Location":                 struct{}{},
+			"X-Amz-Content-Sha256":                            struct{}{},
+		},
+	},
+	patterns{"X-Amz-Meta-"},
+	patterns{"X-Amz-Object-Lock-"},
+}
+
+// allowedQueryHoisting is an allow list of headers that may be hoisted to the
+// request's query string when presigning.
+var allowedQueryHoisting = inclusiveRules{
+	excludeList{requiredSignedHeaders},
+	patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+	// The authentication credentials the request will be signed against.
+	// This value must be set to sign requests.
+	Credentials *credentials.Credentials
+
+	// Sets the log level the signer should use when reporting information to
+	// the logger. If the logger is nil nothing will be logged. See
+	// aws.LogLevelType for more information on available logging levels
+	//
+	// By default nothing will be logged.
+	Debug aws.LogLevelType
+
+	// The logger logging information will be written to. If the logger
+	// is nil, nothing will be logged.
+	Logger aws.Logger
+
+	// Disables the Signer's moving HTTP header key/value pairs from the HTTP
+	// request header to the request's query string. This is most commonly used
+	// with pre-signed requests preventing headers from being added to the
+	// request's query string.
+	DisableHeaderHoisting bool
+
+	// Disables the automatic escaping of the URI path of the request for the
+	// signature's canonical string's path. For services that do not need
+	// additional escaping, use this to disable the signer escaping the path.
+	//
+	// S3 is an example of a service that does not need additional escaping.
+	//
+	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+	DisableURIPathEscaping bool
+
+	// Disables the automatic setting of the HTTP request's Body field with the
+	// io.ReadSeeker passed in to the signer. This is useful if you're using a
+	// custom wrapper around the body for the io.ReadSeeker and want to preserve
+	// the Body value on the Request.Body.
+	//
+	// This does run the risk of signing a request with a body that will not be
+	// sent in the request. Need to ensure that the underlying data of the Body
+	// values are the same.
+	DisableRequestBodyOverwrite bool
+
+	// currentTimeFn returns the time value which represents the current time.
+	// This value should only be used for testing. If it is nil the default
+	// time.Now will be used.
+	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+	UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+	v4 := &Signer{
+		Credentials: credentials,
+	}
+
+	for _, option := range options {
+		option(v4)
+	}
+
+	return v4
+}
+
+type signingCtx struct {
+	ServiceName      string
+	Region           string
+	Request          *http.Request
+	Body             io.ReadSeeker
+	Query            url.Values
+	Time             time.Time
+	ExpireTime       time.Duration
+	SignedHeaderVals http.Header
+
+	DisableURIPathEscaping bool
+
+	credValues      credentials.Value
+	isPresign       bool
+	unsignedPayload bool
+
+	bodyDigest       string
+	signedHeaders    string
+	canonicalHeaders string
+	canonicalString  string
+	credentialString string
+	stringToSign     string
+	signature        string
+	authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
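+//
+// For example (an illustrative sketch; the endpoint and body are
+// assumptions):
+//
+//	signer := v4.NewSigner(creds) // creds is an assumed *credentials.Credentials
+//	body := strings.NewReader("{}")
+//	req, _ := http.NewRequest("POST", "https://dynamodb.us-west-2.amazonaws.com/", body)
+//	_, err := signer.Sign(req, body, "dynamodb", "us-west-2", time.Now())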
+// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, false, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. 
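// Illustrative usage sketch, not part of the vendored source: signing a
// request in place with the Sign method above. The endpoint, payload, and
// static credentials below are hypothetical placeholder values.
//
//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
//	signer := NewSigner(creds)
//
//	body := strings.NewReader(`{"TableName":"example-table"}`)
//	req, _ := http.NewRequest("POST", "https://dynamodb.us-east-1.amazonaws.com/", body)
//	req.Header.Set("Content-Type", "application/x-amz-json-1.0")
//
//	// Sign sets X-Amz-Date, hashes the body, and writes the Authorization
//	// header on req.
//	if _, err := signer.Sign(req, body, "dynamodb", "us-east-1", time.Now()); err != nil {
//		// handle signing failure
//	}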
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+	return v4.signWithBody(r, body, service, region, exp, true, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
+	currentTimeFn := v4.currentTimeFn
+	if currentTimeFn == nil {
+		currentTimeFn = time.Now
+	}
+
+	ctx := &signingCtx{
+		Request:                r,
+		Body:                   body,
+		Query:                  r.URL.Query(),
+		Time:                   signTime,
+		ExpireTime:             exp,
+		isPresign:              isPresign,
+		ServiceName:            service,
+		Region:                 region,
+		DisableURIPathEscaping: v4.DisableURIPathEscaping,
+		unsignedPayload:        v4.UnsignedPayload,
+	}
+
+	for key := range ctx.Query {
+		sort.Strings(ctx.Query[key])
+	}
+
+	if ctx.isRequestSigned() {
+		ctx.Time = currentTimeFn()
+		ctx.handlePresignRemoval()
+	}
+
+	var err error
+	ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
+	if err != nil {
+		return http.Header{}, err
+	}
+
+	ctx.sanitizeHostForHeader()
+	ctx.assignAmzQueryValues()
+	if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
+		return nil, err
+	}
+
+	// If the request is not presigned the body should be attached to it. This
+	// prevents the confusion of wanting to send a signed request without
+	// the body the request was signed for attached.
+	if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+		var reader io.ReadCloser
+		if body != nil {
+			var ok bool
+			if reader, ok = body.(io.ReadCloser); !ok {
+				reader = ioutil.NopCloser(body)
+			}
+		}
+		r.Body = reader
+	}
+
+	if v4.Debug.Matches(aws.LogDebugWithSigning) {
+		v4.logSigningInfo(ctx)
+	}
+
+	return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) sanitizeHostForHeader() {
+	request.SanitizeHostForHeader(ctx.Request)
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+	if !ctx.isPresign {
+		return
+	}
+
+	// The request was previously presigned. Those signing values are now
+	// invalid and need to be removed before the request is signed again,
+	// otherwise the request will fail.
+	ctx.removePresign()
+
+	// Update the request's query string to ensure the values stay in
+	// sync in case retrieving the new credentials fails.
+	ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+		if ctx.credValues.SessionToken != "" {
+			ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+		} else {
+			ctx.Query.Del("X-Amz-Security-Token")
+		}
+
+		return
+	}
+
+	if ctx.credValues.SessionToken != "" {
+		ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+	}
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests with the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+	Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler should only be used with the SDK's built-in service client's
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+	SignSDKRequestWithCurrentTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+	return request.NamedHandler{
+		Name: name,
+		Fn: func(req *request.Request) {
+			SignSDKRequestWithCurrentTime(req, time.Now, opts...)
+		},
+	}
+}
+
+// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
+// function passed in. Behaves the same as SignSDKRequest with the exception
+// that the request is signed with the value returned by the current time
+// function.
+func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+	// Ignore the signing of the request if the AnonymousCredentials object
+	// is used; such requests do not need to be signed.
+	if req.Config.Credentials == credentials.AnonymousCredentials {
+		return
+	}
+
+	region := req.ClientInfo.SigningRegion
+	if region == "" {
+		region = aws.StringValue(req.Config.Region)
+	}
+
+	name := req.ClientInfo.SigningName
+	if name == "" {
+		name = req.ClientInfo.ServiceName
+	}
+
+	v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+		v4.Debug = req.Config.LogLevel.Value()
+		v4.Logger = req.Config.Logger
+		v4.DisableHeaderHoisting = req.NotHoist
+		v4.currentTimeFn = curTimeFn
+		if name == "s3" {
+			// S3 service should not have any escaping applied
+			v4.DisableURIPathEscaping = true
+		}
+		// Prevents setting the HTTPRequest's Body, since the Body could be
+		// wrapped in a custom io.Closer that we do not want the signer to
+		// stomp on.
+		v4.DisableRequestBodyOverwrite = true
+	})
+
+	for _, opt := range opts {
+		opt(v4)
+	}
+
+	curTime := curTimeFn()
+	signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+		name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
+	)
+	if err != nil {
+		req.Error = err
+		req.SignedHeaderVals = nil
+		return
+	}
+
+	req.SignedHeaderVals = signedHeaders
+	req.LastSignedAt = curTime
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+	signedURLMsg := ""
+	if ctx.isPresign {
+		signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+	}
+	msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+	v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
+	ctx.buildTime()             // no depends
+	ctx.buildCredentialString() // no depends
+
+	if err := ctx.buildBodyDigest(); err != nil {
+		return err
+	}
+
+	unsignedHeaders := ctx.Request.Header
+	if ctx.isPresign {
+		if !disableHeaderHoisting {
+			urlValues := url.Values{}
+			urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+			for k := range urlValues {
+				ctx.Query[k] = urlValues[k]
+			}
+		}
+	}
+
+	ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+	ctx.buildCanonicalString() // depends on canon headers / signed headers
+	ctx.buildStringToSign()    // depends on canon string
+	ctx.buildSignature()       // depends on string to sign
+
+	if ctx.isPresign {
+		ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature
+	} else {
+		parts := []string{
+			authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+			"SignedHeaders=" + ctx.signedHeaders,
+			authHeaderSignatureElem + ctx.signature,
+		}
+		ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
+	}
+
+	return nil
+}
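// Illustrative usage sketch, not part of the vendored source: presigning
// builds a shareable URL instead of an Authorization header. The bucket, key,
// and credentials below are hypothetical.
//
//	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")
//	signer := NewSigner(creds)
//
//	req, _ := http.NewRequest("GET", "https://example-bucket.s3.us-east-1.amazonaws.com/example-key", nil)
//	if _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()); err != nil {
//		// handle signing failure
//	}
//
//	// req.URL now carries the signature in its query string, with values
//	// set by the helpers below: X-Amz-Algorithm, X-Amz-Credential,
//	// X-Amz-Date, X-Amz-Expires, X-Amz-SignedHeaders, and X-Amz-Signature.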
+// GetSignedRequestSignature attempts to extract the signature of the request.
+// It returns an error if the request is unsigned, or if the signature cannot
+// be extracted.
+func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
+
+	if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
+		ps := strings.Split(auth, ", ")
+		for _, p := range ps {
+			if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
+				sig := p[len(authHeaderSignatureElem):]
+				if len(sig) == 0 {
+					return nil, fmt.Errorf("invalid request signature authorization header")
+				}
+				return hex.DecodeString(sig)
+			}
+		}
+	}
+
+	if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
+		return hex.DecodeString(sig)
+	}
+
+	return nil, fmt.Errorf("request not signed")
+}
+
+func (ctx *signingCtx) buildTime() {
+	if ctx.isPresign {
+		duration := int64(ctx.ExpireTime / time.Second)
+		ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time))
+		ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+	} else {
+		ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
+	}
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+	ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)
+
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+	}
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+	query := url.Values{}
+	unsignedHeaders := http.Header{}
+	for k, h := range header {
+		if r.IsValid(k) {
+			query[k] = h
+		} else {
+			unsignedHeaders[k] = h
+		}
+	}
+
+	return query, unsignedHeaders
+}
+
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+	var headers []string
+	headers = append(headers, "host")
+	for k, v := range header {
+		if !r.IsValid(k) {
+			continue // ignored header
+		}
+		if ctx.SignedHeaderVals == nil {
+			ctx.SignedHeaderVals = make(http.Header)
+		}
+
+		lowerCaseKey := strings.ToLower(k)
+		if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+			// include additional values
+			ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+			continue
+		}
+
+		headers = append(headers, lowerCaseKey)
+		ctx.SignedHeaderVals[lowerCaseKey] = v
+	}
+	sort.Strings(headers)
+
+	ctx.signedHeaders = strings.Join(headers, ";")
+
+	if ctx.isPresign {
+		ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+	}
+
+	headerItems := make([]string, len(headers))
+	for i, k := range headers {
+		if k == "host" {
+			if ctx.Request.Host != "" {
+				headerItems[i] = "host:" + ctx.Request.Host
+			} else {
+				headerItems[i] = "host:" + ctx.Request.URL.Host
+			}
+		} else {
+			headerValues := make([]string, len(ctx.SignedHeaderVals[k]))
+			for i, v := range ctx.SignedHeaderVals[k] {
+				headerValues[i] = strings.TrimSpace(v)
+			}
+			headerItems[i] = k + ":" +
+				strings.Join(headerValues, ",")
+		}
+	}
+	stripExcessSpaces(headerItems)
+	ctx.canonicalHeaders = strings.Join(headerItems, "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+	ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+	uri := getURIPath(ctx.Request.URL)
+
+	if !ctx.DisableURIPathEscaping {
+		uri = rest.EscapePath(uri, false)
+	}
+
+	ctx.canonicalString = strings.Join([]string{
+		ctx.Request.Method,
+		uri,
+		ctx.Request.URL.RawQuery,
+		ctx.canonicalHeaders + "\n",
+		ctx.signedHeaders,
+		ctx.bodyDigest,
+	}, "\n")
+}
+
+func (ctx *signingCtx) buildStringToSign() {
+	ctx.stringToSign = strings.Join([]string{
+		authHeaderPrefix,
+		formatTime(ctx.Time),
+		ctx.credentialString,
+		hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
+	}, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+	creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
+	signature := hmacSHA256(creds, []byte(ctx.stringToSign))
+	ctx.signature = hex.EncodeToString(signature)
+}
+
+func (ctx *signingCtx) buildBodyDigest() error {
+	hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+	if hash == "" {
+		includeSHA256Header := ctx.unsignedPayload ||
+			ctx.ServiceName == "s3" ||
+			ctx.ServiceName == "s3-object-lambda" ||
+			ctx.ServiceName == "glacier" ||
+			ctx.ServiceName == "s3-outposts"
+
+		s3Presign := ctx.isPresign &&
+			(ctx.ServiceName == "s3" ||
+				ctx.ServiceName == "s3-object-lambda")
+
+		if ctx.unsignedPayload || s3Presign {
+			hash = "UNSIGNED-PAYLOAD"
+			includeSHA256Header = !s3Presign
+		} else if ctx.Body == nil {
+			hash = emptyStringSHA256
+		} else {
+			if !aws.IsReaderSeekable(ctx.Body) {
+				return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
+			}
+			hashBytes, err := makeSha256Reader(ctx.Body)
+			if err != nil {
+				return err
+			}
+			hash = hex.EncodeToString(hashBytes)
+		}
+
+		if includeSHA256Header {
+			ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+		}
+	}
+	ctx.bodyDigest = hash
+
+	return nil
+}
+
+// isRequestSigned returns if the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+	if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+		return true
+	}
+	if ctx.Request.Header.Get("Authorization") != "" {
+		return true
+	}
+
+	return false
+}
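// Illustrative sketch, not part of the vendored source: the layout produced
// by buildCanonicalString and buildStringToSign above for a hypothetical
// GET https://example.com/path?a=b signed at 20240101T000000Z for service
// "s3" in us-east-1 with an unsigned payload. The canonical string:
//
//	GET
//	/path
//	a=b
//	host:example.com
//	x-amz-date:20240101T000000Z
//
//	host;x-amz-date
//	UNSIGNED-PAYLOAD
//
// and the derived string to sign, whose last line is the hex-encoded SHA256
// of the canonical string:
//
//	AWS4-HMAC-SHA256
//	20240101T000000Z
//	20240101/us-east-1/s3/aws4_request
//	<hex(SHA256(canonical string))>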
+// removePresign removes the presigned query string values for both signed
+// and presigned requests.
+func (ctx *signingCtx) removePresign() {
+	ctx.Query.Del("X-Amz-Algorithm")
+	ctx.Query.Del("X-Amz-Signature")
+	ctx.Query.Del("X-Amz-Security-Token")
+	ctx.Query.Del("X-Amz-Date")
+	ctx.Query.Del("X-Amz-Expires")
+	ctx.Query.Del("X-Amz-Credential")
+	ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func hmacSHA256(key []byte, data []byte) []byte {
+	hash := hmac.New(sha256.New, key)
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func hashSHA256(data []byte) []byte {
+	hash := sha256.New()
+	hash.Write(data)
+	return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
+	hash := sha256.New()
+	start, err := reader.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// ensure an error is returned if unable to seek back to start of payload.
+		_, err = reader.Seek(start, sdkio.SeekStart)
+	}()
+
+	// Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
+	// smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
+	size, err := aws.SeekerLen(reader)
+	if err != nil {
+		io.Copy(hash, reader)
+	} else {
+		io.CopyN(hash, reader, size)
+	}
+
+	return hash.Sum(nil), nil
+}
+
+const doubleSpace = "  "
+
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+	var j, k, l, m, spaces int
+	for i, str := range vals {
+		// Trim trailing spaces
+		for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+		}
+
+		// Trim leading spaces
+		for k = 0; k < j && str[k] == ' '; k++ {
+		}
+		str = str[k : j+1]
+
+		// Strip multiple spaces.
+		j = strings.Index(str, doubleSpace)
+		if j < 0 {
+			vals[i] = str
+			continue
+		}
+
+		buf := []byte(str)
+		for k, m, l = j, j, len(buf); k < l; k++ {
+			if buf[k] == ' ' {
+				if spaces == 0 {
+					// First space.
+					buf[m] = buf[k]
+					m++
+				}
+				spaces++
+			} else {
+				// End of multiple spaces.
+				spaces = 0
+				buf[m] = buf[k]
+				m++
+			}
+		}
+
+		vals[i] = string(buf[:m])
+	}
+}
+
+func buildSigningScope(region, service string, dt time.Time) string {
+	return strings.Join([]string{
+		formatShortTime(dt),
+		region,
+		service,
+		awsV4Request,
+	}, "/")
+}
+
+func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
+	kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
+	kRegion := hmacSHA256(kDate, []byte(region))
+	kService := hmacSHA256(kRegion, []byte(service))
+	signingKey := hmacSHA256(kService, []byte(awsV4Request))
+	return signingKey
+}
+
+func formatShortTime(dt time.Time) string {
+	return dt.UTC().Format(shortTimeFormat)
+}
+
+func formatTime(dt time.Time) string {
+	return dt.UTC().Format(timeFormat)
+}
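// Illustrative sketch, not part of the vendored source: the key-derivation
// chain implemented by deriveSigningKey above. Each step is an HMAC-SHA256
// keyed by the previous result, so the final signing key is scoped to one
// day, region, and service (the date, region, and service are hypothetical):
//
//	kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte("20240101"))
//	kRegion := hmacSHA256(kDate, []byte("us-east-1"))
//	kService := hmacSHA256(kRegion, []byte("s3"))
//	key := hmacSHA256(kService, []byte("aws4_request"))
//
//	signature := hex.EncodeToString(hmacSHA256(key, []byte(stringToSign)))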
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 00000000000..98751ee84f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,264 @@
+package aws
+
+import (
+	"io"
+	"strings"
+	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3
+// Upload manager (s3manager.Uploader) provides support for streaming, with
+// the ability to retry network errors.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+	return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+	r io.Reader
+}
+
+// IsReaderSeekable returns whether the underlying reader type can be seeked.
+// An io.Reader might not actually be seekable if it is wrapped in the
+// ReaderSeekerCloser type.
+func IsReaderSeekable(r io.Reader) bool {
+	switch v := r.(type) {
+	case ReaderSeekerCloser:
+		return v.IsSeeker()
+	case *ReaderSeekerCloser:
+		return v.IsSeeker()
+	case io.ReadSeeker:
+		return true
+	default:
+		return false
+	}
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and an error if one occurred, will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+	switch t := r.r.(type) {
+	case io.Reader:
+		return t.Read(p)
+	}
+	return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker, nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+	switch t := r.r.(type) {
+	case io.Seeker:
+		return t.Seek(offset, whence)
+	}
+	return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+	_, ok := r.r.(io.Seeker)
+	return ok
+}
+
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return seekerLen(s)
+	}
+
+	return -1, nil
+}
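// Illustrative sketch, not part of the vendored source: wrapping a plain
// io.Reader so it satisfies the interfaces above. Seek becomes a no-op for
// the non-seekable pipe reader, and IsReaderSeekable reports the difference.
//
//	pr, _ := io.Pipe()
//	rsc := ReadSeekCloser(pr)
//
//	IsReaderSeekable(rsc)        // false: *io.PipeReader has no Seek method
//	rsc.Seek(0, sdkio.SeekStart) // returns (0, nil) without doing anything
//
//	file, _ := os.Open("example.txt")
//	IsReaderSeekable(ReadSeekCloser(file)) // true: *os.File implements io.Seeker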
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or an error.
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+	switch v := s.(type) {
+	case ReaderSeekerCloser:
+		return v.GetLen()
+	case *ReaderSeekerCloser:
+		return v.GetLen()
+	}
+
+	return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+	curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := s.Seek(0, sdkio.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = s.Seek(curOffset, sdkio.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return endOffset - curOffset, nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer, nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+	switch t := r.r.(type) {
+	case io.Closer:
+		return t.Close()
+	}
+	return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content
+// to a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+	buf []byte
+	m   sync.Mutex
+
+	// GrowthCoeff defines the growth rate of the internal buffer. By
+	// default, the growth rate is 1, where expanding the internal
+	// buffer will allocate only enough capacity to fit the new expected
+	// length.
+	GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+	return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written, or an error, will be returned. It
+// can overwrite previously written slices if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+	pLen := len(p)
+	expLen := pos + int64(pLen)
+	b.m.Lock()
+	defer b.m.Unlock()
+	if int64(len(b.buf)) < expLen {
+		if int64(cap(b.buf)) < expLen {
+			if b.GrowthCoeff < 1 {
+				b.GrowthCoeff = 1
+			}
+			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+			copy(newBuf, b.buf)
+			b.buf = newBuf
+		}
+		b.buf = b.buf[:expLen]
+	}
+	copy(b.buf[pos:], p)
+	return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+	b.m.Lock()
+	defer b.m.Unlock()
+	return b.buf
+}
+
+// MultiCloser is a utility to close multiple io.Closers within a single
+// statement.
+type MultiCloser []io.Closer
+
+// Close closes all of the io.Closers making up the MultiCloser. Any
+// errors that occur while closing will be returned in the order they
+// occur.
+func (m MultiCloser) Close() error {
+	var errs errors
+	for _, c := range m {
+		err := c.Close()
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+	if len(errs) != 0 {
+		return errs
+	}
+
+	return nil
+}
+
+type errors []error
+
+func (es errors) Error() string {
+	var parts []string
+	for _, e := range es {
+		parts = append(parts, e.Error())
+	}
+
+	return strings.Join(parts, "\n")
+}
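// Illustrative sketch, not part of the vendored source: WriteAtBuffer grows
// on demand and tolerates out-of-order, overlapping writes, which is what
// lets the S3 download manager write ranged chunks concurrently.
//
//	buf := NewWriteAtBuffer(nil)
//	buf.WriteAt([]byte("world"), 6) // buffer grows to 11 bytes
//	buf.WriteAt([]byte("hello"), 0) // earlier region filled in later
//	buf.WriteAt([]byte(","), 5)
//
//	fmt.Printf("%s\n", buf.Bytes()) // "hello,world"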
+// CopySeekableBody copies the seekable body to an io.Writer
+func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
+	curPos, err := src.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	// copy errors may be assumed to be from the body.
+	n, err := io.Copy(dst, src)
+	if err != nil {
+		return n, err
+	}
+
+	// seek back to the first position after reading to reset
+	// the body for transmission.
+	_, err = src.Seek(curPos, sdkio.SeekStart)
+	if err != nil {
+		return n, err
+	}
+
+	return n, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 00000000000..fed561bd597
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,13 @@
+//go:build go1.8
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+	return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 00000000000..95282db03b8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,30 @@
+//go:build !go1.8
+// +build !go1.8
+
+package aws
+
+import (
+	"net/url"
+	"strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+	return stripPort(url.Host)
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+	colon := strings.IndexByte(hostport, ':')
+	if colon == -1 {
+		return hostport
+	}
+	if i := strings.IndexByte(hostport, ']'); i != -1 {
+		return strings.TrimPrefix(hostport[:i], "[")
+	}
+	return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 00000000000..a10b0af52a9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.51.14"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
new file mode 100644
index 00000000000..365345353e4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
@@ -0,0 +1,41 @@
+//go:build !go1.7
+// +build !go1.7
+
+package context
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+	return nil
+}
+
+func (*emptyCtx) Err() error {
+	return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+	return nil
+}
+
+func (e *emptyCtx) String() string {
+	switch e {
+	case BackgroundCtx:
+		return "aws.BackgroundContext"
+	}
+	return "unknown empty Context"
+}
+// BackgroundCtx is the common base context.
+var BackgroundCtx = new(emptyCtx)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
new file mode 100644
index 00000000000..e83a99886bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
@@ -0,0 +1,120 @@
+package ini
+
+// ASTKind represents different states in the parse table
+// and the type of AST that is being constructed
+type ASTKind int
+
+// ASTKind* is used in the parse table to transition between
+// the different states
+const (
+	ASTKindNone = ASTKind(iota)
+	ASTKindStart
+	ASTKindExpr
+	ASTKindEqualExpr
+	ASTKindStatement
+	ASTKindSkipStatement
+	ASTKindExprStatement
+	ASTKindSectionStatement
+	ASTKindNestedSectionStatement
+	ASTKindCompletedNestedSectionStatement
+	ASTKindCommentStatement
+	ASTKindCompletedSectionStatement
+)
+
+func (k ASTKind) String() string {
+	switch k {
+	case ASTKindNone:
+		return "none"
+	case ASTKindStart:
+		return "start"
+	case ASTKindExpr:
+		return "expr"
+	case ASTKindStatement:
+		return "stmt"
+	case ASTKindSectionStatement:
+		return "section_stmt"
+	case ASTKindExprStatement:
+		return "expr_stmt"
+	case ASTKindCommentStatement:
+		return "comment"
+	case ASTKindNestedSectionStatement:
+		return "nested_section_stmt"
+	case ASTKindCompletedSectionStatement:
+		return "completed_stmt"
+	case ASTKindSkipStatement:
+		return "skip"
+	default:
+		return ""
+	}
+}
+
+// The AST type allows us to determine what kind of node we are on, so that
+// casting may not be necessary.
+//
+// The root is always the first node in Children
+type AST struct {
+	Kind      ASTKind
+	Root      Token
+	RootToken bool
+	Children  []AST
+}
+
+func newAST(kind ASTKind, root AST, children ...AST) AST {
+	return AST{
+		Kind:     kind,
+		Children: append([]AST{root}, children...),
+	}
+}
+
+func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
+	return AST{
+		Kind:      kind,
+		Root:      root,
+		RootToken: true,
+		Children:  children,
+	}
+}
+
+// AppendChild will append to the list of children an AST has.
+func (a *AST) AppendChild(child AST) {
+	a.Children = append(a.Children, child)
+}
+
+// GetRoot will return the root AST which can be the first entry
+// in the children list or a token.
+func (a *AST) GetRoot() AST {
+	if a.RootToken {
+		return *a
+	}
+
+	if len(a.Children) == 0 {
+		return AST{}
+	}
+
+	return a.Children[0]
+}
+
+// GetChildren will return the current AST's list of children
+func (a *AST) GetChildren() []AST {
+	if len(a.Children) == 0 {
+		return []AST{}
+	}
+
+	if a.RootToken {
+		return a.Children
+	}
+
+	return a.Children[1:]
+}
+
+// SetChildren will set and override all children of the AST.
+func (a *AST) SetChildren(children []AST) {
+	if a.RootToken {
+		a.Children = children
+	} else {
+		a.Children = append(a.Children[:1], children...)
+	}
+}
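// Illustrative sketch, not part of the vendored source: how an `x = 1` style
// statement could be assembled from these AST helpers together with the
// expression helpers defined later in this package (token values elided).
//
//	lhs := newExpression(keyTok)   // ASTKindExpr wrapping the key token
//	eq := newEqualExpr(lhs, opTok) // ASTKindEqualExpr; lhs is its first child
//	eq.AppendChild(newExpression(valTok))
//
//	EqualExprKey(eq) // yields the raw key text, e.g. "region"
//	eq.GetChildren() // [key expr, value expr]; the op token is the root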
+// Start is used to indicate the starting state of the parse table.
+var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
new file mode 100644
index 00000000000..0895d53cbe6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
@@ -0,0 +1,11 @@
+package ini
+
+var commaRunes = []rune(",")
+
+func isComma(b rune) bool {
+	return b == ','
+}
+
+func newCommaToken() Token {
+	return newToken(TokenComma, commaRunes, NoneType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
new file mode 100644
index 00000000000..0b76999ba1f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
@@ -0,0 +1,35 @@
+package ini
+
+// isComment will return whether or not the next byte(s) begin a
+// comment.
+func isComment(b []rune) bool {
+	if len(b) == 0 {
+		return false
+	}
+
+	switch b[0] {
+	case ';':
+		return true
+	case '#':
+		return true
+	}
+
+	return false
+}
+
+// newCommentToken will create a comment token and
+// return how many bytes were read.
+func newCommentToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if b[i] == '\n' {
+			break
+		}
+
+		if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
+			break
+		}
+	}
+
+	return newToken(TokenComment, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
new file mode 100644
index 00000000000..1e55bbd07b9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
@@ -0,0 +1,42 @@
+// Package ini is an LL(1) parser for configuration files.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+//	panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+//	fmt.Printf("section %q could not be found", profile)
+// }
+//
+// Below is the BNF that describes this parser
+// Grammar:
+// stmt -> section | stmt'
+// stmt' -> epsilon | expr
+// expr -> value (stmt)* | equal_expr (stmt)*
+// equal_expr -> value ( ':' | '=' ) equal_expr'
+// equal_expr' -> number | string | quoted_string
+// quoted_string -> " quoted_string'
+// quoted_string' -> string quoted_string_end
+// quoted_string_end -> "
+//
+// section -> [ section'
+// section' -> section_value section_close
+// section_value -> number | string_subset | boolean | quoted_string_subset
+// quoted_string_subset -> " quoted_string_subset'
+// quoted_string_subset' -> string_subset quoted_string_end
+// quoted_string_subset -> "
+// section_close -> ]
+//
+// value -> number | string_subset | boolean
+// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ?
+// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ?
+//
+// SkipState will skip (NL WS)+
+//
+// comment -> # comment' | ; comment'
+// comment' -> epsilon | value
+package ini
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
new file mode 100644
index 00000000000..04345a54c20
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
@@ -0,0 +1,4 @@
+package ini
+
+// emptyToken is used to satisfy the Token interface
+var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
new file mode 100644
index 00000000000..91ba2a59dd5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
@@ -0,0 +1,24 @@
+package ini
+
+// newExpression will return an expression AST.
+// Expr represents an expression
+//
+// grammar:
+// expr -> string | number
+func newExpression(tok Token) AST {
+	return newASTWithRootToken(ASTKindExpr, tok)
+}
+
+func newEqualExpr(left AST, tok Token) AST {
+	return newASTWithRootToken(ASTKindEqualExpr, tok, left)
+}
+
+// EqualExprKey will return an LHS value in the equal expr
+func EqualExprKey(ast AST) string {
+	children := ast.GetChildren()
+	if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
+		return ""
+	}
+
+	return string(children[0].Root.Raw())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
new file mode 100644
index 00000000000..6e545b63bc4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
@@ -0,0 +1,18 @@
+//go:build gofuzz
+// +build gofuzz
+
+package ini
+
+import (
+	"bytes"
+)
+
+func Fuzz(data []byte) int {
+	b := bytes.NewReader(data)
+
+	if _, err := Parse(b); err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
new file mode 100644
index 00000000000..3b0ca7afe3b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
@@ -0,0 +1,51 @@
+package ini
+
+import (
+	"io"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// OpenFile takes a path to a given file, and will open and parse
+// that file.
+func OpenFile(path string) (Sections, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
+	}
+	defer f.Close()
+
+	return Parse(f)
+}
+
+// Parse will parse the given file using the shared config
+// visitor.
+func Parse(f io.Reader) (Sections, error) {
+	tree, err := ParseAST(f)
+	if err != nil {
+		return Sections{}, err
+	}
+
+	v := NewDefaultVisitor()
+	if err = Walk(tree, v); err != nil {
+		return Sections{}, err
+	}
+
+	return v.Sections, nil
+}
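// Illustrative sketch, not part of the vendored source: parsing a
// shared-config style document with Parse above. The profile name and key
// are hypothetical, and the GetSection accessor is the one shown in this
// package's doc.go example.
//
//	sections, err := Parse(strings.NewReader("[profile dev]\nregion = us-west-2\n"))
//	if err != nil {
//		// handle parse error
//	}
//	if section, ok := sections.GetSection("profile dev"); ok {
//		_ = section // look up keys, e.g. a "region" value of "us-west-2"
//	}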
+// ParseBytes will parse the given bytes and return the parsed sections.
+func ParseBytes(b []byte) (Sections, error) {
+	tree, err := ParseASTBytes(b)
+	if err != nil {
+		return Sections{}, err
+	}
+
+	v := NewDefaultVisitor()
+	if err = Walk(tree, v); err != nil {
+		return Sections{}, err
+	}
+
+	return v.Sections, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
new file mode 100644
index 00000000000..582c024ad15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
@@ -0,0 +1,165 @@
+package ini
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+	// ErrCodeUnableToReadFile is used when a file fails to be
+	// opened or read.
+	ErrCodeUnableToReadFile = "FailedRead"
+)
+
+// TokenType represents the various different token types
+type TokenType int
+
+func (t TokenType) String() string {
+	switch t {
+	case TokenNone:
+		return "none"
+	case TokenLit:
+		return "literal"
+	case TokenSep:
+		return "sep"
+	case TokenOp:
+		return "op"
+	case TokenWS:
+		return "ws"
+	case TokenNL:
+		return "newline"
+	case TokenComment:
+		return "comment"
+	case TokenComma:
+		return "comma"
+	default:
+		return ""
+	}
+}
+
+// TokenType enums
+const (
+	TokenNone = TokenType(iota)
+	TokenLit
+	TokenSep
+	TokenComma
+	TokenOp
+	TokenWS
+	TokenNL
+	TokenComment
+)
+
+type iniLexer struct{}
+
+// Tokenize will return a list of tokens during lexical analysis of the
+// io.Reader.
+func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
+	}
+
+	return l.tokenize(b)
+}
+
+func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
+	runes := bytes.Runes(b)
+	var err error
+	n := 0
+	tokenAmount := countTokens(runes)
+	tokens := make([]Token, tokenAmount)
+	count := 0
+
+	for len(runes) > 0 && count < tokenAmount {
+		switch {
+		case isWhitespace(runes[0]):
+			tokens[count], n, err = newWSToken(runes)
+		case isComma(runes[0]):
+			tokens[count], n = newCommaToken(), 1
+		case isComment(runes):
+			tokens[count], n, err = newCommentToken(runes)
+		case isNewline(runes):
+			tokens[count], n, err = newNewlineToken(runes)
+		case isSep(runes):
+			tokens[count], n, err = newSepToken(runes)
+		case isOp(runes):
+			tokens[count], n, err = newOpToken(runes)
+		default:
+			tokens[count], n, err = newLitToken(runes)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		count++
+
+		runes = runes[n:]
+	}
+
+	return tokens[:count], nil
+}
+
+func countTokens(runes []rune) int {
+	count, n := 0, 0
+	var err error
+
+	for len(runes) > 0 {
+		switch {
+		case isWhitespace(runes[0]):
+			_, n, err = newWSToken(runes)
+		case isComma(runes[0]):
+			_, n = newCommaToken(), 1
+		case isComment(runes):
+			_, n, err = newCommentToken(runes)
+		case isNewline(runes):
+			_, n, err = newNewlineToken(runes)
+		case isSep(runes):
+			_, n, err = newSepToken(runes)
+		case isOp(runes):
+			_, n, err = newOpToken(runes)
+		default:
+			_, n, err = newLitToken(runes)
+		}
+
+		if err != nil {
+			return 0
+		}
+
+		count++
+		runes = runes[n:]
+	}
+
+	return count + 1
+}
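// Illustrative sketch, not part of the vendored source: the token stream the
// lexer above produces for a single assignment line (it would have to run
// inside this package, since iniLexer is unexported).
//
//	tokens, _ := (&iniLexer{}).tokenize([]byte("region = us-west-2"))
//	// yields, in order:
//	//   literal("region"), ws(" "), op("="), ws(" "), literal("us-west-2")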
+// Token holds metadata about a given value.
+type Token struct {
+	t         TokenType
+	ValueType ValueType
+	base      int
+	raw       []rune
+}
+
+var emptyValue = Value{}
+
+func newToken(t TokenType, raw []rune, v ValueType) Token {
+	return Token{
+		t:         t,
+		raw:       raw,
+		ValueType: v,
+	}
+}
+
+// Raw returns the raw runes that were consumed
+func (tok Token) Raw() []rune {
+	return tok.raw
+}
+
+// Type returns the token type
+func (tok Token) Type() TokenType {
+	return tok.t
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
new file mode 100644
index 00000000000..0ba319491c0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
@@ -0,0 +1,350 @@
+package ini
+
+import (
+	"fmt"
+	"io"
+)
+
+// ParseState represents the current state of the parser.
+type ParseState uint
+
+// State enums for the parse table
+const (
+	InvalidState ParseState = iota
+	// stmt -> value stmt'
+	StatementState
+	// stmt' -> MarkComplete | op stmt
+	StatementPrimeState
+	// value -> number | string | boolean | quoted_string
+	ValueState
+	// section -> [ section'
+	OpenScopeState
+	// section' -> value section_close
+	SectionState
+	// section_close -> ]
+	CloseScopeState
+	// SkipState will skip (NL WS)+
+	SkipState
+	// SkipTokenState will skip any token and push the previous
+	// state onto the stack.
+	SkipTokenState
+	// comment -> # comment' | ; comment'
+	// comment' -> MarkComplete | value
+	CommentState
+	// MarkComplete state will complete statements and move that
+	// to the completed AST list
+	MarkCompleteState
+	// TerminalState signifies that the tokens have been fully parsed
+	TerminalState
+)
+
+// parseTable is a state machine to dictate the grammar above.
+var parseTable = map[ASTKind]map[TokenType]ParseState{
+	ASTKindStart: {
+		TokenLit:     StatementState,
+		TokenSep:     OpenScopeState,
+		TokenWS:      SkipTokenState,
+		TokenNL:      SkipTokenState,
+		TokenComment: CommentState,
+		TokenNone:    TerminalState,
+	},
+	ASTKindCommentStatement: {
+		TokenLit:     StatementState,
+		TokenSep:     OpenScopeState,
+		TokenWS:      SkipTokenState,
+		TokenNL:      SkipTokenState,
+		TokenComment: CommentState,
+		TokenNone:    MarkCompleteState,
+	},
+	ASTKindExpr: {
+		TokenOp:      StatementPrimeState,
+		TokenLit:     ValueState,
+		TokenSep:     OpenScopeState,
+		TokenWS:      ValueState,
+		TokenNL:      SkipState,
+		TokenComment: CommentState,
+		TokenNone:    MarkCompleteState,
+	},
+	ASTKindEqualExpr: {
+		TokenLit:  ValueState,
+		TokenSep:  ValueState,
+		TokenOp:   ValueState,
+		TokenWS:   SkipTokenState,
+		TokenNL:   SkipState,
+		TokenNone: SkipState,
+	},
+	ASTKindStatement: {
+		TokenLit:     SectionState,
+		TokenSep:     CloseScopeState,
+		TokenWS:      SkipTokenState,
+		TokenNL:      SkipTokenState,
+		TokenComment: CommentState,
+		TokenNone:    MarkCompleteState,
+	},
+	ASTKindExprStatement: {
+		TokenLit:     ValueState,
+		TokenSep:     ValueState,
+		TokenOp:      ValueState,
+		TokenWS:      ValueState,
+		TokenNL:      MarkCompleteState,
+		TokenComment: CommentState,
+		TokenNone:    TerminalState,
+		TokenComma:   SkipState,
+	},
+	ASTKindSectionStatement: {
+		TokenLit: SectionState,
+		TokenOp:  SectionState,
+		TokenSep: CloseScopeState,
+		TokenWS:  SectionState,
+		TokenNL:  SkipTokenState,
+	},
+	ASTKindCompletedSectionStatement: {
+		TokenWS:      SkipTokenState,
+		TokenNL:      SkipTokenState,
+		TokenLit:     StatementState,
+		TokenSep:     OpenScopeState,
+		TokenComment: CommentState,
+		TokenNone:    MarkCompleteState,
+	},
+	ASTKindSkipStatement: {
+		TokenLit:     StatementState,
+		TokenSep:     OpenScopeState,
+		TokenWS:      SkipTokenState,
+		TokenNL:      SkipTokenState,
+		TokenComment: CommentState,
+		TokenNone:    TerminalState,
+	},
+}
+
+// ParseAST will parse input from an io.Reader using
+// an LL(1) parser.
+func ParseAST(r io.Reader) ([]AST, error) {
+	lexer := iniLexer{}
+	tokens, err := lexer.Tokenize(r)
+	if err != nil {
+		return []AST{}, err
+	}
+
+	return parse(tokens)
+}
+
+// ParseASTBytes will parse input from a byte slice using
+// an LL(1) parser.
+func ParseASTBytes(b []byte) ([]AST, error) {
+	lexer := iniLexer{}
+	tokens, err := lexer.tokenize(b)
+	if err != nil {
+		return []AST{}, err
+	}
+
+	return parse(tokens)
+}
+
+func parse(tokens []Token) ([]AST, error) {
+	start := Start
+	stack := newParseStack(3, len(tokens))
+
+	stack.Push(start)
+	s := newSkipper()
+
+loop:
+	for stack.Len() > 0 {
+		k := stack.Pop()
+
+		var tok Token
+		if len(tokens) == 0 {
+			// this occurs when all the tokens have been processed
+			// but reduction of what's left on the stack needs to
+			// occur.
+			tok = emptyToken
+		} else {
+			tok = tokens[0]
+		}
+
+		step := parseTable[k.Kind][tok.Type()]
+		if s.ShouldSkip(tok) {
+			// being in a skip state with no tokens will break out of
+			// the parse loop since there is nothing left to process.
+			if len(tokens) == 0 {
+				break loop
+			}
+			// if should skip is true, we skip the tokens until should skip is set to false.
+			step = SkipTokenState
+		}
+
+		switch step {
+		case TerminalState:
+			// Finished parsing. Push what should be the last
+			// statement to the stack. If there is anything left
+			// on the stack, an error in parsing has occurred.
+			if k.Kind != ASTKindStart {
+				stack.MarkComplete(k)
+			}
+			break loop
+		case SkipTokenState:
+			// When skipping a token, the previous state was popped off the stack.
+			// To maintain the correct state, the previous state will be pushed
+			// onto the stack.
+			stack.Push(k)
+		case StatementState:
+			if k.Kind != ASTKindStart {
+				stack.MarkComplete(k)
+			}
+			expr := newExpression(tok)
+			stack.Push(expr)
+		case StatementPrimeState:
+			if tok.Type() != TokenOp {
+				stack.MarkComplete(k)
+				continue
+			}
+
+			if k.Kind != ASTKindExpr {
+				return nil, NewParseError(
+					fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
+				)
+			}
+
+			k = trimSpaces(k)
+			expr := newEqualExpr(k, tok)
+			stack.Push(expr)
+		case ValueState:
+			// ValueState requires the previous state to either be an equal expression
+			// or an expression statement.
+			switch k.Kind {
+			case ASTKindEqualExpr:
+				// assigning a value to some key
+				k.AppendChild(newExpression(tok))
+				stack.Push(newExprStatement(k))
+			case ASTKindExpr:
+				k.Root.raw = append(k.Root.raw, tok.Raw()...)
+				stack.Push(k)
+			case ASTKindExprStatement:
+				root := k.GetRoot()
+				children := root.GetChildren()
+				if len(children) == 0 {
+					return nil, NewParseError(
+						fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
+					)
+				}
+
+				rhs := children[len(children)-1]
+
+				if rhs.Root.ValueType != QuotedStringType {
+					rhs.Root.ValueType = StringType
+					rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
+				}
+
+				children[len(children)-1] = rhs
+				root.SetChildren(children)
+
+				stack.Push(k)
+			}
+		case OpenScopeState:
+			if !runeCompare(tok.Raw(), openBrace) {
+				return nil, NewParseError("expected '['")
+			}
+			// If OpenScopeState is not at the start, we must mark the previous ast as complete
+			//
+			// for example: if previous ast was a skip statement;
+			// we should mark it as complete before we create a new statement
+			if k.Kind != ASTKindStart {
+				stack.MarkComplete(k)
+			}
+
+			stmt := newStatement()
+			stack.Push(stmt)
+		case CloseScopeState:
+			if !runeCompare(tok.Raw(), closeBrace) {
+				return nil, NewParseError("expected ']'")
+			}
+
+			k = trimSpaces(k)
+			stack.Push(newCompletedSectionStatement(k))
+		case SectionState:
+			var stmt AST
+
+			switch k.Kind {
+			case ASTKindStatement:
+				// If there are multiple literals inside of a scope declaration,
+				// then the current token's raw value will be appended to the Name.
+				//
+				// This handles cases like [ profile default ]
+				//
+				// k will represent a SectionStatement with the children representing
+				// the label of the section
+				stmt = newSectionStatement(tok)
+			case ASTKindSectionStatement:
+				k.Root.raw = append(k.Root.raw, tok.Raw()...)
+				stmt = k
+			default:
+				return nil, NewParseError(
+					fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
+				)
+			}
+
+			stack.Push(stmt)
+		case MarkCompleteState:
+			if k.Kind != ASTKindStart {
+				stack.MarkComplete(k)
+			}
+
+			if stack.Len() == 0 {
+				stack.Push(start)
+			}
+		case SkipState:
+			stack.Push(newSkipStatement(k))
+			s.Skip()
+		case CommentState:
+			if k.Kind == ASTKindStart {
+				stack.Push(k)
+			} else {
+				stack.MarkComplete(k)
+			}
+
+			stmt := newCommentStatement(tok)
+			stack.Push(stmt)
+		default:
+			return nil, NewParseError(
+				fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
+					k, tok.Type()))
+		}
+
+		if len(tokens) > 0 {
+			tokens = tokens[1:]
+		}
+	}
+
+	// this occurs when a statement has not been completed
+	if stack.top > 1 {
+		return nil, NewParseError(fmt.Sprintf("incomplete ini expression"))
+	}
+
+	// returns a sublist which excludes the start symbol
+	return stack.List(), nil
+}
+
+// trimSpaces will trim spaces on the left and right hand side of
+// the literal.
+func trimSpaces(k AST) AST {
+	// trim left hand side of spaces
+	for i := 0; i < len(k.Root.raw); i++ {
+		if !isWhitespace(k.Root.raw[i]) {
+			break
+		}
+
+		k.Root.raw = k.Root.raw[1:]
+		i--
+	}
+
+	// trim right hand side of spaces
+	for i := len(k.Root.raw) - 1; i >= 0; i-- {
+		if !isWhitespace(k.Root.raw[i]) {
+			break
+		}
+
+		k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
+	}
+
+	return k
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
new file mode 100644
index 00000000000..b1b686086a9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
@@ -0,0 +1,337 @@
+package ini
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+var (
+	runesTrue  = []rune("true")
+	runesFalse = []rune("false")
+)
+
+var literalValues = [][]rune{
+	runesTrue,
+	runesFalse,
+}
+
+func isBoolValue(b []rune) bool {
+	for _, lv := range literalValues {
+		if isCaselessLitValue(lv, b) {
+			return true
+		}
+	}
+	return false
+}
+
+func isLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != have[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
+func isCaselessLitValue(want, have []rune) bool {
+	if len(have) < len(want) {
+		return false
+	}
+
+	for i := 0; i < len(want); i++ {
+		if want[i] != unicode.ToLower(have[i]) {
+			return false
+		}
+	}
+
+	return true
+}
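// Illustrative sketch, not part of the vendored source: inputs the literal
// predicates in this file accept or reject.
//
//	isBoolValue([]rune("TRUE"))    // true: caseless match against "true"
//	isBoolValue([]rune("yes"))     // false: only true/false are literals
//	isNumberValue([]rune("1e-4"))  // true: scientific notation
//	isNumberValue([]rune("0x2A"))  // true: hex format
//	isNumberValue([]rune("1a"))    // false: trailing non-digit character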
+// isNumberValue will return whether or not the leading characters in
+// a byte slice are a number. A number is delimited by whitespace or
+// the newline token.
+//
+// A number is defined to be in a binary, octal, decimal (int | float), hex format,
+// or in scientific notation.
+func isNumberValue(b []rune) bool {
+	negativeIndex := 0
+	helper := numberHelper{}
+	needDigit := false
+
+	for i := 0; i < len(b); i++ {
+		negativeIndex++
+
+		switch b[i] {
+		case '-':
+			if helper.IsNegative() || negativeIndex != 1 {
+				return false
+			}
+			helper.Determine(b[i])
+			needDigit = true
+			continue
+		case 'e', 'E':
+			if err := helper.Determine(b[i]); err != nil {
+				return false
+			}
+			negativeIndex = 0
+			needDigit = true
+			continue
+		case 'b':
+			if helper.numberFormat == hex {
+				break
+			}
+			fallthrough
+		case 'o', 'x':
+			needDigit = true
+			if i == 0 {
+				return false
+			}
+
+			fallthrough
+		case '.':
+			if err := helper.Determine(b[i]); err != nil {
+				return false
+			}
+			needDigit = true
+			continue
+		}
+
+		if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
+			return !needDigit
+		}
+
+		if !helper.CorrectByte(b[i]) {
+			return false
+		}
+		needDigit = false
+	}
+
+	return !needDigit
+}
+
+func isValid(b []rune) (bool, int, error) {
+	if len(b) == 0 {
+		// TODO: should probably return an error
+		return false, 0, nil
+	}
+
+	return isValidRune(b[0]), 1, nil
+}
+
+func isValidRune(r rune) bool {
+	return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
+}
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+	switch v {
+	case NoneType:
+		return "NONE"
+	case DecimalType:
+		return "FLOAT"
+	case IntegerType:
+		return "INT"
+	case StringType:
+		return "STRING"
+	case BoolType:
+		return "BOOL"
+	}
+
+	return ""
+}
+
+// ValueType enums
+const (
+	NoneType = ValueType(iota)
+	DecimalType // deprecated
+	IntegerType // deprecated
+	StringType
+	QuotedStringType
+	BoolType // deprecated
+)
+
+// Value is a union container
+type Value struct {
+	Type ValueType
+	raw  []rune
+
+	integer int64   // deprecated
+	decimal float64 // deprecated
+	boolean bool    // deprecated
+	str     string
+}
+
+func newValue(t ValueType, base int, raw []rune) (Value, error) {
+	v := Value{
+		Type: t,
+		raw:  raw,
+	}
+	var err error
+
+	switch t {
+	case DecimalType:
+		v.decimal, err = strconv.ParseFloat(string(raw), 64)
+	case IntegerType:
+		if base != 10 {
+			raw = raw[2:]
+		}
+
+		v.integer, err = strconv.ParseInt(string(raw), base, 64)
+	case StringType:
+		v.str = string(raw)
+	case QuotedStringType:
+		v.str = string(raw[1 : len(raw)-1])
+	case BoolType:
+		v.boolean = isCaselessLitValue(runesTrue, v.raw)
+	}
+
+	// issue 2253
+	//
+	// if the value trying to be parsed is too large, then we will use
+	// the 'StringType' and raw value instead.
+	if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
+		v.Type = StringType
+		v.str = string(raw)
+		err = nil
+	}
+
+	return v, err
+}
+
+// Append will append values and change the type to a string
+// type.
+func (v *Value) Append(tok Token) {
+	r := tok.Raw()
+	if v.Type != QuotedStringType {
+		v.Type = StringType
+		r = tok.raw[1 : len(tok.raw)-1]
+	}
+	if tok.Type() != TokenLit {
+		v.raw = append(v.raw, tok.Raw()...)
+	} else {
+		v.raw = append(v.raw, r...)
+	}
+}
+
+func (v Value) String() string {
+	switch v.Type {
+	case DecimalType:
+		return fmt.Sprintf("decimal: %f", v.decimal)
+	case IntegerType:
+		return fmt.Sprintf("integer: %d", v.integer)
+	case StringType:
+		return fmt.Sprintf("string: %s", string(v.raw))
+	case QuotedStringType:
+		return fmt.Sprintf("quoted string: %s", string(v.raw))
+	case BoolType:
+		return fmt.Sprintf("bool: %t", v.boolean)
+	default:
+		return "union not set"
+	}
+}
+
+func newLitToken(b []rune) (Token, int, error) {
+	n := 0
+	var err error
+
+	token := Token{}
+	if b[0] == '"' {
+		n, err = getStringValue(b)
+		if err != nil {
+			return token, n, err
+		}
+
+		token = newToken(TokenLit, b[:n], QuotedStringType)
+	} else {
+		n, err = getValue(b)
+		token = newToken(TokenLit, b[:n], StringType)
+	}
+
+	return token, n, err
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() (int64, bool) {
+	i, err := strconv.ParseInt(string(v.raw), 0, 64)
+	if err != nil {
+		return 0, false
+	}
+	return i, true
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() (float64, bool) {
+	f, err := strconv.ParseFloat(string(v.raw), 64)
+	if err != nil {
+		return 0, false
+	}
+	return f, true
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() (bool, bool) {
+	// we don't use ParseBool as it recognizes more than what we've
+	// historically supported
+	if isCaselessLitValue(runesTrue, v.raw) {
+		return true, true
+	} else if isCaselessLitValue(runesFalse, v.raw) {
+		return false, true
+	}
+	return false, false
+}
+
+func isTrimmable(r rune) bool {
+	switch r {
+	case '\n', ' ':
+		return true
+	}
+	return false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+	switch v.Type {
+	case StringType:
+		return strings.TrimFunc(string(v.raw), isTrimmable)
+	case QuotedStringType:
+		// preserve all characters in the quotes
+		return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
+	default:
+		return strings.TrimFunc(string(v.raw), isTrimmable)
+	}
+}
+
+func contains(runes []rune, c rune) bool {
+	for i := 0; i < len(runes); i++ {
+		if runes[i] == c {
+			return true
+		}
+	}
+
+	return false
+}
+
+func runeCompare(v1 []rune, v2 []rune) bool {
+	if len(v1) != len(v2) {
+		return false
+	}
+
+	for i := 0; i < len(v1); i++ {
+		if v1[i] != v2[i] {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
new file mode 100644
index 00000000000..e52ac399f17
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
@@ -0,0 +1,30 @@
+package ini
+
+func isNewline(b []rune) bool {
+	if len(b) == 0 {
+		return false
+	}
+
+	if b[0] == '\n' {
+		return true
+	}
+
+	if len(b) < 2 {
+		return false
+	}
+
+	return b[0] == '\r' && b[1] == '\n'
+}
+
+func newNewlineToken(b []rune) (Token, int, error) {
+	i := 1
+	if b[0] == '\r' && isNewline(b[1:]) {
+		i++
+	}
+
+	if !isNewline([]rune(b[:i])) {
+		return emptyToken, 0, NewParseError("invalid new line token")
+	}
+
+	return newToken(TokenNL, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
new file mode 100644
index 00000000000..a45c0bc5662
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
@@ -0,0 +1,152 @@
+package ini
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+const (
+	none = numberFormat(iota)
+	binary
+	octal
+	decimal
+ hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 00000000000..8a84c7cbe08 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go 
b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
new file mode 100644
index 00000000000..45728701931
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
@@ -0,0 +1,43 @@
+package ini
+
+import "fmt"
+
+const (
+	// ErrCodeParseError is returned when a parsing error
+	// has occurred.
+	ErrCodeParseError = "INIParseError"
+)
+
+// ParseError is an error which is returned during any part of
+// the parsing process.
+type ParseError struct {
+	msg string
+}
+
+// NewParseError will return a new ParseError where message
+// is the description of the error.
+func NewParseError(message string) *ParseError {
+	return &ParseError{
+		msg: message,
+	}
+}
+
+// Code will return the ErrCodeParseError
+func (err *ParseError) Code() string {
+	return ErrCodeParseError
+}
+
+// Message returns the error's message
+func (err *ParseError) Message() string {
+	return err.msg
+}
+
+// OrigError returns nil, since there will never be any
+// original error.
+func (err *ParseError) OrigError() error {
+	return nil
+}
+
+func (err *ParseError) Error() string {
+	return fmt.Sprintf("%s: %s", err.Code(), err.Message())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
new file mode 100644
index 00000000000..7f01cf7c703
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
@@ -0,0 +1,60 @@
+package ini
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// ParseStack is a stack that contains a container (the stack
+// portion) and a list of the ASTs that have been successfully
+// parsed.
+type ParseStack struct {
+	top       int
+	container []AST
+	list      []AST
+	index     int
+}
+
+func newParseStack(sizeContainer, sizeList int) ParseStack {
+	return ParseStack{
+		container: make([]AST, sizeContainer),
+		list:      make([]AST, sizeList),
+	}
+}
+
+// Pop will return and truncate the last container element.
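+//
+// A minimal usage sketch (the pushed AST value is illustrative, not a
+// fixed part of the API); Push and Pop are expected to bracket one
+// another:
+//
+//	stack := newParseStack(4, 4)
+//	stack.Push(newStatement())
+//	ast := stack.Pop() // returns the statement AST just pushed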
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 00000000000..f82095ba259 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 00000000000..da7a4049cfa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + // should skip state will be modified only if previous token was new line (NL); + // and the current token is not WhiteSpace (WS). + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + s.Continue() + return false + } + s.prevTok = tok + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true +} + +func (s *skipper) Continue() { + s.shouldSkip = false + // empty token is assigned as we return to default state, when should skip is false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 00000000000..18f3fe89317 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
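+// For example, newStatement() yields an AST whose Kind is
+// ASTKindStatement and which carries no root token.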
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 00000000000..b5480fdeb35 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isCaselessLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
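+//
+// For example, getNegativeNumber([]rune("-42x")) returns 3, the index
+// of the first non-digit after the leading '-'; input that does not
+// begin with '-' returns 0.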
+func getNegativeNumber(b []rune) int {
+	if b[0] != '-' {
+		return 0
+	}
+
+	i := 1
+	for ; i < len(b); i++ {
+		if !isDigit(b[i]) {
+			return i
+		}
+	}
+
+	return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+	if len(value) == 0 {
+		return false
+	}
+
+	switch b {
+	case '\'': // single quote
+	case '"': // quote
+	case 'n': // newline
+	case 't': // tab
+	case '\\': // backslash
+	default:
+		return false
+	}
+
+	return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+	switch b {
+	case '\'': // single quote
+		return '\'', nil
+	case '"': // quote
+		return '"', nil
+	case 'n': // newline
+		return '\n', nil
+	case 't': // tab
+		return '\t', nil
+	case '\\': // backslash
+		return '\\', nil
+	default:
+		return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+	}
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+	for i := 0; i < len(b); i++ {
+		if isEscaped(b[:i], b[i]) {
+			c, err := getEscapedByte(b[i])
+			if err != nil {
+				return b
+			}
+
+			b[i-1] = c
+			b = append(b[:i], b[i+1:]...)
+			i--
+		}
+	}
+
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100644
index 00000000000..1d08e138aba
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,169 @@
+package ini
+
+import (
+	"fmt"
+	"sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+	VisitExpr(AST) error
+	VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting this will build sections and populate
+// the Sections field which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+	scope    string
+	Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor.
+func NewDefaultVisitor() *DefaultVisitor {
+	return &DefaultVisitor{
+		Sections: Sections{
+			container: map[string]Section{},
+		},
+	}
+}
+
+// VisitExpr visits expression nodes and records their key/value pairs
+// in the currently scoped section.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+	t := v.Sections.container[v.scope]
+	if t.values == nil {
+		t.values = values{}
+	}
+
+	switch expr.Kind {
+	case ASTKindExprStatement:
+		opExpr := expr.GetRoot()
+		switch opExpr.Kind {
+		case ASTKindEqualExpr:
+			children := opExpr.GetChildren()
+			if len(children) <= 1 {
+				return NewParseError("unexpected token type")
+			}
+
+			rhs := children[1]
+
+			// The right-hand side of the equality expression is allowed to
+			// contain '[', ']', ':', and '=' in its values. If the token is
+			// neither a literal nor one of the token types that identify
+			// those four additional tokens, return an error.
+			if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
+				return NewParseError("unexpected token type")
+			}
+
+			key := EqualExprKey(opExpr)
+			v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+			if err != nil {
+				return err
+			}
+
+			t.values[key] = v
+		default:
+			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+		}
+	default:
+		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+	}
+
+	v.Sections.container[v.scope] = t
+	return nil
+}
+
+// VisitStatement visits statement nodes, updating the visitor's scope
+// when a completed section statement is encountered.
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+	switch stmt.Kind {
+	case ASTKindCompletedSectionStatement:
+		child := stmt.GetRoot()
+		if child.Kind != ASTKindSectionStatement {
+			return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+		}
+
+		name := string(child.Root.Raw())
+		v.Sections.container[name] = Section{}
+		v.scope = name
+	default:
+		return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+	}
+
+	return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second return value.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+	Name   string
+	values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType returns the type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) (bool, bool) {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) (int64, bool) {
+	return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) (float64, bool) {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 00000000000..99915f7f777
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+	for _, node := range tree {
+		switch node.Kind {
+		case ASTKindExpr,
+			ASTKindExprStatement:
+
+			if err := v.VisitExpr(node); err != nil {
+				return err
+			}
+		case ASTKindStatement,
+			ASTKindCompletedSectionStatement,
+			ASTKindNestedSectionStatement,
+			ASTKindCompletedNestedSectionStatement:
+
+			if err := v.VisitStatement(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 00000000000..7ffb4ae06ff
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+	"unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is any Unicode space character other than a newline
+// ('\n') or a carriage return ('\r').
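+//
+// For example, isWhitespace(' ') and isWhitespace('\t') return true,
+// while isWhitespace('\n') and isWhitespace('\r') return false.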
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go
new file mode 100644
index 00000000000..bf18031a38e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/accesspoint_arn.go
@@ -0,0 +1,50 @@
+package arn
+
+import (
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+// AccessPointARN provides the representation of an S3 access point ARN.
+type AccessPointARN struct {
+	arn.ARN
+	AccessPointName string
+}
+
+// GetARN returns the base ARN for the Access Point resource
+func (a AccessPointARN) GetARN() arn.ARN {
+	return a.ARN
+}
+
+// ParseAccessPointResource attempts to parse the ARN's resource as an
+// AccessPoint resource.
+//
+// Supported Access point resource format:
+//	- Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
+//	- example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
+func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
+	if len(a.Region) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
+	}
+	if len(a.AccountID) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
+	}
+	if len(resParts) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+	if len(resParts) > 1 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
+	}
+
+	resID := resParts[0]
+	if len(strings.TrimSpace(resID)) == 0 {
+		return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
+	}
+
+	return AccessPointARN{
+		ARN:             a,
+		AccessPointName: resID,
+	}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
new file mode 100644
index 00000000000..216c4baabfe
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/arn.go
@@ -0,0 +1,94 @@
+package arn
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+)
+
+var supportedServiceARN = []string{
+	"s3",
+	"s3-outposts",
+	"s3-object-lambda",
+}
+
+func isSupportedServiceARN(service string) bool {
+	for _, name := range supportedServiceARN {
+		if name == service {
+			return true
+		}
+	}
+	return false
+}
+
+// Resource provides the interfaces abstracting ARNs of specific resource
+// types.
+type Resource interface {
+	GetARN() arn.ARN
+	String() string
+}
+
+// ResourceParser provides the function for parsing an ARN's resource
+// component into a typed resource.
+type ResourceParser func(arn.ARN) (Resource, error)
+
+// ParseResource parses an AWS ARN into a typed resource for the S3 API.
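+//
+// A minimal sketch wiring ParseResource to the access point parser in
+// this package (the ARN string is illustrative):
+//
+//	res, err := ParseResource(
+//		"arn:aws:s3:us-west-2:012345678901:accesspoint/myendpoint",
+//		func(a arn.ARN) (Resource, error) {
+//			return ParseAccessPointResource(a, SplitResource(a.Resource)[1:])
+//		})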
+func ParseResource(s string, resParser ResourceParser) (resARN Resource, err error) { + a, err := arn.Parse(s) + if err != nil { + return nil, err + } + + if len(a.Partition) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "partition not set"} + } + + if !isSupportedServiceARN(a.Service) { + return nil, InvalidARNError{ARN: a, Reason: "service is not supported"} + } + + if strings.HasPrefix(a.Region, "fips-") || strings.HasSuffix(a.Region, "-fips") { + return nil, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"} + } + + if len(a.Resource) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "resource not set"} + } + + return resParser(a) +} + +// SplitResource splits the resource components by the ARN resource delimiters. +func SplitResource(v string) []string { + var parts []string + var offset int + + for offset <= len(v) { + idx := strings.IndexAny(v[offset:], "/:") + if idx < 0 { + parts = append(parts, v[offset:]) + break + } + parts = append(parts, v[offset:idx+offset]) + offset += idx + 1 + } + + return parts +} + +// IsARN returns whether the given string is an ARN +func IsARN(s string) bool { + return arn.IsARN(s) +} + +// InvalidARNError provides the error for an invalid ARN error. +type InvalidARNError struct { + ARN arn.ARN + Reason string +} + +// Error returns a string denoting the occurred InvalidARNError +func (e InvalidARNError) Error() string { + return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go new file mode 100644 index 00000000000..1e10f8de00b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/outpost_arn.go @@ -0,0 +1,126 @@ +package arn + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/arn" +) + +// OutpostARN interface that should be satisfied by outpost ARNs +type OutpostARN interface { + Resource + GetOutpostID() string +} + +// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format +// and return a specific OutpostARN type +// +// Currently supported outpost ARN formats: +// * Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +// +// * Outpost Bucket ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket +// +// Other outpost ARN formats may be supported and added in the future. +// +func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) { + if len(a.Region) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "region not set"} + } + + if len(a.AccountID) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "account-id not set"} + } + + // verify if outpost id is present and valid + if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 { + return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + // verify possible resource type exists + if len(resParts) < 3 { + return nil, InvalidARNError{ + ARN: a, Reason: "incomplete outpost resource type. 
Expected bucket or access-point resource to be present", + } + } + + // Since we know this is a OutpostARN fetch outpostID + outpostID := strings.TrimSpace(resParts[0]) + + switch resParts[1] { + case "accesspoint": + accesspointARN, err := ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return OutpostAccessPointARN{}, err + } + return OutpostAccessPointARN{ + AccessPointARN: accesspointARN, + OutpostID: outpostID, + }, nil + + case "bucket": + bucketName, err := parseBucketResource(a, resParts[2:]) + if err != nil { + return nil, err + } + return OutpostBucketARN{ + ARN: a, + BucketName: bucketName, + OutpostID: outpostID, + }, nil + + default: + return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"} + } +} + +// OutpostAccessPointARN represents outpost access point ARN. +type OutpostAccessPointARN struct { + AccessPointARN + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost access point arn +func (o OutpostAccessPointARN) GetOutpostID() string { + return o.OutpostID +} + +// OutpostBucketARN represents the outpost bucket ARN. +type OutpostBucketARN struct { + arn.ARN + BucketName string + OutpostID string +} + +// GetOutpostID returns the outpost id of outpost bucket arn +func (o OutpostBucketARN) GetOutpostID() string { + return o.OutpostID +} + +// GetARN retrives the base ARN from outpost bucket ARN resource +func (o OutpostBucketARN) GetARN() arn.ARN { + return o.ARN +} + +// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the +// bucket resource id. +// +// parseBucketResource only parses the bucket resource id. +// +func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) { + if len(resParts) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + if len(resParts) > 1 { + return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"} + } + + bucketName = strings.TrimSpace(resParts[0]) + if len(bucketName) == 0 { + return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"} + } + return bucketName, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go new file mode 100644 index 00000000000..513154cc0e3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/arn/s3_object_lambda_arn.go @@ -0,0 +1,15 @@ +package arn + +// S3ObjectLambdaARN represents an ARN for the s3-object-lambda service +type S3ObjectLambdaARN interface { + Resource + + isS3ObjectLambdasARN() +} + +// S3ObjectLambdaAccessPointARN is an S3ObjectLambdaARN for the Access Point resource type +type S3ObjectLambdaAccessPointARN struct { + AccessPointARN +} + +func (s S3ObjectLambdaAccessPointARN) isS3ObjectLambdasARN() {} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go new file mode 100644 index 00000000000..4290ff67601 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/endpoint_errors.go @@ -0,0 +1,202 @@ +package s3shared + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +const ( + invalidARNErrorErrCode = "InvalidARNError" + configurationErrorErrCode = "ConfigurationError" +) + +// InvalidARNError denotes the error for Invalid ARN +type InvalidARNError struct { + message string + 
resource arn.Resource + origErr error +} + +// Error returns the InvalidARNError +func (e InvalidARNError) Error() string { + var extra string + if e.resource != nil { + extra = "ARN: " + e.resource.String() + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns the invalid ARN error code +func (e InvalidARNError) Code() string { + return invalidARNErrorErrCode +} + +// Message returns the message for Invalid ARN error +func (e InvalidARNError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Invalid ARN Error +func (e InvalidARNError) OrigErr() error { + return e.origErr +} + +// NewInvalidARNError denotes invalid arn error +func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "invalid ARN", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints +func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported with custom client endpoints", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition +func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for the target ARN partition", + origErr: err, + resource: resource, + } +} + +// NewInvalidARNWithFIPSError ARN not supported for FIPS region +// +// Deprecated: FIPS will not appear in the ARN region component. +func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError { + return InvalidARNError{ + message: "resource ARN not supported for FIPS region", + resource: resource, + origErr: err, + } +} + +// ConfigurationError is used to denote a client configuration error +type ConfigurationError struct { + message string + resource arn.Resource + clientPartitionID string + clientRegion string + origErr error +} + +// Error returns the Configuration error string +func (e ConfigurationError) Error() string { + extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s", + e.resource, e.clientPartitionID, e.clientRegion) + + return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr) +} + +// Code returns configuration error's error-code +func (e ConfigurationError) Code() string { + return configurationErrorErrCode +} + +// Message returns the configuration error message +func (e ConfigurationError) Message() string { + return e.message +} + +// OrigErr is the original error wrapped by Configuration Error +func (e ConfigurationError) OrigErr() error { + return e.origErr +} + +// NewClientPartitionMismatchError stub +func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client partition does not match provided ARN partition", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientRegionMismatchError denotes cross region access error +func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client region does not match provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: 
clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewFailedToResolveEndpointError denotes endpoint resolving error +func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "endpoint resolver failed to find an endpoint for the provided ARN region", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access +func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for fips but cross-region resource ARN provided", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewFIPSConfigurationError denotes a configuration error when a client or request is configured for FIPS +func NewFIPSConfigurationError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "use of ARN is not supported when client or request is configured for FIPS", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate +func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Accelerate but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request +func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} + +// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack +func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError { + return ConfigurationError{ + message: "client configured for S3 Dual-stack but is not supported with resource ARN", + origErr: err, + resource: resource, + clientPartitionID: clientPartitionID, + clientRegion: clientRegion, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go new file mode 100644 index 00000000000..ef43d6c5896 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/resource_request.go @@ -0,0 +1,45 @@ +package s3shared + +import ( + "github.com/aws/aws-sdk-go/aws" + awsarn "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" +) + +// ResourceRequest represents the request and arn resource +type ResourceRequest struct { + Resource arn.Resource + Request 
*request.Request
+}
+
+// ARN returns the resource ARN
+func (r ResourceRequest) ARN() awsarn.ARN {
+	return r.Resource.GetARN()
+}
+
+// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
+func (r ResourceRequest) AllowCrossRegion() bool {
+	return aws.BoolValue(r.Request.Config.S3UseARNRegion)
+}
+
+// IsCrossPartition returns true if the client is configured for a different
+// partition than the one the resource ARN region resolves to.
+func (r ResourceRequest) IsCrossPartition() bool {
+	return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
+}
+
+// IsCrossRegion returns true if ARN region is different than client configured region
+func (r ResourceRequest) IsCrossRegion() bool {
+	return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
+}
+
+// HasCustomEndpoint returns true if custom client endpoint is provided
+func (r ResourceRequest) HasCustomEndpoint() bool {
+	return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
+}
+
+// IsCrossRegion returns true if request signing region is not same as configured region
+func IsCrossRegion(req *request.Request, otherRegion string) bool {
+	return req.ClientInfo.SigningRegion != otherRegion
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
new file mode 100644
index 00000000000..0b9b0dfce04
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3shared/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 from the response.
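+//
+// A sketch of registering the handler on a request.Handlers value
+// (the unmarshal-error phase chosen here is illustrative):
+//
+//	handlers.UnmarshalError.PushBackNamed(RequestFailureWrapperHandler())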
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
new file mode 100644
index 00000000000..6c443988bbc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
@@ -0,0 +1,12 @@
+package sdkio
+
+const (
+	// Byte is 8 bits
+	Byte int64 = 1
+	// KibiByte (KiB) is 1024 Bytes
+	KibiByte = Byte * 1024
+	// MebiByte (MiB) is 1024 KiB
+	MebiByte = KibiByte * 1024
+	// GibiByte (GiB) is 1024 MiB
+	GibiByte = MebiByte * 1024
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 00000000000..037a998c4c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,11 @@
+//go:build !go1.7
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
+const (
+	SeekStart   = 0 // seek relative to the origin of the file
+	SeekCurrent = 1 // seek relative to the current offset
+	SeekEnd     = 2 // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
new file mode 100644
index 00000000000..65e7c60c4de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
@@ -0,0 +1,13 @@
+//go:build go1.7
+// +build go1.7
+
+package sdkio
+
+import "io"
+
+// Alias for Go 1.7 io package Seeker constants
+const (
+	SeekStart   = io.SeekStart   // seek relative to the origin of the file
+	SeekCurrent = io.SeekCurrent // seek relative to the current offset
+	SeekEnd     = io.SeekEnd     // seek relative to the end
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
new file mode 100644
index 00000000000..a8452878324
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
@@ -0,0 +1,16 @@
+//go:build go1.10
+// +build go1.10
+
+package sdkmath
+
+import "math"
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	return math.Round(x)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
new file mode 100644
index 00000000000..a3ae3e5dba8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
@@ -0,0 +1,57 @@
+//go:build !go1.10
+// +build !go1.10
+
+package sdkmath
+
+import "math"
+
+// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
+// Go version prior to Go 1.10.
+const (
+	uvone    = 0x3FF0000000000000
+	mask     = 0x7FF
+	shift    = 64 - 11 - 1
+	bias     = 1023
+	signMask = 1 << 63
+	fracMask = 1<<shift - 1
+)
+
+// Round returns the nearest integer, rounding half away from zero.
+//
+// Special cases are:
+//	Round(±0) = ±0
+//	Round(±Inf) = ±Inf
+//	Round(NaN) = NaN
+func Round(x float64) float64 {
+	// Round is a faster implementation of:
+	//
+	// func Round(x float64) float64 {
+	//   t := Trunc(x)
+	//   if Abs(x-t) >= 0.5 {
+	//     return t + Copysign(1, x)
+	//   }
+	//   return t
+	// }
+	bits := math.Float64bits(x)
+	e := uint(bits>>shift) & mask
+	if e < bias {
+		// Round abs(x) < 1 including denormals.
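+		// An exponent below bias means |x| < 1. Within that range only
+		// 0.5 <= |x| < 1 (e == bias-1) rounds away from zero to ±1;
+		// anything smaller truncates to ±0.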
+ bits &= signMask // +-0 + if e == bias-1 { + bits |= uvone // +-1 + } + } else if e < bias+shift { + // Round any abs(x) >= 1 containing a fractional component [0,1). + // + // Numbers with larger exponents are returned unchanged since they + // must be either an integer, infinity, or NaN. + const half = 1 << (shift - 1) + e -= bias + bits += half >> e + bits &^= fracMask >> e + } + return math.Float64frombits(bits) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 00000000000..0c9802d8770 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go new file mode 100644 index 00000000000..4bae66ceed5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go @@ -0,0 +1,12 @@ +//go:build go1.6 +// +build go1.6 + +package sdkrand + +import "math/rand" + +// Read provides the stub for math.Rand.Read method support for go version's +// 1.6 and greater. +func Read(r *rand.Rand, p []byte) (int, error) { + return r.Read(p) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go new file mode 100644 index 00000000000..3a6ab882516 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go @@ -0,0 +1,25 @@ +//go:build !go1.6 +// +build !go1.6 + +package sdkrand + +import "math/rand" + +// Read backfills Go 1.6's math.Rand.Reader for Go 1.5 +func Read(r *rand.Rand, p []byte) (n int, err error) { + // Copy of Go standard libraries math package's read function not added to + // standard library until Go 1.6. + var pos int8 + var val int64 + for n = 0; n < len(p); n++ { + if pos == 0 { + val = r.Int63() + pos = 7 + } + p[n] = byte(val) + val >>= 8 + pos-- + } + + return n, err +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 00000000000..38ea61afeaa --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) 
+ if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 00000000000..7da8a49ce52 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. +var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000000..34fea49ca81 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,46 @@ +package shareddefaults + +import ( + "os/user" + "path/filepath" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. 
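+//
+// For example, with HOME=/home/alice on Linux this resolves to
+// "/home/alice", and the shared credentials path becomes
+// "/home/alice/.aws/credentials".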
+func UserHomeDir() string { + var home string + + home = userHomeDir() + if len(home) > 0 { + return home + } + + currUser, _ := user.Current() + if currUser != nil { + home = currUser.HomeDir + } + + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go new file mode 100644 index 00000000000..eb298ae0fc1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go @@ -0,0 +1,18 @@ +//go:build !go1.12 +// +build !go1.12 + +package shareddefaults + +import ( + "os" + "runtime" +) + +func userHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go new file mode 100644 index 00000000000..51541b50876 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go @@ -0,0 +1,13 @@ +//go:build go1.12 +// +build go1.12 + +package shareddefaults + +import ( + "os" +) + +func userHomeDir() string { + home, _ := os.UserHomeDir() + return home +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go new file mode 100644 index 00000000000..d008ae27cb3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go @@ -0,0 +1,11 @@ +package strings + +import ( + "strings" +) + +// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings, +// under Unicode case-folding. +func HasPrefixFold(s, prefix string) bool { + return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE new file mode 100644 index 00000000000..6a66aea5eaf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go new file mode 100644 index 00000000000..14ad0c58911 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go @@ -0,0 +1,120 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // forgotten indicates whether Forget was called with this call's key + // while the call was still in flight. + forgotten bool + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. 
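+//
+// doCall is reached through Do or DoChan; a minimal sketch of the
+// public entry point (fetchValue is illustrative):
+//
+//	var g Group
+//	v, err, shared := g.Do("key", func() (interface{}, error) {
+//		return fetchValue()
+//	})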
+func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + if !c.forgotten { + delete(g.m, key) + } + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + if c, ok := g.m[key]; ok { + c.forgotten = true + } + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go new file mode 100644 index 00000000000..e045f38d837 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/checksum/content_md5.go @@ -0,0 +1,53 @@ +package checksum + +import ( + "crypto/md5" + "encoding/base64" + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const contentMD5Header = "Content-Md5" + +// AddBodyContentMD5Handler computes and sets the HTTP Content-MD5 header for requests that +// require it. +func AddBodyContentMD5Handler(r *request.Request) { + // if Content-MD5 header is already present, return + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) != 0 { + return + } + + // if S3DisableContentMD5Validation flag is set, return + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + + // if request is presigned, return + if r.IsPresigned() { + return + } + + // if body is not seekable, return + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + h := md5.New() + + if _, err := aws.CopySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // encode the md5 checksum in base64 and set the request header. 
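+	// Note: Content-MD5 carries the base64 encoding of the 16-byte
+	// binary digest (RFC 1864), not the hex form of the checksum.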
+ v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 00000000000..151054971a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + *hs = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return 
StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 00000000000..47433939189 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,216 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader, opts ...func(*Decoder)) *Decoder { + d := &Decoder{ + r: r, + } + + for _, opt := range opts { + opt(d) + } + + return d +} + +// DecodeWithLogger adds a logger to be used by the decoder when decoding +// stream events. +func DecodeWithLogger(logger aws.Logger) func(*Decoder) { + return func(d *Decoder) { + d.logger = logger + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. +func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + m, err = Decode(reader, payloadBuf) + + return m, err +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the reader. 
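+//
+// The wire layout consumed below is, in big-endian order: a 4-byte total
+// message length, a 4-byte headers length, a 4-byte prelude CRC32 (IEEE)
+// over those two lengths, the header bytes, the payload bytes, and a final
+// 4-byte CRC32 over everything preceding it.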
+func Decode(reader io.Reader, payloadBuf []byte) (m Message, err error) { + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 00000000000..ffade3bc0c8 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,162 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + logger aws.Logger + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer, opts ...func(*Encoder)) *Encoder { + e := &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// EncodeWithLogger adds a logger to be used by the encode when decoding +// stream events. +func EncodeWithLogger(logger aws.Logger) func(*Encoder) { + return func(d *Encoder) { + d.logger = logger + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(msg Message) (err error) { + e.headersBuf.Reset() + + writer := e.w + if e.logger != nil { + encodeMsgBuf := bytes.NewBuffer(nil) + writer = io.MultiWriter(writer, encodeMsgBuf) + defer func() { + logMessageEncode(e.logger, encodeMsgBuf, msg, err) + }() + } + + if err = EncodeHeaders(e.headersBuf, msg.Headers); err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(writer, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err = encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err = io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err = hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(writer, binary.BigEndian, msgCRC) +} + +func logMessageEncode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, encodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Message to encode:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(msg); err != nil { + fmt.Fprintf(w, "Failed to get encoded message, %v\n", err) + } + + if encodeErr != nil { + fmt.Fprintf(w, "Encode error: %v\n", encodeErr) + return + } + + fmt.Fprintf(w, "Raw message:\n%s\n", hex.Dump(msgBuf.Bytes())) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +// EncodeHeaders writes the header values to the writer encoded in the event +// stream format. Returns an error if a header fails to encode. 
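+//
+// Each header is written as a 1-byte name length, the name bytes, and then
+// the value's own encoding (a type byte followed by any value bytes). For
+// example (illustrative), the header {":event-type", StringValue("Stats")}
+// becomes: 0x0b, the 11 name bytes, the string type byte, a 2-byte
+// big-endian length of 5, and the bytes of "Stats".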
+func EncodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 00000000000..5481ef30796 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. +type ChecksumError struct{} + +func (e ChecksumError) Error() string { + return "message checksum mismatch" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 00000000000..0a63340e41d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,81 @@ +package eventstreamapi + +import ( + "fmt" + "sync" +) + +// InputWriterCloseErrorCode is used to denote an error occurred +// while closing the event stream input writer. +const InputWriterCloseErrorCode = "EventStreamInputWriterCloseError" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} + +// OnceError wraps the behavior of recording an error +// once and signal on a channel when this has occurred. +// Signaling is done by closing of the channel. +// +// Type is safe for concurrent usage. +type OnceError struct { + mu sync.RWMutex + err error + ch chan struct{} +} + +// NewOnceError return a new OnceError +func NewOnceError() *OnceError { + return &OnceError{ + ch: make(chan struct{}, 1), + } +} + +// Err acquires a read-lock and returns an +// error if one has been set. +func (e *OnceError) Err() error { + e.mu.RLock() + err := e.err + e.mu.RUnlock() + + return err +} + +// SetError acquires a write-lock and will set +// the underlying error value if one has not been set. +func (e *OnceError) SetError(err error) { + if err == nil { + return + } + + e.mu.Lock() + if e.err == nil { + e.err = err + close(e.ch) + } + e.mu.Unlock() +} + +// ErrorSet returns a channel that will be used to signal +// that an error has been set. This channel will be closed +// when the error value has been set for OnceError. 
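+//
+// A hypothetical consumer sketch (once and done are illustrative):
+//
+//	select {
+//	case <-once.ErrorSet():
+//		return once.Err() // the first error recorded by SetError
+//	case <-done:
+//	}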
+func (e *OnceError) ErrorSet() <-chan struct{} { + return e.ch +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go new file mode 100644 index 00000000000..0e4aa42f3e4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/reader.go @@ -0,0 +1,173 @@ +package eventstreamapi + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Unmarshaler provides the interface for unmarshaling a EventStream +// message into a SDK type. +type Unmarshaler interface { + UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error +} + +// EventReader provides reading from the EventStream of an reader. +type EventReader struct { + decoder *eventstream.Decoder + + unmarshalerForEventType func(string) (Unmarshaler, error) + payloadUnmarshaler protocol.PayloadUnmarshaler + + payloadBuf []byte +} + +// NewEventReader returns a EventReader built from the reader and unmarshaler +// provided. Use ReadStream method to start reading from the EventStream. +func NewEventReader( + decoder *eventstream.Decoder, + payloadUnmarshaler protocol.PayloadUnmarshaler, + unmarshalerForEventType func(string) (Unmarshaler, error), +) *EventReader { + return &EventReader{ + decoder: decoder, + payloadUnmarshaler: payloadUnmarshaler, + unmarshalerForEventType: unmarshalerForEventType, + payloadBuf: make([]byte, 10*1024), + } +} + +// ReadEvent attempts to read a message from the EventStream and return the +// unmarshaled event value that the message is for. +// +// For EventStream API errors check if the returned error satisfies the +// awserr.Error interface to get the error's Code and Message components. +// +// EventUnmarshalers called with EventStream messages must take copies of the +// message's Payload. The payload will is reused between events read. +func (r *EventReader) ReadEvent() (event interface{}, err error) { + msg, err := r.decoder.Decode(r.payloadBuf) + if err != nil { + return nil, err + } + defer func() { + // Reclaim payload buffer for next message read. + r.payloadBuf = msg.Payload[0:0] + }() + + typ, err := GetHeaderString(msg, MessageTypeHeader) + if err != nil { + return nil, err + } + + switch typ { + case EventMessageType: + return r.unmarshalEventMessage(msg) + case ExceptionMessageType: + return nil, r.unmarshalEventException(msg) + case ErrorMessageType: + return nil, r.unmarshalErrorMessage(msg) + default: + return nil, &UnknownMessageTypeError{ + Type: typ, Message: msg.Clone(), + } + } +} + +// UnknownMessageTypeError provides an error when a message is received from +// the stream, but the reader is unable to determine what kind of message it is. 
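+//
+// Callers that want to skip such messages can detect the error type, e.g.
+// (illustrative sketch using the standard errors package):
+//
+//	var ute *UnknownMessageTypeError
+//	if errors.As(err, &ute) {
+//		log.Printf("ignoring message type %q", ute.Type)
+//	}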
+type UnknownMessageTypeError struct { + Type string + Message eventstream.Message +} + +func (e *UnknownMessageTypeError) Error() string { + return "unknown eventstream message type, " + e.Type +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. +func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go new file mode 100644 index 00000000000..e46b8acc200 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/shared.go @@ -0,0 +1,23 @@ +package eventstreamapi + +// EventStream headers with specific meaning to async API functionality. +const ( + ChunkSignatureHeader = `:chunk-signature` // chunk signature for message + DateHeader = `:date` // Date header for signature + + // Message header and values + MessageTypeHeader = `:message-type` // Identifies type of message. + EventMessageType = `event` + ErrorMessageType = `error` + ExceptionMessageType = `exception` + + // Message Events + EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats". 
+ + // Message Error + ErrorCodeHeader = `:error-code` + ErrorMessageHeader = `:error-message` + + // Message Exception + ExceptionTypeHeader = `:exception-type` +) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go new file mode 100644 index 00000000000..3a7ba5cd57a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/signer.go @@ -0,0 +1,123 @@ +package eventstreamapi + +import ( + "bytes" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +var timeNow = time.Now + +// StreamSigner defines an interface for the implementation of signing of event stream payloads +type StreamSigner interface { + GetSignature(headers, payload []byte, date time.Time) ([]byte, error) +} + +// SignEncoder envelopes event stream messages +// into an event stream message payload with included +// signature headers using the provided signer and encoder. +type SignEncoder struct { + signer StreamSigner + encoder Encoder + bufEncoder *BufferEncoder + + closeErr error + closed bool +} + +// NewSignEncoder returns a new SignEncoder using the provided stream signer and +// event stream encoder. +func NewSignEncoder(signer StreamSigner, encoder Encoder) *SignEncoder { + // TODO: Need to pass down logging + + return &SignEncoder{ + signer: signer, + encoder: encoder, + bufEncoder: NewBufferEncoder(), + } +} + +// Close encodes a final event stream signing envelope with an empty event stream +// payload. This final end-frame is used to mark the conclusion of the stream. +func (s *SignEncoder) Close() error { + if s.closed { + return s.closeErr + } + + if err := s.encode([]byte{}); err != nil { + if strings.Contains(err.Error(), "on closed pipe") { + return nil + } + + s.closeErr = err + s.closed = true + return s.closeErr + } + + return nil +} + +// Encode takes the provided message and add envelopes the message +// with the required signature. +func (s *SignEncoder) Encode(msg eventstream.Message) error { + payload, err := s.bufEncoder.Encode(msg) + if err != nil { + return err + } + + return s.encode(payload) +} + +func (s SignEncoder) encode(payload []byte) error { + date := timeNow() + + var msg eventstream.Message + msg.Headers.Set(DateHeader, eventstream.TimestampValue(date)) + msg.Payload = payload + + var headers bytes.Buffer + if err := eventstream.EncodeHeaders(&headers, msg.Headers); err != nil { + return err + } + + sig, err := s.signer.GetSignature(headers.Bytes(), msg.Payload, date) + if err != nil { + return err + } + + msg.Headers.Set(ChunkSignatureHeader, eventstream.BytesValue(sig)) + + return s.encoder.Encode(msg) +} + +// BufferEncoder is a utility that provides a buffered +// event stream encoder +type BufferEncoder struct { + encoder Encoder + buffer *bytes.Buffer +} + +// NewBufferEncoder returns a new BufferEncoder initialized +// with a 1024 byte buffer. +func NewBufferEncoder() *BufferEncoder { + buf := bytes.NewBuffer(make([]byte, 1024)) + return &BufferEncoder{ + encoder: eventstream.NewEncoder(buf), + buffer: buf, + } +} + +// Encode returns the encoded message as a byte slice. +// The returned byte slice will be modified on the next encode call +// and should not be held onto. 
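+// Callers that need to retain the encoded bytes must copy them first,
+// e.g. (illustrative): safe := append([]byte(nil), p...).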
+func (e *BufferEncoder) Encode(msg eventstream.Message) ([]byte, error) { + e.buffer.Reset() + + if err := e.encoder.Encode(msg); err != nil { + return nil, err + } + + return e.buffer.Bytes(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go new file mode 100644 index 00000000000..433bb1630a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/stream_writer.go @@ -0,0 +1,129 @@ +package eventstreamapi + +import ( + "fmt" + "io" + "sync" + + "github.com/aws/aws-sdk-go/aws" +) + +// StreamWriter provides concurrent safe writing to an event stream. +type StreamWriter struct { + eventWriter *EventWriter + stream chan eventWriteAsyncReport + + done chan struct{} + closeOnce sync.Once + err *OnceError + + streamCloser io.Closer +} + +// NewStreamWriter returns a StreamWriter for the event writer, and stream +// closer provided. +func NewStreamWriter(eventWriter *EventWriter, streamCloser io.Closer) *StreamWriter { + w := &StreamWriter{ + eventWriter: eventWriter, + streamCloser: streamCloser, + stream: make(chan eventWriteAsyncReport), + done: make(chan struct{}), + err: NewOnceError(), + } + go w.writeStream() + + return w +} + +// Close terminates the writers ability to write new events to the stream. Any +// future call to Send will fail with an error. +func (w *StreamWriter) Close() error { + w.closeOnce.Do(w.safeClose) + return w.Err() +} + +func (w *StreamWriter) safeClose() { + close(w.done) +} + +// ErrorSet returns a channel which will be closed +// if an error occurs. +func (w *StreamWriter) ErrorSet() <-chan struct{} { + return w.err.ErrorSet() +} + +// Err returns any error that occurred while attempting to write an event to the +// stream. +func (w *StreamWriter) Err() error { + return w.err.Err() +} + +// Send writes a single event to the stream returning an error if the write +// failed. +// +// Send may be called concurrently. Events will be written to the stream +// safely. 
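+//
+// A hypothetical concurrent-producer sketch (w, ctx, and the event values
+// are illustrative):
+//
+//	go func() { _ = w.Send(ctx, statsEvent) }()
+//	go func() { _ = w.Send(ctx, recordEvent) }()
+//	// both writes are serialized through the writeStream goroutine below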
+func (w *StreamWriter) Send(ctx aws.Context, event Marshaler) error { + if err := w.Err(); err != nil { + return err + } + + resultCh := make(chan error) + wrapped := eventWriteAsyncReport{ + Event: event, + Result: resultCh, + } + + select { + case w.stream <- wrapped: + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } + + select { + case err := <-resultCh: + return err + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return fmt.Errorf("stream closed, unable to send event") + } +} + +func (w *StreamWriter) writeStream() { + defer w.Close() + + for { + select { + case wrapper := <-w.stream: + err := w.eventWriter.WriteEvent(wrapper.Event) + wrapper.ReportResult(w.done, err) + if err != nil { + w.err.SetError(err) + return + } + + case <-w.done: + if err := w.streamCloser.Close(); err != nil { + w.err.SetError(err) + } + return + } + } +} + +type eventWriteAsyncReport struct { + Event Marshaler + Result chan<- error +} + +func (e eventWriteAsyncReport) ReportResult(cancel <-chan struct{}, err error) bool { + select { + case e.Result <- err: + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go new file mode 100644 index 00000000000..4bf2b27b2b2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +package eventstreamapi + +import "github.com/aws/aws-sdk-go/aws/request" + +// ApplyHTTPTransportFixes is a no-op for Go 1.18 and above. +func ApplyHTTPTransportFixes(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go new file mode 100644 index 00000000000..2ee2c36fd37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/transport_go1.17.go @@ -0,0 +1,19 @@ +//go:build !go1.18 +// +build !go1.18 + +package eventstreamapi + +import "github.com/aws/aws-sdk-go/aws/request" + +// ApplyHTTPTransportFixes applies fixes to the HTTP request for proper event +// stream functionality. Go 1.15 through 1.17 HTTP client could hang forever +// when an HTTP/2 connection failed with an non-200 status code and err. Using +// Expect 100-Continue, allows the HTTP client to gracefully handle the non-200 +// status code, and close the connection. +// +// This is a no-op for Go 1.18 and above. +func ApplyHTTPTransportFixes(r *request.Request) { + r.Handlers.Sign.PushBack(func(r *request.Request) { + r.HTTPRequest.Header.Set("Expect", "100-Continue") + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go new file mode 100644 index 00000000000..7d7a7935283 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/writer.go @@ -0,0 +1,63 @@ +package eventstreamapi + +import ( + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" +) + +// Marshaler provides a marshaling interface for event types to event stream +// messages. 
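+// Concrete event types (typically code-generated) implement MarshalEvent;
+// the EventWriter below pairs that with eventTypeFor to stamp each message
+// with its :event-type header before encoding.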
+type Marshaler interface { + MarshalEvent(protocol.PayloadMarshaler) (eventstream.Message, error) +} + +// Encoder is an stream encoder that will encode an event stream message for +// the transport. +type Encoder interface { + Encode(eventstream.Message) error +} + +// EventWriter provides a wrapper around the underlying event stream encoder +// for an io.WriteCloser. +type EventWriter struct { + encoder Encoder + payloadMarshaler protocol.PayloadMarshaler + eventTypeFor func(Marshaler) (string, error) +} + +// NewEventWriter returns a new event stream writer, that will write to the +// writer provided. Use the WriteEvent method to write an event to the stream. +func NewEventWriter(encoder Encoder, pm protocol.PayloadMarshaler, eventTypeFor func(Marshaler) (string, error), +) *EventWriter { + return &EventWriter{ + encoder: encoder, + payloadMarshaler: pm, + eventTypeFor: eventTypeFor, + } +} + +// WriteEvent writes an event to the stream. Returns an error if the event +// fails to marshal into a message, or writing to the underlying writer fails. +func (w *EventWriter) WriteEvent(event Marshaler) error { + msg, err := w.marshal(event) + if err != nil { + return err + } + + return w.encoder.Encode(msg) +} + +func (w *EventWriter) marshal(event Marshaler) (eventstream.Message, error) { + eventType, err := w.eventTypeFor(event) + if err != nil { + return eventstream.Message{}, err + } + + msg, err := event.MarshalEvent(w.payloadMarshaler) + if err != nil { + return eventstream.Message{}, err + } + + msg.Headers.Set(EventTypeHeader, eventstream.StringValue(eventType)) + return msg, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 00000000000..f6f8c5674ed --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,175 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
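+// For example (illustrative): after hs.Set(":date", v), calling
+// hs.Del(":date") removes that entry, and a subsequent Get(":date")
+// returns nil.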
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +// Clone returns a deep copy of the headers +func (hs Headers) Clone() Headers { + o := make(Headers, 0, len(hs)) + for _, h := range hs { + o.Set(h.Name, h.Value) + } + return o +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 00000000000..9f509d8f6dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,506 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
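+// The constant order below fixes the wire values: 0 encodes boolean true,
+// 1 boolean false, and so on, matching the type bytes read by
+// decodeHeaderValue and written by each Value's encode method.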
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +// MarshalJSON implements the json.Marshaler interface +func (v TimestampValue) MarshalJSON() ([]byte, error) { + return []byte(v.String()), nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
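+// UUID values are a fixed 16 bytes, so unlike bytes and string values no
+// length prefix is written; encodeFixedSlice emits only the type byte and
+// the raw bytes.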
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 00000000000..f7427da039e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,117 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := EncodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +// Clone returns a deep copy of the message. +func (m Message) Clone() Message { + var payload []byte + if m.Payload != nil { + payload = make([]byte, len(m.Payload)) + copy(payload, m.Payload) + } + + return Message{ + Headers: m.Headers.Clone(), + Payload: payload, + } +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 00000000000..1f1d27aea49 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,104 @@ +package protocol + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "net" + "strconv" + "strings" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. 
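+//
+// For example (illustrative): "dynamodb.us-east-1.amazonaws.com" and
+// "localhost:8000" pass validation, while an empty host, an out-of-range
+// port, or a label longer than 63 characters is rejected.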
+var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + + var hostname string + var port string + var err error + + if strings.Contains(host, ":") { + hostname, port, err = net.SplitHostPort(host) + + if err != nil { + paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host)) + } + + if !ValidPortNumber(port) { + paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port)) + } + } else { + hostname = host + } + + labels := strings.Split(hostname, ".") + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(hostname) == 0 { + paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1)) + } + + if len(hostname) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} + +// ValidPortNumber return if the port is valid RFC 3986 port +func ValidPortNumber(port string) bool { + i, err := strconv.Atoi(port) + if err != nil { + return false + } + + if i < 0 || i > 65535 { + return false + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 00000000000..915b0fcafd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. 
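+//
+// For example (illustrative): a prefix template "{Bucket}." with the label
+// Bucket=mybucket rewrites host "s3.us-east-1.amazonaws.com" into
+// "mybucket.s3.us-east-1.amazonaws.com".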
+func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 00000000000..53831dff984 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. +func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 00000000000..12e814ddf25 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,309 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. 
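+// It walks values reflectively and honors the type, payload, locationName,
+// and timestampFormat struct tags carried by the SDK's generated API shapes.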
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + if !value.IsValid() && tag.Get("type") != "structure" { + return nil + } + } + + buf.WriteByte('{') + defer buf.WriteString("}") + + if !value.IsValid() { + return nil + } + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + switch { + case math.IsNaN(f): + writeString(floatNaN, buf) + case math.IsInf(f, 1): + writeString(floatInf, buf) + case math.IsInf(f, -1): + writeString(floatNegInf, buf) + default: + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + } + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
+					enc := base64.NewEncoder(base64.StdEncoding, buf)
+					enc.Write(converted)
+					enc.Close()
+				}
+				buf.WriteByte('"')
+			}
+		case aws.JSONValue:
+			str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+			if err != nil {
+				return fmt.Errorf("unable to encode JSONValue, %v", err)
+			}
+			buf.WriteString(str)
+		default:
+			return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+		}
+	}
+	return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+	buf.WriteByte('"')
+	for i := 0; i < len(s); i++ {
+		if s[i] == '"' {
+			buf.WriteString(`\"`)
+		} else if s[i] == '\\' {
+			buf.WriteString(`\\`)
+		} else if s[i] == '\b' {
+			buf.WriteString(`\b`)
+		} else if s[i] == '\f' {
+			buf.WriteString(`\f`)
+		} else if s[i] == '\r' {
+			buf.WriteString(`\r`)
+		} else if s[i] == '\t' {
+			buf.WriteString(`\t`)
+		} else if s[i] == '\n' {
+			buf.WriteString(`\n`)
+		} else if s[i] < 32 {
+			buf.WriteString("\\u00")
+			buf.WriteByte(hex[s[i]>>4])
+			buf.WriteByte(hex[s[i]&0xF])
+		} else {
+			buf.WriteByte(s[i])
+		}
+	}
+	buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 00000000000..f9334879b80
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,317 @@
+package jsonutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math"
+	"math/big"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+var millisecondsFloat = new(big.Float).SetInt64(1e3)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed in
+// type. The value to unmarshal the JSON document into must be a pointer to the
+// type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+	var errBuf bytes.Buffer
+	body := io.TeeReader(stream, &errBuf)
+
+	err := json.NewDecoder(body).Decode(v)
+	if err != nil {
+		msg := "failed decoding error message"
+		if err == io.EOF {
+			msg = "error message missing"
+			err = nil
+		}
+		return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+	}
+
+	return nil
+}
+
+// UnmarshalJSON reads a stream and unmarshals the results into object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+	var out interface{}
+
+	decoder := json.NewDecoder(stream)
+	decoder.UseNumber()
+	err := decoder.Decode(&out)
+	if err == io.EOF {
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the
+// object v. Ignores casing for structure members.
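+//
+// As an illustrative sketch only (the shape is hypothetical): unmarshaling
+// the document `{"STATUS":"ok"}` into
+//
+//	type Output struct {
+//		Status *string `locationName:"Status" type:"string"`
+//	}
+//
+// still sets Status, because member lookup falls back to a case-insensitive
+// (strings.EqualFold) match when the exact key is absent.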
+func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error { + var out interface{} + + decoder := json.NewDecoder(stream) + decoder.UseNumber() + err := decoder.Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshaler{ + caseInsensitive: true, + }.unmarshalAny(reflect.ValueOf(v), out, "") +} + +type unmarshaler struct { + caseInsensitive bool +} + +func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return u.unmarshalStruct(value, data, tag) + case "list": + return u.unmarshalList(value, data, tag) + case "map": + return u.unmarshalMap(value, data, tag) + default: + return u.unmarshalScalar(value, data, tag) + } +} + +func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return u.unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if u.caseInsensitive { + if _, ok := mapData[name]; !ok { + // Fallback to uncased name search if the exact name didn't match. 
+ for kn, v := range mapData { + if strings.EqualFold(kn, name) { + mapData[name] = v + } + } + } + } + + member := value.FieldByIndex(field.Index) + err := u.unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := u.unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + u.unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + case *float64: + // These are regular strings when parsed by encoding/json's unmarshaler. 
+ switch { + case strings.EqualFold(d, floatNaN): + value.Set(reflect.ValueOf(aws.Float64(math.NaN()))) + case strings.EqualFold(d, floatInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(1)))) + case strings.EqualFold(d, floatNegInf): + value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1)))) + default: + return fmt.Errorf("unknown JSON number value: %s", d) + } + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case json.Number: + switch value.Interface().(type) { + case *int64: + // Retain the old behavior where we would just truncate the float64 + // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt + f, err := d.Float64() + if err != nil { + return err + } + di := int64(f) + value.Set(reflect.ValueOf(&di)) + case *float64: + f, err := d.Float64() + if err != nil { + return err + } + value.Set(reflect.ValueOf(&f)) + case *time.Time: + float, ok := new(big.Float).SetString(d.String()) + if !ok { + return fmt.Errorf("unsupported float time representation: %v", d.String()) + } + float = float.Mul(float, millisecondsFloat) + ms, _ := float.Int64() + t := time.Unix(0, ms*1e6).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 00000000000..d9aa271148d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,87 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a JSON payload for a JSON RPC request. 
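+//
+// As an illustrative sketch, for a client whose metadata carries
+// TargetPrefix "DynamoDB_20120810" and JSONVersion "1.0", an operation
+// named "ListTables" with no parameters is serialized with body `{}` and
+// headers:
+//
+//	X-Amz-Target: DynamoDB_20120810.ListTables
+//	Content-Type: application/x-amz-json-1.0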
+func Build(req *request.Request) {
+	var buf []byte
+	var err error
+	if req.ParamsFilled() {
+		buf, err = jsonutil.BuildJSON(req.Params)
+		if err != nil {
+			req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
+			return
+		}
+	} else {
+		buf = emptyJSON
+	}
+
+	// Always serialize the body, don't suppress it.
+	req.SetBufferBody(buf)
+
+	if req.ClientInfo.TargetPrefix != "" {
+		target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
+		req.HTTPRequest.Header.Add("X-Amz-Target", target)
+	}
+
+	// Only set the content type if one is not already specified and a
+	// JSONVersion is specified.
+	if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
+		jsonVersion := req.ClientInfo.JSONVersion
+		req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
+	}
+}
+
+// Unmarshal unmarshals a response for a JSON RPC service.
+func Unmarshal(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+	if req.DataFilled() {
+		err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
+		if err != nil {
+			req.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
+				req.HTTPResponse.StatusCode,
+				req.RequestID,
+			)
+		}
+	}
+	return
+}
+
+// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
+func UnmarshalMeta(req *request.Request) {
+	rest.UnmarshalMeta(req)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
new file mode 100644
index 00000000000..9c1ccde54ae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
@@ -0,0 +1,160 @@
+package jsonrpc
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
+)
+
+const (
+	awsQueryError = "x-amzn-query-error"
+	// A valid header example - "x-amzn-query-error": "<query error code>;<type>"
+	awsQueryErrorPartsCount = 2
+)
+
+// UnmarshalTypedError provides unmarshaling of API response errors
+// for both typed and untyped errors.
+type UnmarshalTypedError struct {
+	exceptions      map[string]func(protocol.ResponseMetadata) error
+	queryExceptions map[string]func(protocol.ResponseMetadata, string) error
+}
+
+// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
+// set of exception names to the error unmarshalers.
+func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
+	return &UnmarshalTypedError{
+		exceptions:      exceptions,
+		queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{},
+	}
+}
+
+// NewUnmarshalTypedErrorWithOptions works similar to NewUnmarshalTypedError, applying options to the UnmarshalTypedError
+// before returning it.
+func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
+	unmarshaledError := NewUnmarshalTypedError(exceptions)
+	for _, fn := range optFns {
+		fn(unmarshaledError)
+	}
+	return unmarshaledError
+}
+
+// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
+// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found. +// See also [awsQueryCompatible trait] +// +// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait +func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) { + return func(typedError *UnmarshalTypedError) { + typedError.queryExceptions = queryExceptions + } +} + +// UnmarshalError attempts to unmarshal the HTTP response error as a known +// error type. If unable to unmarshal the error type, the generic SDK error +// type will be used. +func (u *UnmarshalTypedError) UnmarshalError( + resp *http.Response, + respMeta protocol.ResponseMetadata, +) (error, error) { + + var buf bytes.Buffer + var jsonErr jsonErrorResponse + teeReader := io.TeeReader(resp.Body, &buf) + err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader) + if err != nil { + return nil, err + } + body := ioutil.NopCloser(&buf) + + // Code may be separated by hash(#), with the last element being the code + // used by the SDK. + codeParts := strings.SplitN(jsonErr.Code, "#", 2) + code := codeParts[len(codeParts)-1] + msg := jsonErr.Message + + queryCodeParts := queryCodeParts(resp, u) + + if fn, ok := u.exceptions[code]; ok { + // If query-compatible exceptions are found and query-error-header is found, + // then use associated constructor to get exception with query error code. + // + // If exception code is known, use associated constructor to get a value + // for the exception that the JSON body can be unmarshaled into. + var v error + queryErrFn, queryExceptionsFound := u.queryExceptions[code] + if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound { + v = queryErrFn(respMeta, queryCodeParts[0]) + } else { + v = fn(respMeta) + } + err := jsonutil.UnmarshalJSONCaseInsensitive(v, body) + if err != nil { + return nil, err + } + return v, nil + } + + if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 { + code = queryCodeParts[0] + } + + // fallback to unmodeled generic exceptions + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil +} + +// A valid header example - "x-amzn-query-error": ";" +func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string { + queryCodeHeader := resp.Header.Get(awsQueryError) + var queryCodeParts []string + if queryCodeHeader != "" && len(u.queryExceptions) > 0 { + queryCodeParts = strings.Split(queryCodeHeader, ";") + } + return queryCodeParts +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.jsonrpc.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. 
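+//
+// As an illustrative sketch, an error document such as
+//
+//	{"__type":"com.amazonaws.dynamodb.v20120810#ResourceNotFoundException","message":"Requested resource not found"}
+//
+// is surfaced as an awserr.RequestFailure with error code
+// "ResourceNotFoundException" (the portion after the '#') and the given
+// message.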
+func UnmarshalError(req *request.Request) {
+	defer req.HTTPResponse.Body.Close()
+
+	var jsonErr jsonErrorResponse
+	err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
+	if err != nil {
+		req.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			req.HTTPResponse.StatusCode,
+			req.RequestID,
+		)
+		return
+	}
+
+	codes := strings.SplitN(jsonErr.Code, "#", 2)
+	req.Error = awserr.NewRequestFailure(
+		awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
+		req.HTTPResponse.StatusCode,
+		req.RequestID,
+	)
+}
+
+type jsonErrorResponse struct {
+	Code    string `json:"__type"`
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
new file mode 100644
index 00000000000..776d1101843
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
@@ -0,0 +1,76 @@
+package protocol
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// EscapeMode is the mode that should be used for escaping a value.
+type EscapeMode uint
+
+// The modes for escaping a value before it is marshaled, and unmarshaled.
+const (
+	NoEscape EscapeMode = iota
+	Base64Escape
+	QuotedEscape
+)
+
+// EncodeJSONValue marshals the value into a JSON string, and optionally base64
+// encodes the string before returning it.
+//
+// Will panic if the escape mode is unknown.
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 00000000000..0ea0647a57d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if
+// unmarshaling fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header:     http.Header{},
+			Body:       ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling
+// fails.
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
+	req := request.New(
+		aws.Config{},
+		metadata.ClientInfo{},
+		request.Handlers{},
+		nil,
+		&request.Operation{HTTPMethod: "PUT"},
+		v,
+		nil,
+	)
+
+	h.Marshalers.Run(req)
+
+	if req.Error != nil {
+		return req.Error
+	}
+
+	io.Copy(w, req.GetBody())
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
new file mode 100644
index 00000000000..9d521dcb950
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
@@ -0,0 +1,49 @@
+package protocol
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequireHTTPMinProtocol request handler is used to enforce that
+// the target endpoint supports the given major and minor HTTP protocol version.
+type RequireHTTPMinProtocol struct {
+	Major, Minor int
+}
+
+// Handler will mark the request.Request with an error if the
+// target endpoint did not connect with the required HTTP protocol
+// major and minor version.
+func (p RequireHTTPMinProtocol) Handler(r *request.Request) {
+	if r.Error != nil || r.HTTPResponse == nil {
+		return
+	}
+
+	if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") {
+		r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+	}
+
+	if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor {
+		r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
+	}
+}
+
+// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint
+// did not match the required HTTP major and minor protocol version.
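+//
+// As an illustrative sketch (the wiring below is an assumption, not code
+// from this file), a client that requires at least HTTP/2 could register
+//
+//	req.Handlers.Unmarshal.PushFront(protocol.RequireHTTPMinProtocol{Major: 2}.Handler)
+//
+// so that responses negotiated below HTTP/2.0 fail with this error code.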
+const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError" + +func newMinHTTPProtoError(major, minor int, r *request.Request) error { + return awserr.NewRequestFailure( + awserr.New("MinimumHTTPProtocolError", + fmt.Sprintf( + "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s", + major, minor, r.HTTPResponse.Proto, + ), + nil, + ), + r.HTTPResponse.StatusCode, r.RequestID, + ) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go new file mode 100644 index 00000000000..d40346a7790 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -0,0 +1,36 @@ +// Package query provides serialization of AWS query requests, and responses. +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go + +import ( + "net/url" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" +) + +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + +// Build builds a request for an AWS Query service. +func Build(r *request.Request) { + body := url.Values{ + "Action": {r.Operation.Name}, + "Version": {r.ClientInfo.APIVersion}, + } + if err := queryutil.Parse(body, r.Params, false); err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) + return + } + + if !r.IsPresigned() { + r.HTTPRequest.Method = "POST" + r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + r.SetBufferBody([]byte(body.Encode())) + } else { // This is a pre-signed request + r.HTTPRequest.Method = "GET" + r.HTTPRequest.URL.RawQuery = body.Encode() + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go new file mode 100644 index 00000000000..058334053c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -0,0 +1,276 @@ +package queryutil + +import ( + "encoding/base64" + "fmt" + "math" + "net/url" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +// Parse parses an object i and fills a url.Values object. The isEC2 flag +// indicates if this is the EC2 Query sub-protocol. 
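+//
+// As an illustrative sketch (hypothetical operation and members), a non-EC2
+// Query input with a two-element string list member "Names" would serialize,
+// together with the Action and Version values added by query.Build, as
+//
+//	Action=SomeOperation&Version=2012-01-01&Names.member.1=a&Names.member.2=b
+//
+// whereas the EC2 sub-protocol flattens the list to Names.1 and Names.2.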
+func Parse(body url.Values, i interface{}, isEC2 bool) error { + q := queryParser{isEC2: isEC2} + return q.parseValue(body, reflect.ValueOf(i), "", "") +} + +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +type queryParser struct { + isEC2 bool +} + +func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + value = elemOf(value) + + // no need to handle zero values + if !value.IsValid() { + return nil + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + return q.parseStruct(v, value, prefix) + case "list": + return q.parseList(v, value, prefix, tag) + case "map": + return q.parseMap(v, value, prefix, tag) + default: + return q.parseScalar(v, value, prefix, tag) + } +} + +func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error { + if !value.IsValid() { + return nil + } + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } + + var name string + if q.isEC2 { + name = field.Tag.Get("queryName") + } + if name == "" { + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + if name != "" && q.isEC2 { + name = strings.ToUpper(name[0:1]) + name[1:] + } + } + if name == "" { + name = field.Name + } + + if prefix != "" { + name = prefix + "." + name + } + + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } + } + + for i := 0; i < value.Len(); i++ { + slicePrefix := prefix + if slicePrefix == "" { + slicePrefix = strconv.Itoa(i + 1) + } else { + slicePrefix = slicePrefix + "." + strconv.Itoa(i+1) + } + if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil { + return err + } + } + return nil +} + +func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error { + // If it's empty, generate an empty value + if !value.IsNil() && value.Len() == 0 { + v.Set(prefix, "") + return nil + } + + // check for unflattened list member + if !q.isEC2 && tag.Get("flattened") == "" { + prefix += ".entry" + } + + // sort keys for improved serialization consistency. + // this is not strictly necessary for protocol support. 
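+	// For example, a map member {"b": "2", "a": "1"} under prefix "Attr"
+	// (non-EC2, unflattened, so ".entry" was appended above) serializes
+	// deterministically as:
+	//	Attr.entry.1.key=a&Attr.entry.1.value=1&Attr.entry.2.key=b&Attr.entry.2.value=2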
+ mapKeyValues := value.MapKeys() + mapKeys := map[string]reflect.Value{} + mapKeyNames := make([]string, len(mapKeyValues)) + for i, mapKey := range mapKeyValues { + name := mapKey.String() + mapKeys[name] = mapKey + mapKeyNames[i] = name + } + sort.Strings(mapKeyNames) + + for i, mapKeyName := range mapKeyNames { + mapKey := mapKeys[mapKeyName] + mapValue := value.MapIndex(mapKey) + + kname := tag.Get("locationNameKey") + if kname == "" { + kname = "key" + } + vname := tag.Get("locationNameValue") + if vname == "" { + vname = "value" + } + + // serialize key + var keyName string + if prefix == "" { + keyName = strconv.Itoa(i+1) + "." + kname + } else { + keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname + } + + if err := q.parseValue(v, mapKey, keyName, ""); err != nil { + return err + } + + // serialize value + var valueName string + if prefix == "" { + valueName = strconv.Itoa(i+1) + "." + vname + } else { + valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname + } + + if err := q.parseValue(v, mapValue, valueName, ""); err != nil { + return err + } + } + + return nil +} + +func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error { + switch value := r.Interface().(type) { + case string: + v.Set(name, value) + case []byte: + if !r.IsNil() { + v.Set(name, base64.StdEncoding.EncodeToString(value)) + } + case bool: + v.Set(name, strconv.FormatBool(value)) + case int64: + v.Set(name, strconv.FormatInt(value, 10)) + case int: + v.Set(name, strconv.Itoa(value)) + case float64: + var str string + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + v.Set(name, str) + case float32: + asFloat64 := float64(value) + var str string + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + v.Set(name, str) + case time.Time: + const ISO8601UTC = "2006-01-02T15:04:05Z" + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 00000000000..9231e95d160 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,39 @@ +package query + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a 
response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	if r.DataFilled() {
+		decoder := xml.NewDecoder(r.HTTPResponse.Body)
+		err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+		if err != nil {
+			r.Error = awserr.NewRequestFailure(
+				awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
+				r.HTTPResponse.StatusCode,
+				r.RequestID,
+			)
+			return
+		}
+	}
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+	r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 00000000000..2c0cbba909b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,70 @@
+package query
+
+import (
+	"encoding/xml"
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors.
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+type xmlErrorResponse struct {
+	Code      string `xml:"Error>Code"`
+	Message   string `xml:"Error>Message"`
+	RequestID string `xml:"RequestId"`
+}
+
+type xmlResponseError struct {
+	xmlErrorResponse
+}
+
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+	const svcUnavailableTagName = "ServiceUnavailableException"
+	const errorResponseTagName = "ErrorResponse"
+
+	switch start.Name.Local {
+	case svcUnavailableTagName:
+		e.Code = svcUnavailableTagName
+		e.Message = "service is unavailable"
+		return d.Skip()
+
+	case errorResponseTagName:
+		return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+	default:
+		return fmt.Errorf("unknown error response tag, %v", start)
+	}
+}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+
+	var respErr xmlResponseError
+	err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
+	if err != nil {
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				"failed to unmarshal error message", err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	reqID := respErr.RequestID
+	if len(reqID) == 0 {
+		reqID = r.RequestID
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil),
+		r.HTTPResponse.StatusCode,
+		reqID,
+	)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 00000000000..ecc521f88f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,353 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "math" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +var byteSliceType = reflect.TypeOf([]byte{}) + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as string but + // required to be base64 encoded in request. 
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New(request.ErrCodeSerialization, + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + + } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) + + header.Add(prefix+keyStr, str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else 
if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + switch value := v.Interface().(type) { + case string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + value = base64.StdEncoding.EncodeToString([]byte(value)) + } + str = value + case []*string: + if tag.Get("location") != "header" || tag.Get("enum") == "" { + return "", fmt.Errorf("%T is only supported with location header and enum shapes", value) + } + if len(value) == 0 { + return "", errValueNotSet + } + + buff := &bytes.Buffer{} + for i, sv := range value { + if sv == nil || len(*sv) == 0 { + continue + } + if i != 0 { + buff.WriteRune(',') + } + item := *sv + if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 { + item = strconv.Quote(item) + } + buff.WriteString(item) + } + str = string(buff.Bytes()) + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + switch { + case math.IsNaN(value): + str = floatNaN + case math.IsInf(value, 1): + str = floatInf + case math.IsInf(value, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(value, 'f', -1, 64) + } + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } + default: + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 00000000000..b54c99edae4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,54 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. 
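+//
+// As an illustrative sketch (hypothetical shape): given
+//
+//	type Output struct {
+//		_    struct{}   `payload:"Body"`
+//		Body *BodyShape `type:"structure"`
+//	}
+//
+// PayloadMember(&out) returns out.Body's value; it returns nil when the
+// payload member's type tag is not "structure".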
+func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +const nopayloadPayloadType = "nopayload" + +// PayloadType returns the type of a payload field member of i if there is one, +// or "". +func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + + if field, ok := v.Type().FieldByName("_"); ok { + if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" { + return nopayloadPayloadType + } + + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 00000000000..79fcf1699b7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,276 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + awsStrings "github.com/aws/aws-sdk-go/internal/strings" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + if err := unmarshalBody(r, v); err != nil { + r.Error = err + } + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil { + r.Error = err + } + } +} + +// UnmarshalResponse attempts to unmarshal the REST response headers to +// the data type passed in. The type must be a pointer. An error is returned +// with any error unmarshaling the response into the target datatype. 
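+//
+// As an illustrative sketch (hypothetical shape): with
+//
+//	type Output struct {
+//		RequestID *string `location:"header" locationName:"x-amzn-requestid" type:"string"`
+//	}
+//
+// UnmarshalResponse(resp, &out, false) copies the x-amzn-requestid response
+// header value into out.RequestID.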
+func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error { + v := reflect.Indirect(reflect.ValueOf(data)) + return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps) +} + +func unmarshalBody(r *request.Request, v reflect.Value) error { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + payload.Set(reflect.ValueOf(b)) + + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + str := string(b) + payload.Set(reflect.ValueOf(&str)) + + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to read response body", err) + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + return awserr.New(request.ErrCodeSerialization, + "failed to decode REST response", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } + } + + return nil +} + +func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, resp.StatusCode) + + case "header": + err := unmarshalHeader(m, resp.Header.Get(name), field.Tag) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps) + if err != nil { + return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) + } + } + } + } + + return nil +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error { + if len(headers) == 0 { + return nil + } + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + if awsStrings.HasPrefixFold(k, prefix) { + if normalize == true { + k = strings.ToLower(k) + } else { + k = http.CanonicalHeaderKey(k) + } + out[k[len(prefix):]] = &v[0] + } + } + if len(out) != 0 { + r.Set(reflect.ValueOf(out)) + } + + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag 
reflect.StructTag) error { + switch tag.Get("type") { + case "jsonvalue": + if len(header) == 0 { + return nil + } + case "blob": + if len(header) == 0 { + return nil + } + default: + if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + } + + switch v.Interface().(type) { + case *string: + if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" { + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return fmt.Errorf("failed to decode JSONValue, %v", err) + } + header = string(b) + } + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + var f float64 + switch { + case strings.EqualFold(header, floatNaN): + f = math.NaN() + case strings.EqualFold(header, floatInf): + f = math.Inf(1) + case strings.EqualFold(header, floatNegInf): + f = math.Inf(-1) + default: + var err error + f, err = strconv.ParseFloat(header, 64) + if err != nil { + return err + } + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go new file mode 100644 index 00000000000..2e0e205af37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -0,0 +1,59 @@ +// Package restjson provides RESTful JSON serialization of AWS +// requests and responses. +package restjson + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go + +import ( + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +// BuildHandler is a named request handler for building restjson protocol +// requests +var BuildHandler = request.NamedHandler{ + Name: "awssdk.restjson.Build", + Fn: Build, +} + +// UnmarshalHandler is a named request handler for unmarshaling restjson +// protocol requests +var UnmarshalHandler = request.NamedHandler{ + Name: "awssdk.restjson.Unmarshal", + Fn: Unmarshal, +} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restjson +// protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalMeta", + Fn: UnmarshalMeta, +} + +// Build builds a request for the REST JSON protocol. 
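The build below is tag-driven: rest.Build renders any member carrying a location tag onto the URI, query string, or headers, and jsonrpc.Build then encodes whatever remains into the JSON body (when the payload type is a structure or unspecified). A minimal sketch of a hypothetical input shape, using the tag conventions visible in this file rather than any real SDK model:

```go
// PutWidgetInput is a hypothetical shape, shown only to illustrate how
// location tags steer the two REST JSON build phases.
type PutWidgetInput struct {
	_ struct{} `type:"structure"`

	// Bound into the request path by rest.Build.
	WidgetId *string `location:"uri" locationName:"WidgetId" type:"string"`

	// Bound as a query string parameter by rest.Build.
	DryRun *bool `location:"querystring" locationName:"dryRun" type:"boolean"`

	// Bound as an HTTP header by rest.Build.
	ClientToken *string `location:"header" locationName:"x-client-token" type:"string"`

	// No location tag, so jsonrpc.Build serializes it into the JSON body.
	Description *string `locationName:"description" type:"string"`
}
```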
+func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 { + r.HTTPRequest.Header.Set("Content-Type", "application/json") + } + jsonrpc.Build(r) + } +} + +// Unmarshal unmarshals a response body for the REST JSON protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + jsonrpc.Unmarshal(r) + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST JSON protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go new file mode 100644 index 00000000000..5366a646d9c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go @@ -0,0 +1,157 @@ +package restjson + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + errorTypeHeader = "X-Amzn-Errortype" + errorMessageHeader = "X-Amzn-Errormessage" +) + +// UnmarshalTypedError provides unmarshaling of API response errors +// for both typed and untyped errors. +type UnmarshalTypedError struct { + exceptions map[string]func(protocol.ResponseMetadata) error +} + +// NewUnmarshalTypedError returns an UnmarshalTypedError initialized with the +// given map of exception names to error unmarshalers. +func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError { + return &UnmarshalTypedError{ + exceptions: exceptions, + } +} + +// UnmarshalError attempts to unmarshal the HTTP response error as a known +// error type. If unable to unmarshal the error type, the generic SDK error +// type will be used. +func (u *UnmarshalTypedError) UnmarshalError( + resp *http.Response, + respMeta protocol.ResponseMetadata, +) (error, error) { + code, msg, err := unmarshalErrorInfo(resp) + if err != nil { + return nil, err + } + + fn, ok := u.exceptions[code] + if !ok { + return awserr.NewRequestFailure( + awserr.New(code, msg, nil), + respMeta.StatusCode, + respMeta.RequestID, + ), nil + } + + v := fn(respMeta) + if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil { + return nil, err + } + + if err := rest.UnmarshalResponse(resp, v, true); err != nil { + return nil, err + } + + return v, nil +} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restjson +// protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{ + Name: "awssdk.restjson.UnmarshalError", + Fn: UnmarshalError, +} + +// UnmarshalError unmarshals a response error for the REST JSON protocol.
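The unmarshaling below leans on a convention worth spelling out: the error code and message are read from the X-Amzn-Errortype and X-Amzn-Errormessage headers first, and the JSON body is only consulted when the headers are incomplete. A rough in-package sketch (the namespaced header value is invented for illustration; unmarshalErrorInfo and sanitizeCode are defined later in this file):

```go
resp := &http.Response{
	StatusCode: 400,
	Header: http.Header{
		"X-Amzn-Errortype":    {"aws.protocoltests.restjson#FooError:http://internal.example/"},
		"X-Amzn-Errormessage": {"input failed validation"},
	},
	Body: ioutil.NopCloser(strings.NewReader("{}")),
}

// Both headers are present, so the body is never decoded, and sanitizeCode
// strips the namespace prefix and the colon-delimited suffix from the code.
code, msg, err := unmarshalErrorInfo(resp)
fmt.Println(code, msg, err) // FooError input failed validation <nil>
```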
+func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + code, msg, err := unmarshalErrorInfo(r.HTTPResponse) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + r.Error = awserr.NewRequestFailure( + awserr.New(code, msg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) +} + +type jsonErrorResponse struct { + Type string `json:"__type"` + Code string `json:"code"` + Message string `json:"message"` +} + +func (j *jsonErrorResponse) SanitizedCode() string { + code := j.Code + if len(j.Type) > 0 { + code = j.Type + } + return sanitizeCode(code) +} + +// Remove superfluous components from a restJson error code. +// - If a : character is present, then take only the contents before the +// first : character in the value. +// - If a # character is present, then take only the contents after the first +// # character in the value. +// +// All of the following error values resolve to FooError: +// - FooError +// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +// - aws.protocoltests.restjson#FooError +// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/ +func sanitizeCode(code string) string { + noColon := strings.SplitN(code, ":", 2)[0] + hashSplit := strings.SplitN(noColon, "#", 2) + return hashSplit[len(hashSplit)-1] +} + +// attempt to garner error details from the response, preferring header values +// when present +func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) { + code = sanitizeCode(resp.Header.Get(errorTypeHeader)) + msg = resp.Header.Get(errorMessageHeader) + if len(code) > 0 && len(msg) > 0 { + return + } + + // a modeled error will have to be re-deserialized later, so the body must + // be preserved + var buf bytes.Buffer + tee := io.TeeReader(resp.Body, &buf) + defer func() { resp.Body = ioutil.NopCloser(&buf) }() + + var jsonErr jsonErrorResponse + if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF { + err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes()) + return + } + + if len(code) == 0 { + code = jsonErr.SanitizedCode() + } + if len(msg) == 0 { + msg = jsonErr.Message + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 00000000000..b1ae3648719 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,79 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. 
+package restxml + +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + 0, + r.RequestID, + ) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. +func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 00000000000..d9a4e764932 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,134 @@ +package protocol + +import ( + "bytes" + "fmt" + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/internal/sdkmath" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +// Output time is intended not to contain decimals +const ( + // RFC 7231#section-7.1.1.1 timestamp format.
e.g. Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT" + rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT" + + // This format is used for output time without sub-second precision + RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" + + // RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z" + iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999" + + // This format is used for output time with fractional second precision up to milliseconds + ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z" +) + +// IsKnownTimestampFormat returns if the timestamp format name +// is known to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC().Truncate(time.Millisecond) + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822OutputTimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601OutputTimeFormat) + case UnixTimeFormatName: + ms := t.UnixNano() / int64(time.Millisecond) + return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the parsed time if successful, and an error otherwise. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: // Smithy HTTPDate format + return tryParse(value, + RFC822TimeFormat, + rfc822TimeFormatSingleDigitDay, + rfc822TimeFormatSingleDigitDayTwoDigitYear, + time.RFC850, + time.ANSIC, + ) + case ISO8601TimeFormatName: // Smithy DateTime format + return tryParse(value, + ISO8601TimeFormat, + iso8601TimeFormatNoZ, + time.RFC3339Nano, + time.RFC3339, + ) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return time.Time{}, err + } + _, dec := math.Modf(v) + dec = sdkmath.Round(dec*1e3) / 1e3 // Rounds 0.1229999 to 0.123 + return time.Unix(int64(v), int64(dec*(1e9))), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} + +func tryParse(v string, formats ...string) (time.Time, error) { + var errs parseErrors + for _, f := range formats { + t, err := time.Parse(f, v) + if err != nil { + errs = append(errs, parseError{ + Format: f, + Err: err, + }) + continue + } + return t, nil + } + + return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs) +} + +type parseErrors []parseError + +func (es parseErrors) Error() string { + var s bytes.Buffer + for _, e := range es { + fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err) + } + + return "parse errors:" + s.String() +} + +type parseError struct { + Format string + Err error +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 00000000000..f614ef898be --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,27 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to
empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and close it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} + +// ResponseMetadata provides the SDK response metadata attributes. +type ResponseMetadata struct { + StatusCode int + RequestID string +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go new file mode 100644 index 00000000000..cc857f136c5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go @@ -0,0 +1,65 @@ +package protocol + +import ( + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalErrorHandler provides unmarshaling of API response errors for +// both typed and untyped errors. +type UnmarshalErrorHandler struct { + unmarshaler ErrorUnmarshaler +} + +// ErrorUnmarshaler is an abstract interface for concrete implementations to +// unmarshal protocol specific response errors. +type ErrorUnmarshaler interface { + UnmarshalError(*http.Response, ResponseMetadata) (error, error) +} + +// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler +// initialized with the given ErrorUnmarshaler. +func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler { + return &UnmarshalErrorHandler{ + unmarshaler: unmarshaler, + } +} + +// UnmarshalErrorHandlerName is the name of the named handler. +const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError" + +// NamedHandler returns a NamedHandler for the unmarshaler using the set of +// errors the unmarshaler was initialized for. +func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler { + return request.NamedHandler{ + Name: UnmarshalErrorHandlerName, + Fn: u.UnmarshalError, + } +} + +// UnmarshalError will attempt to unmarshal the API response's error message +// into either a generic SDK error type, or a typed error corresponding to the +// error's exception name. +func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + respMeta := ResponseMetadata{ + StatusCode: r.HTTPResponse.StatusCode, + RequestID: r.RequestID, + } + + v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal response error", err), + respMeta.StatusCode, + respMeta.RequestID, + ) + return + } + + r.Error = v +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go new file mode 100644 index 00000000000..58c12bd8ccb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -0,0 +1,345 @@ +// Package xmlutil provides XML serialization of AWS requests and responses.
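Before the implementation, a quick sketch of the package's tag conventions may help. Assuming a hand-written shape (not a real SDK model), BuildXML defined below would serialize it roughly like this:

```go
// Greeting is a hypothetical shape used only for illustration.
type Greeting struct {
	_    struct{} `type:"structure" locationName:"Greeting"`
	Name *string  `locationName:"Name" type:"string"`
}

var buf bytes.Buffer
name := "world"
if err := BuildXML(&Greeting{Name: &name}, xml.NewEncoder(&buf)); err != nil {
	// handle the serialization error
}
// buf.String() is expected to be "<Greeting><Name>world</Name></Greeting>"
```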
+package xmlutil + +import ( + "encoding/base64" + "encoding/xml" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + floatNaN = "NaN" + floatInf = "Infinity" + floatNegInf = "-Infinity" +) + +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails. +func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { + b := xmlBuilder{encoder: e, namespaces: map[string]string{}} + root := NewXMLElement(xml.Name{}) + if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { + return err + } + for _, c := range root.Children { + for _, v := range c { + return StructToXML(e, v, sorted) + } + } + return nil +} + +// elemOf returns the reflection element of a value, dereferencing it if it is a pointer. +func elemOf(value reflect.Value) reflect.Value { + for value.Kind() == reflect.Ptr { + value = value.Elem() + } + return value +} + +// An xmlBuilder serializes values from Go code to XML +type xmlBuilder struct { + encoder *xml.Encoder + namespaces map[string]string +} + +// buildValue is a generic XMLNode builder for any type, building the value according +// to its specific type: struct, list, map, or scalar. +// +// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If +// type is not provided, reflection will be used to determine the value's type. +func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + value = elemOf(value) + if !value.IsValid() { // no need to handle zero values + return nil + } else if tag.Get("location") != "" { // don't handle non-body location values + return nil + } + + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + t := tag.Get("type") + if t == "" { + switch value.Kind() { + case reflect.Struct: + t = "structure" + case reflect.Slice: + t = "list" + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := value.Type().FieldByName("_"); ok { + tag = tag + reflect.StructTag(" ") + field.Tag + } + return b.buildStruct(value, current, tag) + case "list": + return b.buildList(value, current, tag) + case "map": + return b.buildMap(value, current, tag) + default: + return b.buildScalar(value, current, tag) + } +} + +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested +// types are converted to XMLNodes also.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + child := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + + // there is an xmlNamespace associated with this struct + if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" { + ns := xml.Attr{ + Name: xml.Name{Local: "xmlns"}, + Value: uri, + } + if prefix != "" { + b.namespaces[prefix] = uri // register the namespace + ns.Name.Local = "xmlns:" + prefix + } + + child.Attr = append(child.Attr, ns) + } + + var payloadFields, nonPayloadFields int + + t := value.Type() + for i := 0; i < value.NumField(); i++ { + member := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag + if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ + continue + } + payloadFields++ + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + + memberName := mTag.Get("locationName") + if memberName == "" { + memberName = field.Name + mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`) + } + if err := b.buildValue(member, child, mTag); err != nil { + return err + } + } + + // Only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g headers/query. + if !(payloadFields == 0 && nonPayloadFields > 0) { + current.AddChild(child) + } + + return nil +} + +// buildList adds the value's list items to the current XMLNode as children nodes. All +// nested values in the list are converted to XMLNodes also. +func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted lists + return nil + } + + // check for unflattened list member + flattened := tag.Get("flattened") != "" + + xname := xml.Name{Local: tag.Get("locationName")} + if flattened { + for i := 0; i < value.Len(); i++ { + child := NewXMLElement(xname) + current.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } else { + list := NewXMLElement(xname) + current.AddChild(list) + + for i := 0; i < value.Len(); i++ { + iname := tag.Get("locationNameList") + if iname == "" { + iname = "member" + } + + child := NewXMLElement(xml.Name{Local: iname}) + list.AddChild(child) + if err := b.buildValue(value.Index(i), child, ""); err != nil { + return err + } + } + } + + return nil +} + +// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All +// nested values in the map are converted to XMLNodes also. 
+// +// Error will be returned if it is unable to build the map's values into XMLNodes +func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + if value.IsNil() { // don't build omitted maps + return nil + } + + maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")}) + current.AddChild(maproot) + current = maproot + + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + // sorting is not required for compliance, but it makes testing easier + keys := make([]string, value.Len()) + for i, k := range value.MapKeys() { + keys[i] = k.String() + } + sort.Strings(keys) + + for _, k := range keys { + v := value.MapIndex(reflect.ValueOf(k)) + + mapcur := current + if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps + child := NewXMLElement(xml.Name{Local: "entry"}) + mapcur.AddChild(child) + mapcur = child + } + + kchild := NewXMLElement(xml.Name{Local: kname}) + kchild.Text = k + vchild := NewXMLElement(xml.Name{Local: vname}) + mapcur.AddChild(kchild) + mapcur.AddChild(vchild) + + if err := b.buildValue(v, vchild, ""); err != nil { + return err + } + } + + return nil +} + +// buildScalar will convert the value into a string and append it as an attribute or child +// of the current XMLNode. +// +// The value will be added as an attribute if tag contains an "xmlAttribute" attribute value. +// +// Error will be returned if the value type is unsupported. +func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { + var str string + + switch converted := value.Interface().(type) { + case string: + str = converted + case []byte: + if !value.IsNil() { + str = base64.StdEncoding.EncodeToString(converted) + } + case bool: + str = strconv.FormatBool(converted) + case int64: + str = strconv.FormatInt(converted, 10) + case int: + str = strconv.Itoa(converted) + case float64: + switch { + case math.IsNaN(converted): + str = floatNaN + case math.IsInf(converted, 1): + str = floatInf + case math.IsInf(converted, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(converted, 'f', -1, 64) + } + case float32: + // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently.
+ asFloat64 := float64(converted) + switch { + case math.IsNaN(asFloat64): + str = floatNaN + case math.IsInf(asFloat64, 1): + str = floatInf + case math.IsInf(asFloat64, -1): + str = floatNegInf + default: + str = strconv.FormatFloat(asFloat64, 'f', -1, 32) + } + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) + default: + return fmt.Errorf("unsupported value for param %s: %v (%s)", + tag.Get("locationName"), value.Interface(), value.Type().Name()) + } + + xname := xml.Name{Local: tag.Get("locationName")} + if tag.Get("xmlAttribute") != "" { // put into current node's attribute list + attr := xml.Attr{Name: xname, Value: str} + current.Attr = append(current.Attr, attr) + } else if len(xname.Local) == 0 { + current.Text = str + } else { // regular text node + current.AddChild(&XMLNode{Name: xname, Text: str}) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go new file mode 100644 index 00000000000..c1a511851f6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go @@ -0,0 +1,32 @@ +package xmlutil + +import ( + "encoding/xml" + "strings" +) + +type xmlAttrSlice []xml.Attr + +func (x xmlAttrSlice) Len() int { + return len(x) +} + +func (x xmlAttrSlice) Less(i, j int) bool { + spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space + localI, localJ := x[i].Name.Local, x[j].Name.Local + valueI, valueJ := x[i].Value, x[j].Value + + spaceCmp := strings.Compare(spaceI, spaceJ) + localCmp := strings.Compare(localI, localJ) + valueCmp := strings.Compare(valueI, valueJ) + + if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) { + return true + } + + return false +} + +func (x xmlAttrSlice) Swap(i, j int) { + x[i], x[j] = x[j], x[i] +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go new file mode 100644 index 00000000000..44a580a940b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -0,0 +1,311 @@ +package xmlutil + +import ( + "bytes" + "encoding/base64" + "encoding/xml" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" +) + +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as a +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + +// UnmarshalXML deserializes an xml.Decoder into the container v. V +// needs to match the shape of the XML expected to be decoded. +// If the shape doesn't match unmarshaling will fail. 
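For example, the hypothetical Greeting shape from the earlier build sketch could be decoded back with UnmarshalXML (again an in-package sketch, not part of the SDK's public surface):

```go
var out Greeting // hypothetical shape from the build sketch above
d := xml.NewDecoder(strings.NewReader("<Greeting><Name>world</Name></Greeting>"))
if err := UnmarshalXML(&out, d, ""); err != nil {
	// handle the decode error
}
// *out.Name is expected to be "world"
```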
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } + if n.Children != nil { + for _, root := range n.Children { + for _, c := range root { + if wrappedChild, ok := c.Children[wrapper]; ok { + c = wrappedChild[0] // pull out wrapped element + } + + err = parse(reflect.ValueOf(v), c, "") + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + return nil + } + return nil +} + +// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect +// will be used to determine the type from r. +func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + xml := tag.Get("xml") + if len(xml) != 0 { + name := strings.SplitAfterN(xml, ",", 2)[0] + if name == "-" { + return nil + } + } + + rtype := r.Type() + if rtype.Kind() == reflect.Ptr { + rtype = rtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch rtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + t = "map" + } + } + + switch t { + case "structure": + if field, ok := rtype.FieldByName("_"); ok { + tag = field.Tag + } + return parseStruct(r, node, tag) + case "list": + return parseList(r, node, tag) + case "map": + return parseMap(r, node, tag) + default: + return parseScalar(r, node, tag) + } +} + +// parseStruct deserializes a structure and its fields from an XMLNode. Any nested +// types in the structure will also be deserialized. +func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + if r.Kind() == reflect.Ptr { + if r.IsNil() { // create the structure if it's nil + s := reflect.New(r.Type().Elem()) + r.Set(s) + r = s + } + + r = r.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return parseStruct(r.FieldByName(payload), node, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if c := field.Name[0:1]; strings.ToLower(c) == c { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" { + name = field.Tag.Get("locationNameList") + } else if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + // try to find the field by name in elements + elems := node.Children[name] + + if elems == nil { // try to find the field in attributes + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} + } + } + + member := r.FieldByName(field.Name) + for _, elem := range elems { + err := parse(member, elem, field.Tag) + if err != nil { + return err + } + } + } + return nil +} + +// parseList deserializes a list of values from an XML node. Each list entry +// will also be deserialized. 
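The two branches below correspond to the two wire shapes an XML list can take. A sketch of hypothetical field tags and the document fragment each branch accepts:

```go
type ListShapes struct {
	// Non-flattened (default): values arrive wrapped in item entries, e.g.
	//   <Items><member>a</member><member>b</member></Items>
	Items []*string `locationName:"Items" locationNameList:"member" type:"list"`

	// Flattened: each value is its own repeated element, e.g.
	//   <Tags>a</Tags><Tags>b</Tags>
	Tags []*string `locationName:"Tags" type:"list" flattened:"true"`
}
```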
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + t := r.Type() + + if tag.Get("flattened") == "" { // look at all item entries + mname := "member" + if name := tag.Get("locationNameList"); name != "" { + mname = name + } + + if Children, ok := node.Children[mname]; ok { + if r.IsNil() { + r.Set(reflect.MakeSlice(t, len(Children), len(Children))) + } + + for i, c := range Children { + err := parse(r.Index(i), c, "") + if err != nil { + return err + } + } + } + } else { // flattened list means this is a single element + if r.IsNil() { + r.Set(reflect.MakeSlice(t, 0, 0)) + } + + childR := reflect.Zero(t.Elem()) + r.Set(reflect.Append(r, childR)) + err := parse(r.Index(r.Len()-1), node, "") + if err != nil { + return err + } + } + + return nil +} + +// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode +// will also be deserialized as map entries. +func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + if r.IsNil() { + r.Set(reflect.MakeMap(r.Type())) + } + + if tag.Get("flattened") == "" { // look at all child entries + for _, entry := range node.Children["entry"] { + parseMapEntry(r, entry, tag) + } + } else { // this element is itself an entry + parseMapEntry(r, node, tag) + } + + return nil +} + +// parseMapEntry deserializes a map entry from an XML node. +func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + kname, vname := "key", "value" + if n := tag.Get("locationNameKey"); n != "" { + kname = n + } + if n := tag.Get("locationNameValue"); n != "" { + vname = n + } + + keys, ok := node.Children[kname] + values := node.Children[vname] + if ok { + for i, key := range keys { + keyR := reflect.ValueOf(key.Text) + value := values[i] + valueR := reflect.New(r.Type().Elem()).Elem() + + parse(valueR, value, "") + r.SetMapIndex(keyR, valueR) + } + } + return nil +} + +// parseScalar deserializes an XMLNode value into a concrete type based on the +// interface type of r. +// +// Error is returned if the deserialization fails due to invalid type conversion, +// or unsupported interface type.
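The dispatch is driven by the Go type of the target field, with the timestampFormat tag choosing among the formats in the protocol package. Hypothetical fields for illustration:

```go
type TimestampShapes struct {
	// No tag: parsed with the XML default, protocol.ISO8601TimeFormatName.
	Created *time.Time `locationName:"Created" type:"timestamp"`

	// An explicit timestampFormat tag overrides the default.
	Expires *time.Time `locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
}
```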
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { + switch r.Interface().(type) { + case *string: + r.Set(reflect.ValueOf(&node.Text)) + return nil + case []byte: + b, err := base64.StdEncoding.DecodeString(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(b)) + case *bool: + v, err := strconv.ParseBool(node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *int64: + v, err := strconv.ParseInt(node.Text, 10, 64) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&v)) + case *float64: + var v float64 + switch { + case strings.EqualFold(node.Text, floatNaN): + v = math.NaN() + case strings.EqualFold(node.Text, floatInf): + v = math.Inf(1) + case strings.EqualFold(node.Text, floatNegInf): + v = math.Inf(-1) + default: + var err error + v, err = strconv.ParseFloat(node.Text, 64) + if err != nil { + return err + } + } + r.Set(reflect.ValueOf(&v)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) + if err != nil { + return err + } + r.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type()) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go new file mode 100644 index 00000000000..c85b79fddd2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -0,0 +1,173 @@ +package xmlutil + +import ( + "encoding/xml" + "fmt" + "io" + "sort" +) + +// An XMLNode contains the values to be encoded or decoded. +type XMLNode struct { + Name xml.Name `json:",omitempty"` + Children map[string][]*XMLNode `json:",omitempty"` + Text string `json:",omitempty"` + Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode +} + +// textEncoder is a string type that implements the TextMarshaler interface. +// This type is used to ensure that the line feed (\n) (U+000A) is escaped. +type textEncoder string + +func (t textEncoder) MarshalText() ([]byte, error) { + return []byte(t), nil +} + +// NewXMLElement returns a pointer to a new XMLNode initialized to default values. +func NewXMLElement(name xml.Name) *XMLNode { + return &XMLNode{ + Name: name, + Children: map[string][]*XMLNode{}, + Attr: []xml.Attr{}, + } +} + +// AddChild adds child to the XMLNode. +func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n + if _, ok := n.Children[child.Name.Local]; !ok { + n.Children[child.Name.Local] = []*XMLNode{} + } + n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child) +} + +// XMLToStruct converts an xml.Decoder stream to XMLNode with nested values.
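To make the intermediate tree concrete, here is a small document and the XMLNode structure it is expected to produce (an illustrative sketch, not a test from the SDK):

```go
d := xml.NewDecoder(strings.NewReader(`<A attr="v"><B>x</B><B>y</B></A>`))
root, err := XMLToStruct(d, nil)
if err == nil {
	a := root.Children["A"][0]  // the <A> element, carrying its attribute list
	_ = a.Attr                  // [{Name:{Local:"attr"}, Value:"v"}]
	_ = a.Children["B"][0].Text // "x"
	_ = a.Children["B"][1].Text // "y"
}
```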
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { + out := &XMLNode{} + for { + tok, err := d.Token() + if err != nil { + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break + } + + switch typed := tok.(type) { + case xml.CharData: + out.Text = string(typed.Copy()) + case xml.StartElement: + el := typed.Copy() + out.Attr = el.Attr + if out.Children == nil { + out.Children = map[string][]*XMLNode{} + } + + name := typed.Name.Local + slice := out.Children[name] + if slice == nil { + slice = []*XMLNode{} + } + node, e := XMLToStruct(d, &el) + out.findNamespaces() + if e != nil { + return out, e + } + node.Name = typed.Name + node.findNamespaces() + // Save into a temp variable, simply because out gets squashed during + // loop iterations + tempOut := *out + node.parent = &tempOut + slice = append(slice, node) + out.Children[name] = slice + case xml.EndElement: + if s != nil && s.Name.Local == typed.Name.Local { // matching end token + return out, nil + } + out = &XMLNode{} + } + } + return out, nil +} + +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + +// StructToXML writes an XMLNode to an xml.Encoder as tokens. +func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { + // Sort attributes + attrs := node.Attr + if sorted { + sortedAttrs := make([]xml.Attr, 0, len(attrs)) + for _, k := range node.Attr { + sortedAttrs = append(sortedAttrs, k) + } + sort.Sort(xmlAttrSlice(sortedAttrs)) + attrs = sortedAttrs + } + + startElement := xml.StartElement{Name: node.Name, Attr: attrs} + + if node.Text != "" { + e.EncodeElement(textEncoder(node.Text), startElement) + return e.Flush() + } + + e.EncodeToken(startElement) + + if sorted { + sortedNames := []string{} + for k := range node.Children { + sortedNames = append(sortedNames, k) + } + sort.Strings(sortedNames) + + for _, k := range sortedNames { + for _, v := range node.Children[k] { + StructToXML(e, v, sorted) + } + } + } else { + for _, c := range node.Children { + for _, v := range c { + StructToXML(e, v, sorted) + } + } + } + + e.EncodeToken(startElement.End()) + + return e.Flush() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 00000000000..f472cdc8d2b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,45919 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package s3 + +import ( + "bytes" + "fmt" + "io" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/checksum" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// This operation aborts a multipart upload. After a multipart upload is aborted, +// no additional parts can be uploaded using that upload ID. The storage consumed +// by any previously uploaded parts will be freed. However, if any part uploads +// are currently in progress, those part uploads might or might not succeed. +// As a result, it might be necessary to abort a given multipart upload multiple +// times in order to completely free all storage consumed by all parts. +// +// To verify that all parts have been removed and prevent getting charged for +// the part storage, you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// API operation and ensure that the parts list is empty. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
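// For orientation, a minimal call through this API might look like the
// following sketch (the session value, bucket, key, and upload ID are
// placeholders, not values from this SDK):
//
//	svc := s3.New(sess)
//	_, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
//		Bucket:   aws.String("example-bucket"),
//		Key:      aws.String("example-object-key"),
//		UploadId: aws.String("example-upload-id"),
//	})
//	if err != nil {
//		// NoSuchUpload is returned when the upload ID is invalid or the
//		// upload was already aborted or completed.
//	}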
+// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to AbortMultipartUpload: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// You first initiate the multipart upload and then upload all parts using the +// UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// operation. After successfully uploading all relevant parts of an upload, +// you call this CompleteMultipartUpload operation to complete the upload. Upon +// receiving this request, Amazon S3 concatenates all the parts in ascending +// order by part number to create a new object. In the CompleteMultipartUpload +// request, you must provide the parts list and ensure that the parts list is +// complete. The CompleteMultipartUpload API operation concatenates the parts +// that you provide in the list. For each part in the list, you must provide +// the PartNumber value and the ETag value that are returned after that part +// was uploaded. +// +// The processing of a CompleteMultipartUpload request could take several minutes +// to finalize. After Amazon S3 begins processing the request, it sends an HTTP +// response header that specifies a 200 OK response. While processing is in +// progress, Amazon S3 periodically sends white space characters to keep the +// connection from timing out. A request could fail after the initial 200 OK +// response has been sent. This means that a 200 OK response can contain either +// a success or an error. The error response might be embedded in the 200 OK +// response. If you call this API operation directly, make sure to design your +// application to parse the contents of the response and handle it appropriately. +// If you use Amazon Web Services SDKs, SDKs handle this condition. 
The SDKs +// detect the embedded error and apply error handling per your configuration +// settings (including automatically retrying the request as appropriate). If +// the condition persists, the SDKs throw an exception (or, for the SDKs that +// don't use exceptions, they return an error). +// +// Note that if CompleteMultipartUpload fails, applications should be prepared +// to retry any failed requests (including 500 error responses). For more information, +// see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). +// +// You can't use Content-Type: application/x-www-form-urlencoded for the CompleteMultipartUpload +// requests. Also, if you don't provide a Content-Type header, CompleteMultipartUpload +// can still return a 200 OK response. +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Special errors +// +// - Error Code: EntityTooSmall Description: Your proposed upload is smaller +// than the minimum allowed object size. Each part must be at least 5 MB +// in size, except the last part. HTTP Status Code: 400 Bad Request +// +// - Error Code: InvalidPart Description: One or more of the specified parts +// could not be found. The part might not have been uploaded, or the specified +// ETag might not have matched the uploaded part's ETag. HTTP Status Code: +// 400 Bad Request +// +// - Error Code: InvalidPartOrder Description: The list of parts was not +// in ascending order. The parts list must be specified in order by part +// number. 
HTTP Status Code: 400 Bad Request +// +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to CompleteMultipartUpload: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CopyObjectRequest method. 
+// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// You can store individual objects of up to 5 TB in Amazon S3. You create a +// copy of your object up to 5 GB in size in a single atomic action using this +// API. However, to copy an object greater than 5 GB, you must use the multipart +// upload Upload Part - Copy (UploadPartCopy) API. For more information, see +// Copy Object Using the REST Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html). +// +// You can copy individual objects between general purpose buckets, between +// directory buckets, and between general purpose buckets and directory buckets. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Both the Region that you want to copy the object from and the Region that +// you want to copy the object to must be enabled for your account. For more +// information about how to enable a Region for your account, see Enable or +// disable a Region for standalone accounts (https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone) +// in the Amazon Web Services Account Management Guide. +// +// Amazon S3 transfer acceleration does not support cross-Region copies. If +// you request a cross-Region copy using a transfer acceleration endpoint, you +// get a 400 Bad Request error. For more information, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// # Authentication and authorization +// +// All CopyObject requests must be authenticated and signed by using IAM credentials +// (access key ID and secret access key for the IAM identities). All headers +// with the x-amz- prefix, including x-amz-copy-source, must be signed. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// +// Directory buckets - You must use the IAM credentials to authenticate and +// authorize your access to the CopyObject API operation, instead of using the +// temporary security credentials through the CreateSession API operation. +// +// The Amazon Web Services CLI or SDKs handle authentication and authorization +// on your behalf. +// +// # Permissions +// +// You must have read access to the source object and write access to the destination +// bucket.
+// +// - General purpose bucket permissions - You must have permissions in an +// IAM policy based on the source and destination bucket types in a CopyObject +// operation. If the source object is in a general purpose bucket, you must +// have s3:GetObject permission to read the source object that is being copied. +// If the destination bucket is a general purpose bucket, you must have s3:PutObject +// permission to write the object copy to the destination bucket. +// +// - Directory bucket permissions - You must have permissions in a bucket +// policy or an IAM identity-based policy based on the source and destination +// bucket types in a CopyObject operation. If the source object that you +// want to copy is in a directory bucket, you must have the s3express:CreateSession +// permission in the Action element of a policy to read the object. By default, +// the session is in the ReadWrite mode. If you want to restrict the access, +// you can explicitly set the s3express:SessionMode condition key to ReadOnly +// on the copy source bucket. If the copy destination is a directory bucket, +// you must have the s3express:CreateSession permission in the Action element +// of a policy to write the object to the destination. The s3express:SessionMode +// condition key can't be set to ReadOnly on the copy destination bucket. +// For example policies, see Example bucket policies for S3 Express One Zone +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// # Response and special errors +// +// When the request is an HTTP 1.1 request, the response is chunk encoded. When +// the request is not an HTTP 1.1 request, the response doesn't contain the +// Content-Length header. You always need to read the entire response body to +// check whether the copy succeeds; Amazon S3 sends the response this way to +// keep the connection alive while it copies the data. +// +// - If the copy is successful, you receive a response with information about +// the copied object. +// +// - A copy request might return an error when Amazon S3 receives the copy +// request or while Amazon S3 is copying the files. A 200 OK response can +// contain either a success or an error. If the error occurs before the copy +// action starts, you receive a standard Amazon S3 error. If the error occurs +// during the copy operation, the error response is embedded in the 200 OK +// response. For example, in a cross-Region copy, you may encounter throttling +// and receive a 200 OK response. For more information, see Resolve the Error +// 200 response when copying objects to Amazon S3 (https://repost.aws/knowledge-center/s3-resolve-200-internalerror). +// The 200 OK status code means the copy was accepted, but it doesn't mean +// the copy is complete. Another example: if you disconnect from Amazon +// S3 before the copy is complete, Amazon S3 might cancel the copy, and you +// may receive a 200 OK response. You must stay connected to Amazon S3 until +// the entire response is successfully received and processed. If you call +// this API operation directly, make sure to design your application to parse +// the content of the response and handle it appropriately. If you use Amazon +// Web Services SDKs, SDKs handle this condition.
The SDKs detect the embedded +// error and apply error handling per your configuration settings (including +// automatically retrying the request as appropriate). If the condition persists, +// the SDKs throw an exception (or, for the SDKs that don't use exceptions, +// they return an error). +// +// # Charge +// +// The copy request charge is based on the storage class and Region that you +// specify for the destination object. The request can also result in a data +// retrieval charge for the source if the source storage class bills for data +// retrieval. If the copy source is in a different region, the data transfer +// is billed to the copy source account. For pricing information, see Amazon +// S3 pricing (http://aws.amazon.com/s3/pricing/). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to CopyObject: +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// - ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY action is not in the active tier and is only +// stored in Amazon S3 Glacier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts +// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html). +// +// To create a bucket, you must set up Amazon S3 and +// have a valid Amazon Web Services Access Key ID to authenticate requests. +// Anonymous requests are never allowed to create buckets. By creating the bucket, +// you become the bucket owner. +// +// There are two types of buckets: general purpose buckets and directory buckets. +// For more information about these bucket types, see Creating, configuring, +// and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html) +// in the Amazon S3 User Guide. +// +// - General purpose buckets - If you send your CreateBucket request to the +// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. +// So the signature calculations in Signature Version 4 must use us-east-1 +// as the Region, even if the location constraint in the request specifies +// another Region where the bucket is to be created. If you create a bucket +// in a Region other than US East (N. Virginia), your application must be +// able to handle 307 redirects. For more information, see Virtual hosting +// of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html) +// in the Amazon S3 User Guide. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, +// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - In addition to the s3:CreateBucket +// permission, the following permissions are required in a policy when your +// CreateBucket request includes specific headers: Access control lists (ACLs) +// +// - In your CreateBucket request, if you specify an access control list +// (ACL) and set it to public-read, public-read-write, authenticated-read, +// or if you explicitly specify any other custom ACLs, both s3:CreateBucket +// and s3:PutBucketAcl permissions are required. In your CreateBucket request, +// if you set the ACL to private, or if you don't specify any ACLs, only +// the s3:CreateBucket permission is required. Object Lock - In your CreateBucket +// request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration +// and s3:PutBucketVersioning permissions are required.
S3 Object Ownership +// +// - If your CreateBucket request includes the x-amz-object-ownership header, +// then the s3:PutBucketOwnershipControls permission is required. To set +// an ACL on a bucket as part of a CreateBucket request, you must explicitly +// set S3 Object Ownership for the bucket to a different value than the default, +// BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public +// access, you must first create the bucket (without the bucket ACL) and +// then explicitly disable Block Public Access on the bucket before using +// PutBucketAcl to set the ACL. If you try to create a bucket with a public +// ACL, the request will fail. For the majority of modern use cases in S3, +// we recommend that you keep all Block Public Access settings enabled and +// keep ACLs disabled. If you would like to share data with users outside +// of your account, you can use bucket policies as needed. For more information, +// see Controlling ownership of objects and disabling ACLs for your bucket +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// and Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. S3 Block Public Access - If your specific +// use case requires granting public access to your S3 resources, you can +// disable Block Public Access. Specifically, you can create a new bucket +// with Block Public Access enabled, then separately call the DeletePublicAccessBlock +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about S3 Block Public Access, see Blocking +// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 +// Object Ownership, and S3 Block Public Access are not supported for directory +// buckets. For directory buckets, all Block Public Access settings are enabled +// at the bucket level and S3 Object Ownership is set to Bucket owner enforced +// (ACLs disabled). These settings can't be modified. For more information +// about permissions for creating and working with directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. For more information about supported S3 features +// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features) +// in the Amazon S3 User Guide. 
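+//
+// For illustration only, a minimal sketch of creating a bucket with this
+// client (the bucket name and Region are hypothetical; assumes the aws,
+// session, and s3 packages from github.com/aws/aws-sdk-go are imported):
+//
+// sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("eu-west-1")}))
+// svc := s3.New(sess)
+// // Outside us-east-1, the location constraint must match the Region.
+// _, err := svc.CreateBucket(&s3.CreateBucketInput{
+//     Bucket: aws.String("example-bucket"),
+//     CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//         LocationConstraint: aws.String("eu-west-1"),
+//     },
+// })
+// if err != nil {
+//     fmt.Println(err)
+// }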
+// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to CreateBucket: +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Select a different name and try again. +// +// - ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// The bucket you tried to create already exists, and you own it. Amazon S3 +// returns this error in all Amazon Web Services Regions except in the North +// Virginia Region. For legacy compatibility, if you re-create an existing bucket +// that you already own in the North Virginia Region, Amazon S3 returns 200 +// OK and resets the bucket access control lists (ACLs). +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateMultipartUploadRequest method. 
+// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// This action initiates a multipart upload and returns an upload ID. This upload +// ID is used to associate all of the parts in the specific multipart upload. +// You specify this upload ID in each of your subsequent upload part requests +// (see UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)). +// You also include this upload ID in the final request to either complete or +// abort the multipart upload request. For more information about multipart +// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) +// in the Amazon S3 User Guide. +// +// After you initiate a multipart upload and upload one or more parts, to stop +// being charged for storing the uploaded parts, you must either complete or +// abort the multipart upload. Amazon S3 frees up the space used to store the +// parts and stops charging you for storing them only after you either complete +// or abort a multipart upload. +// +// If you have configured a lifecycle rule to abort incomplete multipart uploads, +// the created multipart upload must be completed within the number of days +// specified in the bucket lifecycle configuration. Otherwise, the incomplete +// multipart upload becomes eligible for an abort action and Amazon S3 aborts +// the multipart upload. For more information, see Aborting Incomplete Multipart +// Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). +// +// - Directory buckets - S3 Lifecycle is not supported by directory buckets. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Request signing +// +// For request signing, multipart upload is just a series of regular requests. +// You initiate a multipart upload, send one or more requests to upload parts, +// and then complete the multipart upload process. You sign each request individually. +// There is nothing special about signing multipart upload requests. For more +// information about signing, see Authenticating Requests (Amazon Web Services +// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// in the Amazon S3 User Guide. 
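+//
+// As a hedged illustration of that flow (hypothetical bucket and key names;
+// assumes the bytes, aws, session, and s3 packages are imported, and data is
+// a []byte of at least 5 MB unless it is the only part):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//     Bucket: aws.String("example-bucket"),
+//     Key:    aws.String("example-key"),
+// })
+// // ... handle err ...
+// part, err := svc.UploadPart(&s3.UploadPartInput{
+//     Bucket:     aws.String("example-bucket"),
+//     Key:        aws.String("example-key"),
+//     UploadId:   create.UploadId,
+//     PartNumber: aws.Int64(1),
+//     Body:       bytes.NewReader(data),
+// })
+// // ... handle err ...
+// _, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//     Bucket:   aws.String("example-bucket"),
+//     Key:      aws.String("example-key"),
+//     UploadId: create.UploadId,
+//     MultipartUpload: &s3.CompletedMultipartUpload{
+//         Parts: []*s3.CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+//     },
+// })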
+// +// Permissions +// +// - General purpose bucket permissions - For information about the permissions +// required to use the multipart upload API, see Multipart upload and permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. To perform a multipart upload with encryption +// by using an Amazon Web Services KMS key, the requester must have permission +// to the kms:Decrypt and kms:GenerateDataKey* actions on the key. These +// permissions are required because Amazon S3 must decrypt and read data +// from the encrypted file parts before it completes the multipart upload. +// For more information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. The Amazon Web Services CLI or SDKs +// create the session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Encryption +// +// - General purpose buckets - Server-side encryption is for data encryption +// at rest. Amazon S3 encrypts your data as it writes it to disks in its +// data centers and decrypts it when you access it. Amazon S3 automatically +// encrypts all new objects that are uploaded to an S3 bucket. When doing +// a multipart upload, if you don't specify encryption information in your +// request, the encryption setting of the uploaded parts is set to the default +// encryption configuration of the destination bucket. By default, all buckets +// have a base level of encryption configuration that uses server-side encryption +// with Amazon S3 managed keys (SSE-S3). If the destination bucket has a +// default encryption configuration that uses server-side encryption with +// a Key Management Service (KMS) key (SSE-KMS), or a customer-provided +// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a +// customer-provided key to encrypt the uploaded parts. When you perform +// a CreateMultipartUpload operation, if you want to use a different type +// of encryption setting for the uploaded parts, you can request that Amazon +// S3 encrypts the object with a different encryption key (such as an Amazon +// S3 managed key, a KMS key, or a customer-provided key). When the encryption +// setting in your request is different from the default encryption configuration +// of the destination bucket, the encryption setting in your request takes +// precedence.
If you choose to provide your own encryption key, the request +// headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html) +// requests must match the headers you used in the CreateMultipartUpload +// request. Use KMS keys (SSE-KMS) that include the Amazon Web Services managed +// key (aws/s3) and KMS customer managed keys stored in Key Management Service +// (KMS) – If you want Amazon Web Services to manage the keys used to encrypt +// data, specify the following headers in the request. x-amz-server-side-encryption +// x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context +// If you specify x-amz-server-side-encryption:aws:kms, but don't provide +// x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon +// Web Services managed key (aws/s3 key) in KMS to protect the data. To perform +// a multipart upload with encryption by using an Amazon Web Services KMS +// key, the requester must have permission to the kms:Decrypt and kms:GenerateDataKey* +// actions on the key. These permissions are required because Amazon S3 must +// decrypt and read data from the encrypted file parts before it completes +// the multipart upload. For more information, see Multipart upload API and +// permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions) +// and Protecting data using server-side encryption with Amazon Web Services +// KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) +// user or role is in the same Amazon Web Services account as the KMS key, +// then you must have these permissions on the key policy. If your IAM user +// or role is in a different account from the key, then you must have the +// permissions on both the key policy and your IAM user or role. All GET +// and PUT requests for an object protected by KMS fail if you don't make +// them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS), +// or Signature Version 4. For information about configuring any of the officially +// supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying +// the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) +// in the Amazon S3 User Guide. For more information about server-side encryption +// with KMS keys (SSE-KMS), see Protecting Data Using Server-Side Encryption +// with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html) +// in the Amazon S3 User Guide. Use customer-provided encryption keys (SSE-C) +// – If you want to manage your own encryption keys, provide all the following +// headers in the request. x-amz-server-side-encryption-customer-algorithm +// x-amz-server-side-encryption-customer-key x-amz-server-side-encryption-customer-key-MD5 +// For more information about server-side encryption with customer-provided +// encryption keys (SSE-C), see Protecting data using server-side encryption +// with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. 
+// +// - Directory buckets - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to CreateMultipartUpload: +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSession = "CreateSession" + +// CreateSessionRequest generates a "aws/request.Request" representing the +// client's request for the CreateSession operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSession for more information on using the CreateSession +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateSessionRequest method.
+// req, resp := client.CreateSessionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession +func (c *S3) CreateSessionRequest(input *CreateSessionInput) (req *request.Request, output *CreateSessionOutput) { + op := &request.Operation{ + Name: opCreateSession, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?session", + } + + if input == nil { + input = &CreateSessionInput{} + } + + output = &CreateSessionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSession API operation for Amazon Simple Storage Service. +// +// Creates a session that establishes temporary security credentials to support +// fast authentication and authorization for the Zonal endpoint APIs on directory +// buckets. For more information about Zonal endpoint APIs that include the +// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html) +// in the Amazon S3 User Guide. +// +// To make Zonal endpoint API requests on a directory bucket, use the CreateSession +// API operation. Specifically, you grant s3express:CreateSession permission +// to a bucket in a bucket policy or an IAM identity-based policy. Then, you +// use IAM credentials to make the CreateSession API request on the bucket, +// which returns temporary security credentials that include the access key +// ID, secret access key, session token, and expiration. These credentials have +// associated permissions to access the Zonal endpoint APIs. After the session +// is created, you don’t need to use other policies to grant permissions to +// each Zonal endpoint API individually. Instead, in your Zonal endpoint API +// requests, you sign your requests by applying the temporary security credentials +// of the session to the request headers and following the SigV4 protocol for +// authentication. You also apply the session token to the x-amz-s3session-token +// request header for authorization. Temporary security credentials are scoped +// to the bucket and expire after 5 minutes. After the expiration time, any +// calls that you make with those credentials will fail. You must use IAM credentials +// again to make a CreateSession API request that generates a new set of temporary +// credentials for use. Temporary credentials cannot be extended or refreshed +// beyond the original specified interval. +// +// If you use Amazon Web Services SDKs, SDKs handle the session token refreshes +// automatically to avoid service interruptions when a session expires. We recommend +// that you use the Amazon Web Services SDKs to initiate and manage requests +// to the CreateSession API. For more information, see Performance guidelines +// and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication) +// in the Amazon S3 User Guide. +// +// - You must make requests for this API operation to the Zonal endpoint. +// These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com. +// Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
+// +// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject +// API operation doesn't use the temporary security credentials returned +// from the CreateSession API operation for authentication and authorization. +// For information about authentication and authorization of the CopyObject +// API operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html). +// +// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket +// API operation doesn't use the temporary security credentials returned +// from the CreateSession API operation for authentication and authorization. +// For information about authentication and authorization of the HeadBucket +// API operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html). +// +// # Permissions +// +// To obtain temporary security credentials, you must create a bucket policy +// or an IAM identity-based policy that grants s3express:CreateSession permission +// to the bucket. In a policy, you can have the s3express:SessionMode condition +// key to control who can create a ReadWrite or ReadOnly session. For more information +// about ReadWrite or ReadOnly sessions, see x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters). +// For example policies, see Example bucket policies for S3 Express One Zone +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// To grant cross-account access to Zonal endpoint APIs, the bucket policy should +// also grant both accounts the s3express:CreateSession permission. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateSession for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateSession +func (c *S3) CreateSession(input *CreateSessionInput) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + return out, req.Send() +} + +// CreateSessionWithContext is the same as CreateSession with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSession for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
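+//
+// A sketch of calling this operation with a deadline (the directory bucket
+// name is hypothetical; assumes the context, time, aws, session, and s3
+// packages are imported):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+// defer cancel()
+// svc := s3.New(session.Must(session.NewSession()))
+// out, err := svc.CreateSessionWithContext(ctx, &s3.CreateSessionInput{
+//     Bucket: aws.String("example-bucket--usw2-az1--x-s3"),
+// })
+// // On success, out.Credentials holds the temporary session credentials.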
+func (c *S3) CreateSessionWithContext(ctx aws.Context, input *CreateSessionInput, opts ...request.Option) (*CreateSessionOutput, error) { + req, out := c.CreateSessionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the S3 bucket. All objects (including all object versions and delete +// markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// - Directory buckets - If multipart uploads in a directory bucket are in +// progress, you can't delete the bucket until all the in-progress multipart +// uploads are aborted or completed. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, +// see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - You must have the s3:DeleteBucket +// permission on the specified bucket in a policy. +// +// - Directory bucket permissions - You must have the s3express:DeleteBucket +// permission in an IAM identity-based policy instead of a bucket policy. +// Cross-account access to this API operation isn't supported. This operation +// can only be performed by the Amazon Web Services account that owns the +// resource. For more information about directory bucket policies and permissions, +// see Amazon Web Services Identity and Access Management (IAM) for S3 Express +// One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. 
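+//
+// As a hedged sketch of the awserr-based error handling pattern noted below
+// (hypothetical bucket name; assumes the aws, awserr, session, s3, and log
+// packages are imported):
+//
+// svc := s3.New(session.Must(session.NewSession()))
+// _, err := svc.DeleteBucket(&s3.DeleteBucketInput{
+//     Bucket: aws.String("example-bucket"),
+// })
+// if aerr, ok := err.(awserr.Error); ok {
+//     log.Printf("code=%s message=%s", aerr.Code(), aerr.Message())
+// }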
+// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to DeleteBucket: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to DeleteBucketAnalyticsConfiguration: +// +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the cors configuration information set for the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketCORS +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// For information about cors, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) +// in the Amazon S3 User Guide. +// +// Related Resources +// +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the DELETE action resets the default encryption for +// the bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). +// For information about the bucket default encryption feature, see Amazon S3 +// Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. +// +// To use this operation, you must have permissions to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to DeleteBucketEncryption: +// +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketIntelligentTieringConfiguration = "DeleteBucketIntelligentTieringConfiguration" + +// DeleteBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketIntelligentTieringConfiguration for more information on using the DeleteBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketIntelligentTieringConfigurationRequest method. 
+// req, resp := client.DeleteBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfigurationRequest(input *DeleteBucketIntelligentTieringConfigurationInput) (req *request.Request, output *DeleteBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketIntelligentTieringConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &DeleteBucketIntelligentTieringConfigurationInput{} + } + + output = &DeleteBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to DeleteBucketIntelligentTieringConfiguration include: +// +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketIntelligentTieringConfiguration for usage and error information. 
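+//
+// As an illustrative sketch (the client value svc, the bucket name, and the
+// configuration ID are assumed for the example), a configuration is removed
+// by bucket and ID:
+//
+//	_, err := svc.DeleteBucketIntelligentTieringConfiguration(&s3.DeleteBucketIntelligentTieringConfigurationInput{
+//		Bucket: aws.String("examplebucket"),
+//		Id:     aws.String("ExampleTieringConfig"),
+//	})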
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration +func (c *S3) DeleteBucketIntelligentTieringConfiguration(input *DeleteBucketIntelligentTieringConfigurationInput) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketIntelligentTieringConfigurationWithContext is the same as DeleteBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *DeleteBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*DeleteBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.DeleteBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. +// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. 
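+//
+// For example (a sketch only; svc, the bucket name, and the inventory ID
+// "report1" are assumed values), the configuration to delete is addressed
+// by bucket and ID:
+//
+//	_, err := svc.DeleteBucketInventoryConfiguration(&s3.DeleteBucketInventoryConfigurationInput{
+//		Bucket: aws.String("examplebucket"),
+//		Id:     aws.String("report1"),
+//	})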
+// +// To use this operation, you must have permissions to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// Operations related to DeleteBucketInventoryConfiguration include: +// +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the lifecycle configuration from the specified bucket. Amazon S3 +// removes all the lifecycle configuration rules in the lifecycle subresource +// associated with the bucket. Your objects never expire, and Amazon S3 no longer +// automatically deletes any objects on the basis of rules contained in the +// deleted lifecycle configuration. +// +// To use this operation, you must have permission to perform the s3:PutLifecycleConfiguration +// action. By default, the bucket owner has this permission and the bucket owner +// can grant this permission to others. +// +// There is usually some time lag before lifecycle configuration deletion is +// fully propagated to all the Amazon S3 systems. +// +// For more information about the object expiration, see Elements to Describe +// Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions). +// +// Related actions include: +// +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
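+//
+// A minimal sketch of a call bounded by a deadline (the timeout value, svc,
+// and the bucket name are illustrative assumptions):
+//
+//	ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 30*time.Second)
+//	defer cancel()
+//	_, err := svc.DeleteBucketLifecycleWithContext(ctx, &s3.DeleteBucketLifecycleInput{
+//		Bucket: aws.String("examplebucket"),
+//	})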
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. +// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes a metrics configuration for the Amazon CloudWatch request metrics +// (specified by the metrics configuration ID) from the bucket. Note that this +// doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
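+//
+// As a sketch (svc, the bucket name, and the metrics configuration ID
+// "EntireBucket" are assumptions for the example), deletion targets a single
+// configuration ID:
+//
+//	_, err := svc.DeleteBucketMetricsConfiguration(&s3.DeleteBucketMetricsConfigurationInput{
+//		Bucket: aws.String("examplebucket"),
+//		Id:     aws.String("EntireBucket"),
+//	})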
+// +// The following operations are related to DeleteBucketMetricsConfiguration: +// +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketOwnershipControls = "DeleteBucketOwnershipControls" + +// DeleteBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketOwnershipControls for more information on using the DeleteBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketOwnershipControlsRequest method. 
+// req, resp := client.DeleteBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControlsRequest(input *DeleteBucketOwnershipControlsInput) (req *request.Request, output *DeleteBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opDeleteBucketOwnershipControls, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &DeleteBucketOwnershipControlsInput{} + } + + output = &DeleteBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:PutBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html). +// +// The following operations are related to DeleteBucketOwnershipControls: +// +// - GetBucketOwnershipControls +// +// - PutBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls +func (c *S3) DeleteBucketOwnershipControls(input *DeleteBucketOwnershipControlsInput) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// DeleteBucketOwnershipControlsWithContext is the same as DeleteBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketOwnershipControlsWithContext(ctx aws.Context, input *DeleteBucketOwnershipControlsInput, opts ...request.Option) (*DeleteBucketOwnershipControlsOutput, error) { + req, out := c.DeleteBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// Deletes the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the DeleteBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. +// +// If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 +// Access Denied error. If you have the correct permissions, but you're not +// using an identity that belongs to the bucket owner's account, Amazon S3 returns +// a 405 Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// +// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission +// is required in a policy. For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:DeleteBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. 
Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to DeleteBucketPolicy +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + return out, req.Send() +} + +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the replication configuration from the bucket. +// +// To use this operation, you must have permissions to perform the s3:PutReplicationConfiguration +// action. The bucket owner has these permissions by default and can grant them +// to others. For more information about permissions, see Permissions Related +// to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// It can take a while for the deletion of a replication configuration to fully +// propagate. +// +// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to DeleteBucketReplication: +// +// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html) +// +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts.
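+//
+// For illustration, a sketch of cancelling the request from another goroutine
+// (svc, the sleep duration, and the bucket name are assumed for the example):
+//
+//	ctx, cancel := context.WithCancel(aws.BackgroundContext())
+//	go func() {
+//		time.Sleep(5 * time.Second)
+//		cancel()
+//	}()
+//	_, err := svc.DeleteBucketReplicationWithContext(ctx, &s3.DeleteBucketReplicationInput{
+//		Bucket: aws.String("examplebucket"),
+//	})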
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Deletes the tags from the bucket. +// +// To use this operation, you must have permission to perform the s3:PutBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// The following operations are related to DeleteBucketTagging: +// +// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) +// +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This action removes the website configuration for a bucket. Amazon S3 returns +// a 200 OK response upon successfully deleting a website configuration on the +// specified bucket. You will get a 200 OK response if the website configuration +// you are trying to delete does not exist on the bucket. Amazon S3 returns +// a 404 response if the bucket specified in the request does not exist. +// +// This DELETE action requires the S3:DeleteBucketWebsite permission. By default, +// only the bucket owner can delete the website configuration attached to a +// bucket. However, bucket owners can grant other users permission to delete +// the website configuration by writing a bucket policy granting them the S3:DeleteBucketWebsite +// permission. +// +// For more information about hosting websites, see Hosting Websites on Amazon +// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// The following operations are related to DeleteBucketWebsite: +// +// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html) +// +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes an object from a bucket. The behavior depends on the bucket's versioning +// state: +// +// - If bucket versioning is not enabled, the operation permanently deletes +// the object. +// +// - If bucket versioning is enabled, the operation inserts a delete marker, +// which becomes the current version of the object. To permanently delete +// an object in a versioned bucket, you must include the object’s versionId +// in the request. For more information about versioning-enabled buckets, +// see Deleting object versions from a versioning-enabled bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html). 
+// +// - If bucket versioning is suspended, the operation removes the object +// that has a null versionId, if there is one, and inserts a delete marker +// that becomes the current version of the object. If there isn't an object +// with a null versionId, and all versions of the object have a versionId, +// Amazon S3 does not remove the object and only inserts a delete marker. +// To permanently delete an object that has a versionId, you must include +// the object’s versionId in the request. For more information about versioning-suspended +// buckets, see Deleting objects from versioning-suspended buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectsfromVersioningSuspendedBuckets.html). +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID +// is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// To remove a specific version, you must use the versionId query parameter. +// Using this query parameter permanently deletes the version. If the object +// deleted is a delete marker, Amazon S3 sets the response header x-amz-delete-marker +// to true. +// +// If the object you want to delete is in a bucket where the bucket versioning +// configuration is MFA Delete enabled, you must include the x-amz-mfa request +// header in the DELETE versionId request. Requests that include x-amz-mfa must +// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html) +// in the Amazon S3 User Guide. To see sample requests that use versioning, +// see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete). +// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// You can delete objects by explicitly calling DELETE Object or calling (PutBucketLifecycle +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)) +// to enable Amazon S3 to remove them for you. If you want to block users or +// accounts from removing or deleting objects from your bucket, you must deny +// them the s3:DeleteObject, s3:DeleteObjectVersion, and s3:PutLifeCycleConfiguration +// actions. +// +// Directory buckets - S3 Lifecycle is not supported by directory buckets. +// +// Permissions +// +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// s3:DeleteObject - To delete an object from a bucket, you must always have +// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific +// version of an object from a versioning-enabled bucket, you must have the +// s3:DeleteObjectVersion permission. 
+// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following action is related to DeleteObject: +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjectTagging for more information on using the DeleteObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteObjectTaggingRequest method. 
+// req, resp := client.DeleteObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { + op := &request.Operation{ + Name: opDeleteObjectTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &DeleteObjectTaggingInput{} + } + + output = &DeleteObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjectTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Removes the entire tag set from the specified object. For more information +// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// To use this operation, you must have permission to perform the s3:DeleteObjectTagging +// action. +// +// To delete tags of a specific object version, add the versionId query parameter +// in the request. You will need permission for the s3:DeleteObjectVersionTagging +// action. +// +// The following operations are related to DeleteObjectTagging: +// +// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + return out, req.Send() +} + +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteObjects for more information on using the DeleteObjects +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeleteObjectsRequest method. +// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. If you know the object keys that you want to delete, +// then this operation provides a suitable alternative to sending individual +// delete requests, reducing per-request overhead. +// +// The request can contain a list of up to 1000 keys that you want to delete. +// In the XML, you provide the object key names, and optionally, version IDs +// if you want to delete a specific version of the object from a versioning-enabled +// bucket. For each key, Amazon S3 performs a delete operation and returns the +// result of that delete, success or failure, in the response. Note that if +// the object specified in the request is not found, Amazon S3 returns the result +// as deleted. +// +// - Directory buckets - S3 Versioning isn't enabled or supported for directory +// buckets. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// The operation supports two modes for the response: verbose and quiet. By +// default, the operation uses verbose mode in which the response includes the +// result of deletion of each key in your request. In quiet mode the response +// includes only keys where the delete operation encountered an error. For a +// successful deletion in quiet mode, the operation does not return any information +// about the delete in the response body. +// +// When performing this action on an MFA Delete enabled bucket with a request +// that attempts to delete any versioned objects, you must include an MFA token. +// If you do not provide one, the entire request will fail, even if there are +// non-versioned objects you are trying to delete. If you provide an invalid +// token, whether there are versioned keys in the request or not, the entire +// Multi-Object Delete request will fail. For information about MFA Delete, see +// MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) +// in the Amazon S3 User Guide.
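+//
+// Editor's note - when deleting versioned objects from an MFA Delete enabled
+// bucket, the token is supplied via the DeleteObjectsInput's MFA field (a
+// minimal sketch, not generated documentation; the device serial number and
+// code below are placeholders):
+//
+//	input.MFA = aws.String("arn:aws:iam::123456789012:mfa/user 123456")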
+// +// Directory buckets - MFA delete is not supported by directory buckets. +// +// Permissions +// +// - General purpose bucket permissions - The following permissions are required +// in your policies when your DeleteObjects request includes specific headers. +// s3:DeleteObject - To delete an object from a bucket, you must always specify +// the s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific +// version of an object from a versioning-enabled bucket, you must specify +// the s3:DeleteObjectVersion permission. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. The Amazon Web Services CLI and SDKs +// create a session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Content-MD5 request header +// +// - General purpose bucket - The Content-MD5 request header is required +// for all Multi-Object Delete requests. Amazon S3 uses the header value +// to ensure that your request body has not been altered in transit. +// +// - Directory bucket - The Content-MD5 request header or an additional checksum +// request header (including x-amz-checksum-crc32, x-amz-checksum-crc32c, +// x-amz-checksum-sha1, or x-amz-checksum-sha256) is required for all Multi-Object +// Delete requests. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to DeleteObjects: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options.
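+//
+// A minimal end-to-end sketch (editor's illustration, not generated SDK text;
+// it assumes the usual aws, session, s3, context, and time imports, and the
+// bucket and key names are placeholders):
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//	defer cancel()
+//	_, err := svc.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
+//		Bucket: aws.String("example-bucket"),
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("a.txt")},
+//				{Key: aws.String("b.txt")},
+//			},
+//			Quiet: aws.Bool(true), // report only failed deletions
+//		},
+//	})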
+// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. +// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use +// this operation, you must have the s3:PutBucketPublicAccessBlock permission. +// For more information about permissions, see Permissions Related to Bucket +// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
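+//
+// A minimal sketch (editor's illustration, not generated SDK text; it assumes
+// an existing *s3.S3 client "svc", and the bucket name is a placeholder):
+//
+//	_, err := svc.DeletePublicAccessBlock(&s3.DeletePublicAccessBlockInput{
+//		Bucket: aws.String("example-bucket"),
+//	})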
+// +// The following operations are related to DeletePublicAccessBlock: +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the GET action uses the accelerate subresource to +// return the Transfer Acceleration state of a bucket, which is either Enabled +// or Suspended. Amazon S3 Transfer Acceleration is a bucket-level feature that +// enables you to perform faster data transfers to and from Amazon S3. +// +// To use this operation, you must have permission to perform the s3:GetAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// You set the Transfer Acceleration state of an existing bucket to Enabled +// or Suspended by using the PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// operation. +// +// A GET accelerate request does not return a state value for a bucket that +// has no transfer acceleration state. A bucket has no Transfer Acceleration +// state if a state has never been set on the bucket. +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAccelerateConfiguration: +// +// - PutBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. 
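+//
+// A minimal sketch (editor's illustration, not generated SDK text; it assumes
+// an existing *s3.S3 client "svc", and the bucket name is a placeholder).
+// Status is unset when acceleration has never been configured on the bucket:
+//
+//	out, err := svc.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.Status != nil {
+//		fmt.Println("accelerate status:", *out.Status) // "Enabled" or "Suspended"
+//	}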
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the GET action uses the acl subresource to return +// the access control list (ACL) of a bucket. To use GET to return the ACL of +// the bucket, you must have the READ_ACP access to the bucket. If READ_ACP +// permission is granted to the anonymous user, you can return the ACL of the +// bucket without using an authorization header. +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. 
For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the bucket-owner-full-control +// ACL with the owner being the account that created the bucket. For more information, +// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAcl: +// +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the GET action returns an analytics configuration +// (identified by the analytics configuration ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// For information about the Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetBucketAnalyticsConfiguration: +// +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests.
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the Cross-Origin Resource Sharing (CORS) configuration information +// set for the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// For more information about CORS, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). +// +// The following operations are related to GetBucketCors: +// +// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html) +// +// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
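+//
+// A minimal sketch (editor's illustration, not generated SDK text; it assumes
+// an existing *s3.S3 client "svc", and the bucket name is a placeholder):
+//
+//	out, err := svc.GetBucketCors(&s3.GetBucketCorsInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	// On success, out.CORSRules holds the bucket's CORS rules.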
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the default encryption configuration for an Amazon S3 bucket. By +// default, all buckets have a default encryption configuration that uses server-side +// encryption with Amazon S3 managed keys (SSE-S3). For information about the +// bucket default encryption feature, see Amazon S3 Bucket Default Encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon S3 User Guide. +// +// To use this operation, you must have permission to perform the s3:GetEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following operations are related to GetBucketEncryption: +// +// - PutBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html) +// +// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketIntelligentTieringConfiguration = "GetBucketIntelligentTieringConfiguration" + +// GetBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketIntelligentTieringConfiguration for more information on using the GetBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketIntelligentTieringConfigurationRequest method. 
+// req, resp := client.GetBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfigurationRequest(input *GetBucketIntelligentTieringConfigurationInput) (req *request.Request, output *GetBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketIntelligentTieringConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &GetBucketIntelligentTieringConfigurationInput{} + } + + output = &GetBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to GetBucketIntelligentTieringConfiguration include: +// +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketIntelligentTieringConfiguration for usage and error information. 
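+//
+// A minimal sketch (editor's illustration, not generated SDK text; it assumes
+// an existing *s3.S3 client "svc"; the bucket name and configuration ID are
+// placeholders):
+//
+//	out, err := svc.GetBucketIntelligentTieringConfiguration(&s3.GetBucketIntelligentTieringConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		Id:     aws.String("example-config-id"),
+//	})
+//	_ = out.IntelligentTieringConfiguration // the retrieved configuration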
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration +func (c *S3) GetBucketIntelligentTieringConfiguration(input *GetBucketIntelligentTieringConfigurationInput) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketIntelligentTieringConfigurationWithContext is the same as GetBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *GetBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*GetBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.GetBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. +// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns an inventory configuration (identified by the inventory configuration +// ID) from the bucket. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. 
For more information about permissions, see Permissions +// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html). +// +// The following operations are related to GetBucketInventoryConfiguration: +// +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
+// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// For an updated version of this API, see GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). +// If you configured a bucket lifecycle using the filter element, you should +// see the updated version of this topic. This topic is provided for backward +// compatibility. +// +// This operation is not supported by directory buckets. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// GetBucketLifecycle has the following special error: +// +// - Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycle: +// +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. 
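+//
+// Editor's note - because this action is deprecated, a minimal sketch of the
+// recommended replacement call instead (illustrative only; it assumes an
+// existing *s3.S3 client "svc", and the bucket name is a placeholder):
+//
+//	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	// On success, out.Rules holds the configured lifecycle rules.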
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. +// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility. For the related API description, +// see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html). 
+// The response describes +// the new filter element that you can use to specify a filter to select a subset +// of objects to which the rule applies. If you are using a previous version +// of the lifecycle configuration, it still works. +// +// Returns the lifecycle configuration information set on the bucket. For information +// about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html). +// +// To use this operation, you must have permission to perform the s3:GetLifecycleConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// GetBucketLifecycleConfiguration has the following special error: +// +// - Error code: NoSuchLifecycleConfiguration Description: The lifecycle +// configuration does not exist. HTTP Status Code: 404 Not Found SOAP Fault +// Code Prefix: Client +// +// The following operations are related to GetBucketLifecycleConfiguration: +// +// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) +// +// - PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html) +// +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...)
+ return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the Region the bucket resides in. You set the bucket's Region using +// the LocationConstraint request parameter in a CreateBucket request. For more +// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html). +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// We recommend that you use HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html) +// to return the Region that a bucket resides in. For backward compatibility, +// Amazon S3 continues to support GetBucketLocation. +// +// The following operations are related to GetBucketLocation: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. 
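+//
+// A minimal sketch (editor's illustration, not generated SDK text; it assumes
+// an existing *s3.S3 client "svc", and the bucket name is a placeholder).
+// LocationConstraint can be empty for buckets in us-east-1:
+//
+//	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		fmt.Println("region:", aws.StringValue(out.LocationConstraint))
+//	}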
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. +// +// The following operations are related to GetBucketLogging: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. 
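+//
+// // Illustrative sketch (not generated code): checking whether server access
+// // logging is enabled, assuming an initialized *s3.S3 client "svc".
+// out, err := svc.GetBucketLogging(&s3.GetBucketLoggingInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     return err
+// }
+// // LoggingEnabled is nil when the bucket has no logging configuration.
+// if le := out.LoggingEnabled; le != nil {
+//     fmt.Println(aws.StringValue(le.TargetBucket), aws.StringValue(le.TargetPrefix))
+// }
+//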
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. +// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. Note that this doesn't include the daily storage metrics. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. 
For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). +// +// The following operations are related to GetBucketMetricsConfiguration: +// +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// - Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
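+//
+// // Sketch of the request-lifecycle hook described above: mutating the
+// // underlying *http.Request before Send. The header name is hypothetical,
+// // and "client" is an assumed *s3.S3; the same pattern applies to any
+// // *Request method, including non-deprecated ones.
+// req, resp := client.GetBucketNotificationRequest(params)
+// req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
+// if err := req.Send(); err == nil {
+//     fmt.Println(resp)
+// }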
+// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// No longer used, see GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the notification configuration of a bucket. +// +// If notifications are not enabled on the bucket, the action returns an empty +// NotificationConfiguration element. +// +// By default, you must be the bucket owner to read the notification configuration +// of a bucket. However, the bucket owner can use a bucket policy to grant permission +// to other users to read this configuration with the s3:GetBucketNotification +// permission. +// +// When you use this API operation with an access point, provide the alias of +// the access point in place of the bucket name. +// +// When you use this API operation with an Object Lambda access point, provide +// the alias of the Object Lambda access point in place of the bucket name. +// If the Object Lambda access point alias in a request is not valid, the error +// code InvalidAccessPointAliasError is returned. For more information about +// InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). +// +// For more information about setting and reading the notification configuration +// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). +// For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html). +// +// The following action is related to GetBucketNotification: +// +// - PutBucketNotification (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. 
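+//
+// // Illustrative sketch (not generated code): listing the SNS topic
+// // notifications on a bucket, assuming an initialized *s3.S3 client "svc".
+// out, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     return err
+// }
+// for _, tc := range out.TopicConfigurations {
+//     fmt.Println(aws.StringValue(tc.TopicArn), aws.StringValueSlice(tc.Events))
+// }
+//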
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketOwnershipControls = "GetBucketOwnershipControls" + +// GetBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketOwnershipControls for more information on using the GetBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketOwnershipControlsRequest method. +// req, resp := client.GetBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControlsRequest(input *GetBucketOwnershipControlsInput) (req *request.Request, output *GetBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opGetBucketOwnershipControls, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &GetBucketOwnershipControlsInput{} + } + + output = &GetBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, +// you must have the s3:GetBucketOwnershipControls permission. For more information +// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using Object Ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html). 
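+//
+// // Illustrative sketch (not generated code): reading the object-ownership
+// // rule, assuming an initialized *s3.S3 client "svc".
+// out, err := svc.GetBucketOwnershipControls(&s3.GetBucketOwnershipControlsInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     return err
+// }
+// if oc := out.OwnershipControls; oc != nil {
+//     for _, rule := range oc.Rules {
+//         fmt.Println(aws.StringValue(rule.ObjectOwnership)) // e.g. BucketOwnerEnforced
+//     }
+// }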
+// +// The following operations are related to GetBucketOwnershipControls: +// +// - PutBucketOwnershipControls +// +// - DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls +func (c *S3) GetBucketOwnershipControls(input *GetBucketOwnershipControlsInput) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// GetBucketOwnershipControlsWithContext is the same as GetBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketOwnershipControlsWithContext(ctx aws.Context, input *GetBucketOwnershipControlsInput, opts ...request.Option) (*GetBucketOwnershipControlsOutput, error) { + req, out := c.GetBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. 
For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the GetBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. +// +// If you don't have GetBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// +// - General purpose bucket permissions - The s3:GetBucketPolicy permission +// is required in a policy. For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:GetBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # Example bucket policies +// +// General purpose buckets example bucket policies - See Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See Example bucket policies for +// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following action is related to GetBucketPolicy: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. 
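+//
+// // Illustrative sketch (not generated code): printing a bucket policy,
+// // assuming an initialized *s3.S3 client "svc".
+// out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     return err
+// }
+// // Policy holds the bucket policy document as a JSON string.
+// fmt.Println(aws.StringValue(out.Policy))
+//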
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. In order to use this operation, you must have the s3:GetBucketPolicyStatus +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// For more information about when Amazon S3 considers a bucket public, see +// The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). 
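+//
+// // Illustrative sketch (not generated code): checking whether a bucket is
+// // considered public, assuming an initialized *s3.S3 client "svc".
+// out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     return err
+// }
+// if ps := out.PolicyStatus; ps != nil && aws.BoolValue(ps.IsPublic) {
+//     fmt.Println("bucket policy allows public access")
+// }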
+// +// The following operations are related to GetBucketPolicyStatus: +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketReplicationRequest method. 
+// req, resp := client.GetBucketReplicationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketReplication,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &GetBucketReplicationInput{}
+ }
+
+ output = &GetBucketReplicationOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Returns the replication configuration of a bucket.
+//
+// It can take a while for a put or delete of a replication configuration to
+// propagate to all Amazon S3 systems. Therefore, a get request issued soon
+// after a put or delete can return an outdated result.
+//
+// For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
+// in the Amazon S3 User Guide.
+//
+// This action requires permissions for the s3:GetReplicationConfiguration action.
+// For more information about permissions, see Using Bucket Policies and User
+// Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html).
+//
+// If you include the Filter element in a replication configuration, you must
+// also include the DeleteMarkerReplication and Priority elements. The response
+// also returns those elements.
+//
+// For information about GetBucketReplication errors, see List of replication-related
+// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList).
+//
+// The following operations are related to GetBucketReplication:
+//
+// - PutBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html)
+//
+// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
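+//
+// // Illustrative sketch (not generated code): bounding the call with a
+// // timeout via the standard library (assumed imports: context, time;
+// // "svc" is an initialized *s3.S3 client).
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// out, err := svc.GetBucketReplicationWithContext(ctx, &s3.GetBucketReplicationInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     // err is also returned if the deadline expires before a response.
+//     return err
+// }
+// fmt.Println(out.ReplicationConfiguration)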
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. +// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the request payment configuration of a bucket. To use this version +// of the operation, you must be the bucket owner. For more information, see +// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to GetBucketRequestPayment: +// +// - ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the tag set associated with the bucket. +// +// To use this operation, you must have permission to perform the s3:GetBucketTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// GetBucketTagging has the following special error: +// +// - Error code: NoSuchTagSet Description: There is no tag set associated +// with the bucket. +// +// The following operations are related to GetBucketTagging: +// +// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html) +// +// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. 
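+//
+// // Illustrative sketch (not generated code): distinguishing the special
+// // NoSuchTagSet error described above via a runtime type assertion on
+// // awserr.Error, assuming an initialized *s3.S3 client "svc".
+// out, err := svc.GetBucketTagging(&s3.GetBucketTaggingInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchTagSet" {
+//     fmt.Println("bucket has no tags")
+// } else if err != nil {
+//     return err
+// }
+// for _, tag := range out.TagSet {
+//     fmt.Println(aws.StringValue(tag.Key), "=", aws.StringValue(tag.Value))
+// }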
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the versioning state of a bucket. +// +// To retrieve the versioning state of a bucket, you must be the bucket owner. +// +// This implementation also returns the MFA Delete status of the versioning +// state. If the MFA Delete status is enabled, the bucket owner must use an +// authentication device to change the versioning state of the bucket. +// +// The following operations are related to GetBucketVersioning: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. 
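+//
+// // Illustrative sketch (not generated code): reading the versioning and
+// // MFA Delete state, assuming an initialized *s3.S3 client "svc".
+// out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     return err
+// }
+// // Status is empty if versioning has never been enabled on the bucket.
+// fmt.Println(aws.StringValue(out.Status), aws.StringValue(out.MFADelete))
+//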
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ return out, req.Send()
+}
+
+// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetBucketWebsite for more information on using the GetBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the GetBucketWebsiteRequest method.
+// req, resp := client.GetBucketWebsiteRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opGetBucketWebsite,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &GetBucketWebsiteInput{}
+ }
+
+ output = &GetBucketWebsiteOutput{}
+ req = c.newRequest(op, input, output)
+ return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Returns the website configuration for a bucket. To host a website on Amazon
+// S3, you can configure a bucket as a website by adding a website configuration.
+// For more information about hosting websites, see Hosting Websites on Amazon
+// S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html).
+//
+// This GET action requires the S3:GetBucketWebsite permission. By default,
+// only the bucket owner can read the bucket website configuration. However,
+// bucket owners can allow other users to read the website configuration by
+// writing a bucket policy granting them the S3:GetBucketWebsite permission.
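+//
+// // Illustrative sketch (not generated code): reading the index document
+// // suffix of a bucket configured as a website, assuming an initialized
+// // *s3.S3 client "svc".
+// out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//     Bucket: aws.String("examplebucket"),
+// })
+// if err != nil {
+//     return err
+// }
+// if out.IndexDocument != nil {
+//     fmt.Println(aws.StringValue(out.IndexDocument.Suffix))
+// }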
+// +// The following operations are related to GetBucketWebsite: +// +// - DeleteBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html) +// +// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves an object from Amazon S3. +// +// In the GetObject request, specify the full key name for the object. +// +// General purpose buckets - Both the virtual-hosted-style requests and the +// path-style requests are supported. For a virtual hosted-style request example, +// if you have the object photos/2006/February/sample.jpg, specify the object +// key name as /photos/2006/February/sample.jpg. 
For a path-style request example,
+// if you have the object photos/2006/February/sample.jpg in the bucket named
+// examplebucket, specify the object key name as /examplebucket/photos/2006/February/sample.jpg.
+// For more information about request types, see HTTP Host Header Bucket Specification
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket)
+// in the Amazon S3 User Guide.
+//
+// Directory buckets - Only virtual-hosted-style requests are supported. For
+// a virtual hosted-style request example, if you have the object photos/2006/February/sample.jpg
+// in the bucket named examplebucket--use1-az5--x-s3, specify the object key
+// name as /photos/2006/February/sample.jpg. Also, when you make requests to
+// this API operation, your requests are sent to the Zonal endpoint. These endpoints
+// support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name
+// . Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// # Permissions
+//
+// - General purpose bucket permissions - You must have the required permissions
+// in a policy. To use GetObject, you must have READ access to the object
+// (or version). If you grant READ access to the anonymous user, the GetObject
+// operation returns the object without using an authorization header. For
+// more information, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// in the Amazon S3 User Guide. If you include a versionId in your request
+// header, you must have the s3:GetObjectVersion permission to access a specific
+// version of an object. The s3:GetObject permission is not required in this
+// scenario. If you request the current version of an object without a specific
+// versionId in the request header, only the s3:GetObject permission is required.
+// The s3:GetObjectVersion permission is not required in this scenario. If
+// the object that you request doesn’t exist, the error that Amazon S3
+// returns depends on whether you also have the s3:ListBucket permission.
+// If you have the s3:ListBucket permission on the bucket, Amazon S3 returns
+// an HTTP status code 404 Not Found error. If you don’t have the s3:ListBucket
+// permission, Amazon S3 returns an HTTP status code 403 Access Denied error.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create a session and refresh the session token automatically to avoid service
+// interruptions when a session expires.
For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Storage classes +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, only the S3 Express One Zone storage +// class is supported to store newly created objects. Unsupported storage class +// values won't write a destination object and will respond with the HTTP status +// code 400 Bad Request. +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for the GetObject requests, if your object uses server-side encryption +// with Amazon S3 managed encryption keys (SSE-S3), server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), or dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS). If you include the header in +// your GetObject requests for the object that uses these types of keys, you’ll +// get an HTTP 400 Bad Request error. +// +// # Overriding response header values through the request +// +// There are times when you want to override certain response header values +// of a GetObject response. For example, you might override the Content-Disposition +// response header value through your GetObject request. +// +// You can override values for a set of response headers. These modified response +// header values are included only in a successful response, that is, when the +// HTTP status code 200 OK is returned. The headers you can override using the +// following query parameters in the request are a subset of the headers that +// Amazon S3 accepts when you create an object. +// +// The response headers that you can override for the GetObject response are +// Cache-Control, Content-Disposition, Content-Encoding, Content-Language, Content-Type, +// and Expires. +// +// To override values for a set of response headers in the GetObject response, +// you can use the following query parameters in the request. +// +// - response-cache-control +// +// - response-content-disposition +// +// - response-content-encoding +// +// - response-content-language +// +// - response-content-type +// +// - response-expires +// +// When you use these parameters, you must sign the request by using either +// an Authorization header or a presigned URL. These parameters cannot be used +// with an unsigned (anonymous) request. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to GetObject: +// +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// - ErrCodeInvalidObjectState "InvalidObjectState" +// Object is archived and inaccessible until restored. +// +// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval +// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering +// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier, +// before you can retrieve the object you must first restore a copy using RestoreObject +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// Otherwise, this operation returns an InvalidObjectState error. For information +// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectAclRequest method. 
+// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the access control list (ACL) of an object. To use this operation, +// you must have s3:GetObjectAcl permissions or READ_ACP access to the object. +// For more information, see Mapping of ACL permissions and access policy permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping) +// in the Amazon S3 User Guide +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// By default, GET returns ACL information about the current version of an object. +// To return ACL information about a different version, use the versionId subresource. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// requests to read ACLs are still supported and return the bucket-owner-full-control +// ACL with the owner being the account that created the bucket. For more information, +// see Controlling object ownership and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to GetObjectAcl: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
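+//
+// A minimal sketch of calling this operation with a deadline, assuming the
+// context and time packages are imported; the bucket and key below are
+// placeholder values:
+//
+//	ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 10*time.Second)
+//	defer cancel()
+//	out, err := client.GetObjectAclWithContext(ctx, &s3.GetObjectAclInput{
+//	    Bucket: aws.String("examplebucket"),
+//	    Key:    aws.String("photos/2006/February/sample.jpg"),
+//	})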
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAttributes = "GetObjectAttributes" + +// GetObjectAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAttributes operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectAttributes for more information on using the GetObjectAttributes +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectAttributesRequest method. +// req, resp := client.GetObjectAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributesRequest(input *GetObjectAttributesInput) (req *request.Request, output *GetObjectAttributesOutput) { + op := &request.Operation{ + Name: opGetObjectAttributes, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?attributes", + } + + if input == nil { + input = &GetObjectAttributesInput{} + } + + output = &GetObjectAttributesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAttributes API operation for Amazon Simple Storage Service. +// +// Retrieves all the metadata from an object without returning the object itself. +// This operation is useful if you're interested only in an object's metadata. +// +// GetObjectAttributes combines the functionality of HeadObject and ListParts. +// All of the data returned with each of those individual calls can be returned +// with a single call to GetObjectAttributes. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use GetObjectAttributes, you +// must have READ access to the object. The permissions that you need to +// use this operation with depend on whether the bucket is versioned. If +// the bucket is versioned, you need both the s3:GetObjectVersion and s3:GetObjectVersionAttributes +// permissions for this operation. If the bucket is not versioned, you need +// the s3:GetObject and s3:GetObjectAttributes permissions. For more information, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. If the object that you request does not exist, +// the error Amazon S3 returns depends on whether you also have the s3:ListBucket +// permission. 
If you have the s3:ListBucket permission on the bucket, Amazon +// S3 returns an HTTP status code 404 Not Found ("no such key") error. If +// you don't have the s3:ListBucket permission, Amazon S3 returns an HTTP +// status code 403 Forbidden ("access denied") error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). The x-amz-server-side-encryption +// header is used when you PUT an object to S3 and want to specify the encryption +// method. If you include this header in a GET request for an object that uses +// these types of keys, you’ll get an HTTP 400 Bad Request error. It's because +// the encryption method can't be changed when you retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers +// to provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// # Versioning +// +// Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID is +// supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. 
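+//
+// As a sketch, a request for a subset of attributes might look like the
+// following; the bucket and key are placeholders, and the attribute names
+// are assumed to use this package's ObjectAttributes* constants:
+//
+//	out, err := client.GetObjectAttributes(&s3.GetObjectAttributesInput{
+//	    Bucket: aws.String("examplebucket"),
+//	    Key:    aws.String("photos/2006/February/sample.jpg"),
+//	    ObjectAttributes: []*string{
+//	        aws.String(s3.ObjectAttributesEtag),
+//	        aws.String(s3.ObjectAttributesObjectSize),
+//	    },
+//	})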
+// +// # Conditional request headers +// +// Consider the following when using request headers: +// +// - If both of the If-Match and If-Unmodified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 200 OK and the data requested: If-Match condition evaluates to true. If-Unmodified-Since +// condition evaluates to false. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// - If both of the If-None-Match and If-Modified-Since headers are present +// in the request as follows, then Amazon S3 returns the HTTP status code +// 304 Not Modified: If-None-Match condition evaluates to false. If-Modified-Since +// condition evaluates to true. For more information about conditional requests, +// see RFC 7232 (https://tools.ietf.org/html/rfc7232). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following actions are related to GetObjectAttributes: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html) +// +// - GetObjectLegalHold (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html) +// +// - GetObjectLockConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html) +// +// - GetObjectRetention (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html) +// +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// - HeadObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAttributes for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes +func (c *S3) GetObjectAttributes(input *GetObjectAttributesInput) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + return out, req.Send() +} + +// GetObjectAttributesWithContext is the same as GetObjectAttributes with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAttributes for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAttributesWithContext(ctx aws.Context, input *GetObjectAttributesInput, opts ...request.Option) (*GetObjectAttributesOutput, error) { + req, out := c.GetObjectAttributesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectLegalHoldRequest method. +// req, resp := client.GetObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opGetObjectLegalHold, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &GetObjectLegalHoldInput{} + } + + output = &GetObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLegalHold API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets an object's current legal hold status. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectLegalHold: +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() +} + +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. +// req, resp := client.GetObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opGetObjectLockConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &GetObjectLockConfigurationInput{} + } + + output = &GetObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Gets the Object Lock configuration for a bucket. The rule specified in the +// Object Lock configuration will be applied by default to every new object +// placed in the specified bucket. For more information, see Locking Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// The following action is related to GetObjectLockConfiguration: +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
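+//
+// The variadic options can adjust settings for this request only; for
+// example, a sketch that raises the log level on a single call (input is
+// assumed to be a prepared *GetObjectLockConfigurationInput):
+//
+//	out, err := c.GetObjectLockConfigurationWithContext(ctx, input,
+//	    request.WithLogLevel(aws.LogDebugWithHTTPBody))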
+func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectRetentionRequest method. +// req, resp := client.GetObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { + op := &request.Operation{ + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &GetObjectRetentionInput{} + } + + output = &GetObjectRetentionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectRetention API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves an object's retention settings. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectRetention: +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
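+//
+// The awserr type-assertion pattern mentioned above looks roughly like this
+// sketch (input is assumed to be a prepared *GetObjectRetentionInput):
+//
+//	out, err := c.GetObjectRetentionWithContext(ctx, input)
+//	if err != nil {
+//	    if aerr, ok := err.(awserr.Error); ok {
+//	        // Code and Message identify the service error, if any.
+//	        fmt.Println(aerr.Code(), aerr.Message())
+//	    }
+//	}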
+func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns the tag-set of an object. You send the GET request against the tagging +// subresource associated with the object. +// +// To use this operation, you must have permission to perform the s3:GetObjectTagging +// action. By default, the GET action returns information about current version +// of an object. For a versioned bucket, you can have multiple versions of an +// object in your bucket. To retrieve tags of any other version, use the versionId +// query parameter. You also need permission for the s3:GetObjectVersionTagging +// action. +// +// By default, the bucket owner has this permission and can grant this permission +// to others. +// +// For information about the Amazon S3 object tagging feature, see Object Tagging +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). +// +// The following actions are related to GetObjectTagging: +// +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// - PutObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns torrent files from a bucket. BitTorrent can save you bandwidth when +// you're distributing large files. +// +// You can get torrent only for objects that are less than 5 GB in size, and +// that are not encrypted using server-side encryption with a customer-provided +// encryption key. +// +// To use GET, you must have READ access to the object. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// The following action is related to GetObjectTorrent: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To +// use this operation, you must have the s3:GetBucketPublicAccessBlock permission. +// For more information about Amazon S3 permissions, see Specifying Permissions +// in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock settings are different between the bucket and the +// account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. 
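+//
+// A brief sketch of reading the returned settings (the bucket name is a
+// placeholder):
+//
+//	out, err := client.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
+//	    Bucket: aws.String("examplebucket"),
+//	})
+//	if err == nil && out.PublicAccessBlockConfiguration != nil {
+//	    fmt.Println(aws.BoolValue(out.PublicAccessBlockConfiguration.BlockPublicAcls))
+//	}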
+// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to GetPublicAccessBlock: +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// - PutPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html) +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// You can use this operation to determine if a bucket exists and if you have +// permission to access it. The action returns a 200 OK if the bucket exists +// and you have permission to access it. +// +// If the bucket does not exist or you do not have permission to access it, +// the HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 +// Not Found code. A message body is not included, so you cannot determine the +// exception beyond these HTTP response codes. +// +// Directory buckets - You must make requests for this API operation to the +// Zonal endpoint. These endpoints support virtual-hosted-style requests in +// the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style +// requests are not supported. For more information, see Regional and Zonal +// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Authentication and authorization +// +// All HeadBucket requests must be authenticated and signed by using IAM credentials +// (access key ID and secret access key for the IAM identities). All headers +// with the x-amz- prefix, including x-amz-copy-source, must be signed. For +// more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). +// +// Directory bucket - You must use IAM credentials to authenticate and authorize +// your access to the HeadBucket API operation, instead of using the temporary +// security credentials through the CreateSession API operation. +// +// Amazon Web Services CLI or SDKs handles authentication and authorization +// on your behalf. +// +// Permissions +// +// - General purpose bucket permissions - To use this operation, you must +// have permissions to perform the s3:ListBucket action. The bucket owner +// has this permission by default and can grant this permission to others. +// For more information about permissions, see Managing access permissions +// to your Amazon S3 resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - You must have the s3express:CreateSession +// permission in the Action element of a policy. By default, the session +// is in the ReadWrite mode. If you want to restrict the access, you can +// explicitly set the s3express:SessionMode condition key to ReadOnly on +// the bucket. 
For more information about example bucket policies, see Example +// bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// and Amazon Web Services Identity and Access Management (IAM) identity-based +// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. 
+// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're interested only in an object's +// metadata. +// +// A HEAD request has the same options as a GET operation on an object. The +// response is identical to the GET response except that there is no response +// body. Because of this, if the HEAD request generates an error, it returns +// a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 +// Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not +// possible to retrieve the exact exception of these error codes. +// +// Request headers are limited to 8 KB in size. For more information, see Common +// Request Headers (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html). +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use HEAD, you must have the +// s3:GetObject permission. You need the relevant read object (or version) +// permission for this operation. For more information, see Actions, resources, +// and condition keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html) +// in the Amazon S3 User Guide. If the object you request doesn't exist, +// the error that Amazon S3 returns depends on whether you also have the +// s3:ListBucket permission. If you have the s3:ListBucket permission on +// the bucket, Amazon S3 returns an HTTP status code 404 Not Found error. +// If you don’t have the s3:ListBucket permission, Amazon S3 returns an +// HTTP status code 403 Forbidden error. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # Encryption +// +// Encryption request headers, like x-amz-server-side-encryption, should not +// be sent for HEAD requests if your object uses server-side encryption with +// Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption +// with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with +// Amazon S3 managed encryption keys (SSE-S3). 
The x-amz-server-side-encryption +// header is used when you PUT an object to S3 and want to specify the encryption +// method. If you include this header in a HEAD request for an object that uses +// these types of keys, you’ll get an HTTP 400 Bad Request error. It's because +// the encryption method can't be changed when you retrieve the object. +// +// If you encrypt an object by using server-side encryption with customer-provided +// encryption keys (SSE-C) when you store the object in Amazon S3, then when +// you retrieve the metadata from the object, you must use the following headers +// to provide the encryption key for the server to be able to retrieve the object's +// metadata. The headers are: +// +// - x-amz-server-side-encryption-customer-algorithm +// +// - x-amz-server-side-encryption-customer-key +// +// - x-amz-server-side-encryption-customer-key-MD5 +// +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. +// +// Directory bucket permissions - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// Versioning +// +// - If the current version of the object is a delete marker, Amazon S3 behaves +// as if the object was deleted and includes x-amz-delete-marker: true in +// the response. +// +// - If the specified version is a delete marker, the response returns a +// 405 Method Not Allowed error and the Last-Modified: timestamp response +// header. +// +// - Directory buckets - Delete marker is not supported by directory buckets. +// +// - Directory buckets - S3 Versioning isn't enabled and supported for directory +// buckets. For this API operation, only the null value of the version ID +// is supported by directory buckets. You can only specify null to the versionId +// query parameter in the request. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following actions are related to HeadObject: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. +// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Lists the analytics configurations for the bucket. You can have up to 1,000 +// analytics configurations per bucket. +// +// This action supports list pagination and does not return more than 100 configurations +// at a time. You should always check the IsTruncated element in the response. +// If there are no more configurations to list, IsTruncated is set to false. +// If there are more configurations to list, IsTruncated is set to true, and +// there will be a value in NextContinuationToken. You use the NextContinuationToken +// value to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). 
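+//
+// A minimal sketch of the continuation-token pattern described above, assuming
+// an existing *s3.S3 client named svc and a hypothetical bucket name:
+//
+//	var token *string
+//	for {
+//		out, err := svc.ListBucketAnalyticsConfigurations(&s3.ListBucketAnalyticsConfigurationsInput{
+//			Bucket:            aws.String("my-bucket"), // hypothetical bucket name
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		fmt.Println(out.AnalyticsConfigurationList)
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}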
+// +// For information about Amazon S3 analytics feature, see Amazon S3 Analytics +// – Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). +// +// The following operations are related to ListBucketAnalyticsConfigurations: +// +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// - PutBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketIntelligentTieringConfigurations = "ListBucketIntelligentTieringConfigurations" + +// ListBucketIntelligentTieringConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketIntelligentTieringConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketIntelligentTieringConfigurations for more information on using the ListBucketIntelligentTieringConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketIntelligentTieringConfigurationsRequest method. 
+// req, resp := client.ListBucketIntelligentTieringConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations +func (c *S3) ListBucketIntelligentTieringConfigurationsRequest(input *ListBucketIntelligentTieringConfigurationsInput) (req *request.Request, output *ListBucketIntelligentTieringConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketIntelligentTieringConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &ListBucketIntelligentTieringConfigurationsInput{} + } + + output = &ListBucketIntelligentTieringConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketIntelligentTieringConfigurations API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Lists the S3 Intelligent-Tiering configuration from the specified bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to ListBucketIntelligentTieringConfigurations include: +// +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// - PutBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html) +// +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketIntelligentTieringConfigurations for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations
+func (c *S3) ListBucketIntelligentTieringConfigurations(input *ListBucketIntelligentTieringConfigurationsInput) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+	req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input)
+	return out, req.Send()
+}
+
+// ListBucketIntelligentTieringConfigurationsWithContext is the same as ListBucketIntelligentTieringConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketIntelligentTieringConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketIntelligentTieringConfigurationsWithContext(ctx aws.Context, input *ListBucketIntelligentTieringConfigurationsInput, opts ...request.Option) (*ListBucketIntelligentTieringConfigurationsOutput, error) {
+	req, out := c.ListBucketIntelligentTieringConfigurationsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
+
+// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketInventoryConfigurations operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//	// Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+//	req, resp := client.ListBucketInventoryConfigurationsRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
+	op := &request.Operation{
+		Name:       opListBucketInventoryConfigurations,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?inventory",
+	}
+
+	if input == nil {
+		input = &ListBucketInventoryConfigurationsInput{}
+	}
+
+	output = &ListBucketInventoryConfigurationsOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Returns a list of inventory configurations for the bucket. You can have up
+// to 1,000 inventory configurations per bucket.
+//
+// This action supports list pagination and does not return more than 100 configurations
+// at a time. Always check the IsTruncated element in the response.
If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetInventoryConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about the Amazon S3 inventory feature, see Amazon S3 Inventory +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// +// The following operations are related to ListBucketInventoryConfigurations: +// +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// - PutBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. +// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Lists the metrics configurations for the bucket. The metrics configurations +// are only for the request metrics of the bucket and do not provide information +// on daily storage metrics. You can have up to 1,000 configurations per bucket. +// +// This action supports list pagination and does not return more than 100 configurations +// at a time. Always check the IsTruncated element in the response. If there +// are no more configurations to list, IsTruncated is set to false. If there +// are more configurations to list, IsTruncated is set to true, and there is +// a value in NextContinuationToken. You use the NextContinuationToken value +// to continue the pagination of the list by passing the value in continuation-token +// in the request to GET the next page. +// +// To use this operation, you must have permissions to perform the s3:GetMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For more information about metrics configurations and CloudWatch request +// metrics, see Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
+// +// The following operations are related to ListBucketMetricsConfigurations: +// +// - PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html) +// +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListBucketsRequest method. +// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. 
+// +// Returns a list of all buckets owned by the authenticated sender of the request. +// To use this operation, you must have the s3:ListAllMyBuckets permission. +// +// For information about Amazon S3 buckets, see Creating, configuring, and working +// with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListDirectoryBuckets = "ListDirectoryBuckets" + +// ListDirectoryBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListDirectoryBuckets operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListDirectoryBuckets for more information on using the ListDirectoryBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListDirectoryBucketsRequest method. +// req, resp := client.ListDirectoryBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBucketsRequest(input *ListDirectoryBucketsInput) (req *request.Request, output *ListDirectoryBucketsOutput) { + op := &request.Operation{ + Name: opListDirectoryBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"ContinuationToken"}, + LimitToken: "MaxDirectoryBuckets", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDirectoryBucketsInput{} + } + + output = &ListDirectoryBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListDirectoryBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all Amazon S3 directory buckets owned by the authenticated +// sender of the request. 
For more information about directory buckets, see +// Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You must have the s3express:ListAllMyDirectoryBuckets permission in an IAM +// identity-based policy instead of a bucket policy. Cross-account access to +// this API operation isn't supported. This operation can only be performed +// by the Amazon Web Services account that owns the resource. For more information +// about directory bucket policies and permissions, see Amazon Web Services +// Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListDirectoryBuckets for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListDirectoryBuckets +func (c *S3) ListDirectoryBuckets(input *ListDirectoryBucketsInput) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + return out, req.Send() +} + +// ListDirectoryBucketsWithContext is the same as ListDirectoryBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListDirectoryBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, opts ...request.Option) (*ListDirectoryBucketsOutput, error) { + req, out := c.ListDirectoryBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListDirectoryBucketsPages iterates over the pages of a ListDirectoryBuckets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDirectoryBuckets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDirectoryBuckets operation. 
+// pageNum := 0 +// err := client.ListDirectoryBucketsPages(params, +// func(page *s3.ListDirectoryBucketsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListDirectoryBucketsPages(input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool) error { + return c.ListDirectoryBucketsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListDirectoryBucketsPagesWithContext same as ListDirectoryBucketsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListDirectoryBucketsPagesWithContext(ctx aws.Context, input *ListDirectoryBucketsInput, fn func(*ListDirectoryBucketsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListDirectoryBucketsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListDirectoryBucketsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListDirectoryBucketsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListMultipartUploadsRequest method. +// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + output = &ListMultipartUploadsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads in a bucket. An in-progress +// multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload +// request, but has not yet been completed or aborted. 
+// +// Directory buckets - If multipart uploads in a directory bucket are in progress, +// you can't delete the bucket until all the in-progress multipart uploads are +// aborted or completed. +// +// The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads +// in the response. The limit of 1,000 multipart uploads is also the default +// value. You can further limit the number of uploads in a response by specifying +// the max-uploads request parameter. If there are more than 1,000 multipart +// uploads that satisfy your ListMultipartUploads request, the response returns +// an IsTruncated element with the value of true, a NextKeyMarker element, and +// a NextUploadIdMarker element. To list the remaining multipart uploads, you +// need to make subsequent ListMultipartUploads requests. In these requests, +// include two query parameters: key-marker and upload-id-marker. Set the value +// of key-marker to the NextKeyMarker value from the previous response. Similarly, +// set the value of upload-id-marker to the NextUploadIdMarker value from the +// previous response. +// +// Directory buckets - The upload-id-marker element and the NextUploadIdMarker +// element aren't supported by directory buckets. To list the additional multipart +// uploads, you only need to set the value of key-marker to the NextKeyMarker +// value from the previous response. +// +// For more information about multipart uploads, see Uploading Objects Using +// Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). 
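+//
+// A minimal sketch of the key-marker and upload-id-marker pagination described
+// above, assuming an existing *s3.S3 client named svc and a hypothetical bucket
+// name:
+//
+//	in := &s3.ListMultipartUploadsInput{Bucket: aws.String("my-bucket")} // hypothetical bucket name
+//	for {
+//		out, err := svc.ListMultipartUploads(in)
+//		if err != nil {
+//			break // handle the error in real code
+//		}
+//		fmt.Println(out.Uploads)
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		in.KeyMarker = out.NextKeyMarker
+//		in.UploadIdMarker = out.NextUploadIdMarker
+//	}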
+// +// Sorting of multipart uploads in response +// +// - General purpose bucket - In the ListMultipartUploads response, the multipart +// uploads are sorted based on two criteria: Key-based sorting - Multipart +// uploads are initially sorted in ascending order based on their object +// keys. Time-based sorting - For uploads that share the same object key, +// they are further sorted in ascending order based on the upload initiation +// time. Among uploads with the same key, the one that was initiated first +// will appear before the ones that were initiated later. +// +// - Directory bucket - In the ListMultipartUploads response, the multipart +// uploads aren't sorted lexicographically based on the object keys. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to ListMultipartUploads: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
+// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns metadata about all versions of the objects in a bucket. You can also +// use request parameters as selection criteria to return metadata about a subset +// of all the object versions. 
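+//
+// A minimal sketch of walking every version and delete marker with the paginated
+// helper, assuming an existing *s3.S3 client named svc and a hypothetical bucket
+// name:
+//
+//	err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
+//		Bucket: aws.String("my-bucket"), // hypothetical bucket name
+//	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+//		fmt.Println(page.Versions, page.DeleteMarkers)
+//		return true // keep paging
+//	})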
+// +// To use this operation, you must have permission to perform the s3:ListBucketVersions +// action. Be aware of the name difference. +// +// A 200 OK response can contain valid or invalid XML. Make sure to design your +// application to parse the contents of the response and handle it appropriately. +// +// To use this operation, you must have READ access to the bucket. +// +// The following operations are related to ListObjectVersions: +// +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Returns some or all (up to 1,000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. A 200 OK response can contain valid or invalid XML. Be sure +// to design your application to parse the contents of the response and handle +// it appropriately. +// +// This action has been revised. We recommend that you use the newer version, +// ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html), +// when developing applications. For backward compatibility, Amazon S3 continues +// to support ListObjects. +// +// The following operations are related to ListObjects: +// +// - ListObjectsV2 (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - ListBuckets (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *s3.ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1,000) of the objects in a bucket with each request. +// You can use the request parameters as selection criteria to return a subset +// of the objects in a bucket. A 200 OK response can contain valid or invalid +// XML. Make sure to design your application to parse the contents of the response +// and handle it appropriately. For more information about listing objects, +// see Listing object keys programmatically (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html) +// in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html). +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - To use this operation, you must +// have READ access to the bucket. You must have permission to perform the +// s3:ListBucket action. The bucket owner has this permission by default +// and can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. 
Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// Sorting order of returned objects +// +// - General purpose bucket - For general purpose buckets, ListObjectsV2 +// returns objects in lexicographical order based on their key names. +// +// - Directory bucket - For directory buckets, ListObjectsV2 does not return +// objects in lexicographical order. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// This section describes the latest revision of this action. We recommend that +// you use this revised API operation for application development. For backward +// compatibility, Amazon S3 continues to support the prior version of this API +// operation, ListObjects (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html). +// +// The following operations are related to ListObjectsV2: +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
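+//
+// A minimal sketch of draining every page and collecting keys; the client
+// value svc (*s3.S3) and the bucket name are illustrative assumptions, not
+// part of the generated API:
+//
+//	var keys []string
+//	err := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//		for _, obj := range page.Contents {
+//			keys = append(keys, aws.StringValue(obj.Key))
+//		}
+//		return true // returning false here would stop iteration early
+//	})
+//	if err != nil {
+//		// handle the request error
+//	}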
+// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// +// To use this operation, you must provide the upload ID in the request. 
You +// obtain this uploadID by sending the initiate multipart upload request through +// CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// +// The ListParts request returns a maximum of 1,000 uploaded parts. The limit +// of 1,000 parts is also the default value. You can restrict the number of +// parts in a response by specifying the max-parts request parameter. If your +// multipart upload consists of more than 1,000 parts, the response returns +// an IsTruncated field with the value of true, and a NextPartNumberMarker element. +// To list remaining uploaded parts, in subsequent ListParts requests, include +// the part-number-marker query string parameter and set its value to the NextPartNumberMarker +// field value from the previous response. +// +// For more information on multipart uploads, see Uploading Objects Using Multipart +// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Permissions +// +// - General purpose bucket permissions - For information about permissions +// required to use the multipart upload API, see Multipart Upload and Permissions +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html) +// in the Amazon S3 User Guide. If the upload was created using server-side +// encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer +// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you +// must have permission to the kms:Decrypt action for the ListParts request +// to succeed. +// +// - Directory bucket permissions - To grant access to this API operation +// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html) +// API operation for session-based authorization. Specifically, you grant +// the s3express:CreateSession permission to the directory bucket in a bucket +// policy or an IAM identity-based policy. Then, you make the CreateSession +// API call on the bucket to obtain a session token. With the session token +// in your request header, you can make API requests to this operation. After +// the session token expires, you make another CreateSession API call to +// generate a new session token for use. Amazon Web Services CLI or SDKs +// create session and refresh the session token automatically to avoid service +// interruptions when a session expires. For more information about authorization, +// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html). +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
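+//
+// A minimal sketch of listing the parts of one in-progress multipart upload;
+// svc (*s3.S3), the bucket, the key, and uploadID (as returned by
+// CreateMultipartUpload) are illustrative assumptions:
+//
+//	err := svc.ListPartsPages(&s3.ListPartsInput{
+//		Bucket:   aws.String("amzn-s3-demo-bucket"),
+//		Key:      aws.String("large-object"),
+//		UploadId: aws.String(uploadID),
+//	}, func(page *s3.ListPartsOutput, lastPage bool) bool {
+//		for _, part := range page.Parts {
+//			fmt.Printf("part %d: %d bytes\n",
+//				aws.Int64Value(part.PartNumber), aws.Int64Value(part.Size))
+//		}
+//		return true // keep paging past each NextPartNumberMarker
+//	})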
+// +// The following operations are related to ListParts: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - GetObjectAttributes (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *s3.ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer +// Acceleration is a bucket-level feature that enables you to perform faster +// data transfers to Amazon S3. +// +// To use this operation, you must have permission to perform the s3:PutAccelerateConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The Transfer Acceleration state of a bucket can be set to one of the following +// two values: +// +// - Enabled – Enables accelerated data transfers to the bucket. +// +// - Suspended – Disables accelerated data transfers to the bucket. 
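+//
+// A minimal sketch of switching a bucket to the Enabled state; svc (*s3.S3)
+// and the bucket name are illustrative assumptions:
+//
+//	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		AccelerateConfiguration: &s3.AccelerateConfiguration{
+//			Status: aws.String(s3.BucketAccelerateStatusEnabled),
+//		},
+//	})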
+// +// The GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// action returns the transfer acceleration state of a bucket. +// +// After setting the Transfer Acceleration state of a bucket to Enabled, it +// might take up to thirty minutes before the data transfer rates to the bucket +// increase. +// +// The name of the bucket used for Transfer Acceleration must be DNS-compliant +// and must not contain periods ("."). +// +// For more information about transfer acceleration, see Transfer Acceleration +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html). +// +// The following operations are related to PutBucketAccelerateConfiguration: +// +// - GetBucketAccelerateConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html) +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketAclRequest method. 
+// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the permissions on an existing bucket using access control lists (ACL). +// For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). +// To set the ACL of a bucket, you must have the WRITE_ACP permission. +// +// You can use one of the following two ways to set a bucket's permissions: +// +// - Specify the ACL in the request body +// +// - Specify permissions using request headers +// +// You cannot specify access permission using both the body and the request +// headers. +// +// Depending on your application needs, you may choose to set the ACL on a bucket +// using either the request body or the headers. For example, if you have an +// existing application that updates a bucket ACL using the request body, then +// you can continue to use that approach. +// +// If your bucket uses the bucket owner enforced setting for S3 Object Ownership, +// ACLs are disabled and no longer affect permissions. You must use policies +// to grant access to your bucket and the objects in it. Requests to set ACLs +// or update ACLs fail and return the AccessControlListNotSupported error code. +// Requests to read ACLs are still supported. For more information, see Controlling +// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You can set access permissions by using one of the following methods: +// +// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports +// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a +// predefined set of grantees and permissions. Specify the canned ACL name +// as the value of x-amz-acl. If you use this header, you cannot use other +// access control-specific headers in your request. For more information, +// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). +// +// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, +// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using +// these headers, you specify explicit access permissions and grantees (Amazon +// Web Services accounts or Amazon S3 groups) who will receive the permission. +// If you use these ACL-specific headers, you cannot use the x-amz-acl header +// to set a canned ACL. These parameters map to the set of permissions that +// Amazon S3 supports in an ACL. 
For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an Amazon Web Services account; uri – if you are granting permissions
+// to a predefined group; emailAddress – if the value specified is the email
+// address of an Amazon Web Services account. Using email addresses to specify
+// a grantee is only supported in the following Amazon Web Services Regions:
+// US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific
+// (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland),
+// and South America (São Paulo). For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference. For example, the following
+// x-amz-grant-write header grants create, overwrite, and delete objects
+// permission to the LogDelivery group predefined by Amazon S3 and to two Amazon
+// Web Services accounts identified by their email addresses. x-amz-grant-write:
+// uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333",
+// id="555566667777"
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// # Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following Amazon Web Services
+// Regions: US East (N. Virginia), US West (N. California), US West (Oregon),
+// Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe
+// (Ireland), and South America (São Paulo). For a list of all the Amazon S3 supported
+// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference.
+//
+// The following operations are related to PutBucketAcl:
+//
+// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// - GetObjectAcl (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+	req, out := c.PutBucketAclRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). You can have up to 1,000 analytics configurations per +// bucket. +// +// You can choose to have storage class analysis export analysis reports sent +// to a comma-separated values (CSV) flat file. See the DataExport request element. +// Reports are updated daily and are based on the object filters that you configure. +// When selecting data export, you specify a destination bucket and an optional +// destination prefix where the file is written. You can export the data to +// a destination bucket in a different account. However, the destination bucket +// must be in the same Region as the bucket that you are making the PUT analytics +// configuration to. For more information, see Amazon S3 Analytics – Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html). 
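+//
+// A minimal sketch of a configuration whose storage class analysis is
+// exported as a CSV report; svc (*s3.S3), both bucket names, and the
+// configuration ID are illustrative assumptions:
+//
+//	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Id:     aws.String("report-1"),
+//		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
+//			Id: aws.String("report-1"),
+//			StorageClassAnalysis: &s3.StorageClassAnalysis{
+//				DataExport: &s3.StorageClassAnalysisDataExport{
+//					OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
+//					Destination: &s3.AnalyticsExportDestination{
+//						S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//							Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
+//							Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//						},
+//					},
+//				},
+//			},
+//		},
+//	})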
+// +// You must create a bucket policy on the destination bucket where the exported +// file is written to grant permissions to Amazon S3 to write objects to the +// bucket. For an example policy, see Granting Permissions for Amazon S3 Inventory +// and Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// PutBucketAnalyticsConfiguration has the following special errors: +// +// - HTTP Error: HTTP 400 Bad Request Code: InvalidArgument Cause: Invalid +// argument. +// +// - HTTP Error: HTTP 400 Bad Request Code: TooManyConfigurations Cause: +// You are attempting to create a new configuration but have already reached +// the 1,000-configuration limit. +// +// - HTTP Error: HTTP 403 Forbidden Code: AccessDenied Cause: You are not +// the owner of the specified bucket, or you do not have the s3:PutAnalyticsConfiguration +// bucket permission to set the configuration on the bucket. +// +// The following operations are related to PutBucketAnalyticsConfiguration: +// +// - GetBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html) +// +// - DeleteBucketAnalyticsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html) +// +// - ListBucketAnalyticsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the cors configuration for your bucket. If the configuration exists, +// Amazon S3 replaces it. +// +// To use this operation, you must be allowed to perform the s3:PutBucketCORS +// action. By default, the bucket owner has this permission and can grant it +// to others. +// +// You set this configuration on a bucket so that the bucket can service cross-origin +// requests. For example, you might want to enable a request whose origin is +// http://www.example.com to access your Amazon S3 bucket at my.example.bucket.com +// by using the browser's XMLHttpRequest capability. +// +// To enable cross-origin resource sharing (CORS) on a bucket, you add the cors +// subresource to the bucket. The cors subresource is an XML document in which +// you configure rules that identify origins and the HTTP methods that can be +// executed on your bucket. The document is limited to 64 KB in size. +// +// When Amazon S3 receives a cross-origin request (or a pre-flight OPTIONS request) +// against a bucket, it evaluates the cors configuration on the bucket and uses +// the first CORSRule rule that matches the incoming browser request to enable +// a cross-origin request. For a rule to match, the following conditions must +// be met: +// +// - The request's Origin header must match AllowedOrigin elements. 
+// +// - The request method (for example, GET, PUT, HEAD, and so on) or the Access-Control-Request-Method +// header in case of a pre-flight OPTIONS request must be one of the AllowedMethod +// elements. +// +// - Every header specified in the Access-Control-Request-Headers request +// header of a pre-flight request must match an AllowedHeader element. +// +// For more information about CORS, go to Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +// +// The following operations are related to PutBucketCors: +// +// - GetBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html) +// +// - DeleteBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html) +// +// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketEncryptionRequest method. 
+// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This action uses the encryption subresource to configure default encryption +// and Amazon S3 Bucket Keys for an existing bucket. +// +// By default, all buckets have a default encryption configuration that uses +// server-side encryption with Amazon S3 managed keys (SSE-S3). You can optionally +// configure default encryption for a bucket by using server-side encryption +// with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side +// encryption with Amazon Web Services KMS keys (DSSE-KMS). If you specify default +// encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html). If you +// use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 +// does not validate the KMS key ID provided in PutBucketEncryption requests. +// +// This action requires Amazon Web Services Signature Version 4. For more information, +// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). +// +// To use this operation, you must have permission to perform the s3:PutEncryptionConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// The following operations are related to PutBucketEncryption: +// +// - GetBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html) +// +// - DeleteBucketEncryption (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. 
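+//
+// A minimal sketch of setting SSE-KMS default encryption with an S3 Bucket
+// Key; svc (*s3.S3), the bucket name, and the KMS key ID are illustrative
+// assumptions:
+//
+//	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
+//			Rules: []*s3.ServerSideEncryptionRule{{
+//				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
+//					SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
+//					KMSMasterKeyID: aws.String("1234abcd-12ab-34cd-56ef-1234567890ab"),
+//				},
+//				BucketKeyEnabled: aws.Bool(true),
+//			}},
+//		},
+//	})
+//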
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketIntelligentTieringConfiguration = "PutBucketIntelligentTieringConfiguration" + +// PutBucketIntelligentTieringConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketIntelligentTieringConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketIntelligentTieringConfiguration for more information on using the PutBucketIntelligentTieringConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketIntelligentTieringConfigurationRequest method. +// req, resp := client.PutBucketIntelligentTieringConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfigurationRequest(input *PutBucketIntelligentTieringConfigurationInput) (req *request.Request, output *PutBucketIntelligentTieringConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketIntelligentTieringConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?intelligent-tiering", + } + + if input == nil { + input = &PutBucketIntelligentTieringConfigurationInput{} + } + + output = &PutBucketIntelligentTieringConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketIntelligentTieringConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Puts a S3 Intelligent-Tiering configuration to the specified bucket. You +// can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. +// +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without performance impact or operational overhead. 
S3 Intelligent-Tiering +// delivers automatic cost savings in three low latency and high throughput +// access tiers. To get the lowest storage cost on data that can be accessed +// in minutes to hours, you can choose to activate additional archiving capabilities. +// +// The S3 Intelligent-Tiering storage class is the ideal storage class for data +// with unknown, changing, or unpredictable access patterns, independent of +// object size or retention period. If the size of an object is less than 128 +// KB, it is not monitored and not eligible for auto-tiering. Smaller objects +// can be stored, but they are always charged at the Frequent Access tier rates +// in the S3 Intelligent-Tiering storage class. +// +// For more information, see Storage class for automatically optimizing frequently +// and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +// +// Operations related to PutBucketIntelligentTieringConfiguration include: +// +// - DeleteBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html) +// +// - GetBucketIntelligentTieringConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html) +// +// - ListBucketIntelligentTieringConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html) +// +// You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically +// move objects stored in the S3 Intelligent-Tiering storage class to the Archive +// Access or Deep Archive Access tier. +// +// PutBucketIntelligentTieringConfiguration has the following special errors: +// +// # HTTP 400 Bad Request Error +// +// Code: InvalidArgument +// +// Cause: Invalid Argument +// +// # HTTP 400 Bad Request Error +// +// Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// # HTTP 403 Forbidden Error +// +// Cause: You are not the owner of the specified bucket, or you do not have +// the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration +// on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketIntelligentTieringConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration +func (c *S3) PutBucketIntelligentTieringConfiguration(input *PutBucketIntelligentTieringConfigurationInput) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketIntelligentTieringConfigurationWithContext is the same as PutBucketIntelligentTieringConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketIntelligentTieringConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketIntelligentTieringConfigurationWithContext(ctx aws.Context, input *PutBucketIntelligentTieringConfigurationInput, opts ...request.Option) (*PutBucketIntelligentTieringConfigurationOutput, error) { + req, out := c.PutBucketIntelligentTieringConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This implementation of the PUT action adds an inventory configuration (identified +// by the inventory ID) to the bucket. You can have up to 1,000 inventory configurations +// per bucket. +// +// Amazon S3 inventory generates inventories of the objects in the bucket on +// a daily or weekly basis, and the results are published to a flat file. The +// bucket that is inventoried is called the source bucket, and the bucket where +// the inventory flat file is stored is called the destination bucket. The destination +// bucket must be in the same Amazon Web Services Region as the source bucket. +// +// When you configure an inventory for a source bucket, you specify the destination +// bucket where you want the inventory to be stored, and whether to generate +// the inventory daily or weekly. You can also configure what object metadata +// to include and whether to inventory all object versions or only current versions. +// For more information, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html) +// in the Amazon S3 User Guide. 
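+//
+// A minimal sketch of a daily CSV inventory of current object versions; svc
+// (*s3.S3), both buckets, and the configuration ID are illustrative
+// assumptions (note that the destination is addressed by bucket ARN):
+//
+//	_, err := svc.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		Id:     aws.String("daily-inventory"),
+//		InventoryConfiguration: &s3.InventoryConfiguration{
+//			Id:                     aws.String("daily-inventory"),
+//			IsEnabled:              aws.Bool(true),
+//			IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
+//			Schedule: &s3.InventorySchedule{
+//				Frequency: aws.String(s3.InventoryFrequencyDaily),
+//			},
+//			Destination: &s3.InventoryDestination{
+//				S3BucketDestination: &s3.InventoryS3BucketDestination{
+//					Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
+//					Format: aws.String(s3.InventoryFormatCsv),
+//				},
+//			},
+//		},
+//	})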
+// +// You must create a bucket policy on the destination bucket to grant permissions +// to Amazon S3 to write objects to the bucket in the defined location. For +// an example policy, see Granting Permissions for Amazon S3 Inventory and Storage +// Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9). +// +// # Permissions +// +// To use this operation, you must have permission to perform the s3:PutInventoryConfiguration +// action. The bucket owner has this permission by default and can grant this +// permission to others. +// +// The s3:PutInventoryConfiguration permission allows a user to create an S3 +// Inventory (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) +// report that includes all object metadata fields available and to specify +// the destination bucket to store the inventory. A user with read access to +// objects in the destination bucket can also access all object metadata fields +// that are available in the inventory report. +// +// To restrict access to an inventory report, see Restricting access to an Amazon +// S3 Inventory report (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html#example-bucket-policies-use-case-10) +// in the Amazon S3 User Guide. For more information about the metadata fields +// available in S3 Inventory, see Amazon S3 Inventory lists (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html#storage-inventory-contents) +// in the Amazon S3 User Guide. For more information about permissions, see +// Permissions related to bucket subresource operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Identity and access management in Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) +// in the Amazon S3 User Guide. +// +// PutBucketInventoryConfiguration has the following special errors: +// +// # HTTP 400 Bad Request Error +// +// Code: InvalidArgument +// +// Cause: Invalid Argument +// +// # HTTP 400 Bad Request Error +// +// Code: TooManyConfigurations +// +// Cause: You are attempting to create a new configuration but have already +// reached the 1,000-configuration limit. +// +// # HTTP 403 Forbidden Error +// +// Cause: You are not the owner of the specified bucket, or you do not have +// the s3:PutInventoryConfiguration bucket permission to set the configuration +// on the bucket. +// +// The following operations are related to PutBucketInventoryConfiguration: +// +// - GetBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html) +// +// - DeleteBucketInventoryConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html) +// +// - ListBucketInventoryConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// For an updated version of this API, see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html). +// This version has been deprecated. Existing lifecycle configurations will +// work. 
For new lifecycle configurations, use the updated API.
+//
+// Creates a new lifecycle configuration for the bucket or replaces an existing
+// lifecycle configuration. For information about lifecycle configuration, see
+// Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+//
+// By default, all Amazon S3 resources, including buckets, objects, and related
+// subresources (for example, lifecycle configuration and website configuration)
+// are private. Only the resource owner, the Amazon Web Services account that
+// created the resource, can access it. The resource owner can optionally grant
+// access permissions to others by writing an access policy. For this operation,
+// users must get the s3:PutLifecycleConfiguration permission.
+//
+// You can also explicitly deny permissions. An explicit denial also supersedes
+// any other permissions. If you want to prevent users or accounts from removing
+// or deleting objects from your bucket, you must deny them permissions for
+// the following actions:
+//
+// - s3:DeleteObject
+//
+// - s3:DeleteObjectVersion
+//
+// - s3:PutLifecycleConfiguration
+//
+// For more information about permissions, see Managing Access Permissions to
+// your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// For more examples of transitioning objects to storage classes such as STANDARD_IA
+// or ONEZONE_IA, see Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#lifecycle-configuration-examples).
+//
+// By default, a resource owner (in this case, a bucket owner, which is the
+// Amazon Web Services account that created the bucket) can perform any of the
+// operations. A resource owner can also grant others permission to perform
+// the operation. For more information, see the following topics in the Amazon
+// S3 User Guide: Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
+// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// The following operations are related to PutBucketLifecycle:
+//
+// - GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html) (Deprecated)
+//
+// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// - RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycle for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+//
+// Deprecated: PutBucketLifecycle has been deprecated
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
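+//
+// For example, a caller can bound the request with a deadline (a sketch,
+// assuming an initialized client svc and a prepared input):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+// defer cancel()
+// _, err := svc.PutBucketLifecycleWithContext(ctx, input)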
+// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Creates a new lifecycle configuration for the bucket or replaces an existing +// lifecycle configuration. Keep in mind that this will overwrite an existing +// lifecycle configuration, so if you want to retain any configuration details, +// they must be included in the new lifecycle configuration. For information +// about lifecycle configuration, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html). +// +// Bucket lifecycle configuration now supports specifying a lifecycle rule using +// an object key name prefix, one or more object tags, object size, or any combination +// of these. Accordingly, this section describes the latest API. 
The previous +// version of the API supported filtering based only on an object key name prefix, +// which is supported for backward compatibility. For the related API description, +// see PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html). +// +// # Rules +// +// You specify the lifecycle configuration in your request body. The lifecycle +// configuration is specified as XML consisting of one or more rules. An Amazon +// S3 Lifecycle configuration can have up to 1,000 rules. This limit is not +// adjustable. Each rule consists of the following: +// +// - A filter identifying a subset of objects to which the rule applies. +// The filter can be based on a key name prefix, object tags, object size, +// or any combination of these. +// +// - A status indicating whether the rule is in effect. +// +// - One or more lifecycle transition and expiration actions that you want +// Amazon S3 to perform on the objects identified by the filter. If the state +// of your bucket is versioning-enabled or versioning-suspended, you can +// have many versions of the same object (one current version and zero or +// more noncurrent versions). Amazon S3 provides predefined actions that +// you can specify for current and noncurrent object versions. +// +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// and Lifecycle Configuration Elements (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html). +// +// # Permissions +// +// By default, all Amazon S3 resources are private, including buckets, objects, +// and related subresources (for example, lifecycle configuration and website +// configuration). Only the resource owner (that is, the Amazon Web Services +// account that created it) can access the resource. The resource owner can +// optionally grant access permissions to others by writing an access policy. +// For this operation, a user must get the s3:PutLifecycleConfiguration permission. +// +// You can also explicitly deny permissions. An explicit deny also supersedes +// any other permissions. If you want to block users or accounts from removing +// or deleting objects from your bucket, you must deny them permissions for +// the following actions: +// +// - s3:DeleteObject +// +// - s3:DeleteObjectVersion +// +// - s3:PutLifecycleConfiguration +// +// For more information about permissions, see Managing Access Permissions to +// Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// The following operations are related to PutBucketLifecycleConfiguration: +// +// - Examples of Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html) +// +// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html) +// +// - DeleteBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. 
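+//
+// A single rule of the shape described above, expressed with this SDK (a
+// sketch with illustrative names; svc is an initialized client):
+//
+// _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//     Bucket: aws.String("amzn-s3-demo-bucket"),
+//     LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//         Rules: []*s3.LifecycleRule{{
+//             ID:         aws.String("expire-old-logs"),
+//             Status:     aws.String(s3.ExpirationStatusEnabled),
+//             Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
+//             Expiration: &s3.LifecycleExpiration{Days: aws.Int64(365)},
+//         }},
+//     },
+// })
+//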
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + output = &PutBucketLoggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketLogging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Set the logging parameters for a bucket and to specify permissions for who +// can view and modify the logging parameters. All logs are saved to buckets +// in the same Amazon Web Services Region as the source bucket. To set the logging +// status of a bucket, you must be the bucket owner. +// +// The bucket owner is automatically granted FULL_CONTROL to all logs. You use +// the Grantee request element to grant access to other people. 
The Permissions
+// request element specifies the kind of access the grantee has to the logs.
+//
+// If the target bucket for log delivery uses the bucket owner enforced setting
+// for S3 Object Ownership, you can't use the Grantee request element to grant
+// access to others. Permissions can only be granted using policies. For more
+// information, see Permissions for server access log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general)
+// in the Amazon S3 User Guide.
+//
+// # Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (by using request elements) in the following ways:
+//
+// - By the person's ID:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID><>ID<></ID><DisplayName><>GranteesEmail<></DisplayName>
+// </Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// - By Email address:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress><>Grantees@email.com<></EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GETObjectAcl
+// request, appears as the CanonicalUser.
+//
+// - By URI:
+// <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI><>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<></URI></Grantee>
+//
+// To enable logging, you use LoggingEnabled and its children request elements.
+// To disable logging, you use an empty BucketLoggingStatus request element:
+//
+// <BucketLoggingStatus xmlns="http://doc.s3.amazonaws.com/2006-03-01" />
+//
+// For more information about server access logging, see Server Access Logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html) in
+// the Amazon S3 User Guide.
+//
+// For more information about creating a bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html).
+// For more information about returning the logging status of a bucket, see
+// GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html).
+//
+// The following operations are related to PutBucketLogging:
+//
+// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
+//
+// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
+//
+// - GetBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
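+//
+// Enabling logging with LoggingEnabled, as described above, can look like the
+// following sketch (illustrative bucket names; svc is an initialized client):
+//
+// _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
+//     Bucket: aws.String("source-bucket"),
+//     BucketLoggingStatus: &s3.BucketLoggingStatus{
+//         LoggingEnabled: &s3.LoggingEnabled{
+//             TargetBucket: aws.String("log-bucket"),
+//             TargetPrefix: aws.String("source-bucket-logs/"),
+//         },
+//     },
+// })
+//
+// Omitting LoggingEnabled sends the empty BucketLoggingStatus and disables logging.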
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. You can have up to 1,000 metrics configurations per bucket. +// If you're updating an existing metrics configuration, note that this is a +// full replacement of the existing metrics configuration. If you don't include +// the elements you want to keep, they are erased. +// +// To use this operation, you must have permissions to perform the s3:PutMetricsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. For more information about permissions, +// see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// For information about CloudWatch request metrics for Amazon S3, see Monitoring +// Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html). 
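+//
+// Because the put is a full replacement, always send the complete configuration;
+// a minimal sketch (illustrative names; svc is an initialized client):
+//
+// _, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//     Bucket:               aws.String("amzn-s3-demo-bucket"),
+//     Id:                   aws.String("EntireBucket"),
+//     MetricsConfiguration: &s3.MetricsConfiguration{Id: aws.String("EntireBucket")},
+// })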
+// +// The following operations are related to PutBucketMetricsConfiguration: +// +// - DeleteBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html) +// +// - GetBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html) +// +// - ListBucketMetricsConfigurations (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html) +// +// PutBucketMetricsConfiguration has the following special error: +// +// - Error code: TooManyConfigurations Description: You are attempting to +// create a new configuration but have already reached the 1,000-configuration +// limit. HTTP Status Code: HTTP 400 Bad Request +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketNotificationRequest method. 
+// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// No longer used, see the PutBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotificationConfiguration.html) +// operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the PutBucketNotificationConfigurationRequest method.
+// req, resp := client.PutBucketNotificationConfigurationRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+ op := &request.Operation{
+  Name:       opPutBucketNotificationConfiguration,
+  HTTPMethod: "PUT",
+  HTTPPath:   "/{Bucket}?notification",
+ }
+
+ if input == nil {
+  input = &PutBucketNotificationConfigurationInput{}
+ }
+
+ output = &PutBucketNotificationConfigurationOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Enables notifications of specified events for a bucket. For more information
+// about event notifications, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// Using this API, you can replace an existing notification configuration. The
+// configuration is an XML file that defines the event types that you want Amazon
+// S3 to publish and the destination where you want Amazon S3 to publish an
+// event notification when it detects an event of the specified type.
+//
+// By default, your bucket has no event notifications configured. That is, the
+// notification configuration will be an empty NotificationConfiguration.
+//
+// <NotificationConfiguration>
+//
+// </NotificationConfiguration>
+//
+// This action replaces the existing notification configuration with the configuration
+// you include in the request body.
+//
+// After Amazon S3 receives this request, it first verifies that any Amazon
+// Simple Notification Service (Amazon SNS) or Amazon Simple Queue Service (Amazon
+// SQS) destination exists, and that the bucket owner has permission to publish
+// to it by sending a test notification. In the case of Lambda destinations,
+// Amazon S3 verifies that the Lambda function permissions grant Amazon S3 permission
+// to invoke the function from the Amazon S3 bucket. For more information, see
+// Configuring Notifications for Amazon S3 Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+//
+// You can disable notifications by adding the empty NotificationConfiguration
+// element.
+//
+// For more information about the number of event notification configurations
+// that you can create per bucket, see Amazon S3 service quotas (https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3)
+// in Amazon Web Services General Reference.
+//
+// By default, only the bucket owner can configure notifications on a bucket.
+// However, bucket owners can use a bucket policy to grant permission to other
+// users to set this configuration with the required s3:PutBucketNotification
+// permission.
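+//
+// Routing object-created events to an SQS queue, for example, can look like
+// the following sketch (illustrative bucket name and queue ARN; svc is an
+// initialized client):
+//
+// _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//     Bucket: aws.String("amzn-s3-demo-bucket"),
+//     NotificationConfiguration: &s3.NotificationConfiguration{
+//         QueueConfigurations: []*s3.QueueConfiguration{{
+//             QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:s3-events"),
+//             Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
+//         }},
+//     },
+// })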
+// +// The PUT notification is an atomic operation. For example, suppose your notification +// configuration includes SNS topic, SQS queue, and Lambda function configurations. +// When you send a PUT request with this configuration, Amazon S3 sends test +// messages to your SNS topic. If the message fails, the entire PUT action will +// fail, and Amazon S3 will not add the configuration to your bucket. +// +// If the configuration in the request body includes only one TopicConfiguration +// specifying only the s3:ReducedRedundancyLostObject event type, the response +// will also include the x-amz-sns-test-message-id header containing the message +// ID of the test notification sent to the topic. +// +// The following action is related to PutBucketNotificationConfiguration: +// +// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketOwnershipControls = "PutBucketOwnershipControls" + +// PutBucketOwnershipControlsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketOwnershipControls operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketOwnershipControls for more information on using the PutBucketOwnershipControls +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketOwnershipControlsRequest method. 
+// req, resp := client.PutBucketOwnershipControlsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControlsInput) (req *request.Request, output *PutBucketOwnershipControlsOutput) { + op := &request.Operation{ + Name: opPutBucketOwnershipControls, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?ownershipControls", + } + + if input == nil { + input = &PutBucketOwnershipControlsInput{} + } + + output = &PutBucketOwnershipControlsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketOwnershipControls API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this +// operation, you must have the s3:PutBucketOwnershipControls permission. For +// more information about Amazon S3 permissions, see Specifying permissions +// in a policy (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html). +// +// For information about Amazon S3 Object Ownership, see Using object ownership +// (https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html). +// +// The following operations are related to PutBucketOwnershipControls: +// +// - GetBucketOwnershipControls +// +// - DeleteBucketOwnershipControls +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketOwnershipControls for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls +func (c *S3) PutBucketOwnershipControls(input *PutBucketOwnershipControlsInput) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + return out, req.Send() +} + +// PutBucketOwnershipControlsWithContext is the same as PutBucketOwnershipControls with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketOwnershipControls for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketOwnershipControlsWithContext(ctx aws.Context, input *PutBucketOwnershipControlsInput, opts ...request.Option) (*PutBucketOwnershipControlsOutput, error) { + req, out := c.PutBucketOwnershipControlsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Regional endpoint. These endpoints support path-style +// requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name +// . Virtual-hosted-style requests aren't supported. For more information, see +// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// If you are using an identity other than the root user of the Amazon Web Services +// account that owns the bucket, the calling identity must both have the PutBucketPolicy +// permissions on the specified bucket and belong to the bucket owner's account +// in order to use this operation. +// +// If you don't have PutBucketPolicy permissions, Amazon S3 returns a 403 Access +// Denied error. If you have the correct permissions, but you're not using an +// identity that belongs to the bucket owner's account, Amazon S3 returns a +// 405 Method Not Allowed error. +// +// To ensure that bucket owners don't inadvertently lock themselves out of their +// own buckets, the root principal in a bucket owner's Amazon Web Services account +// can perform the GetBucketPolicy, PutBucketPolicy, and DeleteBucketPolicy +// API actions, even if their bucket policy explicitly denies the root principal's +// access. Bucket owner root principals can only be blocked from performing +// these API actions by VPC endpoint policies and Amazon Web Services Organizations +// policies. +// +// - General purpose bucket permissions - The s3:PutBucketPolicy permission +// is required in a policy. 
For more information about general purpose buckets +// bucket policies, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html) +// in the Amazon S3 User Guide. +// +// - Directory bucket permissions - To grant access to this API operation, +// you must have the s3express:PutBucketPolicy permission in an IAM identity-based +// policy instead of a bucket policy. Cross-account access to this API operation +// isn't supported. This operation can only be performed by the Amazon Web +// Services account that owns the resource. For more information about directory +// bucket policies and permissions, see Amazon Web Services Identity and +// Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html) +// in the Amazon S3 User Guide. +// +// # Example bucket policies +// +// General purpose buckets example bucket policies - See Bucket policy examples +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// Directory bucket example bucket policies - See Example bucket policies for +// S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html) +// in the Amazon S3 User Guide. +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com. +// +// The following operations are related to PutBucketPolicy: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Creates a replication configuration or replaces an existing one. For more +// information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) +// in the Amazon S3 User Guide. +// +// Specify the replication configuration in the request body. In the replication +// configuration, you provide the name of the destination bucket or buckets +// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 +// can assume to replicate objects on your behalf, and other relevant information. +// You can invoke this request for a specific Amazon Web Services Region by +// using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) +// condition key. +// +// A replication configuration must include at least one rule, and can contain +// a maximum of 1,000. Each rule identifies a subset of objects to replicate +// by filtering the objects in the source bucket. To choose additional subsets +// of objects to replicate, add a rule for each subset. +// +// To specify a subset of the objects in the source bucket to apply a replication +// rule to, add the Filter element as a child of the Rule element. You can filter +// objects based on an object key prefix, one or more object tags, or both. +// When you add the Filter element in the configuration, you must also add the +// following elements: DeleteMarkerReplication, Status, and Priority. +// +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). +// +// For information about enabling versioning on a bucket, see Using Versioning +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). 
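+//
+// A one-rule configuration of the shape described above can look like the
+// following sketch (illustrative ARNs; svc is an initialized client and the
+// source bucket is versioning-enabled):
+//
+// _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//     Bucket: aws.String("source-bucket"),
+//     ReplicationConfiguration: &s3.ReplicationConfiguration{
+//         Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//         Rules: []*s3.ReplicationRule{{
+//             Status:                  aws.String(s3.ReplicationRuleStatusEnabled),
+//             Priority:                aws.Int64(1),
+//             Filter:                  &s3.ReplicationRuleFilter{Prefix: aws.String("")},
+//             DeleteMarkerReplication: &s3.DeleteMarkerReplication{Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled)},
+//             Destination:             &s3.Destination{Bucket: aws.String("arn:aws:s3:::destination-bucket")},
+//         }},
+//     },
+// })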
+// +// # Handling Replication of Encrypted Objects +// +// By default, Amazon S3 doesn't replicate objects that are stored at rest using +// server-side encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted +// objects, add the following: SourceSelectionCriteria, SseKmsEncryptedObjects, +// Status, EncryptionConfiguration, and ReplicaKmsKeyID. For information about +// replication configuration, see Replicating Objects Created with SSE Using +// KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html). +// +// For information on PutBucketReplication errors, see List of replication-related +// error codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList) +// +// # Permissions +// +// To create a PutBucketReplication request, you must have s3:PutReplicationConfiguration +// permissions for the bucket. +// +// By default, a resource owner, in this case the Amazon Web Services account +// that created the bucket, can perform this operation. The resource owner can +// also grant others permissions to perform the operation. For more information +// about permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html). +// +// To perform this operation, the user or role performing the action must have +// the iam:PassRole (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html) +// permission. +// +// The following operations are related to PutBucketReplication: +// +// - GetBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html) +// +// - DeleteBucketReplication (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. For more information, see Requester Pays +// Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html). +// +// The following operations are related to PutBucketRequestPayment: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - GetBucketRequestPayment (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. 
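+//
+// Switching a bucket to Requester Pays, per the description above, is a small
+// request (a sketch; svc is an initialized client and ctx a non-nil context):
+//
+// _, err := svc.PutBucketRequestPaymentWithContext(ctx, &s3.PutBucketRequestPaymentInput{
+//     Bucket: aws.String("amzn-s3-demo-bucket"),
+//     RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//         Payer: aws.String(s3.PayerRequester),
+//     },
+// })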
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the tags for a bucket. +// +// Use tags to organize your Amazon Web Services bill to reflect your own cost +// structure. To do this, sign up to get your Amazon Web Services account bill +// with tag key values included. Then, to see the cost of combined resources, +// organize your billing information according to resources with the same tag +// key values. For example, you can tag several resources with a specific application +// name, and then organize your billing information to see the total cost of +// that application across several services. For more information, see Cost +// Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html). +// +// When this operation sets the tags for a bucket, it will overwrite any current +// tags the bucket already has. You cannot use this operation to add tags to +// an existing list of tags. +// +// To use this operation, you must have permissions to perform the s3:PutBucketTagging +// action. 
The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html).
+//
+// PutBucketTagging has the following special errors. For more Amazon S3 errors,
+// see Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html).
+//
+// - InvalidTag - The tag provided was not a valid tag. This error can occur
+// if the tag did not pass input validation. For more information, see Using
+// Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html).
+//
+// - MalformedXML - The XML provided does not match the schema.
+//
+// - OperationAborted - A conflicting conditional action is currently in
+// progress against this resource. Please try again.
+//
+// - InternalError - The service was unable to apply the provided tag to
+// the bucket.
+//
+// The following operations are related to PutBucketTagging:
+//
+// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
+//
+// - DeleteBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketTagging for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
+	req, out := c.PutBucketTaggingRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
+	req, out := c.PutBucketTaggingRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketVersioning operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the versioning state of an existing bucket. +// +// You can set the versioning state with one of the following values: +// +// Enabled—Enables versioning for the objects in the bucket. All objects added +// to the bucket receive a unique version ID. +// +// Suspended—Disables versioning for the objects in the bucket. All objects +// added to the bucket receive the version ID null. +// +// If the versioning state has never been set on a bucket, it has no versioning +// state; a GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// request does not return a versioning state value. +// +// In order to enable MFA Delete, you must be the bucket owner. If you are the +// bucket owner and want to enable MFA Delete in the bucket versioning configuration, +// you must include the x-amz-mfa request header and the Status and the MfaDelete +// request elements in a request to set the versioning state of the bucket. +// +// If you have an object expiration lifecycle configuration in your non-versioned +// bucket and you want to maintain the same permanent delete behavior when you +// enable versioning, you must add a noncurrent expiration policy. The noncurrent +// expiration lifecycle configuration will manage the deletes of the noncurrent +// object versions in the version-enabled bucket. (A version-enabled bucket +// maintains one current and zero or more noncurrent object versions.) For more +// information, see Lifecycle and Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config). +// +// The following operations are related to PutBucketVersioning: +// +// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html) +// +// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html) +// +// - GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
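+//
+// A rough sketch of enabling versioning (the client value svc and the bucket
+// name are assumed placeholders):
+//
+//	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//		Bucket: aws.String("my-bucket"), // placeholder bucket
+//		VersioningConfiguration: &s3.VersioningConfiguration{
+//			Status: aws.String(s3.BucketVersioningStatusEnabled),
+//		},
+//	})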
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the configuration of the website that is specified in the website subresource. +// To configure a bucket as a website, you can add this subresource on the bucket +// with website configuration information such as the file name of the index +// document and any redirect rules. For more information, see Hosting Websites +// on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html). +// +// This PUT action requires the S3:PutBucketWebsite permission. 
By default,
+// only the bucket owner can configure the website attached to a bucket; however,
+// bucket owners can allow other users to set the website configuration by writing
+// a bucket policy that grants them the S3:PutBucketWebsite permission.
+//
+// To redirect all website requests sent to the bucket's website endpoint, you
+// add a website configuration with the following elements. Because all requests
+// are sent to another website, you don't need to provide an index document name
+// for the bucket.
+//
+// - WebsiteConfiguration
+//
+// - RedirectAllRequestsTo
+//
+// - HostName
+//
+// - Protocol
+//
+// If you want granular control over redirects, you can use the following elements
+// to add routing rules that describe conditions for redirecting requests and
+// information about the redirect destination. In this case, the website configuration
+// must provide an index document for the bucket, because some requests might
+// not be redirected.
+//
+// - WebsiteConfiguration
+//
+// - IndexDocument
+//
+// - Suffix
+//
+// - ErrorDocument
+//
+// - Key
+//
+// - RoutingRules
+//
+// - RoutingRule
+//
+// - Condition
+//
+// - HttpErrorCodeReturnedEquals
+//
+// - KeyPrefixEquals
+//
+// - Redirect
+//
+// - Protocol
+//
+// - HostName
+//
+// - ReplaceKeyPrefixWith
+//
+// - ReplaceKeyWith
+//
+// - HttpRedirectCode
+//
+// Amazon S3 has a limitation of 50 routing rules per website configuration.
+// If you require more than 50 routing rules, you can use object redirect. For
+// more information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html)
+// in the Amazon S3 User Guide.
+//
+// The maximum request length is limited to 128 KB.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketWebsite for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+	req, out := c.PutBucketWebsiteRequest(input)
+	return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+	req, out := c.PutBucketWebsiteRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a "aws/request.Request" representing the
+// client's request for the PutObject operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
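+//
+// Because the request is returned before it is sent, one common use is presigning.
+// A hedged sketch (svc, the names, and the expiry are assumed placeholders):
+//
+//	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
+//		Bucket: aws.String("my-bucket"), // placeholder bucket
+//		Key:    aws.String("my-key"),    // placeholder key
+//	})
+//	urlStr, err := req.Presign(15 * time.Minute)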
+// +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// - Amazon S3 never adds partial objects; if you receive a success response, +// Amazon S3 added the entire object to the bucket. You cannot use PutObject +// to only update a single piece of metadata for an existing object. You +// must put the entire object with updated metadata if you want to update +// some values. +// +// - If your bucket uses the bucket owner enforced setting for Object Ownership, +// ACLs are disabled and no longer affect permissions. All objects written +// to the bucket by any account will be owned by the bucket owner. +// +// - Directory buckets - For directory buckets, you must make requests for +// this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. +// +// Amazon S3 is a distributed system. If it receives multiple write requests +// for the same object simultaneously, it overwrites all but the last object +// written. However, Amazon S3 provides features that can modify this behavior: +// +// - S3 Object Lock - To prevent objects from being deleted or overwritten, +// you can use Amazon S3 Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html) +// in the Amazon S3 User Guide. This functionality is not supported for directory +// buckets. +// +// - S3 Versioning - When you enable versioning for a bucket, if Amazon S3 +// receives multiple write requests for the same object simultaneously, it +// stores all versions of the objects. For each write request that is made +// to the same object, Amazon S3 automatically generates a unique version +// ID of that object being stored in Amazon S3. You can retrieve, replace, +// or delete any version of the object. For more information about versioning, +// see Adding Objects to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) +// in the Amazon S3 User Guide. For information about returning the versioning +// state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). +// This functionality is not supported for directory buckets. 
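+//
+// A minimal upload sketch (svc and the names are assumed placeholders; Body
+// accepts any io.ReadSeeker):
+//
+//	_, err := svc.PutObject(&s3.PutObjectInput{
+//		Bucket: aws.String("my-bucket"), // placeholder bucket
+//		Key:    aws.String("notes.txt"), // placeholder key
+//		Body:   bytes.NewReader([]byte("hello")),
+//	})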
+//
+// Permissions
+//
+// - General purpose bucket permissions - The following permissions are required
+// in your policies when your PutObject request includes specific headers.
+// s3:PutObject - To successfully complete the PutObject request, you must
+// always have the s3:PutObject permission on a bucket to add an object to
+// it. s3:PutObjectAcl - To successfully change the object's ACL with your
+// PutObject request, you must have the s3:PutObjectAcl permission. s3:PutObjectTagging -
+// To successfully set the tag-set with your PutObject request, you must have
+// the s3:PutObjectTagging permission.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// Data integrity with Content-MD5
+//
+// - General purpose bucket - To ensure that data is not corrupted traversing
+// the network, use the Content-MD5 header. When you use this header, Amazon
+// S3 checks the object against the provided MD5 value and, if they do not
+// match, Amazon S3 returns an error. Alternatively, when the object's ETag
+// is its MD5 digest, you can calculate the MD5 while putting the object
+// to Amazon S3 and compare the returned ETag to the calculated MD5 value.
+//
+// - Directory bucket - This functionality is not supported for directory
+// buckets.
+//
+// # HTTP Host header syntax
+//
+// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+//
+// For more information about related Amazon S3 APIs, see the following:
+//
+// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+//
+// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+	req, out := c.PutObjectRequest(input)
+	return out, req.Send()
+}
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+	req, out := c.PutObjectRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectAcl operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See PutObjectAcl for more information on using the PutObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the PutObjectAclRequest method.
+// req, resp := client.PutObjectAclRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+	op := &request.Operation{
+		Name:       opPutObjectAcl,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}?acl",
+	}
+
+	if input == nil {
+		input = &PutObjectAclInput{}
+	}
+
+	output = &PutObjectAclOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Build.PushBackNamed(request.NamedHandler{
+		Name: "contentMd5Handler",
+		Fn:   checksum.AddBodyContentMD5Handler,
+	})
+	return
+}
+
+// PutObjectAcl API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for a new or existing object in an S3 bucket. You must have the WRITE_ACP
+// permission to set the ACL of an object. For more information, see What permissions
+// can I grant? (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions)
+// in the Amazon S3 User Guide.
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// Depending on your application needs, you can choose to set the ACL on an
+// object using either the request body or the headers. For example, if you
+// have an existing application that updates a bucket ACL using the request
+// body, you can continue to use that approach. For more information, see Access
+// Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
+// in the Amazon S3 User Guide.
+//
+// If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+// ACLs are disabled and no longer affect permissions. You must use policies
+// to grant access to your bucket and the objects in it. Requests to set ACLs
+// or update ACLs fail and return the AccessControlListNotSupported error code.
+// Requests to read ACLs are still supported. For more information, see Controlling
+// object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+// in the Amazon S3 User Guide.
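+//
+// A minimal canned-ACL sketch (svc and the names are assumed placeholders):
+//
+//	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//		Bucket: aws.String("my-bucket"), // placeholder bucket
+//		Key:    aws.String("my-key"),    // placeholder key
+//		ACL:    aws.String(s3.ObjectCannedACLPublicRead),
+//	})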
+//
+// # Permissions
+//
+// You can set access permissions using one of the following methods:
+//
+// - Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports
+// a set of predefined ACLs, known as canned ACLs. Each canned ACL has a
+// predefined set of grantees and permissions. Specify the canned ACL name
+// as the value of x-amz-acl. If you use this header, you cannot use other
+// access control-specific headers in your request. For more information,
+// see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
+//
+// - Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp,
+// x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using
+// these headers, you specify explicit access permissions and grantees (Amazon
+// Web Services accounts or Amazon S3 groups) who will receive the permission.
+// If you use these ACL-specific headers, you cannot use the x-amz-acl header
+// to set a canned ACL. These parameters map to the set of permissions that
+// Amazon S3 supports in an ACL. For more information, see Access Control
+// List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html).
+// You specify each grantee as a type=value pair, where the type is one of
+// the following: id – if the value specified is the canonical user ID
+// of an Amazon Web Services account uri – if you are granting permissions
+// to a predefined group emailAddress – if the value specified is the email
+// address of an Amazon Web Services account Using email addresses to specify
+// a grantee is only supported in the following Amazon Web Services Regions:
+// US East (N. Virginia) US West (N. California) US West (Oregon) Asia Pacific
+// (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland)
+// South America (São Paulo) For a list of all the Amazon S3 supported Regions
+// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference. For example, the following
+// x-amz-grant-read header grants list objects permission to the two Amazon
+// Web Services accounts identified by their email addresses. x-amz-grant-read:
+// emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"
+// A sketch of an explicit grant appears after the Grantee Values list below.
+//
+// You can use either a canned ACL or specify access permissions explicitly.
+// You cannot do both.
+//
+// # Grantee Values
+//
+// You can specify the person (grantee) to whom you're assigning access rights
+// (using request elements) in the following ways:
+//
+// - By the person's ID: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="CanonicalUser"><ID>ID</ID><DisplayName>GranteesEmail</DisplayName></Grantee>
+// DisplayName is optional and ignored in the request.
+//
+// - By URI: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="Group"><URI>http://acs.amazonaws.com/groups/global/AuthenticatedUsers</URI></Grantee>
+//
+// - By Email address: <Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+// xsi:type="AmazonCustomerByEmail"><EmailAddress>Grantees@email.com</EmailAddress></Grantee>
+// The grantee is resolved to the CanonicalUser and, in a response to a GET
+// Object acl request, appears as the CanonicalUser. Using email addresses
+// to specify a grantee is only supported in the following Amazon Web Services
+// Regions: US East (N. Virginia) US West (N. California) US West (Oregon)
+// Asia Pacific (Singapore) Asia Pacific (Sydney) Asia Pacific (Tokyo) Europe
+// (Ireland) South America (São Paulo) For a list of all the Amazon S3 supported
+// Regions and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
+// in the Amazon Web Services General Reference.
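+//
+// A sketch of an explicit grant (svc and the names are assumed placeholders;
+// the group URI is one of the predefined Amazon S3 groups):
+//
+//	_, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//		Bucket:    aws.String("my-bucket"), // placeholder bucket
+//		Key:       aws.String("my-key"),    // placeholder key
+//		GrantRead: aws.String(`uri="http://acs.amazonaws.com/groups/global/AuthenticatedUsers"`),
+//	})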
+// +// # Versioning +// +// The ACL of an object is set at the object version level. By default, PUT +// sets the ACL of the current version of an object. To set the ACL of a different +// version, use the versionId subresource. +// +// The following operations are related to PutObjectAcl: +// +// - CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html) +// +// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// - ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLegalHold = "PutObjectLegalHold" + +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectLegalHoldRequest method. 
+// req, resp := client.PutObjectLegalHoldRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { + op := &request.Operation{ + Name: opPutObjectLegalHold, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", + } + + if input == nil { + input = &PutObjectLegalHoldInput{} + } + + output = &PutObjectLegalHoldOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLegalHold API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Applies a legal hold configuration to the specified object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + return out, req.Send() +} + +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" + +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. 
+// req, resp := client.PutObjectLockConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { + op := &request.Operation{ + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", + } + + if input == nil { + input = &PutObjectLockConfigurationInput{} + } + + output = &PutObjectLockConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Places an Object Lock configuration on the specified bucket. The rule specified +// in the Object Lock configuration will be applied by default to every new +// object placed in the specified bucket. For more information, see Locking +// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// +// - The DefaultRetention settings require both a mode and a period. +// +// - The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. +// +// - You can enable Object Lock for new or existing buckets. For more information, +// see Configuring Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-configure.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectRetention = "PutObjectRetention" + +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
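+//
+// A hedged sketch of the request form (svc, the names, and the retain-until
+// date are assumed placeholders):
+//
+//	req, out := svc.PutObjectRetentionRequest(&s3.PutObjectRetentionInput{
+//		Bucket: aws.String("my-bucket"), // placeholder bucket
+//		Key:    aws.String("my-key"),    // placeholder key
+//		Retention: &s3.ObjectLockRetention{
+//			Mode:            aws.String(s3.ObjectLockRetentionModeGovernance),
+//			RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 30)),
+//		},
+//	})
+//	err := req.Send() // out is valid only if err is nil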
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { + op := &request.Operation{ + Name: opPutObjectRetention, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?retention", + } + + if input == nil { + input = &PutObjectRetentionInput{} + } + + output = &PutObjectRetentionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectRetention API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Places an Object Retention configuration on an object. For more information, +// see Locking Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). +// Users or accounts require the s3:PutObjectRetention permission in order to +// place an Object Retention configuration on objects. Bypassing a Governance +// Retention configuration requires the s3:BypassGovernanceRetention permission. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// Sets the supplied tag-set to an object that already exists in a bucket. A +// tag is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html). +// +// You can associate tags with an object by sending a PUT request against the +// tagging subresource that is associated with the object. You can retrieve +// tags by sending a GET request. For more information, see GetObjectTagging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html). +// +// For tagging-related restrictions related to characters and encodings, see +// Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html). +// Note that Amazon S3 limits the maximum number of tags to 10 tags per object. +// +// To use this operation, you must have permission to perform the s3:PutObjectTagging +// action. By default, the bucket owner has this permission and can grant this +// permission to others. +// +// To put tags of any other version, use the versionId query parameter. You +// also need permission for the s3:PutObjectVersionTagging action. +// +// PutObjectTagging has the following special errors. For more Amazon S3 errors +// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html). +// +// - InvalidTag - The tag provided was not a valid tag. This error can occur +// if the tag did not pass input validation. For more information, see Object +// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html). +// +// - MalformedXML - The XML provided does not match the schema. +// +// - OperationAborted - A conflicting conditional action is currently in +// progress against this resource. Please try again. +// +// - InternalError - The service was unable to apply the provided tag to +// the object. 
+// +// The following operations are related to PutObjectTagging: +// +// - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// +// - DeleteObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the PutPublicAccessBlockRequest method. +// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. 
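+//
+// A minimal sketch that blocks all public access (svc and the bucket name are
+// assumed placeholders):
+//
+//	_, err := svc.PutPublicAccessBlock(&s3.PutPublicAccessBlockInput{
+//		Bucket: aws.String("my-bucket"), // placeholder bucket
+//		PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+//			BlockPublicAcls:       aws.Bool(true),
+//			BlockPublicPolicy:     aws.Bool(true),
+//			IgnorePublicAcls:      aws.Bool(true),
+//			RestrictPublicBuckets: aws.Bool(true),
+//		},
+//	})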
+// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock +// permission. For more information about Amazon S3 permissions, see Specifying +// Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html). +// +// When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket +// or an object, it checks the PublicAccessBlock configuration for both the +// bucket (or the bucket that contains the object) and the bucket owner's account. +// If the PublicAccessBlock configurations are different between the bucket +// and the account, Amazon S3 uses the most restrictive combination of the bucket-level +// and account-level settings. +// +// For more information about when Amazon S3 considers a bucket or an object +// public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status). +// +// The following operations are related to PutPublicAccessBlock: +// +// - GetPublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html) +// +// - DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html) +// +// - GetBucketPolicyStatus (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html) +// +// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+//
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+// // Example sending a request using the RestoreObjectRequest method.
+// req, resp := client.RestoreObjectRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+	op := &request.Operation{
+		Name:       opRestoreObject,
+		HTTPMethod: "POST",
+		HTTPPath:   "/{Bucket}/{Key+}?restore",
+	}
+
+	if input == nil {
+		input = &RestoreObjectInput{}
+	}
+
+	output = &RestoreObjectOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// # Restores an archived copy of an object back into Amazon S3
+//
+// This functionality is not supported for Amazon S3 on Outposts.
+//
+// This action performs the following types of requests:
+//
+// - restore an archive - Restore an archived object
+//
+// For more information about the S3 structure in the request body, see the
+// following:
+//
+// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
+//
+// - Managing Access with ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
+// in the Amazon S3 User Guide
+//
+// - Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+// in the Amazon S3 User Guide
+//
+// # Permissions
+//
+// To use this operation, you must have permissions to perform the s3:RestoreObject
+// action. The bucket owner has this permission by default and can grant this
+// permission to others. For more information about permissions, see Permissions
+// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
+// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
+// in the Amazon S3 User Guide.
+//
+// # Restoring objects
+//
+// Objects that you archive to the S3 Glacier Flexible Retrieval
+// or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive
+// or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real
+// time. For objects in the S3 Glacier Flexible Retrieval
+// or S3 Glacier Deep Archive storage classes, you must first initiate a restore
+// request, and then wait until a temporary copy of the object is available.
+// If you want a permanent copy of the object, create a copy of it in the Amazon
+// S3 Standard storage class in your S3 bucket. To access an archived object,
+// you must restore the object for the duration (number of days) that you specify.
+// For objects in the Archive Access or Deep Archive Access tiers of S3 Intelligent-Tiering,
+// you must first initiate a restore request, and then wait until the object
+// is moved into the Frequent Access tier.
+//
+// To restore a specific object version, you can provide a version ID. If you
If you +// don't provide a version ID, Amazon S3 restores the current version. +// +// When restoring an archived object, you can specify one of the following data +// access tier options in the Tier element of the request body: +// +// - Expedited - Expedited retrievals allow you to quickly access your data +// stored in the S3 Glacier Flexible Retrieval Flexible Retrieval storage +// class or S3 Intelligent-Tiering Archive tier when occasional urgent requests +// for restoring archives are required. For all but the largest archived +// objects (250 MB+), data accessed using Expedited retrievals is typically +// made available within 1–5 minutes. Provisioned capacity ensures that +// retrieval capacity for Expedited retrievals is available when you need +// it. Expedited retrievals and provisioned capacity are not available for +// objects stored in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. +// +// - Standard - Standard retrievals allow you to access any of your archived +// objects within several hours. This is the default option for retrieval +// requests that do not specify the retrieval option. Standard retrievals +// typically finish within 3–5 hours for objects stored in the S3 Glacier +// Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering +// Archive tier. They typically finish within 12 hours for objects stored +// in the S3 Glacier Deep Archive storage class or S3 Intelligent-Tiering +// Deep Archive tier. Standard retrievals are free for objects stored in +// S3 Intelligent-Tiering. +// +// - Bulk - Bulk retrievals free for objects stored in the S3 Glacier Flexible +// Retrieval and S3 Intelligent-Tiering storage classes, enabling you to +// retrieve large amounts, even petabytes, of data at no cost. Bulk retrievals +// typically finish within 5–12 hours for objects stored in the S3 Glacier +// Flexible Retrieval Flexible Retrieval storage class or S3 Intelligent-Tiering +// Archive tier. Bulk retrievals are also the lowest-cost retrieval option +// when restoring objects from S3 Glacier Deep Archive. They typically finish +// within 48 hours for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. +// +// For more information about archive retrieval options and provisioned capacity +// for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) +// in the Amazon S3 User Guide. +// +// You can use Amazon S3 restore speed upgrade to change the restore speed to +// a faster speed while it is in progress. For more information, see Upgrading +// the speed of an in-progress restore (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html) +// in the Amazon S3 User Guide. +// +// To get the status of object restoration, you can send a HEAD request. Operations +// return the x-amz-restore header, which provides information about the restoration +// status, in the response. You can use Amazon S3 event notifications to notify +// you when a restore is initiated or completed. For more information, see Configuring +// Amazon S3 Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon S3 User Guide. +// +// After restoring an archived object, you can update the restoration period +// by reissuing the request with a new period. 
Amazon S3 updates the restoration +// period relative to the current time and charges only for the request-there +// are no data transfer charges. You cannot update the restoration period when +// Amazon S3 is actively processing your current restore request for the object. +// +// If your bucket has a lifecycle configuration with a rule that includes an +// expiration action, the object expiration overrides the life span that you +// specify in a restore request. For example, if you restore an object copy +// for 10 days, but the object is scheduled to expire in 3 days, Amazon S3 deletes +// the object in 3 days. For more information about lifecycle configuration, +// see PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// and Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in Amazon S3 User Guide. +// +// # Responses +// +// A successful action returns either the 200 OK or 202 Accepted status code. +// +// - If the object is not previously restored, then Amazon S3 returns 202 +// Accepted in the response. +// +// - If the object is previously restored, Amazon S3 returns 200 OK in the +// response. +// +// - Special errors: Code: RestoreAlreadyInProgress Cause: Object restore +// is already in progress. HTTP Status Code: 409 Conflict SOAP Fault Code +// Prefix: Client +// +// - Code: GlacierExpeditedRetrievalNotAvailable Cause: expedited retrievals +// are currently not available. Try again later. (Returned if there is insufficient +// capacity to process the Expedited request. This error applies only to +// Expedited retrievals and not to S3 Standard or Bulk retrievals.) HTTP +// Status Code: 503 SOAP Fault Code Prefix: N/A +// +// The following operations are related to RestoreObject: +// +// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html) +// +// - GetBucketNotificationConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// - ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This action is not allowed against this storage tier. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
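+//
+// A minimal illustrative sketch of a context-bounded restore call, assuming
+// an existing *S3 client; the bucket, key, retrieval tier, timeout, and
+// 10-day window are example values, not defaults:
+//
+// ctx, cancel := context.WithTimeout(aws.BackgroundContext(), time.Minute)
+// defer cancel()
+// _, err := client.RestoreObjectWithContext(ctx, &RestoreObjectInput{
+//     Bucket: aws.String("DOC-EXAMPLE-BUCKET"),  // assumed bucket name
+//     Key:    aws.String("archived/report.csv"), // assumed object key
+//     RestoreRequest: &RestoreRequest{
+//         Days:                 aws.Int64(10),
+//         GlacierJobParameters: &GlacierJobParameters{Tier: aws.String("Standard")},
+//     },
+// })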
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the SelectObjectContentRequest method. +// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + + es := NewSelectObjectContentEventStream() + req.Handlers.Unmarshal.PushBack(es.setStreamCloser) + output.EventStream = es + + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(es.runOutputStream) + req.Handlers.Unmarshal.PushBack(es.runOnStreamPartClose) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation is not supported by directory buckets. +// +// This action filters the contents of an Amazon S3 object based on a simple +// structured query language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON, +// CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse +// object data into records, and returns only records that match the specified +// SQL expression. You must also specify the data serialization format for the +// response. +// +// This functionality is not supported for Amazon S3 on Outposts. +// +// For more information about Amazon S3 Select, see Selecting Content from Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html) +// and SELECT Command (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html) +// in the Amazon S3 User Guide. +// +// # Permissions +// +// You must have the s3:GetObject permission for this operation. Amazon S3 Select +// does not support anonymous access. 
For more information about permissions, +// see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) +// in the Amazon S3 User Guide. +// +// # Object Data Formats +// +// You can use Amazon S3 Select to query objects that have the following format +// properties: +// +// - CSV, JSON, and Parquet - Objects must be in CSV, JSON, or Parquet format. +// +// - UTF-8 - UTF-8 is the only encoding type Amazon S3 Select supports. +// +// - GZIP or BZIP2 - CSV and JSON files can be compressed using GZIP or BZIP2. +// GZIP and BZIP2 are the only compression formats that Amazon S3 Select +// supports for CSV and JSON files. Amazon S3 Select supports columnar compression +// for Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object +// compression for Parquet objects. +// +// - Server-side encryption - Amazon S3 Select supports querying objects +// that are protected with server-side encryption. For objects that are encrypted +// with customer-provided encryption keys (SSE-C), you must use HTTPS, and +// you must use the headers that are documented in the GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided +// Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) +// in the Amazon S3 User Guide. For objects that are encrypted with Amazon +// S3 managed keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side +// encryption is handled transparently, so you don't need to specify anything. +// For more information about server-side encryption, including SSE-S3 and +// SSE-KMS, see Protecting Data Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html) +// in the Amazon S3 User Guide. +// +// # Working with the Response Body +// +// Given the response size is unknown, Amazon S3 Select streams the response +// as a series of messages and includes a Transfer-Encoding header with chunked +// as its value in the response. For more information, see Appendix: SelectObjectContent +// Response (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html). +// +// # GetObject Support +// +// The SelectObjectContent action does not support the following GetObject functionality. +// For more information, see GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html). +// +// - Range: Although you can specify a scan range for an Amazon S3 Select +// request (see SelectObjectContentRequest - ScanRange (https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange) +// in the request parameters), you cannot specify the range of bytes of an +// object to return. +// +// - The GLACIER, DEEP_ARCHIVE, and REDUCED_REDUNDANCY storage classes, or +// the ARCHIVE_ACCESS and DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING +// storage class: You cannot query objects in the GLACIER, DEEP_ARCHIVE, +// or REDUCED_REDUNDANCY storage classes, nor objects in the ARCHIVE_ACCESS +// or DEEP_ARCHIVE_ACCESS access tiers of the INTELLIGENT_TIERING storage +// class. For more information about storage classes, see Using Amazon S3 +// storage classes (https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html) +// in the Amazon S3 User Guide. 
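+//
+// A minimal illustrative sketch of a CSV select and of draining the event
+// stream, assuming an existing *S3 client; the bucket, key, and SQL
+// expression are example values:
+//
+// out, err := client.SelectObjectContent(&SelectObjectContentInput{
+//     Bucket:              aws.String("DOC-EXAMPLE-BUCKET"), // assumed
+//     Key:                 aws.String("data.csv"),           // assumed
+//     Expression:          aws.String("SELECT s._1 FROM S3Object s"),
+//     ExpressionType:      aws.String("SQL"),
+//     InputSerialization:  &InputSerialization{CSV: &CSVInput{}},
+//     OutputSerialization: &OutputSerialization{CSV: &CSVOutput{}},
+// })
+// if err == nil {
+//     defer out.EventStream.Close()
+//     for ev := range out.EventStream.Events() {
+//         if rec, ok := ev.(*RecordsEvent); ok {
+//             _ = rec.Payload // the matching records, as returned bytes
+//         }
+//     }
+// }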
+//
+// # Special Errors
+//
+// For a list of special errors for this operation, see List of SELECT Object
+// Content Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList).
+//
+// The following operations are related to SelectObjectContent:
+//
+// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
+//
+// - GetBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html)
+//
+// - PutBucketLifecycleConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation SelectObjectContent for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent
+func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) {
+	req, out := c.SelectObjectContentRequest(input)
+	return out, req.Send()
+}
+
+// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See SelectObjectContent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) {
+	req, out := c.SelectObjectContentRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+var _ awserr.Error
+var _ time.Time
+
+// SelectObjectContentEventStream provides the event stream handling for the SelectObjectContent.
+//
+// For testing and mocking the event stream this type should be initialized via
+// the NewSelectObjectContentEventStream constructor function, using the functional
+// options to pass in nested mock behavior.
+type SelectObjectContentEventStream struct {
+
+	// Reader is the EventStream reader for the SelectObjectContentEventStream
+	// events. This value is automatically set by the SDK when the API call is made.
+	// Use this member when unit testing your code with the SDK to mock out the
+	// EventStream Reader.
+	//
+	// Must not be nil.
+	Reader SelectObjectContentEventStreamReader
+
+	outputReader io.ReadCloser
+
+	// StreamCloser is the io.Closer for the EventStream connection. For HTTP
+	// EventStream this is the response Body. The stream will be closed when
+	// the Close method of the EventStream is called.
+	StreamCloser io.Closer
+
+	done      chan struct{}
+	closeOnce sync.Once
+	err       *eventstreamapi.OnceError
+}
+
+// NewSelectObjectContentEventStream initializes a SelectObjectContentEventStream.
+// This function should only be used for testing and mocking the SelectObjectContentEventStream
+// stream within your application.
+//
+// The Reader member must be set before reading events from the stream.
+//
+// The StreamCloser member should be set to the underlying io.Closer (e.g.
+// http.Response.Body), which will be closed when the stream Close method
+// is called.
+//
+// es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream){
+//     o.Reader = myMockStreamReader
+//     o.StreamCloser = myMockStreamCloser
+// })
+func NewSelectObjectContentEventStream(opts ...func(*SelectObjectContentEventStream)) *SelectObjectContentEventStream {
+	es := &SelectObjectContentEventStream{
+		done: make(chan struct{}),
+		err:  eventstreamapi.NewOnceError(),
+	}
+
+	for _, fn := range opts {
+		fn(es)
+	}
+
+	return es
+}
+
+func (es *SelectObjectContentEventStream) setStreamCloser(r *request.Request) {
+	es.StreamCloser = r.HTTPResponse.Body
+}
+
+func (es *SelectObjectContentEventStream) runOnStreamPartClose(r *request.Request) {
+	if es.done == nil {
+		return
+	}
+	go es.waitStreamPartClose()
+}
+
+func (es *SelectObjectContentEventStream) waitStreamPartClose() {
+	var outputErrCh <-chan struct{}
+	if v, ok := es.Reader.(interface{ ErrorSet() <-chan struct{} }); ok {
+		outputErrCh = v.ErrorSet()
+	}
+	var outputClosedCh <-chan struct{}
+	if v, ok := es.Reader.(interface{ Closed() <-chan struct{} }); ok {
+		outputClosedCh = v.Closed()
+	}
+
+	select {
+	case <-es.done:
+	case <-outputErrCh:
+		es.err.SetError(es.Reader.Err())
+		es.Close()
+	case <-outputClosedCh:
+		if err := es.Reader.Err(); err != nil {
+			es.err.SetError(es.Reader.Err())
+		}
+		es.Close()
+	}
+}
+
+// Events returns a channel to read events from.
+//
+// These events are:
+//
+// - ContinuationEvent
+// - EndEvent
+// - ProgressEvent
+// - RecordsEvent
+// - StatsEvent
+// - SelectObjectContentEventStreamUnknownEvent
+func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+	return es.Reader.Events()
+}
+
+func (es *SelectObjectContentEventStream) runOutputStream(r *request.Request) {
+	var opts []func(*eventstream.Decoder)
+	if r.Config.Logger != nil && r.Config.LogLevel.Matches(aws.LogDebugWithEventStreamBody) {
+		opts = append(opts, eventstream.DecodeWithLogger(r.Config.Logger))
+	}
+
+	unmarshalerForEvent := unmarshalerForSelectObjectContentEventStreamEvent{
+		metadata: protocol.ResponseMetadata{
+			StatusCode: r.HTTPResponse.StatusCode,
+			RequestID:  r.RequestID,
+		},
+	}.UnmarshalerForEventName
+
+	decoder := eventstream.NewDecoder(r.HTTPResponse.Body, opts...)
+	eventReader := eventstreamapi.NewEventReader(decoder,
+		protocol.HandlerPayloadUnmarshal{
+			Unmarshalers: r.Handlers.UnmarshalStream,
+		},
+		unmarshalerForEvent,
+	)
+
+	es.outputReader = r.HTTPResponse.Body
+	es.Reader = newReadSelectObjectContentEventStream(eventReader)
+}
+
+// Close closes the stream. This will also cause the Events channel to be closed.
+// Close must be called when done using the stream API. Not calling Close
+// may result in resource leaks.
+//
+// You can use the closing of the Reader's Events channel to terminate your
+// application's read from the API's stream.
+func (es *SelectObjectContentEventStream) Close() (err error) {
+	es.closeOnce.Do(es.safeClose)
+	return es.Err()
+}
+
+func (es *SelectObjectContentEventStream) safeClose() {
+	if es.done != nil {
+		close(es.done)
+	}
+
+	es.Reader.Close()
+	if es.outputReader != nil {
+		es.outputReader.Close()
+	}
+
+	es.StreamCloser.Close()
+}
+
+// Err returns any error that occurred while reading or writing EventStream
+// Events from the service API's response. Returns nil if there were no errors.
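+//
+// An illustrative sketch of the intended pattern: drain the Events channel,
+// then consult Err once the channel closes (es here is the stream from a
+// SelectObjectContentOutput):
+//
+// for ev := range es.Events() {
+//     _ = ev // handle RecordsEvent, StatsEvent, and so on
+// }
+// if err := es.Err(); err != nil {
+//     // the stream terminated with an error
+// }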
+func (es *SelectObjectContentEventStream) Err() error {
+	if err := es.err.Err(); err != nil {
+		return err
+	}
+	if err := es.Reader.Err(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the UploadPartRequest method.
+// req, resp := client.UploadPartRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+	op := &request.Operation{
+		Name:       opUploadPart,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}",
+	}
+
+	if input == nil {
+		input = &UploadPartInput{}
+	}
+
+	output = &UploadPartOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// In this operation, you provide new data as a part of an object in your request.
+// However, you have an option to specify your existing Amazon S3 object as
+// a data source for the part you are uploading. To upload a part from an existing
+// object, you use the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
+// operation.
+//
+// You must initiate a multipart upload (see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html))
+// before you can upload any part. In response to your initiate request, Amazon
+// S3 returns an upload ID, a unique identifier that you must include in your
+// upload part request.
+//
+// Part numbers can be any number from 1 to 10,000, inclusive. A part number
+// uniquely identifies a part and also defines its position within the object
+// being created. If you upload a new part using the same part number that was
+// used with a previous part, the previously uploaded part is overwritten.
+//
+// For information about maximum and minimum part sizes and other multipart
+// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html)
+// in the Amazon S3 User Guide.
+//
+// After you initiate a multipart upload and upload one or more parts, you must
+// either complete or abort the multipart upload to stop getting charged for
+// storage of the uploaded parts. Only after you either complete or abort the
+// multipart upload does Amazon S3 free up the parts storage and stop charging
+// you for it.
+//
+// For more information on multipart uploads, see Multipart Upload Overview
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in the
+// Amazon S3 User Guide.
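+//
+// A minimal illustrative sketch of uploading a single part, assuming an
+// existing *S3 client, a buf []byte payload, and an uploadID returned by a
+// prior CreateMultipartUpload call; the bucket and key are example values:
+//
+// part, err := client.UploadPart(&UploadPartInput{
+//     Bucket:     aws.String("DOC-EXAMPLE-BUCKET"), // assumed
+//     Key:        aws.String("large-object"),       // assumed
+//     UploadId:   uploadID,
+//     PartNumber: aws.Int64(1),
+//     Body:       bytes.NewReader(buf), // any io.ReadSeeker
+// })
+// // on success, keep *part.ETag for the later CompleteMultipartUpload call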
+//
+// Directory buckets - For directory buckets, you must make requests for this
+// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style
+// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name.
+// Path-style requests are not supported. For more information, see Regional
+// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
+// in the Amazon S3 User Guide.
+//
+// Permissions
+//
+// - General purpose bucket permissions - For information on the permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - To grant access to this API operation
+// on a directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
+// API operation for session-based authorization. Specifically, you grant
+// the s3express:CreateSession permission to the directory bucket in a bucket
+// policy or an IAM identity-based policy. Then, you make the CreateSession
+// API call on the bucket to obtain a session token. With the session token
+// in your request header, you can make API requests to this operation. After
+// the session token expires, you make another CreateSession API call to
+// generate a new session token for use. The Amazon Web Services CLI and SDKs
+// create and refresh the session token automatically to avoid service
+// interruptions when a session expires. For more information about authorization,
+// see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html).
+//
+// # Data integrity
+//
+// General purpose bucket - To ensure that data is not corrupted traversing
+// the network, specify the Content-MD5 header in the upload part request. Amazon
+// S3 checks the part data against the provided MD5 value. If they do not match,
+// Amazon S3 returns an error. If the upload request is signed with Signature
+// Version 4, then Amazon Web Services S3 uses the x-amz-content-sha256 header
+// as a checksum instead of Content-MD5. For more information, see Authenticating
+// Requests: Using the Authorization Header (Amazon Web Services Signature Version
+// 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html).
+//
+// Directory buckets - MD5 is not supported by directory buckets. You can use
+// checksum algorithms to check object integrity.
+//
+// Encryption
+//
+// - General purpose bucket - Server-side encryption is for data encryption
+// at rest. Amazon S3 encrypts your data as it writes it to disks in its
+// data centers and decrypts it when you access it. You have mutually exclusive
+// options to protect data using server-side encryption in Amazon S3, depending
+// on how you choose to manage the encryption keys. Specifically, the encryption
+// key options are Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS
+// keys (SSE-KMS), and Customer-Provided Keys (SSE-C). Amazon S3 encrypts
+// data with server-side encryption using Amazon S3 managed keys (SSE-S3)
+// by default. You can optionally tell Amazon S3 to encrypt data at rest
+// using server-side encryption with other key options. The option you use
+// depends on whether you want to use KMS keys (SSE-KMS) or provide your
+// own encryption key (SSE-C).
Server-side encryption is supported by the +// S3 Multipart Upload operations. Unless you are using a customer-provided +// encryption key (SSE-C), you don't need to specify the encryption parameters +// in each UploadPart request. Instead, you only need to specify the server-side +// encryption parameters in the initial Initiate Multipart request. For more +// information, see CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html). +// If you request server-side encryption using a customer-provided encryption +// key (SSE-C) in your initiate multipart upload request, you must provide +// identical encryption information in each part upload using the following +// request headers. x-amz-server-side-encryption-customer-algorithm x-amz-server-side-encryption-customer-key +// x-amz-server-side-encryption-customer-key-MD5 +// +// - Directory bucket - For directory buckets, only server-side encryption +// with Amazon S3 managed keys (SSE-S3) (AES256) is supported. +// +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html) +// in the Amazon S3 User Guide. +// +// Special errors +// +// - Error Code: NoSuchUpload Description: The specified multipart upload +// does not exist. The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// SOAP Fault Code Prefix: Client +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to UploadPart: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. To +// specify the data source, you add the request header x-amz-copy-source in +// your request. To specify a byte range, you add the request header x-amz-copy-source-range +// in your request. +// +// For information about maximum and minimum part sizes and other multipart +// upload specifications, see Multipart upload limits (https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html) +// in the Amazon S3 User Guide. +// +// Instead of copying data from an existing object as part data, you might use +// the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// action to upload new data as a part of an object in your request. +// +// You must initiate a multipart upload before you can upload any part. In response +// to your initiate request, Amazon S3 returns the upload ID, a unique identifier +// that you must include in your upload part request. +// +// For conceptual information about multipart uploads, see Uploading Objects +// Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html) +// in the Amazon S3 User Guide. For information about copying objects using +// a single atomic action vs. a multipart upload, see Operations on Objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html) in +// the Amazon S3 User Guide. +// +// Directory buckets - For directory buckets, you must make requests for this +// API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +// requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name +// . Path-style requests are not supported. For more information, see Regional +// and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html) +// in the Amazon S3 User Guide. 
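+//
+// A minimal illustrative sketch of copying the first 5 MiB of an existing
+// object into part 1, assuming an existing *S3 client and an uploadID from
+// CreateMultipartUpload; the bucket, key, and source names are example values:
+//
+// _, err := client.UploadPartCopy(&UploadPartCopyInput{
+//     Bucket:          aws.String("DOC-EXAMPLE-DEST"),              // assumed
+//     Key:             aws.String("assembled-object"),              // assumed
+//     CopySource:      aws.String("DOC-EXAMPLE-SRC/source-object"), // assumed
+//     CopySourceRange: aws.String("bytes=0-5242879"),               // 5 MiB
+//     UploadId:        uploadID,
+//     PartNumber:      aws.Int64(1),
+// })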
+//
+// # Authentication and authorization
+//
+// All UploadPartCopy requests must be authenticated and signed by using IAM
+// credentials (access key ID and secret access key for the IAM identities).
+// All headers with the x-amz- prefix, including x-amz-copy-source, must be
+// signed. For more information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html).
+//
+// Directory buckets - You must use IAM credentials to authenticate and authorize
+// your access to the UploadPartCopy API operation, instead of using the temporary
+// security credentials through the CreateSession API operation.
+//
+// The Amazon Web Services CLI and SDKs handle authentication and authorization
+// on your behalf.
+//
+// # Permissions
+//
+// You must have READ access to the source object and WRITE access to the destination
+// bucket.
+//
+// - General purpose bucket permissions - You must have the permissions in
+// a policy based on the bucket types of your source bucket and destination
+// bucket in an UploadPartCopy operation. If the source object is in a general
+// purpose bucket, you must have the s3:GetObject permission to read the
+// source object that is being copied. If the destination bucket is a general
+// purpose bucket, you must have the s3:PutObject permission to write the
+// object copy to the destination bucket. For information about permissions
+// required to use the multipart upload API, see Multipart Upload and Permissions
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
+// in the Amazon S3 User Guide.
+//
+// - Directory bucket permissions - You must have permissions in a bucket
+// policy or an IAM identity-based policy based on the source and destination
+// bucket types in an UploadPartCopy operation. If the source object that
+// you want to copy is in a directory bucket, you must have the s3express:CreateSession
+// permission in the Action element of a policy to read the object. By default,
+// the session is in the ReadWrite mode. If you want to restrict the access,
+// you can explicitly set the s3express:SessionMode condition key to ReadOnly
+// on the copy source bucket. If the copy destination is a directory bucket,
+// you must have the s3express:CreateSession permission in the Action element
+// of a policy to write the object to the destination. The s3express:SessionMode
+// condition key cannot be set to ReadOnly on the copy destination. For example
+// policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
+// and Amazon Web Services Identity and Access Management (IAM) identity-based
+// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
+// in the Amazon S3 User Guide.
+//
+// Encryption
+//
+// - General purpose buckets - For information about using server-side encryption
+// with customer-provided encryption keys with the UploadPartCopy operation,
+// see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
+// and UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html).
+//
+// - Directory buckets - For directory buckets, only server-side encryption
+// with Amazon S3 managed keys (SSE-S3) (AES256) is supported.
+//
+// Special errors
+//
+// - Error Code: NoSuchUpload Description: The specified multipart upload
+// does not exist.
The upload ID might be invalid, or the multipart upload +// might have been aborted or completed. HTTP Status Code: 404 Not Found +// +// - Error Code: InvalidRequest Description: The specified copy source is +// not supported as a byte-range copy source. HTTP Status Code: 400 Bad Request +// +// # HTTP Host header syntax +// +// Directory buckets - The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. +// +// The following operations are related to UploadPartCopy: +// +// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) +// +// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) +// +// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) +// +// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) +// +// - ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html) +// +// - ListMultipartUploads (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opWriteGetObjectResponse = "WriteGetObjectResponse" + +// WriteGetObjectResponseRequest generates a "aws/request.Request" representing the +// client's request for the WriteGetObjectResponse operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See WriteGetObjectResponse for more information on using the WriteGetObjectResponse +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the WriteGetObjectResponseRequest method. 
+// req, resp := client.WriteGetObjectResponseRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponseRequest(input *WriteGetObjectResponseInput) (req *request.Request, output *WriteGetObjectResponseOutput) {
+	op := &request.Operation{
+		Name:       opWriteGetObjectResponse,
+		HTTPMethod: "POST",
+		HTTPPath:   "/WriteGetObjectResponse",
+	}
+
+	if input == nil {
+		input = &WriteGetObjectResponseInput{}
+	}
+
+	output = &WriteGetObjectResponseOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Sign.Remove(v4.SignRequestHandler)
+	handler := v4.BuildNamedHandler("v4.CustomSignerHandler", v4.WithUnsignedPayload)
+	req.Handlers.Sign.PushFrontNamed(handler)
+	req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	req.Handlers.Build.PushBackNamed(protocol.NewHostPrefixHandler("{RequestRoute}.", input.hostLabels))
+	req.Handlers.Build.PushBackNamed(protocol.ValidateEndpointHostHandler)
+	return
+}
+
+// WriteGetObjectResponse API operation for Amazon Simple Storage Service.
+//
+// This operation is not supported by directory buckets.
+//
+// Passes transformed objects to a GetObject operation when using Object Lambda
+// access points. For information about Object Lambda access points, see Transforming
+// objects with Object Lambda access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html)
+// in the Amazon S3 User Guide.
+//
+// This operation supports metadata that can be returned by GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html),
+// in addition to RequestRoute, RequestToken, StatusCode, ErrorCode, and ErrorMessage.
+// The GetObject response metadata is supported so that the WriteGetObjectResponse
+// caller, typically a Lambda function, can provide the same metadata when
+// it internally invokes GetObject. When WriteGetObjectResponse is called by
+// a customer-owned Lambda function, the metadata returned to the end user GetObject
+// call might differ from what Amazon S3 would normally return.
+//
+// You can include any number of metadata headers. When including a metadata
+// header, it should be prefaced with x-amz-meta. For example, x-amz-meta-my-custom-header:
+// MyCustomValue. The primary use case for this is to forward GetObject metadata.
+//
+// Amazon Web Services provides some prebuilt Lambda functions that you can
+// use with S3 Object Lambda to detect and redact personally identifiable information
+// (PII) and decompress S3 objects. These Lambda functions are available in
+// the Amazon Web Services Serverless Application Repository, and can be selected
+// through the Amazon Web Services Management Console when you create your Object
+// Lambda access point.
+//
+// Example 1: PII Access Control - This Lambda function uses Amazon Comprehend,
+// a natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically detects personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a
+// natural language processing (NLP) service using machine learning to find
+// insights and relationships in text. It automatically redacts personally identifiable
+// information (PII) such as names, addresses, dates, credit card numbers, and
+// social security numbers from documents in your Amazon S3 bucket.
+//
+// Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression
+// is equipped to decompress objects stored in S3 in one of six compressed file
+// formats including bzip2, gzip, snappy, zlib, zstandard, and ZIP.
+//
+// For information on how to view and use these functions, see Using Amazon
+// Web Services built Lambda functions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html)
+// in the Amazon S3 User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation WriteGetObjectResponse for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse
+func (c *S3) WriteGetObjectResponse(input *WriteGetObjectResponseInput) (*WriteGetObjectResponseOutput, error) {
+	req, out := c.WriteGetObjectResponseRequest(input)
+	return out, req.Send()
+}
+
+// WriteGetObjectResponseWithContext is the same as WriteGetObjectResponse with the addition of
+// the ability to pass a context and additional request options.
+//
+// See WriteGetObjectResponse for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WriteGetObjectResponseWithContext(ctx aws.Context, input *WriteGetObjectResponseInput, opts ...request.Option) (*WriteGetObjectResponseOutput, error) {
+	req, out := c.WriteGetObjectResponseRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// Specifies the days since the initiation of an incomplete multipart upload
+// that Amazon S3 will wait before permanently removing all parts of the upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon S3 User Guide.
+type AbortIncompleteMultipartUpload struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the number of days after which Amazon S3 aborts an incomplete multipart
+	// upload.
+	DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s AbortIncompleteMultipartUpload) GoString() string {
+	return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
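+//
+// An illustrative sketch of wiring this setter into a lifecycle rule, assuming
+// the rule is later applied with PutBucketLifecycleConfiguration; the rule ID,
+// prefix, and 7-day window are example values:
+//
+// rule := &LifecycleRule{
+//     ID:     aws.String("abort-stale-multipart-uploads"), // assumed
+//     Status: aws.String("Enabled"),
+//     Filter: &LifecycleRuleFilter{Prefix: aws.String("uploads/")}, // assumed
+//     AbortIncompleteMultipartUpload: (&AbortIncompleteMultipartUpload{}).
+//         SetDaysAfterInitiation(7),
+// }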
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +type AbortMultipartUploadInput struct { + _ struct{} `locationName:"AbortMultipartUploadRequest" type:"structure"` + + // The bucket name to which the upload was taking place. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key of the object for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID that identifies the multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *AbortMultipartUploadInput) SetExpectedBucketOwner(v string) *AbortMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *AbortMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *AbortMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s AbortMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon S3 User Guide. +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the transfer acceleration status of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Contains the elements that set the ACL permissions for an object per grantee. +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// A container for information about access control for replicas. +type AccessControlTranslation struct { + _ struct{} `type:"structure"` + + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon S3 API Reference. + // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. 
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
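+
+// --- Editor's note (illustrative sketch; not part of the generated SDK) ---
+// A minimal AnalyticsConfiguration under the same assumptions as the earlier
+// sketches; Id and StorageClassAnalysis are the two required members that
+// the Validate method just below enforces, and the destination bucket ARN is
+// a placeholder:
+//
+//	cfg := &s3.AnalyticsConfiguration{
+//		Id:     aws.String("example-analytics-id"),
+//		Filter: (&s3.AnalyticsFilter{}).SetPrefix("documents/"),
+//		StorageClassAnalysis: &s3.StorageClassAnalysis{
+//			DataExport: &s3.StorageClassAnalysisDataExport{
+//				OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
+//				Destination: &s3.AnalyticsExportDestination{
+//					S3BucketDestination: &s3.AnalyticsS3BucketDestination{
+//						Bucket: aws.String("arn:aws:s3:::amzn-s3-demo-destination-bucket"),
+//						Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
+//					},
+//				},
+//			},
+//		},
+//	}
+// ---------------------------------------------------------------------------
+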
+func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Where to publish the analytics results. +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// The filter used to describe a set of objects for analyses. A filter must +// have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). +// If no filter is provided, all objects will be considered in any analysis. +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. 
+ And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Contains information about where to publish the analytics results. +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + BucketAccountId *string `type:"string"` + + // Specifies the file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The prefix is prepended to all results. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
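+
+// --- Editor's note (illustrative sketch; not part of the generated SDK) ---
+// Continuing the cfg sketch above, the assembled configuration is attached
+// to a bucket with PutBucketAnalyticsConfiguration; svc and the bucket name
+// are the placeholders introduced in the earlier notes:
+//
+//	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
+//		Bucket:                 aws.String("amzn-s3-demo-bucket"),
+//		Id:                     cfg.Id,
+//		AnalyticsConfiguration: cfg,
+//	})
+// ---------------------------------------------------------------------------
+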
+func (s AnalyticsS3BucketDestination) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsS3BucketDestination) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Format == nil {
+		invalidParams.Add(request.NewErrParamRequired("Format"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination {
+	s.Bucket = &v
+	return s
+}
+
+func (s *AnalyticsS3BucketDestination) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetBucketAccountId sets the BucketAccountId field's value.
+func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
+	s.BucketAccountId = &v
+	return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination {
+	s.Format = &v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination {
+	s.Prefix = &v
+	return s
+}
+
+// In terms of implementation, a Bucket is a resource.
+type Bucket struct {
+	_ struct{} `type:"structure"`
+
+	// Date the bucket was created. This date can change when making changes to
+	// your bucket, such as editing its bucket policy.
+	CreationDate *time.Time `type:"timestamp"`
+
+	// The name of the bucket.
+	Name *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Bucket) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Bucket) GoString() string {
+	return s.String()
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
+	s.CreationDate = &v
+	return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+	s.Name = &v
+	return s
+}
+
+// Specifies the information about the bucket that will be created. For more
+// information about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
+// in the Amazon S3 User Guide.
+//
+// This functionality is only supported by directory buckets.
+type BucketInfo struct {
+	_ struct{} `type:"structure"`
+
+	// The number of Availability Zones used for redundancy for the bucket.
+	DataRedundancy *string `type:"string" enum:"DataRedundancy"`
+
+	// The type of bucket.
+	Type *string `type:"string" enum:"BucketType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
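+
+// --- Editor's note (illustrative sketch; not part of the generated SDK) ---
+// BucketInfo is consumed through CreateBucketConfiguration when creating a
+// directory bucket. This assumes the vendored revision exposes the
+// directory-bucket members and constants used below (CreateBucketConfiguration's
+// Bucket and Location fields and the DataRedundancy*, BucketType*, and
+// LocationType* constants); the bucket name and AZ ID are placeholders:
+//
+//	_, err := svc.CreateBucket(&s3.CreateBucketInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket--usw2-az1--x-s3"),
+//		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//			Bucket: &s3.BucketInfo{
+//				DataRedundancy: aws.String(s3.DataRedundancySingleAvailabilityZone),
+//				Type:           aws.String(s3.BucketTypeDirectory),
+//			},
+//			Location: &s3.LocationInfo{
+//				Name: aws.String("usw2-az1"),
+//				Type: aws.String(s3.LocationTypeAvailabilityZone),
+//			},
+//		},
+//	})
+// ---------------------------------------------------------------------------
+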
+func (s BucketInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketInfo) GoString() string { + return s.String() +} + +// SetDataRedundancy sets the DataRedundancy field's value. +func (s *BucketInfo) SetDataRedundancy(v string) *BucketInfo { + s.DataRedundancy = &v + return s +} + +// SetType sets the Type field's value. +func (s *BucketInfo) SetType(v string) *BucketInfo { + s.Type = &v + return s +} + +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon S3 User Guide. +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // A lifecycle rule for individual objects in an Amazon S3 bucket. + // + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Container for logging status information. +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// S3 User Guide. +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + // + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Specifies a cross-origin access rule for an Amazon S3 bucket. +type CORSRule struct { + _ struct{} `type:"structure"` + + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. 
+ // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). + ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetID sets the ID field's value. +func (s *CORSRule) SetID(v string) *CORSRule { + s.ID = &v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Describes how an uncompressed comma-separated values (CSV)-formatted input +// object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // A single character used to indicate that a row should be ignored when the + // character is present at the start of that row. You can specify any character + // to indicate a comment line. 
The default character is #. + // + // Default: # + Comments *string `type:"string"` + + // A single character used to separate individual fields in a record. You can + // specify an arbitrary delimiter. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values are: + // + // * NONE: First line is not a header. + // + // * IGNORE: First line is a header, but you can't use the header values + // to indicate the column in an expression. You can use column position (such + // as _1, _2, …) to indicate the column (SELECT s._1 FROM OBJECT s). + // + // * Use: First line is a header, and you can use the header value to identify + // a column in an expression (SELECT "name" FROM OBJECT). + FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"` + + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + // + // Type: String + // + // Default: " + // + // Ancestors: CSV + QuoteCharacter *string `type:"string"` + + // A single character used for escaping the quotation mark character inside + // an already escaped value. For example, the value """ a , b """ is parsed + // as " a , b ". + QuoteEscapeCharacter *string `type:"string"` + + // A single character used to separate individual records in the input. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVInput) GoString() string { + return s.String() +} + +// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value. +func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput { + s.AllowQuotedRecordDelimiter = &v + return s +} + +// SetComments sets the Comments field's value. +func (s *CSVInput) SetComments(v string) *CSVInput { + s.Comments = &v + return s +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput { + s.FieldDelimiter = &v + return s +} + +// SetFileHeaderInfo sets the FileHeaderInfo field's value. +func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput { + s.FileHeaderInfo = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput { + s.RecordDelimiter = &v + return s +} + +// Describes how uncompressed comma-separated values (CSV)-formatted results +// are formatted. 
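+
+// --- Editor's note (illustrative sketch; not part of the generated SDK) ---
+// CSVInput describes the query input, and the CSVOutput type defined next
+// mirrors it for query results; both are wired into SelectObjectContent.
+// Same placeholder assumptions as the earlier sketches:
+//
+//	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
+//		Bucket:         aws.String("amzn-s3-demo-bucket"),
+//		Key:            aws.String("data.csv"),
+//		Expression:     aws.String(`SELECT s."name" FROM S3Object s`),
+//		ExpressionType: aws.String(s3.ExpressionTypeSql),
+//		InputSerialization: &s3.InputSerialization{
+//			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
+//		},
+//		OutputSerialization: &s3.OutputSerialization{
+//			CSV: &s3.CSVOutput{RecordDelimiter: aws.String("\n")},
+//		},
+//	})
+// ---------------------------------------------------------------------------
+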
+type CSVOutput struct { + _ struct{} `type:"structure"` + + // The value used to separate individual fields in a record. You can specify + // an arbitrary delimiter. + FieldDelimiter *string `type:"string"` + + // A single character used for escaping when the field delimiter is part of + // the value. For example, if the value is a, b, Amazon S3 wraps this field + // value in quotation marks, as follows: " a , b ". + QuoteCharacter *string `type:"string"` + + // The single character used for escaping the quote character inside an already + // escaped value. + QuoteEscapeCharacter *string `type:"string"` + + // Indicates whether to use quotation marks around output fields. + // + // * ALWAYS: Always use quotation marks for output fields. + // + // * ASNEEDED: Use quotation marks for output fields when needed. + QuoteFields *string `type:"string" enum:"QuoteFields"` + + // A single character used to separate individual records in the output. Instead + // of the default value, you can specify an arbitrary delimiter. + RecordDelimiter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CSVOutput) GoString() string { + return s.String() +} + +// SetFieldDelimiter sets the FieldDelimiter field's value. +func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput { + s.FieldDelimiter = &v + return s +} + +// SetQuoteCharacter sets the QuoteCharacter field's value. +func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput { + s.QuoteCharacter = &v + return s +} + +// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value. +func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput { + s.QuoteEscapeCharacter = &v + return s +} + +// SetQuoteFields sets the QuoteFields field's value. +func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput { + s.QuoteFields = &v + return s +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + +// Contains all the possible checksum or digest values for an object. +type Checksum struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. 
When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Checksum) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Checksum) SetChecksumCRC32(v string) *Checksum { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Checksum) SetChecksumCRC32C(v string) *Checksum { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Checksum) SetChecksumSHA1(v string) *Checksum { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *Checksum) SetChecksumSHA256(v string) *Checksum { + s.ChecksumSHA256 = &v + return s +} + +// Container for specifying the Lambda notification configuration. +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + CloudFunction *string `type:"string"` + + // The bucket event for which to send notifications. 
+ // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // Bucket events for which to send notifications. + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The role supporting the invocation of the Lambda function + InvocationRole *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. +func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Container for all (if there are any) keys between Prefix and the next occurrence +// of the string specified by a delimiter. CommonPrefixes lists keys that act +// like subdirectories in the directory specified by Prefix. For example, if +// the prefix is notes/ and the delimiter is a slash (/) as in notes/summer/july, +// the common prefix is notes/summer/. +type CommonPrefix struct { + _ struct{} `type:"structure"` + + // Container for the specified common prefix. + Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. 
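+
+// --- Editor's note (illustrative sketch; not part of the generated SDK) ---
+// CommonPrefixes surface in delimiter-based listings. Reusing the notes/
+// example from the comment above, with the placeholder svc and bucket from
+// the earlier sketches, and fmt assumed imported:
+//
+//	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
+//		Bucket:    aws.String("amzn-s3-demo-bucket"),
+//		Prefix:    aws.String("notes/"),
+//		Delimiter: aws.String("/"),
+//	})
+//	if err == nil {
+//		for _, p := range out.CommonPrefixes {
+//			fmt.Println(aws.StringValue(p.Prefix)) // e.g. "notes/summer/"
+//		}
+//	}
+// ---------------------------------------------------------------------------
+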
+func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +type CompleteMultipartUploadInput struct { + _ struct{} `locationName:"CompleteMultipartUploadRequest" type:"structure" payload:"MultipartUpload"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. 
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The container for the multipart upload request information. + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is required only when the object was created using a checksum algorithm + // or if your bucket policy requires the use of SSE-C. For more information, + // see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. 
+ // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // ID for the initiated multipart upload. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadInput) SetChecksumCRC32C(v string) *CompleteMultipartUploadInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CompleteMultipartUploadInput) SetChecksumSHA1(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. 
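+
+// --- Editor's note (illustrative sketch; not part of the generated SDK) ---
+// A multipart upload is finished by sending the accumulated part ETags back
+// in a CompletedMultipartUpload. uploadId and etagPart1 are placeholders
+// carried over from CreateMultipartUpload and UploadPart calls not shown:
+//
+//	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//		Bucket:   aws.String("amzn-s3-demo-bucket"),
+//		Key:      aws.String("example-object"),
+//		UploadId: uploadId,
+//		MultipartUpload: &s3.CompletedMultipartUpload{
+//			Parts: []*s3.CompletedPart{
+//				{ETag: etagPart1, PartNumber: aws.Int64(1)},
+//			},
+//		},
+//	})
+// ---------------------------------------------------------------------------
+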
+func (s *CompleteMultipartUploadInput) SetChecksumSHA256(v string) *CompleteMultipartUploadInput { + s.ChecksumSHA256 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CompleteMultipartUploadInput) SetExpectedBucketOwner(v string) *CompleteMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CompleteMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKey(v string) *CompleteMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CompleteMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CompleteMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CompleteMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +func (s *CompleteMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CompleteMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CompleteMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket that contains the newly created object. Does not return + // the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. + Bucket *string `type:"string"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. 
When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag that identifies the newly created object's data. Objects with + // different object data will have different entity tags. The entity tag is + // an opaque string. The entity tag may or may not be an MD5 digest of the object + // data. If the entity tag is not an MD5 digest of the object data, it will + // contain one or more nonhexadecimal characters and/or will consist of less + // than 32 or more than 32 hexadecimal digits. For more information about how + // the entity tag is calculated, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. 
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The object key of the newly created object. + Key *string `min:"1" type:"string"` + + // The URI that identifies the newly created object. + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CompleteMultipartUploadOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created object, in case the bucket has versioning + // turned on. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CompleteMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumCRC32C(v string) *CompleteMultipartUploadOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. 
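+//
+// As a usage sketch only (bucket, key, and part values are placeholders; svc
+// is assumed to be an *s3.S3 client, and uploadID and the ETags to come from
+// earlier CreateMultipartUpload and UploadPart calls), the types above fit
+// together like this, with part numbers starting at 1 and consecutive:
+//
+//	out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//		Bucket:   aws.String("example-bucket"),
+//		Key:      aws.String("example-key"),
+//		UploadId: aws.String(uploadID),
+//		MultipartUpload: &s3.CompletedMultipartUpload{
+//			Parts: []*s3.CompletedPart{
+//				{ETag: aws.String(etag1), PartNumber: aws.Int64(1)},
+//				{ETag: aws.String(etag2), PartNumber: aws.Int64(2)},
+//			},
+//		},
+//	})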
+func (s *CompleteMultipartUploadOutput) SetChecksumSHA1(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CompleteMultipartUploadOutput) SetChecksumSHA256(v string) *CompleteMultipartUploadOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +// The container for the completed multipart upload details. +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + // Array of CompletedPart data types. + // + // If you do not supply a valid Part with your request, the service sends back + // an HTTP 400 response. + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +// Details of the parts that were uploaded. +type CompletedPart struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. 
For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `type:"string"`
+
+ // The base64-encoded, 32-bit CRC32C checksum of the object. This will only
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `type:"string"`
+
+ // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ // present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `type:"string"`
+
+ // The base64-encoded, 256-bit SHA-256 digest of the object. This will only
+ // be present if it was uploaded with the object. When you use an API operation
+ // on an object that was uploaded using multipart uploads, this value may not
+ // be a direct checksum value of the full object. Instead, it's a calculation
+ // based on the checksum values of each individual part. For more information
+ // about how checksums are calculated with multipart uploads, see Checking object
+ // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `type:"string"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part. This is a positive integer between
+ // 1 and 10,000.
+ //
+ // * General purpose buckets - In CompleteMultipartUpload, when an additional
+ // checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1,
+ // or x-amz-checksum-sha256) is applied to each part, the PartNumber must
+ // start at 1 and the part numbers must be consecutive. Otherwise, Amazon
+ // S3 generates an HTTP 400 Bad Request status code and an InvalidPartOrder
+ // error code.
+ //
+ // * Directory buckets - In CompleteMultipartUpload, the PartNumber must
+ // start at 1 and the part numbers must be consecutive.
+ PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedPart) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s CompletedPart) GoString() string {
+ return s.String()
+}
+
+// SetChecksumCRC32 sets the ChecksumCRC32 field's value.
+func (s *CompletedPart) SetChecksumCRC32(v string) *CompletedPart {
+ s.ChecksumCRC32 = &v
+ return s
+}
+
+// SetChecksumCRC32C sets the ChecksumCRC32C field's value.
+func (s *CompletedPart) SetChecksumCRC32C(v string) *CompletedPart {
+ s.ChecksumCRC32C = &v
+ return s
+}
+
+// SetChecksumSHA1 sets the ChecksumSHA1 field's value.
+func (s *CompletedPart) SetChecksumSHA1(v string) *CompletedPart {
+ s.ChecksumSHA1 = &v
+ return s
+}
+
+// SetChecksumSHA256 sets the ChecksumSHA256 field's value.
+func (s *CompletedPart) SetChecksumSHA256(v string) *CompletedPart {
+ s.ChecksumSHA256 = &v
+ return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+ s.ETag = &v
+ return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+ s.PartNumber = &v
+ return s
+}
+
+// A container for describing a condition that must be met for the specified
+// redirect to apply. For example, 1. If the request is for pages in the /docs
+// folder, redirect to the /documents folder. 2. If the request results in an
+// HTTP 4xx error, redirect the request to another host where you might process
+// the error.
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect requests for all pages with the prefix docs/, the key prefix will
+ // be docs/, which identifies all objects in the docs/ folder. Required when
+ // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
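+//
+// For illustration, a Condition that redirects requests under the docs/ prefix
+// when they return HTTP 404 could be built with the setters below (both values
+// are hypothetical):
+//
+//	cond := &s3.Condition{}
+//	cond.SetKeyPrefixEquals("docs/")
+//	cond.SetHttpErrorCodeReturnedEquals("404")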
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+ s.HttpErrorCodeReturnedEquals = &v
+ return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+ s.KeyPrefixEquals = &v
+ return s
+}
+
+type ContinuationEvent struct {
+ _ struct{} `locationName:"ContinuationEvent" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ContinuationEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ContinuationEvent) GoString() string {
+ return s.String()
+}
+
+// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ return msg, err
+}
+
+type CopyObjectInput struct {
+ _ struct{} `locationName:"CopyObjectRequest" type:"structure"`
+
+ // The canned access control list (ACL) to apply to the object.
+ //
+ // When you copy an object, the ACL metadata is not preserved and is set to
+ // private by default. Only the owner has full access control. To override the
+ // default ACL setting, specify a new ACL when you generate a copy request.
+ // For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html).
+ //
+ // If the destination bucket that you're copying objects to uses the bucket
+ // owner enforced setting for S3 Object Ownership, ACLs are disabled and no
+ // longer affect permissions. Buckets that use this setting only accept PUT
+ // requests that don't specify an ACL or PUT requests that specify bucket owner
+ // full control ACLs, such as the bucket-owner-full-control canned ACL or an
+ // equivalent form of this ACL expressed in the XML format. For more information,
+ // see Controlling ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
+ // in the Amazon S3 User Guide.
+ //
+ // * If your destination bucket uses the bucket owner enforced setting for
+ // Object Ownership, all objects written to the bucket by any account will
+ // be owned by the bucket owner.
+ //
+ // * This functionality is not supported for directory buckets.
+ // + // * This functionality is not supported for Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The name of the destination bucket. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the + // object. + // + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. Specifying this header with a COPY action + // doesn’t affect bucket-level settings for S3 Bucket Key. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies the caching behavior along the request/reply chain. 
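+ //
+ // For example, a sketch of a one-day public cache policy on the object copy:
+ //
+ //	input.SetCacheControl("public, max-age=86400")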
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // When you copy an object, if the source object has a checksum, that checksum + // value will be copied to the new object by default. If the CopyObject request + // does not include this x-amz-checksum-algorithm header, the checksum algorithm + // will be copied from the source object to the destination object (if it's + // present on the source object). You can optionally specify a different checksum + // algorithm to use with the x-amz-checksum-algorithm header. Unrecognized or + // unsupported values will respond with the HTTP status code 400 Bad Request. + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Specifies presentational information for the object. Indicates whether an + // object should be displayed in a web browser or downloaded as a file. It allows + // specifying the desired filename for the downloaded file. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type that describes the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies the source object for the copy operation. The source object can + // be up to 5 GB. If the source object is an object that was uploaded by using + // a multipart upload, the object copy will be a single part object after the + // source object is copied to the destination bucket. + // + // You specify the value of the copy source in one of two formats, depending + // on whether you want to access the source object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html): + // + // * For objects not accessed through an access point, specify the name of + // the source bucket and the key of the source object, separated by a slash + // (/). For example, to copy the object reports/january.pdf from the general + // purpose bucket awsexamplebucket, use awsexamplebucket/reports/january.pdf. + // The value must be URL-encoded. To copy the object reports/january.pdf + // from the directory bucket awsexamplebucket--use1-az5--x-s3, use awsexamplebucket--use1-az5--x-s3/reports/january.pdf. + // The value must be URL-encoded. + // + // * For objects accessed through access points, specify the Amazon Resource + // Name (ARN) of the object as accessed through the access point, in the + // format arn:aws:s3:::accesspoint//object/. 
+ // For example, to copy the object reports/january.pdf through access point + // my-access-point owned by account 123456789012 in Region us-west-2, use + // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf. + // The value must be URL encoded. Amazon S3 supports copy operations using + // Access points only when the source and destination buckets are in the + // same Amazon Web Services Region. Access points are not supported by directory + // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts, + // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:::outpost//object/. + // For example, to copy the object reports/january.pdf through outpost my-outpost + // owned by account 123456789012 in Region us-west-2, use the URL encoding + // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf. + // The value must be URL-encoded. + // + // If your source bucket versioning is enabled, the x-amz-copy-source header + // by default identifies the current version of an object to copy. If the current + // version is a delete marker, Amazon S3 behaves as if the object was deleted. + // To copy a different version, use the versionId query parameter. Specifically, + // append ?versionId= to the value (for example, awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893). + // If you don't specify a version ID, Amazon S3 copies the latest version of + // the source object. + // + // If you enable versioning on the destination bucket, Amazon S3 generates a + // unique version ID for the copied object. This version ID is different from + // the version ID of the source object. Amazon S3 returns the version ID of + // the copied object in the x-amz-version-id response header in the response. + // + // If you do not enable versioning or suspend it on the destination bucket, + // the version ID that Amazon S3 generates in the x-amz-version-id response + // header is always null. + // + // Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // + // * x-amz-copy-source-if-match condition evaluates to true + // + // * x-amz-copy-source-if-unmodified-since condition evaluates to false + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + // + // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // the 412 Precondition Failed response code: + // + // * x-amz-copy-source-if-none-match condition evaluates to false + // + // * x-amz-copy-source-if-modified-since condition evaluates to true + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. 
+ // + // If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // the 412 Precondition Failed response code: + // + // * x-amz-copy-source-if-none-match condition evaluates to false + // + // * x-amz-copy-source-if-modified-since condition evaluates to true + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + // + // If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request and evaluate as follows, Amazon S3 returns + // 200 OK and copies the data: + // + // * x-amz-copy-source-if-match condition evaluates to true + // + // * x-amz-copy-source-if-unmodified-since condition evaluates to false + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be the + // same one that was used when the source object was created. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // If the source object for the copy is stored in Amazon S3 using SSE-C, you + // must provide the necessary encryption information in your request so that + // Amazon S3 can decrypt the object for copying. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). 
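+ //
+ // As a sketch (both account IDs are hypothetical), ownership can be pinned
+ // on both sides of a copy so a renamed or recreated bucket fails fast:
+ //
+ //	input.SetExpectedBucketOwner("111122223333")
+ //	input.SetExpectedSourceBucketOwner("444455556666")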
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account ID of the expected source bucket owner. If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // The key of the destination object. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether the metadata is copied from the source object or replaced + // with metadata that's provided in the request. When copying an object, you + // can preserve all metadata (the default) or specify new metadata. If this + // header isn’t specified, COPY is the default behavior. + // + // General purpose bucket - For general purpose buckets, when you grant permissions, + // you can use the s3:x-amz-metadata-directive condition key to enforce certain + // metadata behavior when objects are uploaded. For more information, see Amazon + // S3 condition key examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html) + // in the Amazon S3 User Guide. + // + // x-amz-website-redirect-location is unique to each object and is not copied + // when using the x-amz-metadata-directive header. To copy the value, you must + // specify x-amz-website-redirect-location in the request header. + MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + + // Specifies whether you want to apply a legal hold to the object copy. + // + // This functionality is not supported for directory buckets. 
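+ //
+ // A minimal sketch, as seen from calling code, using the package's enum
+ // constant for the header value:
+ //
+ //	input.SetObjectLockLegalHoldStatus(s3.ObjectLockLegalHoldStatusOn)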
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to the object copy. + // + // This functionality is not supported for directory buckets. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the Object Lock of the object copy to expire. + // + // This functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded. Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. 
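+ //
+ // The SDK can derive this digest when SSECustomerKey is set; computed by hand
+ // it would look like this sketch (the key bytes are placeholders):
+ //
+ //	rawKey := bytes.Repeat([]byte{0x01}, 32) // 256-bit SSE-C key
+ //	sum := md5.Sum(rawKey)                   // RFC 1321 digest of the raw key
+ //	input.SetSSECustomerKeyMD5(base64.StdEncoding.EncodeToString(sum[:]))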
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. This value must be explicitly + // added to specify encryption context for CopyObject requests. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object encryption. + // All GET and PUT requests for an object protected by KMS will fail if they're + // not made via SSL or using SigV4. For information about configuring any of + // the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // in the Amazon S3 User Guide. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectInput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when storing this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). Unrecognized or unsupported + // values won’t write a destination object and will receive a 400 Bad Request + // response. + // + // Amazon S3 automatically encrypts all new objects that are copied to an S3 + // bucket. When copying an object, if you don't specify encryption information + // in your copy request, the encryption setting of the target object is set + // to the default encryption configuration of the destination bucket. By default, + // all buckets have a base level of encryption configuration that uses server-side + // encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket + // has a default encryption configuration that uses server-side encryption with + // Key Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption + // with Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with + // customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding + // KMS key, or a customer-provided key to encrypt the target object copy. + // + // When you perform a CopyObject operation, if you want to use a different type + // of encryption setting for the target object, you can specify appropriate + // encryption-related headers to encrypt the target object with an Amazon S3 + // managed key, a KMS key, or a customer-provided key. If the encryption setting + // in your request is different from the default encryption configuration of + // the destination bucket, the encryption setting in your request takes precedence. 
+ //
+ // With server-side encryption, Amazon S3 encrypts your data as it writes your
+ // data to disks in its data centers and decrypts the data when you access it.
+ // For more information about server-side encryption, see Using Server-Side
+ // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
+ // in the Amazon S3 User Guide.
+ //
+ // For directory buckets, only server-side encryption with Amazon S3 managed
+ // keys (SSE-S3) (AES256) is supported.
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // If the x-amz-storage-class header is not used, the copied object will be
+ // stored in the STANDARD Storage Class by default. The STANDARD storage class
+ // provides high durability and high availability. Depending on performance
+ // needs, you can specify a different Storage Class.
+ //
+ // * Directory buckets - For directory buckets, only the S3 Express One Zone
+ // storage class is supported to store newly created objects. Unsupported
+ // storage class values won't write a destination object and will respond
+ // with the HTTP status code 400 Bad Request.
+ //
+ // * Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage
+ // Class.
+ //
+ // You can use the CopyObject action to change the storage class of an object
+ // that is already stored in Amazon S3 by using the x-amz-storage-class header.
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Before using an object as a source object for the copy operation, you must
+ // restore a copy of it if it meets any of the following conditions:
+ //
+ // * The storage class of the source object is GLACIER or DEEP_ARCHIVE.
+ //
+ // * The storage class of the source object is INTELLIGENT_TIERING and its
+ // S3 Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition)
+ // is Archive Access or Deep Archive Access.
+ //
+ // For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
+ // and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html)
+ // in the Amazon S3 User Guide.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // The tag-set for the object copy in the destination bucket. This value must
+ // be used in conjunction with the x-amz-tagging-directive if you choose REPLACE
+ // for the x-amz-tagging-directive. If you choose COPY for the x-amz-tagging-directive,
+ // you don't need to set the x-amz-tagging header, because the tag-set will
+ // be copied from the source object directly. The tag-set must be encoded as
+ // URL Query parameters.
+ //
+ // The default value is the empty value.
+ //
+ // Directory buckets - For directory buckets in a CopyObject operation, only
+ // the empty tag-set is supported. Any requests that attempt to write non-empty
+ // tags into directory buckets will receive a 501 Not Implemented status code.
+ // When the destination bucket is a directory bucket, you will receive a 501
+ // Not Implemented response in any of the following situations:
+ //
+ // * When you attempt to COPY the tag-set from an S3 source object that has
+ // non-empty tags.
+ // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // Specifies whether the object tag-set is copied from the source object or + // replaced with the tag-set that's provided in the request. + // + // The default value is COPY. + // + // Directory buckets - For directory buckets in a CopyObject operation, only + // the empty tag-set is supported. Any requests that attempt to write non-empty + // tags into directory buckets will receive a 501 Not Implemented status code. + // When the destination bucket is a directory bucket, you will receive a 501 + // Not Implemented response in any of the following situations: + // + // * When you attempt to COPY the tag-set from an S3 source object that has + // non-empty tags. + // + // * When you attempt to REPLACE the tag-set of a source object and set a + // non-empty value to x-amz-tagging. + // + // * When you don't set the x-amz-tagging-directive header and the source + // object has non-empty tags. This is because the default value of x-amz-tagging-directive + // is COPY. + // + // Because only the empty tag-set is supported for directory buckets in a CopyObject + // operation, the following situations are allowed: + // + // * When you attempt to COPY the tag-set from a directory bucket source + // object that has no tags to a general purpose bucket. It copies an empty + // tag-set to the destination object. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and set the x-amz-tagging value of the directory bucket destination + // object to empty. + // + // * When you attempt to REPLACE the tag-set of a general purpose bucket + // source object that has non-empty tags and set the x-amz-tagging value + // of the directory bucket destination object to empty. + // + // * When you attempt to REPLACE the tag-set of a directory bucket source + // object and don't set the x-amz-tagging value of the directory bucket destination + // object. This is because the default value of x-amz-tagging is the empty + // value. 
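+ //
+ // A sketch of replacing tags during a copy (the tag name and value are
+ // hypothetical); net/url produces the required URL-query encoding:
+ //
+ //	input.SetTaggingDirective(s3.TaggingDirectiveReplace)
+ //	input.SetTagging(url.Values{"project": []string{"blue"}}.Encode())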
+ TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` + + // If the destination bucket is configured as a website, redirects requests + // for this object copy to another object in the same bucket or to an external + // URL. Amazon S3 stores the value of this header in the object metadata. This + // value is unique to each object and is not copied when using the x-amz-metadata-directive + // header. Instead, you may opt to provide this header in combination with the + // x-amz-metadata-directive header. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectInput) SetBucketKeyEnabled(v bool) *CopyObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CopyObjectInput) SetChecksumAlgorithm(v string) *CopyObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. 
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedBucketOwner(v string) *CopyObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *CopyObjectInput) SetExpectedSourceBucketOwner(v string) *CopyObjectInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. 
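+//
+// When replacing metadata during a copy (x-amz-metadata-directive: REPLACE, as
+// documented on the input fields above), a hedged sketch from calling code
+// pairs the directive with the new map (the key and value are hypothetical):
+//
+//	input.SetMetadataDirective(s3.MetadataDirectiveReplace)
+//	input.SetMetadata(map[string]*string{
+//		"origin": aws.String("copied"),
+//	})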
+func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CopyObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CopyObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + // Indicates whether the copied object uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyObjectResult *CopyObjectResult `type:"structure"` + + // Version ID of the source object that was copied. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + // + // This functionality is not supported for directory buckets. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CopyObjectOutput's + // String and GoString methods. 
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectOutput) SetBucketKeyEnabled(v bool) *CopyObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Container for all response elements. 
+type CopyObjectResult struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Returns the ETag of the new object. The ETag reflects only changes to the + // contents of an object, not its metadata. + ETag *string `type:"string"` + + // Creation date of the object. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CopyObjectResult) SetChecksumCRC32(v string) *CopyObjectResult { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CopyObjectResult) SetChecksumCRC32C(v string) *CopyObjectResult { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CopyObjectResult) SetChecksumSHA1(v string) *CopyObjectResult { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CopyObjectResult) SetChecksumSHA256(v string) *CopyObjectResult { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Container for all response elements. +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. 
This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
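Aside: the checksum members documented above are base64 encodings of the raw checksum bytes, so a client can recompute them locally. A small sketch, assuming the object was uploaded in a single part (for multipart uploads the stored value is instead a checksum computed over the per-part checksums, as the CopyPartResult documentation notes):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

// crc32Base64 computes the base64-encoded, big-endian CRC32 (IEEE) of data,
// matching the ChecksumCRC32 format described above.
func crc32Base64(data []byte) string {
	sum := crc32.ChecksumIEEE(data)
	raw := []byte{byte(sum >> 24), byte(sum >> 16), byte(sum >> 8), byte(sum)}
	return base64.StdEncoding.EncodeToString(raw)
}

func main() {
	body := []byte("hello world")
	local := crc32Base64(body)

	// Compare against the value returned in ChecksumCRC32 (placeholder here).
	remote := local
	fmt.Println("match:", local == remote)
}
```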
+func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *CopyPartResult) SetChecksumCRC32(v string) *CopyPartResult { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *CopyPartResult) SetChecksumCRC32C(v string) *CopyPartResult { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *CopyPartResult) SetChecksumSHA1(v string) *CopyPartResult { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *CopyPartResult) SetChecksumSHA256(v string) *CopyPartResult { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// The configuration information for the bucket. +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the information about the bucket that will be created. + // + // This functionality is only supported by directory buckets. + Bucket *BucketInfo `type:"structure"` + + // Specifies the location where the bucket will be created. + // + // For directory buckets, the location type is Availability Zone. + // + // This functionality is only supported by directory buckets. + Location *LocationInfo `type:"structure"` + + // Specifies the Region where the bucket will be created. You might choose a + // Region to optimize latency, minimize costs, or address regulatory requirements. + // For example, if you reside in Europe, you will probably find it advantageous + // to create buckets in the Europe (Ireland) Region. For more information, see + // Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // in the Amazon S3 User Guide. + // + // If you don't specify a Region, the bucket is created in the US East (N. Virginia) + // Region (us-east-1) by default. + // + // This functionality is not supported for directory buckets. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketConfiguration) SetBucket(v *BucketInfo) *CreateBucketConfiguration { + s.Bucket = v + return s +} + +func (s *CreateBucketConfiguration) getBucket() (v *BucketInfo) { + return s.Bucket +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketConfiguration) SetLocation(v *LocationInfo) *CreateBucketConfiguration { + s.Location = v + return s +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
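Aside: CreateBucketConfiguration is most often used for the LocationConstraint described above; omit it and the bucket is created in us-east-1, set it to pin another Region. A hedged sketch with placeholder names:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("eu-west-1")))
	svc := s3.New(sess)

	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("example-bucket-name"), // placeholder
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			// Must match the Region the client is configured for.
			LocationConstraint: aws.String("eu-west-1"),
		},
	})
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Println("bucket created")
}
```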
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +type CreateBucketInput struct { + _ struct{} `locationName:"CreateBucketRequest" type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + // + // This functionality is not supported for directory buckets. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // The name of the bucket to create. + // + // General purpose buckets - For information about bucket naming restrictions, + // see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) + // in the Amazon S3 User Guide. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The configuration information for the bucket. + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + // + // This functionality is not supported for directory buckets. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + // + // This functionality is not supported for directory buckets. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + // + // This functionality is not supported for directory buckets. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + // + // This functionality is not supported for directory buckets. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for directory buckets. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want S3 Object Lock to be enabled for the new bucket. + // + // This functionality is not supported for directory buckets. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` + + // The container element for object ownership for a bucket's ownership controls. 
+ // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually. For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + ObjectOwnership *string `location:"header" locationName:"x-amz-object-ownership" type:"string" enum:"ObjectOwnership"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. 
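Aside: the Validate method above runs the same client-side parameter checks the SDK applies before signing a request, so it can be called directly to surface an aggregated request.ErrInvalidParams without any network round trip. A minimal sketch:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Bucket is required, so validation fails before any request is sent.
	in := &s3.CreateBucketInput{}
	if err := in.Validate(); err != nil {
		// err is a request.ErrInvalidParams listing every violated constraint.
		fmt.Println(err)
	}
}
```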
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + +// SetObjectOwnership sets the ObjectOwnership field's value. +func (s *CreateBucketInput) SetObjectOwnership(v string) *CreateBucketInput { + s.ObjectOwnership = &v + return s +} + +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + // A forward slash followed by the name of the bucket. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +type CreateMultipartUploadInput struct { + _ struct{} `locationName:"CreateMultipartUploadRequest" type:"structure"` + + // The canned ACL to apply to the object. Amazon S3 supports a set of predefined + // ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees + // and permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can grant access permissions to individual + // Amazon Web Services accounts or to predefined groups defined by Amazon S3. + // These permissions are then added to the access control list (ACL) on the + // new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html). + // One way to grant the permissions using the request headers is to specify + // a canned ACL with the x-amz-acl request header. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // The name of the bucket where the multipart upload is initiated and where + // the object is uploaded. 
+ // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. + // + // Specifying this header with an object action doesn’t affect bucket-level + // settings for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Indicates the algorithm that you want Amazon S3 to use to create the checksum + // for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Specifies presentational information for the object. 
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + // + // For directory buckets, only the aws-chunked value is supported in this header + // field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language that the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Specify access permissions explicitly to give the grantee READ, READ_ACP, + // and WRITE_ACP permissions on the object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Specify access permissions explicitly to allow grantee to read the object + // data and its metadata. + // + // By default, all objects are private. Only the owner has full access control. 
+ // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Specify access permissions explicitly to allow the grantee to read the object + // ACL. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference.
+ // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Specify access permissions explicitly to allow the grantee to write the + // ACL for the applicable object. + // + // By default, all objects are private. Only the owner has full access control. + // When uploading an object, you can use this header to explicitly grant access + // permissions to specific Amazon Web Services accounts or groups. This header + // maps to specific permissions that Amazon S3 supports in an ACL. For more + // information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // in the Amazon S3 User Guide. + // + // You specify each grantee as a type=value pair, where the type is one of the + // following: + // + // * id – if the value specified is the canonical user ID of an Amazon + // Web Services account + // + // * uri – if you are granting permissions to a predefined group + // + // * emailAddress – if the value specified is the email address of an Amazon + // Web Services account Using email addresses to specify a grantee is only + // supported in the following Amazon Web Services Regions: US East (N. Virginia) + // US West (N. California) US West (Oregon) Asia Pacific (Singapore) Asia + // Pacific (Sydney) Asia Pacific (Tokyo) Europe (Ireland) South America (São + // Paulo) For a list of all the Amazon S3 supported Regions and endpoints, + // see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + // + // For example, the following x-amz-grant-read header grants the Amazon Web + // Services accounts identified by account IDs permissions to read object data + // and its metadata: + // + // x-amz-grant-read: id="11112222333", id="444455556666" + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the multipart upload is to be initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether you want to apply a legal hold to the uploaded object. + // + // This functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // Specifies the Object Lock mode that you want to apply to the uploaded object. + // + // This functionality is not supported for directory buckets. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // Specifies the date and time when you want the Object Lock to expire. + // + // This functionality is not supported for directory buckets.
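Aside: the grant headers documented above all share the same comma-separated type=value grantee syntax. A short sketch of setting them through the generated setters from this file; the canonical user IDs are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Grantees are "type=value" pairs, comma-separated, per the header
	// syntax described above.
	in := (&s3.CreateMultipartUploadInput{}).
		SetBucket("example-bucket").
		SetKey("example-key").
		SetGrantRead(`id="11112222333", id="444455556666"`).
		SetGrantReadACP(`uri="http://acs.amazonaws.com/groups/global/AllUsers"`)

	fmt.Println(in.Validate() == nil) // Bucket and Key are set, so this passes
}
```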
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the Amazon Web Services KMS Encryption Context to use for object + // encryption. The value of this header is a base64-encoded UTF-8 string holding + // JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption + // customer managed key to use for object encryption. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadInput's + // String and GoString methods. 
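Aside: the SSECustomerKey/SSECustomerKeyMD5 pair above follows the usual SSE-C convention: the raw 256-bit key is passed as-is (the marshal-as:"blob" tag means the SDK base64-encodes it on the wire) together with the base64 of its RFC 1321 MD5 digest so S3 can detect transmission errors. A sketch of deriving both values, with a hard-coded key used purely for illustration:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	key := make([]byte, 32) // 256-bit customer key; all zeros for illustration only

	sum := md5.Sum(key)
	keyMD5 := base64.StdEncoding.EncodeToString(sum[:])

	in := (&s3.CreateMultipartUploadInput{}).
		SetBucket("example-bucket").
		SetKey("example-key").
		SetSSECustomerAlgorithm("AES256").
		SetSSECustomerKey(string(key)). // raw key bytes; marshaled as a blob header
		SetSSECustomerKeyMD5(keyMD5)

	fmt.Println(in.SSECustomerKeyMD5 != nil)
}
```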
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // By default, Amazon S3 uses the STANDARD Storage Class to store newly created + // objects. The STANDARD storage class provides high durability and high availability. + // Depending on performance needs, you can specify a different Storage Class. + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // + // This functionality is not supported for directory buckets. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. 
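Aside: the Tagging member above expects the tag-set encoded as URL query parameters, which net/url produces directly. A small sketch:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Encode the tag-set as URL query parameters, e.g. "env=prod&team=storage".
	tags := url.Values{}
	tags.Set("env", "prod")
	tags.Set("team", "storage")

	tagging := tags.Encode()
	fmt.Println(tagging) // pass this string to SetTagging on the upload input
}
```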
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadInput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetChecksumAlgorithm(v string) *CreateMultipartUploadInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *CreateMultipartUploadInput) SetExpectedBucketOwner(v string) *CreateMultipartUploadInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. 
+func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *CreateMultipartUploadInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateMultipartUploadInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
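Aside: updateArnableField (doc comment just above) gets its copy-on-write behavior from Go's value receivers: because s is received by value, reassigning s.Bucket mutates only the local copy, and returning &s hands the caller a modified copy while the original input stays untouched. A standalone sketch of the same pattern:

```go
package main

import "fmt"

type input struct{ Bucket *string }

// withBucket mirrors updateArnableField: the value receiver copies the
// struct, so the caller's input is never mutated.
func (s input) withBucket(v string) *input {
	s.Bucket = &v
	return &s
}

func main() {
	orig := "arn:aws:s3:::example" // placeholder ARN-style value
	in := input{Bucket: &orig}

	out := in.withBucket("example") // backfill the parsed resource name

	fmt.Println(*in.Bucket)  // arn:aws:s3:::example (unchanged)
	fmt.Println(*out.Bucket) // example
}
```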
+func (s CreateMultipartUploadInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, the response includes this header. The header indicates + // when the initiated multipart upload becomes eligible for an abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + // + // The response also includes the x-amz-abort-rule-id header that provides the + // ID of the lifecycle configuration rule that defines the abort action. + // + // This functionality is not supported for directory buckets. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // the applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. Does + // not return the access point ARN or access point alias if used. + // + // Access points are not supported by directory buckets. + Bucket *string `locationName:"Bucket" type:"string"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. 
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateMultipartUploadOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
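For context, a minimal sketch of how the generated input setters above are typically chained, here to start an SSE-KMS multipart upload and read the UploadId back from CreateMultipartUploadOutput. The bucket, key, and KMS alias names are illustrative only, and the snippet assumes credentials are available to the default session:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        // Each setter stores a pointer to its argument and returns the
        // input, so the calls chain.
        in := new(s3.CreateMultipartUploadInput).
            SetBucket("example-bucket").
            SetKey("backups/db.tar.gz").
            SetServerSideEncryption(s3.ServerSideEncryptionAwsKms).
            SetSSEKMSKeyId("alias/example-key")

        out, err := svc.CreateMultipartUpload(in)
        if err != nil {
            log.Fatal(err)
        }
        // UploadId is required by the follow-up UploadPart and
        // CompleteMultipartUpload calls.
        fmt.Println(aws.StringValue(out.UploadId))
    }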
+func (s *CreateMultipartUploadOutput) SetChecksumAlgorithm(v string) *CreateMultipartUploadOutput { + s.ChecksumAlgorithm = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +type CreateSessionInput struct { + _ struct{} `locationName:"CreateSessionRequest" type:"structure"` + + // The name of the bucket that you create a session for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies the mode of the session that will be created, either ReadWrite + // or ReadOnly. By default, a ReadWrite session is created. A ReadWrite session + // is capable of executing all the Zonal endpoint APIs on a directory bucket. + // A ReadOnly session is constrained to execute the following Zonal endpoint + // APIs: GetObject, HeadObject, ListObjectsV2, GetObjectAttributes, ListParts, + // and ListMultipartUploads. + SessionMode *string `location:"header" locationName:"x-amz-create-session-mode" type:"string" enum:"SessionMode"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
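The session modes described above pair naturally with the setters that follow. A minimal sketch of requesting a read-only session on a directory bucket, assuming the SDK's generated SessionMode constants and an illustrative bucket name:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))

        in := new(s3.CreateSessionInput).
            SetBucket("example--usw2-az1--x-s3"). // directory-bucket naming format
            SetSessionMode(s3.SessionModeReadOnly)

        out, err := svc.CreateSession(in)
        if err != nil {
            log.Fatal(err)
        }
        // out.Credentials carries the temporary security credentials
        // for the created session.
        _ = out.Credentials
    }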
+func (s *CreateSessionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSessionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CreateSessionInput) SetBucket(v string) *CreateSessionInput { + s.Bucket = &v + return s +} + +func (s *CreateSessionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetSessionMode sets the SessionMode field's value. +func (s *CreateSessionInput) SetSessionMode(v string) *CreateSessionInput { + s.SessionMode = &v + return s +} + +func (s *CreateSessionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *CreateSessionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s CreateSessionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type CreateSessionOutput struct { + _ struct{} `type:"structure"` + + // The established temporary security credentials for the created session. + // + // Credentials is a required field + Credentials *SessionCredentials `locationName:"Credentials" type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateSessionOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *CreateSessionOutput) SetCredentials(v *SessionCredentials) *CreateSessionOutput { + s.Credentials = v + return s +} + +// The container element for specifying the default Object Lock retention settings +// for new objects placed in the specified bucket. +// +// - The DefaultRetention settings require both a mode and a period. +// +// - The DefaultRetention period can be either Days or Years but you must +// select one. You cannot specify Days and Years at the same time. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + // Must be used with Mode. + Days *int64 `type:"integer"` + + // The default Object Lock retention mode you want to apply to new objects placed + // in the specified bucket. Must be used with either Days or Years.
+ Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. + // Must be used with Mode. + Years *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + +// Container for the objects to delete. +type Delete struct { + _ struct{} `type:"structure"` + + // The object to delete. + // + // Directory buckets - For directory buckets, an object that's composed entirely + // of whitespace characters is not supported by the DeleteObjects API operation. + // The request will receive a 400 Bad Request error and none of the objects + // in the request will be deleted. + // + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. +func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. 
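To tie the two types above together: DefaultRetention pairs Mode with exactly one of Days or Years, and Delete carries the object list (plus optional quiet mode) for a DeleteObjects call. A minimal sketch with illustrative bucket and key names:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Mode plus Days; setting Years as well would be rejected by S3.
        _ = new(s3.DefaultRetention).
            SetMode(s3.ObjectLockRetentionModeGovernance).
            SetDays(30)

        svc := s3.New(session.Must(session.NewSession()))

        // Quiet mode suppresses per-object success entries in the
        // response, leaving only errors.
        del := new(s3.Delete).
            SetObjects([]*s3.ObjectIdentifier{
                new(s3.ObjectIdentifier).SetKey("logs/2024-04-01.gz"),
                new(s3.ObjectIdentifier).SetKey("logs/2024-04-02.gz"),
            }).
            SetQuiet(true)

        if _, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
            Bucket: aws.String("example-bucket"),
            Delete: del,
        }); err != nil {
            log.Fatal(err)
        }
    }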
+func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. 
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketCorsInput struct { + _ struct{} `locationName:"DeleteBucketCorsRequest" type:"structure"` + + // Specifies the bucket whose cors configuration is being deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
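The ExpectedBucketOwner header documented above acts as an ownership guard across these bucket-level delete operations. A minimal sketch, with illustrative bucket and account ID values; if the account does not own the bucket, the call fails with 403 Forbidden:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        svc := s3.New(session.Must(session.NewSession()))
        if _, err := svc.DeleteBucketCors(&s3.DeleteBucketCorsInput{
            Bucket:              aws.String("example-bucket"),
            ExpectedBucketOwner: aws.String("111122223333"),
        }); err != nil {
            log.Fatal(err) // 403 Forbidden on an owner mismatch
        }
    }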
+func (s *DeleteBucketCorsInput) SetExpectedBucketOwner(v string) *DeleteBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +type DeleteBucketEncryptionInput struct { + _ struct{} `locationName:"DeleteBucketEncryptionRequest" type:"structure"` + + // The name of the bucket containing the server-side encryption configuration + // to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketEncryptionInput) SetExpectedBucketOwner(v string) *DeleteBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `locationName:"DeleteBucketRequest" type:"structure"` + + // Specifies the bucket being deleted. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInput) SetExpectedBucketOwner(v string) *DeleteBucketInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetBucket(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *DeleteBucketIntelligentTieringConfigurationInput) SetId(v string) *DeleteBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
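Since this String/GoString boilerplate repeats for every type here, one concrete illustration of the redaction it describes: printing an input whose sensitive member is set. The names are illustrative only:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        in := new(s3.CreateMultipartUploadInput).
            SetBucket("example-bucket").
            SetSSEKMSKeyId("alias/example-key") // tagged sensitive:"true" above
        fmt.Println(in) // SSEKMSKeyId's value is printed as "sensitive"
    }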
+func (s DeleteBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketLifecycleInput struct { + _ struct{} `locationName:"DeleteBucketLifecycleRequest" type:"structure"` + + // The bucket name of the lifecycle to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *DeleteBucketLifecycleInput) SetExpectedBucketOwner(v string) *DeleteBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"DeleteBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
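The SDK runs each input's Validate method before sending a request, so required-field violations surface as client-side errors; Validate can also be called directly. A minimal sketch using the input type defined just below:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        in := &s3.DeleteBucketMetricsConfigurationInput{} // Bucket and Id left unset
        if err := in.Validate(); err != nil {
            fmt.Println(err) // a request.ErrInvalidParams naming both missing fields
        }
    }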
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *DeleteBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *DeleteBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +type DeleteBucketOwnershipControlsInput struct { + _ struct{} `locationName:"DeleteBucketOwnershipControlsRequest" type:"structure"` + + // The Amazon S3 bucket whose OwnershipControls you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketOwnershipControlsInput) SetBucket(v string) *DeleteBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *DeleteBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
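These unexported ARN helpers decide whether the Bucket member carries an ARN rather than a plain bucket name, with updateArnableField backfilling the parsed resource name into a copy of the input; hasEndpointARN reduces to the exported arn.IsARN check, which can be reproduced directly:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/arn"
    )

    func main() {
        fmt.Println(arn.IsARN("arn:aws:s3:us-west-2:111122223333:accesspoint/example-ap")) // true
        fmt.Println(arn.IsARN("example-bucket"))                                           // false
    }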
+func (s DeleteBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type DeleteBucketPolicyInput struct { + _ struct{} `locationName:"DeleteBucketPolicyRequest" type:"structure"` + + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketPolicyInput) SetExpectedBucketOwner(v string) *DeleteBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +type DeleteBucketReplicationInput struct { + _ struct{} `locationName:"DeleteBucketReplicationRequest" type:"structure"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketReplicationInput) SetExpectedBucketOwner(v string) *DeleteBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +type DeleteBucketTaggingInput struct { + _ struct{} `locationName:"DeleteBucketTaggingRequest" type:"structure"` + + // The bucket that has the tag set to be removed. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketTaggingInput) SetExpectedBucketOwner(v string) *DeleteBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +type DeleteBucketWebsiteInput struct { + _ struct{} `locationName:"DeleteBucketWebsiteRequest" type:"structure"` + + // The bucket name for which you want to remove the website configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteBucketWebsiteInput) SetExpectedBucketOwner(v string) *DeleteBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeleteBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteBucketWebsiteOutput) GoString() string {
+	return s.String()
+}
+
+// Information about the delete marker.
+type DeleteMarkerEntry struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether the object is (true) or is not (false) the latest version
+	// of an object.
+	IsLatest *bool `type:"boolean"`
+
+	// The object key.
+	Key *string `min:"1" type:"string"`
+
+	// Date and time when the object was last modified.
+	LastModified *time.Time `type:"timestamp"`
+
+	// The account that created the delete marker.
+	Owner *Owner `type:"structure"`
+
+	// Version ID of an object.
+	VersionId *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerEntry) GoString() string {
+	return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+	s.IsLatest = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+	s.Key = &v
+	return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+	s.LastModified = &v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+	s.Owner = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+	s.VersionId = &v
+	return s
+}
+
+// Specifies whether Amazon S3 replicates delete markers. If you specify a Filter
+// in your replication configuration, you must also include a DeleteMarkerReplication
+// element. If your Filter includes a Tag element, the DeleteMarkerReplication
+// Status must be set to Disabled, because Amazon S3 does not support replicating
+// delete markers for tag-based rules. For an example configuration, see Basic
+// Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config).
+//
+// For more information about delete marker replication, see Basic Rule Configuration
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html).
+//
+// If you are using an earlier version of the replication configuration, Amazon
+// S3 handles replication of delete markers differently. For more information,
+// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations).
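+//
+// Example (editor's illustrative sketch; the tag and bucket names are
+// placeholders): a tag-based rule, which therefore must disable delete
+// marker replication.
+//
+//	rule := &s3.ReplicationRule{
+//	    Status: aws.String(s3.ReplicationRuleStatusEnabled),
+//	    Filter: &s3.ReplicationRuleFilter{
+//	        Tag: &s3.Tag{Key: aws.String("stage"), Value: aws.String("prod")},
+//	    },
+//	    DeleteMarkerReplication: &s3.DeleteMarkerReplication{
+//	        Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
+//	    },
+//	    Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::replica-bucket")},
+//	}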
+type DeleteMarkerReplication struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether to replicate delete markers.
+	Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s DeleteMarkerReplication) GoString() string {
+	return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication {
+	s.Status = &v
+	return s
+}
+
+type DeleteObjectInput struct {
+	_ struct{} `locationName:"DeleteObjectRequest" type:"structure"`
+
+	// The name of the bucket containing the object.
+	//
+	// Directory buckets - When you use this operation with a directory bucket,
+	// you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+	// Path-style requests are not supported. Directory bucket names must be unique
+	// in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+	// (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+	// bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Access points - When you use this action with an access point, you must provide
+	// the alias of the access point in place of the bucket name or specify the
+	// access point ARN. When using the access point ARN, you must direct requests
+	// to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Access points and Object Lambda access points are not supported by directory
+	// buckets.
+	//
+	// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+	// must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+	// takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+	// When you use this action with S3 on Outposts through the Amazon Web Services
+	// SDKs, you provide the Outposts access point ARN in place of the bucket name.
+	// For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether S3 Object Lock should bypass Governance-mode restrictions + // to process this operation. To use this header, you must have the s3:BypassGovernanceRetention + // permission. + // + // This functionality is not supported for directory buckets. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. Required to + // permanently delete a versioned object if versioning is configured with MFA + // delete enabled. + // + // This functionality is not supported for directory buckets. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
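+//
+// Example (editor's illustrative sketch; "svc", the bucket, key, and version
+// ID are placeholders): permanently deleting a single object version with an
+// existing *s3.S3 client.
+//
+//	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//	    Bucket:    aws.String("amzn-s3-demo-bucket"),
+//	    Key:       aws.String("path/to/object.txt"),
+//	    VersionId: aws.String("object-version-id"),
+//	})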
+func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectInput) SetExpectedBucketOwner(v string) *DeleteObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object version that was permanently deleted + // was (true) or was not (false) a delete marker before deletion. In a simple + // DELETE, this header indicates whether (true) or not (false) the current version + // of the object is a delete marker. + // + // This functionality is not supported for directory buckets. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +type DeleteObjectTaggingInput struct { + _ struct{} `locationName:"DeleteObjectTaggingRequest" type:"structure"` + + // The bucket name containing the objects from which to remove the tags. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key that identifies the object in the bucket from which to remove all + // tags. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectTaggingInput) SetExpectedBucketOwner(v string) *DeleteObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *DeleteObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
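+//
+// For illustration (editor's sketch; the ARN is a placeholder): if Bucket
+// holds an access point ARN, the SDK's endpoint handling parses the ARN and
+// uses this method to write the extracted resource name back into a copy.
+//
+//	in := DeleteObjectTaggingInput{Bucket: aws.String("arn:aws:s3:us-west-2:123456789012:accesspoint/my-ap")}
+//	copied, _ := in.updateArnableField("my-ap")
+//	// copied.(*DeleteObjectTaggingInput).Bucket now points at "my-ap";
+//	// in.Bucket still holds the original ARN.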
+func (s DeleteObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +type DeleteObjectsInput struct { + _ struct{} `locationName:"DeleteObjectsRequest" type:"structure" payload:"Delete"` + + // The bucket name containing the objects to delete. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies whether you want to delete this object even if it has a Governance-type
+	// Object Lock in place. To use this header, you must have the s3:BypassGovernanceRetention
+	// permission.
+	//
+	// This functionality is not supported for directory buckets.
+	BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+	// Indicates the algorithm used to create the checksum for the object when you
+	// use the SDK. This header will not provide any additional functionality if
+	// you don't use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon
+	// S3 fails the request with the HTTP status code 400 Bad Request.
+	//
+	// For the x-amz-checksum-algorithm header, replace algorithm with the supported
+	// algorithm from the following list:
+	//
+	//    * CRC32
+	//
+	//    * CRC32C
+	//
+	//    * SHA1
+	//
+	//    * SHA256
+	//
+	// For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If the individual checksum value you provide through x-amz-checksum-algorithm
+	// doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm,
+	// Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum
+	// algorithm that matches the provided value in x-amz-checksum-algorithm.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If
+	// a value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// Container for the request.
+	//
+	// Delete is a required field
+	Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// The concatenation of the authentication device's serial number, a space,
+	// and the value that is displayed on your authentication device. Required to
+	// permanently delete a versioned object if versioning is configured with MFA
+	// delete enabled.
+	//
+	// When performing the DeleteObjects operation on an MFA delete enabled bucket,
+	// which attempts to delete the specified versioned objects, you must include
+	// an MFA token.
If you don't provide an MFA token, the entire request will + // fail, even if there are non-versioned objects that you are trying to delete. + // If you provide an invalid token, whether there are versioned object keys + // in the request or not, the entire Multi-Object Delete request will fail. + // For information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *DeleteObjectsInput) SetChecksumAlgorithm(v string) *DeleteObjectsInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetDelete sets the Delete field's value. 
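+//
+// Example (editor's illustrative sketch; "svc" and the names are placeholders):
+// batch-deleting two keys with the fluent setters.
+//
+//	input := (&s3.DeleteObjectsInput{}).
+//	    SetBucket("amzn-s3-demo-bucket").
+//	    SetDelete(&s3.Delete{
+//	        Objects: []*s3.ObjectIdentifier{
+//	            {Key: aws.String("a.txt")},
+//	            {Key: aws.String("b.txt")},
+//	        },
+//	        Quiet: aws.Bool(true),
+//	    })
+//	_, err := svc.DeleteObjects(input)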
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeleteObjectsInput) SetExpectedBucketOwner(v string) *DeleteObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *DeleteObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeleteObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeleteObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + // Container element for a successful delete. It identifies the object that + // was successfully deleted. + Deleted []*DeletedObject `type:"list" flattened:"true"` + + // Container for a failed delete action that describes the object that Amazon + // S3 attempted to delete and the error it encountered. + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +type DeletePublicAccessBlockInput struct { + _ struct{} `locationName:"DeletePublicAccessBlockRequest" type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *DeletePublicAccessBlockInput) SetExpectedBucketOwner(v string) *DeletePublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *DeletePublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s DeletePublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Information about the deleted object. +type DeletedObject struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object version that was permanently deleted + // was (true) or was not (false) a delete marker before deletion. In a simple + // DELETE, this header indicates whether (true) or not (false) the current version + // of the object is a delete marker. + // + // This functionality is not supported for directory buckets. + DeleteMarker *bool `type:"boolean"` + + // The version ID of the delete marker created as a result of the DELETE operation. + // If you delete a specific object version, the value returned by this header + // is the version ID of the object version deleted. + // + // This functionality is not supported for directory buckets. + DeleteMarkerVersionId *string `type:"string"` + + // The name of the deleted object. + Key *string `min:"1" type:"string"` + + // The version ID of the deleted object. + // + // This functionality is not supported for directory buckets. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DeletedObject) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} + +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} + +// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket and S3 Replication Time Control (S3 RTC). +type Destination struct { + _ struct{} `type:"structure"` + + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the Amazon Web Services account that owns the destination bucket. If this + // is not specified in the replication configuration, the replicas are owned + // by same Amazon Web Services account that owns the source object. + AccessControlTranslation *AccessControlTranslation `type:"structure"` + + // Destination bucket owner account ID. 
In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the Amazon Web Services account + // that owns the destination bucket by specifying the AccessControlTranslation + // property, this is the account ID of the destination bucket owner. For more + // information, see Replication Additional Configuration: Changing the Replica + // Owner (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html) + // in the Amazon S3 User Guide. + Account *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store the results. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // A container specifying replication metrics-related settings enabling replication + // metrics and events. + Metrics *Metrics `type:"structure"` + + // A container specifying S3 Replication Time Control (S3 RTC), including whether + // S3 RTC is enabled and the time when all objects and operations on objects + // must be replicated. Must be specified together with a Metrics block. + ReplicationTime *ReplicationTime `type:"structure"` + + // The storage class to use when replicating objects, such as S3 Standard or + // reduced redundancy. By default, Amazon S3 uses the storage class of the source + // object to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon S3 API Reference. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } + if s.Metrics != nil { + if err := s.Metrics.Validate(); err != nil { + invalidParams.AddNested("Metrics", err.(request.ErrInvalidParams)) + } + } + if s.ReplicationTime != nil { + if err := s.ReplicationTime.Validate(); err != nil { + invalidParams.AddNested("ReplicationTime", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlTranslation sets the AccessControlTranslation field's value. 
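+//
+// Example (editor's illustrative sketch; the account ID and bucket ARN are
+// placeholders): a cross-account destination that reassigns replica ownership
+// to the destination account.
+//
+//	dest := (&s3.Destination{}).
+//	    SetBucket("arn:aws:s3:::replica-bucket").
+//	    SetAccount("123456789012").
+//	    SetAccessControlTranslation(&s3.AccessControlTranslation{
+//	        Owner: aws.String(s3.OwnerOverrideDestination),
+//	    })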
+func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + +// SetMetrics sets the Metrics field's value. +func (s *Destination) SetMetrics(v *Metrics) *Destination { + s.Metrics = v + return s +} + +// SetReplicationTime sets the ReplicationTime field's value. +func (s *Destination) SetReplicationTime(v *ReplicationTime) *Destination { + s.ReplicationTime = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Contains the type of server-side encryption used. +type Encryption struct { + _ struct{} `type:"structure"` + + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (for example, AES256, aws:kms). + // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` + + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` + + // If the encryption type is aws:kms, this optional value specifies the ID of + // the symmetric encryption customer managed key to use for encryption of job + // results. Amazon S3 only supports symmetric encryption KMS keys. For more + // information, see Asymmetric keys in KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Encryption's + // String and GoString methods. + KMSKeyId *string `type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Encryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Encryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncryptionType sets the EncryptionType field's value. 
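+//
+// Example (editor's illustrative sketch; the key ID is a placeholder):
+// SSE-KMS settings for restore-job output.
+//
+//	enc := (&s3.Encryption{}).
+//	    SetEncryptionType(s3.ServerSideEncryptionAwsKms).
+//	    SetKMSKeyId("1234abcd-12ab-34cd-56ef-1234567890ab")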
+func (s *Encryption) SetEncryptionType(v string) *Encryption {
+	s.EncryptionType = &v
+	return s
+}
+
+// SetKMSContext sets the KMSContext field's value.
+func (s *Encryption) SetKMSContext(v string) *Encryption {
+	s.KMSContext = &v
+	return s
+}
+
+// SetKMSKeyId sets the KMSKeyId field's value.
+func (s *Encryption) SetKMSKeyId(v string) *Encryption {
+	s.KMSKeyId = &v
+	return s
+}
+
+// Specifies encryption-related information for an Amazon S3 bucket that is
+// a destination for replicated objects.
+type EncryptionConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+	// Services KMS key stored in Amazon Web Services Key Management Service (KMS)
+	// for the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+	// Amazon S3 only supports symmetric encryption KMS keys. For more information,
+	// see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html)
+	// in the Amazon Web Services Key Management Service Developer Guide.
+	ReplicaKmsKeyID *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EncryptionConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EncryptionConfiguration) GoString() string {
+	return s.String()
+}
+
+// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value.
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration {
+	s.ReplicaKmsKeyID = &v
+	return s
+}
+
+// A message that indicates the request is complete and no more messages will
+// be sent. You should not assume that the request is complete until the client
+// receives an EndEvent.
+type EndEvent struct {
+	_ struct{} `locationName:"EndEvent" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EndEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s EndEvent) GoString() string {
+	return s.String()
+}
+
+// The EndEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *EndEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *EndEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
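+//
+// Editorial aside, not generated SDK code: a minimal sketch of draining a
+// SelectObjectContent event stream, treating EndEvent as the completion
+// signal its documentation above describes. resp stands in for a
+// hypothetical *SelectObjectContentOutput:
+//
+//	for ev := range resp.EventStream.Events() {
+//		switch e := ev.(type) {
+//		case *RecordsEvent:
+//			_ = e.Payload // a chunk of query results
+//		case *EndEvent:
+//			// the request is complete; it is now safe to stop reading
+//		}
+//	}
+//	if err := resp.EventStream.Err(); err != nil {
+//		// handle a stream-level error
+//	}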
+func (s *EndEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + return msg, err +} + +// Container for all error elements. +type Error struct { + _ struct{} `type:"structure"` + + // The error code is a string that uniquely identifies an error condition. It + // is meant to be read and understood by programs that detect and handle errors + // by type. The following is a list of Amazon S3 error codes. For more information, + // see Error responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html). + // + // * Code: AccessDenied Description: Access Denied HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AccountProblem Description: There is a problem with your Amazon + // Web Services account that prevents the action from completing successfully. + // Contact Amazon Web Services Support for further assistance. HTTP Status + // Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AllAccessDisabled Description: All access to this Amazon S3 resource + // has been disabled. Contact Amazon Web Services Support for further assistance. + // HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: AmbiguousGrantByEmailAddress Description: The email address you + // provided is associated with more than one account. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: AuthorizationHeaderMalformed Description: The authorization header + // you provided is invalid. HTTP Status Code: 400 Bad Request HTTP Status + // Code: N/A + // + // * Code: BadDigest Description: The Content-MD5 you specified did not match + // what we received. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: BucketAlreadyExists Description: The requested bucket name is + // not available. The bucket namespace is shared by all users of the system. + // Please select a different name and try again. HTTP Status Code: 409 Conflict + // SOAP Fault Code Prefix: Client + // + // * Code: BucketAlreadyOwnedByYou Description: The bucket you tried to create + // already exists, and you own it. Amazon S3 returns this error in all Amazon + // Web Services Regions except in the North Virginia Region. For legacy compatibility, + // if you re-create an existing bucket that you already own in the North + // Virginia Region, Amazon S3 returns 200 OK and resets the bucket access + // control lists (ACLs). Code: 409 Conflict (in all Regions except the North + // Virginia Region) SOAP Fault Code Prefix: Client + // + // * Code: BucketNotEmpty Description: The bucket you tried to delete is + // not empty. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: CredentialsNotSupported Description: This request does not support + // credentials. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: CrossLocationLoggingProhibited Description: Cross-location logging + // not allowed. Buckets in one geographic location cannot log information + // to a bucket in another location. HTTP Status Code: 403 Forbidden SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooSmall Description: Your proposed upload is smaller than + // the minimum allowed object size. 
HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: EntityTooLarge Description: Your proposed upload exceeds the maximum + // allowed object size. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: ExpiredToken Description: The provided token has expired. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IllegalVersioningConfigurationException Description: Indicates + // that the versioning configuration specified in the request is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncompleteBody Description: You did not provide the number of + // bytes specified by the Content-Length HTTP header HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: IncorrectNumberOfFilesInPostRequest Description: POST requires + // exactly one file upload per request. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: InlineDataTooLarge Description: Inline data exceeds the maximum + // allowed size. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InternalError Description: We encountered an internal error. Please + // try again. HTTP Status Code: 500 Internal Server Error SOAP Fault Code + // Prefix: Server + // + // * Code: InvalidAccessKeyId Description: The Amazon Web Services access + // key ID you provided does not exist in our records. HTTP Status Code: 403 + // Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidAddressingHeader Description: You must specify the Anonymous + // role. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: InvalidArgument Description: Invalid Argument HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketName Description: The specified bucket is not valid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidBucketState Description: The request is not valid with + // the current state of the bucket. HTTP Status Code: 409 Conflict SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidDigest Description: The Content-MD5 you specified is not + // valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidEncryptionAlgorithmError Description: The encryption request + // you specified is not valid. The valid value is AES256. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidLocationConstraint Description: The specified location + // constraint is not valid. For more information about Regions, see How to + // Select a Region for Your Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro). + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidObjectState Description: The action is not valid for the + // current state of the object. HTTP Status Code: 403 Forbidden SOAP Fault + // Code Prefix: Client + // + // * Code: InvalidPart Description: One or more of the specified parts could + // not be found. The part might not have been uploaded, or the specified + // entity tag might not have matched the part's entity tag. HTTP Status Code: + // 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPartOrder Description: The list of parts was not in ascending + // order. 
Parts list must be specified in order by part number. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidPayer Description: All access to this object has been disabled. + // Please contact Amazon Web Services Support for further assistance. HTTP + // Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: InvalidPolicyDocument Description: The content of the form does + // not meet the conditions specified in the policy document. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidRange Description: The requested range cannot be satisfied. + // HTTP Status Code: 416 Requested Range Not Satisfiable SOAP Fault Code + // Prefix: Client + // + // * Code: InvalidRequest Description: Please use AWS4-HMAC-SHA256. HTTP + // Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: SOAP requests must be made over an + // HTTPS connection. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with non-DNS compliant names. HTTP Status Code: + // 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported for buckets with periods (.) in their names. HTTP Status + // Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate endpoint + // only supports virtual style requests. HTTP Status Code: 400 Bad Request + // Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is not + // configured on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Accelerate is disabled + // on this bucket. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration is + // not supported on this bucket. Contact Amazon Web Services Support for + // more information. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidRequest Description: Amazon S3 Transfer Acceleration cannot + // be enabled on this bucket. Contact Amazon Web Services Support for more + // information. HTTP Status Code: 400 Bad Request Code: N/A + // + // * Code: InvalidSecurity Description: The provided security credentials + // are not valid. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidSOAPRequest Description: The SOAP request body is invalid. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidStorageClass Description: The storage class you specified + // is not valid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: InvalidTargetBucketForLogging Description: The target bucket for + // logging does not exist, is not owned by you, or does not have the appropriate + // grants for the log-delivery group. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: InvalidToken Description: The provided token is malformed or otherwise + // invalid. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: InvalidURI Description: Couldn't parse the specified URI. HTTP + // Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: KeyTooLongError Description: Your key is too long. 
HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedACLError Description: The XML you provided was not well-formed + // or did not validate against our published schema. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MalformedPOSTRequest Description: The body of your POST request + // is not well-formed multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: MalformedXML Description: This happens when the user sends malformed + // XML (XML that doesn't conform to the published XSD) for the configuration. + // The error message is, "The XML you provided was not well-formed or did + // not validate against our published schema." HTTP Status Code: 400 Bad + // Request SOAP Fault Code Prefix: Client + // + // * Code: MaxMessageLengthExceeded Description: Your request was too big. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MaxPostPreDataLengthExceededError Description: Your POST request + // fields preceding the upload file were too large. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: MetadataTooLarge Description: Your metadata headers exceed the + // maximum allowed metadata size. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: MethodNotAllowed Description: The specified method is not allowed + // against this resource. HTTP Status Code: 405 Method Not Allowed SOAP Fault + // Code Prefix: Client + // + // * Code: MissingAttachment Description: A SOAP attachment was expected, + // but none were found. HTTP Status Code: N/A SOAP Fault Code Prefix: Client + // + // * Code: MissingContentLength Description: You must provide the Content-Length + // HTTP header. HTTP Status Code: 411 Length Required SOAP Fault Code Prefix: + // Client + // + // * Code: MissingRequestBodyError Description: This happens when the user + // sends an empty XML document as a request. The error message is, "Request + // body is empty." HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: + // Client + // + // * Code: MissingSecurityElement Description: The SOAP 1.1 request is missing + // a security element. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: MissingSecurityHeader Description: Your request is missing a required + // header. HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: NoLoggingStatusForKey Description: There is no such thing as a + // logging status subresource for a key. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucket Description: The specified bucket does not exist. + // HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchBucketPolicy Description: The specified bucket does not + // have a bucket policy. HTTP Status Code: 404 Not Found SOAP Fault Code + // Prefix: Client + // + // * Code: NoSuchKey Description: The specified key does not exist. HTTP + // Status Code: 404 Not Found SOAP Fault Code Prefix: Client + // + // * Code: NoSuchLifecycleConfiguration Description: The lifecycle configuration + // does not exist. HTTP Status Code: 404 Not Found SOAP Fault Code Prefix: + // Client + // + // * Code: NoSuchUpload Description: The specified multipart upload does + // not exist. The upload ID might be invalid, or the multipart upload might + // have been aborted or completed. 
HTTP Status Code: 404 Not Found SOAP Fault + // Code Prefix: Client + // + // * Code: NoSuchVersion Description: Indicates that the version ID specified + // in the request does not match an existing version. HTTP Status Code: 404 + // Not Found SOAP Fault Code Prefix: Client + // + // * Code: NotImplemented Description: A header you provided implies functionality + // that is not implemented. HTTP Status Code: 501 Not Implemented SOAP Fault + // Code Prefix: Server + // + // * Code: NotSignedUp Description: Your account is not signed up for the + // Amazon S3 service. You must sign up before you can use Amazon S3. You + // can sign up at the following URL: Amazon S3 (http://aws.amazon.com/s3) + // HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: OperationAborted Description: A conflicting conditional action + // is currently in progress against this resource. Try again. HTTP Status + // Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: PermanentRedirect Description: The bucket you are attempting to + // access must be addressed using the specified endpoint. Send all future + // requests to this endpoint. HTTP Status Code: 301 Moved Permanently SOAP + // Fault Code Prefix: Client + // + // * Code: PreconditionFailed Description: At least one of the preconditions + // you specified did not hold. HTTP Status Code: 412 Precondition Failed + // SOAP Fault Code Prefix: Client + // + // * Code: Redirect Description: Temporary redirect. HTTP Status Code: 307 + // Moved Temporarily SOAP Fault Code Prefix: Client + // + // * Code: RestoreAlreadyInProgress Description: Object restore is already + // in progress. HTTP Status Code: 409 Conflict SOAP Fault Code Prefix: Client + // + // * Code: RequestIsNotMultiPartContent Description: Bucket POST must be + // of the enclosure-type multipart/form-data. HTTP Status Code: 400 Bad Request + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeout Description: Your socket connection to the server + // was not read from or written to within the timeout period. HTTP Status + // Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: RequestTimeTooSkewed Description: The difference between the request + // time and the server's time is too large. HTTP Status Code: 403 Forbidden + // SOAP Fault Code Prefix: Client + // + // * Code: RequestTorrentOfBucketError Description: Requesting the torrent + // file of a bucket is not permitted. HTTP Status Code: 400 Bad Request SOAP + // Fault Code Prefix: Client + // + // * Code: SignatureDoesNotMatch Description: The request signature we calculated + // does not match the signature you provided. Check your Amazon Web Services + // secret access key and signing method. For more information, see REST Authentication + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) + // and SOAP Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html) + // for details. HTTP Status Code: 403 Forbidden SOAP Fault Code Prefix: Client + // + // * Code: ServiceUnavailable Description: Service is unable to handle request. + // HTTP Status Code: 503 Service Unavailable SOAP Fault Code Prefix: Server + // + // * Code: SlowDown Description: Reduce your request rate. HTTP Status Code: + // 503 Slow Down SOAP Fault Code Prefix: Server + // + // * Code: TemporaryRedirect Description: You are being redirected to the + // bucket while DNS updates. 
HTTP Status Code: 307 Moved Temporarily SOAP + // Fault Code Prefix: Client + // + // * Code: TokenRefreshRequired Description: The provided token must be refreshed. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: TooManyBuckets Description: You have attempted to create more + // buckets than allowed. HTTP Status Code: 400 Bad Request SOAP Fault Code + // Prefix: Client + // + // * Code: UnexpectedContent Description: This request does not support content. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UnresolvableGrantByEmailAddress Description: The email address + // you provided does not match any account on record. HTTP Status Code: 400 + // Bad Request SOAP Fault Code Prefix: Client + // + // * Code: UserKeyMustBeSpecified Description: The bucket POST must contain + // the specified field name. If it is specified, check the order of the fields. + // HTTP Status Code: 400 Bad Request SOAP Fault Code Prefix: Client + Code *string `type:"string"` + + // The error key. + Key *string `min:"1" type:"string"` + + // The error message contains a generic description of the error condition in + // English. It is intended for a human audience. Simple programs display the + // message directly to the end user if they encounter an error condition they + // don't know how or don't care to handle. Sophisticated programs with more + // exhaustive error handling and proper internationalization are more likely + // to ignore the error message. + Message *string `type:"string"` + + // The version ID of the error. + // + // This functionality is not supported for directory buckets. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// The error information. +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// A container for specifying the configuration for Amazon EventBridge. +type EventBridgeConfiguration struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EventBridgeConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s EventBridgeConfiguration) GoString() string { + return s.String() +} + +// Optional configuration to replicate existing source bucket objects. For more +// information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) +// in the Amazon S3 User Guide. +type ExistingObjectReplication struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates existing source bucket objects. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExistingObjectReplicationStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExistingObjectReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExistingObjectReplication) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
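+//
+// Editorial aside, not generated SDK code: a minimal sketch showing Validate
+// rejecting an ExistingObjectReplication block whose required Status is unset:
+//
+//	eor := &ExistingObjectReplication{}
+//	if err := eor.Validate(); err != nil {
+//		// err reports the missing required field, Status
+//	}
+//	eor.SetStatus("Enabled") // an ExistingObjectReplicationStatus enum value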
+func (s *ExistingObjectReplication) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExistingObjectReplication"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ExistingObjectReplication) SetStatus(v string) *ExistingObjectReplication { + s.Status = &v + return s +} + +// Specifies the Amazon S3 object key name to filter on. An object key name +// is the name assigned to an object in your Amazon S3 bucket. You specify whether +// to filter on the suffix or prefix of the object key name. A prefix is a specific +// string of characters at the beginning of an object key name, which you can +// use to organize objects. For example, you can start the key names of related +// objects with a prefix, such as 2023- or engineering/. Then, you can use FilterRule +// to find objects in a bucket with key names that have the same prefix. A suffix +// is similar to a prefix, but it is at the end of the object key name instead +// of at the beginning. +type FilterRule struct { + _ struct{} `type:"structure"` + + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + Name *string `type:"string" enum:"FilterRuleName"` + + // The value that the filter searches for in object key names. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"GetBucketAccelerateConfigurationRequest" type:"structure"` + + // The name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetBucketAccelerateConfigurationInput) SetRequestPayer(v string) *GetBucketAccelerateConfigurationInput { + s.RequestPayer = &v + return s +} + +func (s *GetBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
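+//
+// Editorial aside, not generated SDK code: a minimal sketch of reading a
+// bucket's transfer-acceleration status, where svc is a hypothetical *S3
+// client and the bucket name is illustrative:
+//
+//	out, err := svc.GetBucketAccelerateConfiguration(&GetBucketAccelerateConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.Status != nil {
+//		_ = *out.Status // "Enabled" or "Suspended"
+//	}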
+func (s GetBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetRequestCharged(v string) *GetBucketAccelerateConfigurationOutput { + s.RequestCharged = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +type GetBucketAclInput struct { + _ struct{} `locationName:"GetBucketAclRequest" type:"structure"` + + // Specifies the S3 bucket whose ACL is being requested. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAclInput) SetExpectedBucketOwner(v string) *GetBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"GetBucketAnalyticsConfigurationRequest" type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. 
+ AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +type GetBucketCorsInput struct { + _ struct{} `locationName:"GetBucketCorsRequest" type:"structure"` + + // The bucket name for which to get the cors configuration. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
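+//
+// Editorial aside, not generated SDK code: a minimal sketch of listing a
+// bucket's CORS rules, where svc is a hypothetical *S3 client:
+//
+//	out, err := svc.GetBucketCors(&GetBucketCorsInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		for _, rule := range out.CORSRules {
+//			_ = rule.AllowedMethods // e.g. GET, PUT
+//		}
+//	}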
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketCorsInput) SetExpectedBucketOwner(v string) *GetBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + // A set of origins and methods (cross-origin access that you want to allow). + // You can add up to 100 rules to the configuration. + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +type GetBucketEncryptionInput struct { + _ struct{} `locationName:"GetBucketEncryptionRequest" type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketEncryptionInput) SetExpectedBucketOwner(v string) *GetBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type GetBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"GetBucketIntelligentTieringConfigurationRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetBucket(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *GetBucketIntelligentTieringConfigurationInput) SetId(v string) *GetBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure" payload:"IntelligentTieringConfiguration"` + + // Container for S3 Intelligent-Tiering configuration. + IntelligentTieringConfiguration *IntelligentTieringConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. +func (s *GetBucketIntelligentTieringConfigurationOutput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *GetBucketIntelligentTieringConfigurationOutput { + s.IntelligentTieringConfiguration = v + return s +} + +type GetBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"GetBucketInventoryConfigurationRequest" type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"GetBucketLifecycleConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
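Each generated setter returns its receiver, so inputs can be built fluently; the pointer-valued fields are what let the SDK distinguish "unset" from a zero value. A sketch with placeholder values:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// All three values are placeholders; every setter returns the input, so calls chain.
	in := new(s3.GetBucketInventoryConfigurationInput).
		SetBucket("example-bucket").
		SetExpectedBucketOwner("111122223333").
		SetId("inventory-1")
	fmt.Println(in) // String() pretty-prints the input via awsutil.Prettify
}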
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +type GetBucketLifecycleInput struct { + _ struct{} `locationName:"GetBucketLifecycleRequest" type:"structure"` + + // The name of the bucket for which to get the lifecycle information. 
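Because GetBucketLifecycleConfigurationOutput.Rules is a flattened XML list, callers iterate it directly. A sketch under the same placeholder setup as the earlier examples:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.Rules { // the flattened <Rule> list
		fmt.Printf("%s: %s\n", aws.StringValue(r.ID), aws.StringValue(r.Status))
	}
}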
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLifecycleInput) SetExpectedBucketOwner(v string) *GetBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + // Container for a lifecycle rule. + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +type GetBucketLocationInput struct { + _ struct{} `locationName:"GetBucketLocationRequest" type:"structure"` + + // The name of the bucket for which to get the location. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketLocationInput) SetExpectedBucketOwner(v string) *GetBucketLocationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLocationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLocationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLocationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + // Specifies the Region where the bucket resides. For a list of all the Amazon + // S3 supported location constraints by Region, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region). + // Buckets in Region us-east-1 have a LocationConstraint of null. + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +type GetBucketLoggingInput struct { + _ struct{} `locationName:"GetBucketLoggingRequest" type:"structure"` + + // The bucket name for which to get the logging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
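As the GetBucketLocationOutput doc above notes, buckets in us-east-1 report a null LocationConstraint, so the raw string needs mapping before it can be used as a region; the SDK ships s3.NormalizeBucketLocation for exactly that. A sketch with a placeholder bucket:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	// Maps the empty (null) LocationConstraint of us-east-1 buckets to "us-east-1".
	fmt.Println(s3.NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)))
}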
+func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketLoggingInput) SetExpectedBucketOwner(v string) *GetBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon S3 API Reference. + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +type GetBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"GetBucketMetricsConfigurationRequest" type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *GetBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +func (s *GetBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
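The getEndpointARN/hasEndpointARN/updateArnableField trio above is unexported plumbing: when the Bucket member holds an ARN rather than a bucket name, the client re-resolves the endpoint from the ARN and backfills the parsed resource name into a copy of the input (the original, as the comment says, is untouched). Whether S3 accepts an ARN for any particular bucket-level operation is a service-side question; the client-side detection is just arn.IsARN, as this minimal sketch shows with an illustrative access-point ARN:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/arn"
)

func main() {
	// The same arn.IsARN test backs the unexported hasEndpointARN helpers.
	bucket := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"
	fmt.Println(arn.IsARN(bucket)) // true
}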
+func (s GetBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `locationName:"GetBucketNotificationConfigurationRequest" type:"structure"` + + // The name of the bucket for which to get the notification configuration. + // + // When you use this API operation with an access point, provide the alias of + // the access point in place of the bucket name. + // + // When you use this API operation with an Object Lambda access point, provide + // the alias of the Object Lambda access point in place of the bucket name. + // If the Object Lambda access point alias in a request is not valid, the error + // code InvalidAccessPointAliasError is returned. For more information about + // InvalidAccessPointAliasError, see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketNotificationConfigurationRequest) SetExpectedBucketOwner(v string) *GetBucketNotificationConfigurationRequest { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketNotificationConfigurationRequest) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketNotificationConfigurationRequest) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketNotificationConfigurationRequest) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsInput struct { + _ struct{} `locationName:"GetBucketOwnershipControlsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
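Unusually, this operation has no dedicated *Output struct in this file: the v1 client's GetBucketNotificationConfiguration returns the shared *NotificationConfiguration type directly, which is why only the Request type appears here. A sketch, with placeholder region and bucket:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	cfg, err := svc.GetBucketNotificationConfiguration(&s3.GetBucketNotificationConfigurationRequest{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(cfg.TopicConfigurations), len(cfg.QueueConfigurations), len(cfg.LambdaFunctionConfigurations))
}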
+func (s *GetBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketOwnershipControlsInput) SetBucket(v string) *GetBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *GetBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure" payload:"OwnershipControls"` + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // currently in effect for this Amazon S3 bucket. + OwnershipControls *OwnershipControls `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *GetBucketOwnershipControlsOutput) SetOwnershipControls(v *OwnershipControls) *GetBucketOwnershipControlsOutput { + s.OwnershipControls = v + return s +} + +type GetBucketPolicyInput struct { + _ struct{} `locationName:"GetBucketPolicyRequest" type:"structure"` + + // The bucket name to get the bucket policy for. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. 
Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Access points - When you use this API operation with an access point, provide + // the alias of the access point in place of the bucket name. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError, see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *GetBucketPolicyInput) SetExpectedBucketOwner(v string) *GetBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +type GetBucketPolicyStatusInput struct { + _ struct{} `locationName:"GetBucketPolicyStatusRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
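GetBucketPolicyOutput.Policy carries the policy as raw JSON. A retrieval sketch; "NoSuchBucketPolicy" is the error code S3 returns when the bucket exists but has no policy attached, and region/bucket are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")})))
	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NoSuchBucketPolicy" {
		fmt.Println("bucket has no policy attached")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Policy)) // the policy JSON document
}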
+func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { + s.Bucket = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketPolicyStatusInput) SetExpectedBucketOwner(v string) *GetBucketPolicyStatusInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketPolicyStatusInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketPolicyStatusInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketPolicyStatusInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketPolicyStatusOutput) GoString() string { + return s.String() +} + +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v + return s +} + +type GetBucketReplicationInput struct { + _ struct{} `locationName:"GetBucketReplicationRequest" type:"structure"` + + // The bucket name for which to get the replication information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. 
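The interesting part of GetBucketPolicyStatusOutput is the single IsPublic flag inside PolicyStatus. A helper sketch; s3iface.S3API stands in for any initialized client and makes the helper mockable in tests:

package s3sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// isBucketPublic reports whether the bucket's policy status is public.
func isBucketPublic(svc s3iface.S3API, bucket string) (bool, error) {
	out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{Bucket: aws.String(bucket)})
	if err != nil {
		return false, err
	}
	return out.PolicyStatus != nil && aws.BoolValue(out.PolicyStatus.IsPublic), nil
}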
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketReplicationInput) SetExpectedBucketOwner(v string) *GetBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. 
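A sketch of walking the replication configuration returned above; each rule's Destination.Bucket is the ARN of the target bucket, and the client interface is assumed initialized elsewhere:

package s3sketch

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// printReplicationRules lists rule IDs and destination bucket ARNs.
func printReplicationRules(svc s3iface.S3API, bucket string) error {
	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	cfg := out.ReplicationConfiguration
	fmt.Println("role:", aws.StringValue(cfg.Role))
	for _, r := range cfg.Rules {
		fmt.Println(aws.StringValue(r.ID), "->", aws.StringValue(r.Destination.Bucket))
	}
	return nil
}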
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `locationName:"GetBucketRequestPaymentRequest" type:"structure"` + + // The name of the bucket for which to get the payment request configuration + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *GetBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. 
+ Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +type GetBucketTaggingInput struct { + _ struct{} `locationName:"GetBucketTaggingRequest" type:"structure"` + + // The name of the bucket for which to get the tagging information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
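The Payer enum in GetBucketRequestPaymentOutput just above has two generated constants, s3.PayerRequester and s3.PayerBucketOwner; comparing against them is how callers detect requester-pays buckets. A helper sketch in the same style as the previous ones:

package s3sketch

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

// isRequesterPays reports whether requester-pays is enabled on the bucket.
func isRequesterPays(svc s3iface.S3API, bucket string) (bool, error) {
	out, err := svc.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{Bucket: aws.String(bucket)})
	if err != nil {
		return false, err
	}
	return aws.StringValue(out.Payer) == s3.PayerRequester, nil
}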
+func (s *GetBucketTaggingInput) SetExpectedBucketOwner(v string) *GetBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +type GetBucketVersioningInput struct { + _ struct{} `locationName:"GetBucketVersioningRequest" type:"structure"` + + // The name of the bucket for which to get the versioning information. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketVersioningInput) SetExpectedBucketOwner(v string) *GetBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +type GetBucketWebsiteInput struct { + _ struct{} `locationName:"GetBucketWebsiteRequest" type:"structure"` + + // The bucket name for which to get the website configuration. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetBucketWebsiteInput) SetExpectedBucketOwner(v string) *GetBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + // The object key name of the website error document to use for 4XX class errors. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website (for example index.html). + IndexDocument *IndexDocument `type:"structure"` + + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. 
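+	//
+	// A minimal caller-side sketch (illustrative only; svc is assumed to be an
+	// existing *s3.S3 client and "example-bucket" is a hypothetical name):
+	//
+	//	out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+	//		Bucket: aws.String("example-bucket"),
+	//	})
+	//	if err == nil {
+	//		for _, rule := range out.RoutingRules {
+	//			fmt.Println(rule) // each RoutingRule pairs a Condition with a Redirect
+	//		}
+	//	}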
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +type GetObjectAclInput struct { + _ struct{} `locationName:"GetObjectAclRequest" type:"structure"` + + // The bucket name that contains the object for which to get the ACL information. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key of the object for which to get the ACL information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAclInput) SetExpectedBucketOwner(v string) *GetObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + // Container for the bucket owner's display name and ID. + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +type GetObjectAttributesInput struct { + _ struct{} `locationName:"GetObjectAttributesRequest" type:"structure"` + + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. 
+ // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"header" locationName:"x-amz-max-parts" type:"integer"` + + // Specifies the fields at the root level that you want returned in the response. + // Fields that you do not specify are not returned. + // + // ObjectAttributes is a required field + ObjectAttributes []*string `location:"header" locationName:"x-amz-object-attributes" type:"list" required:"true" enum:"ObjectAttributes"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"header" locationName:"x-amz-part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported for directory buckets. 
+ // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectAttributesInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // The version ID used to reference a specific version of the object. + // + // S3 Versioning isn't enabled and supported for directory buckets. For this + // API operation, only the null value of the version ID is supported by directory + // buckets. You can only specify null to the versionId query parameter in the + // request. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAttributesInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.ObjectAttributes == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectAttributes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectAttributesInput) SetBucket(v string) *GetObjectAttributesInput { + s.Bucket = &v + return s +} + +func (s *GetObjectAttributesInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectAttributesInput) SetExpectedBucketOwner(v string) *GetObjectAttributesInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAttributesInput) SetKey(v string) *GetObjectAttributesInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *GetObjectAttributesInput) SetMaxParts(v int64) *GetObjectAttributesInput { + s.MaxParts = &v + return s +} + +// SetObjectAttributes sets the ObjectAttributes field's value. 
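+//
+// A minimal caller-side sketch (illustrative only; svc is assumed to be an
+// existing *s3.S3 client and the bucket and key names are hypothetical). Only
+// the output fields corresponding to the requested attributes are populated:
+//
+//	out, err := svc.GetObjectAttributes(&s3.GetObjectAttributesInput{
+//		Bucket:           aws.String("example-bucket"),
+//		Key:              aws.String("example-key"),
+//		ObjectAttributes: aws.StringSlice([]string{s3.ObjectAttributesEtag, s3.ObjectAttributesObjectSize}),
+//	})
+//	if err == nil && out.ObjectSize != nil {
+//		fmt.Println(*out.ObjectSize)
+//	}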
+func (s *GetObjectAttributesInput) SetObjectAttributes(v []*string) *GetObjectAttributesInput { + s.ObjectAttributes = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *GetObjectAttributesInput) SetPartNumberMarker(v int64) *GetObjectAttributesInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAttributesInput) SetRequestPayer(v string) *GetObjectAttributesInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectAttributesInput) SetSSECustomerAlgorithm(v string) *GetObjectAttributesInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKey(v string) *GetObjectAttributesInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectAttributesInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectAttributesInput) SetSSECustomerKeyMD5(v string) *GetObjectAttributesInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesInput) SetVersionId(v string) *GetObjectAttributesInput { + s.VersionId = &v + return s +} + +func (s *GetObjectAttributesInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectAttributesInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectAttributesInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectAttributesOutput struct { + _ struct{} `type:"structure"` + + // The checksum or digest of the object. + Checksum *Checksum `type:"structure"` + + // Specifies whether the object retrieved was (true) or was not (false) a delete + // marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL. + ETag *string `type:"string"` + + // The creation date of the object. + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` + + // A collection of parts associated with a multipart upload. + ObjectParts *GetObjectAttributesParts `type:"structure"` + + // The size of the object in bytes. + ObjectSize *int64 `type:"long"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides the storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The version ID of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectAttributesOutput) GoString() string { + return s.String() +} + +// SetChecksum sets the Checksum field's value. +func (s *GetObjectAttributesOutput) SetChecksum(v *Checksum) *GetObjectAttributesOutput { + s.Checksum = v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectAttributesOutput) SetDeleteMarker(v bool) *GetObjectAttributesOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectAttributesOutput) SetETag(v string) *GetObjectAttributesOutput { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectAttributesOutput) SetLastModified(v time.Time) *GetObjectAttributesOutput { + s.LastModified = &v + return s +} + +// SetObjectParts sets the ObjectParts field's value. +func (s *GetObjectAttributesOutput) SetObjectParts(v *GetObjectAttributesParts) *GetObjectAttributesOutput { + s.ObjectParts = v + return s +} + +// SetObjectSize sets the ObjectSize field's value. +func (s *GetObjectAttributesOutput) SetObjectSize(v int64) *GetObjectAttributesOutput { + s.ObjectSize = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAttributesOutput) SetRequestCharged(v string) *GetObjectAttributesOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectAttributesOutput) SetStorageClass(v string) *GetObjectAttributesOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAttributesOutput) SetVersionId(v string) *GetObjectAttributesOutput { + s.VersionId = &v + return s +} + +// A collection of parts associated with a multipart upload. +type GetObjectAttributesParts struct { + _ struct{} `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A value of true + // indicates that the list was truncated. A list can be truncated if the number + // of parts exceeds the limit returned in the MaxParts element. 
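+	//
+	// A minimal pagination sketch (illustrative only; svc is assumed to be an
+	// existing *s3.S3 client and in a prepared *s3.GetObjectAttributesInput that
+	// requests the ObjectParts attribute):
+	//
+	//	for {
+	//		out, err := svc.GetObjectAttributes(in)
+	//		if err != nil || out.ObjectParts == nil {
+	//			break
+	//		}
+	//		// consume out.ObjectParts.Parts here
+	//		if out.ObjectParts.IsTruncated == nil || !*out.ObjectParts.IsTruncated {
+	//			break
+	//		}
+	//		in.PartNumberMarker = out.ObjectParts.NextPartNumberMarker
+	//	}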
+	IsTruncated *bool `type:"boolean"`
+
+	// The maximum number of parts allowed in the response.
+	MaxParts *int64 `type:"integer"`
+
+	// When a list is truncated, this element specifies the last part in the list,
+	// as well as the value to use for the PartNumberMarker request parameter in
+	// a subsequent request.
+	NextPartNumberMarker *int64 `type:"integer"`
+
+	// The marker for the current part.
+	PartNumberMarker *int64 `type:"integer"`
+
+	// A container for elements related to a particular part. A response can contain
+	// zero or more Parts elements.
+	//
+	//    * General purpose buckets - For GetObjectAttributes, if an additional
+	//    checksum (including x-amz-checksum-crc32, x-amz-checksum-crc32c, x-amz-checksum-sha1,
+	//    or x-amz-checksum-sha256) isn't applied to the object specified in the
+	//    request, the response doesn't return Part.
+	//
+	//    * Directory buckets - For GetObjectAttributes, regardless of whether an
+	//    additional checksum is applied to the object specified in the request,
+	//    the response returns Part.
+	Parts []*ObjectPart `locationName:"Part" type:"list" flattened:"true"`
+
+	// The total number of parts.
+	TotalPartsCount *int64 `locationName:"PartsCount" type:"integer"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAttributesParts) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s GetObjectAttributesParts) GoString() string {
+	return s.String()
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *GetObjectAttributesParts) SetIsTruncated(v bool) *GetObjectAttributesParts {
+	s.IsTruncated = &v
+	return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *GetObjectAttributesParts) SetMaxParts(v int64) *GetObjectAttributesParts {
+	s.MaxParts = &v
+	return s
+}
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
+func (s *GetObjectAttributesParts) SetNextPartNumberMarker(v int64) *GetObjectAttributesParts {
+	s.NextPartNumberMarker = &v
+	return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *GetObjectAttributesParts) SetPartNumberMarker(v int64) *GetObjectAttributesParts {
+	s.PartNumberMarker = &v
+	return s
+}
+
+// SetParts sets the Parts field's value.
+func (s *GetObjectAttributesParts) SetParts(v []*ObjectPart) *GetObjectAttributesParts {
+	s.Parts = v
+	return s
+}
+
+// SetTotalPartsCount sets the TotalPartsCount field's value.
+func (s *GetObjectAttributesParts) SetTotalPartsCount(v int64) *GetObjectAttributesParts {
+	s.TotalPartsCount = &v
+	return s
+}
+
+type GetObjectInput struct {
+	_ struct{} `locationName:"GetObjectRequest" type:"structure"`
+
+	// The bucket name containing the object.
+	//
+	// Directory buckets - When you use this operation with a directory bucket,
+	// you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+	// Path-style requests are not supported. Directory bucket names must be unique
+	// in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+	// (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+	// bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Access points - When you use this action with an access point, you must provide
+	// the alias of the access point in place of the bucket name or specify the
+	// access point ARN. When using the access point ARN, you must direct requests
+	// to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Object Lambda access points - When you use this action with an Object Lambda
+	// access point, you must direct requests to the Object Lambda access point
+	// hostname. The Object Lambda access point hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
+	//
+	// Access points and Object Lambda access points are not supported by directory
+	// buckets.
+	//
+	// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+	// must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+	// takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+	// When you use this action with S3 on Outposts through the Amazon Web Services
+	// SDKs, you provide the Outposts access point ARN in place of the bucket name.
+	// For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// To retrieve the checksum, this mode must be enabled.
+	//
+	// The AWS SDK for Go v1 does not support automatic response payload checksum
+	// validation. This feature is available in the AWS SDK for Go v2.
+	ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Return the object only if its entity tag (ETag) is the same as the one specified
+	// in this header; otherwise, return a 412 Precondition Failed error.
+	//
+	// If both the If-Match and If-Unmodified-Since headers are present in the
+	// request as follows: the If-Match condition evaluates to true, and the
+	// If-Unmodified-Since condition evaluates to false; then S3 returns 200 OK
+	// and the requested data.
+	//
+	// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
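+	//
+	// A minimal conditional-request sketch (illustrative only; svc is assumed to
+	// be an existing *s3.S3 client, and the bucket, key, and ETag values are
+	// hypothetical):
+	//
+	//	out, err := svc.GetObject(&s3.GetObjectInput{
+	//		Bucket:  aws.String("example-bucket"),
+	//		Key:     aws.String("example-key"),
+	//		IfMatch: aws.String("\"9b2cf535f27731c974343645a3985328\""),
+	//	})
+	//
+	// A 412 Precondition Failed is surfaced through err when the stored object's
+	// ETag no longer matches.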
+	IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+	// Return the object only if it has been modified since the specified time;
+	// otherwise, return a 304 Not Modified error.
+	//
+	// If both the If-None-Match and If-Modified-Since headers are present in
+	// the request as follows: the If-None-Match condition evaluates to false,
+	// and the If-Modified-Since condition evaluates to true; then S3 returns
+	// a 304 Not Modified status code.
+	//
+	// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+	IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"`
+
+	// Return the object only if its entity tag (ETag) is different from the one
+	// specified in this header; otherwise, return a 304 Not Modified error.
+	//
+	// If both the If-None-Match and If-Modified-Since headers are present in
+	// the request as follows: the If-None-Match condition evaluates to false,
+	// and the If-Modified-Since condition evaluates to true; then S3 returns
+	// a 304 Not Modified HTTP status code.
+	//
+	// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+	IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+	// Return the object only if it has not been modified since the specified time;
+	// otherwise, return a 412 Precondition Failed error.
+	//
+	// If both the If-Match and If-Unmodified-Since headers are present in the
+	// request as follows: the If-Match condition evaluates to true, and the
+	// If-Unmodified-Since condition evaluates to false; then S3 returns 200 OK
+	// and the requested data.
+	//
+	// For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+	IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+	// Key of the object to get.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Part number of the object being read. This is a positive integer between
+	// 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+	// Useful for downloading just a part of an object.
+	PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+	// Downloads the specified byte range of an object. For more information about
+	// the HTTP Range header, see https://www.rfc-editor.org/rfc/rfc9110.html#name-range
+	// (https://www.rfc-editor.org/rfc/rfc9110.html#name-range).
+	//
+	// Amazon S3 doesn't support retrieving multiple ranges of data per GET request.
+	Range *string `location:"header" locationName:"Range" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. If either
+	// the source or destination S3 bucket has Requester Pays enabled, the requester
+	// will pay for corresponding charges to copy the object. For information about
+	// downloading objects from Requester Pays buckets, see Downloading Objects
+	// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+	// in the Amazon S3 User Guide.
+	//
+	// This functionality is not supported for directory buckets.
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Sets the Cache-Control header of the response.
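+	//
+	// A minimal sketch of the response-header overrides (illustrative only; svc
+	// is assumed to be an existing *s3.S3 client and the names are hypothetical):
+	//
+	//	out, err := svc.GetObject(&s3.GetObjectInput{
+	//		Bucket:               aws.String("example-bucket"),
+	//		Key:                  aws.String("example-key"),
+	//		ResponseCacheControl: aws.String("no-cache"),
+	//		ResponseContentType:  aws.String("application/octet-stream"),
+	//	})
+	//
+	// The overrides affect only the returned headers, not the stored object or
+	// its metadata.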
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response. + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the object (for example, AES256). + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key that you originally provided + // for Amazon S3 to encrypt the data before storing it. This value is used to + // decrypt the object when recovering it and must match the one used when storing + // the data. The key must be appropriate for use with the algorithm specified + // in the x-amz-server-side-encryption-customer-algorithm header. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the customer-provided encryption key + // according to RFC 1321. 
Amazon S3 uses this header for a message integrity + // check to ensure that the encryption key was transmitted without error. + // + // If you encrypt an object by using server-side encryption with customer-provided + // encryption keys (SSE-C) when you store the object in Amazon S3, then when + // you GET the object, you must use the following headers: + // + // * x-amz-server-side-encryption-customer-algorithm + // + // * x-amz-server-side-encryption-customer-key + // + // * x-amz-server-side-encryption-customer-key-MD5 + // + // For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided + // Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Version ID used to reference a specific version of the object. + // + // By default, the GetObject operation returns the current version of an object. + // To return a different version, use the versionId subresource. + // + // * If you include a versionId in your request header, you must have the + // s3:GetObjectVersion permission to access a specific version of an object. + // The s3:GetObject permission is not required in this scenario. + // + // * If you request the current version of an object without a specific versionId + // in the request header, only the s3:GetObject permission is required. The + // s3:GetObjectVersion permission is not required in this scenario. + // + // * Directory buckets - S3 Versioning isn't enabled and supported for directory + // buckets. For this API operation, only the null value of the version ID + // is supported by directory buckets. You can only specify null to the versionId + // query parameter in the request. + // + // For more information about versioning, see PutBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html). + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +func (s *GetObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumMode sets the ChecksumMode field's value. +func (s *GetObjectInput) SetChecksumMode(v string) *GetObjectInput { + s.ChecksumMode = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectInput) SetExpectedBucketOwner(v string) *GetObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *GetObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +func (s *GetObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldInput struct { + _ struct{} `locationName:"GetObjectLegalHoldRequest" type:"structure"` + + // The bucket name containing the object whose legal hold status you want to + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose legal hold status you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. 
+ // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object whose legal hold status you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLegalHoldInput) SetExpectedBucketOwner(v string) *GetObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *GetObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
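+// Because the receiver is a value, the assignment below mutates only the
+// local copy that is then returned; the caller's input is left untouched.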
+func (s GetObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current legal hold status for the specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v + return s +} + +type GetObjectLockConfigurationInput struct { + _ struct{} `locationName:"GetObjectLockConfigurationRequest" type:"structure"` + + // The bucket whose Object Lock configuration you want to retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
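+// Validation failures are aggregated into a single request.ErrInvalidParams
+// value, so a caller can surface every problem at once, for example:
+//
+//	if err := input.Validate(); err != nil {
+//		return err // lists each missing or too-short member
+//	}
+//
+// The SDK's default handlers also run this validation before sending a request.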
+func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *GetObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's Object Lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v + return s +} + +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Indicates that a range of bytes was specified in the request. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. For more information, see Checking + // object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. For more information, see + // Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Indicates what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Indicates whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + // + // * If the current version of the object is a delete marker, Amazon S3 behaves + // as if the object was deleted and includes x-amz-delete-marker: true in + // the response. + // + // * If the specified version in the request is a delete marker, the response + // returns a 405 Method Not Allowed error and the Last-Modified: timestamp + // response header. 
+	DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+	// An entity tag (ETag) is an opaque identifier assigned by a web server to
+	// a specific version of a resource found at a URL.
+	ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+	// If the object expiration is configured (see PutBucketLifecycleConfiguration
+	// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+	// the response includes this header. It includes the expiry-date and rule-id
+	// key-value pairs providing object expiration information. The value of the
+	// rule-id is URL-encoded.
+	//
+	// This functionality is not supported for directory buckets.
+	Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+	// Date and time when the object was last modified.
+	//
+	// General purpose buckets - When you specify a versionId of the object in your
+	// request, if the specified version in the request is a delete marker, the
+	// response returns a 405 Method Not Allowed error and the Last-Modified: timestamp
+	// response header.
+	LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+	// A map of metadata to store with the object in S3.
+	//
+	// By default unmarshaled keys are written as map keys in the following canonicalized
+	// format: the first letter and any letter following a hyphen will be capitalized,
+	// and the rest as lowercase. Set `aws.Config.LowerCaseHeaderMaps` to `true`
+	// to write unmarshaled keys to the map as lowercase.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// This is set to the number of metadata entries not returned in the headers
+	// that are prefixed with x-amz-meta-. This can happen if you create metadata
+	// using an API like SOAP that supports more flexible metadata than the REST
+	// API. For example, using SOAP, you can create metadata whose values are not
+	// legal HTTP headers.
+	//
+	// This functionality is not supported for directory buckets.
+	MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+	// Indicates whether this object has an active legal hold. This field is only
+	// returned if you have permission to view an object's legal hold status.
+	//
+	// This functionality is not supported for directory buckets.
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// The Object Lock mode that's currently in place for this object.
+	//
+	// This functionality is not supported for directory buckets.
+	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// The date and time when this object's Object Lock will expire.
+	//
+	// This functionality is not supported for directory buckets.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+	// The count of parts this object has. This value is only returned if you specify
+	// partNumber in your request and the object was uploaded as a multipart upload.
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + // Amazon S3 can return this if your request involves a bucket that is either + // a source or destination in a replication rule. + // + // This functionality is not supported for directory buckets. + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration action and expiration time + // of the restored object copy. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object, when you have the relevant permission + // to read object tags. + // + // You can use GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) + // to retrieve the tag set associated with an object. + // + // This functionality is not supported for directory buckets. 
+ TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version ID of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *GetObjectOutput) SetBucketKeyEnabled(v bool) *GetObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *GetObjectOutput) SetChecksumCRC32(v string) *GetObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *GetObjectOutput) SetChecksumCRC32C(v string) *GetObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *GetObjectOutput) SetChecksumSHA1(v string) *GetObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *GetObjectOutput) SetChecksumSHA256(v string) *GetObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. 
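+//
+// For a request that asked for Range: bytes=0-1023 of a 4096-byte object, a
+// typical header value is:
+//
+//	Content-Range: bytes 0-1023/4096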
+func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
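+//
+// Values correspond to the StorageClass enum constants, for example
+// StorageClassStandard or StorageClassGlacier.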
+func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. +func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +type GetObjectRetentionInput struct { + _ struct{} `locationName:"GetObjectRetentionRequest" type:"structure"` + + // The bucket name containing the object whose retention settings you want to + // retrieve. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object whose retention settings you want to retrieve. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectRetentionInput) SetExpectedBucketOwner(v string) *GetObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *GetObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `locationName:"GetObjectTaggingRequest" type:"structure"` + + // The bucket name containing the object for which to get the tagging information. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which to get the tagging information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The versionId of the object for which to get the tagging information. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTaggingInput) SetExpectedBucketOwner(v string) *GetObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTaggingInput) SetRequestPayer(v string) *GetObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +func (s *GetObjectTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // Contains the tag set. + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + // The versionId of the object for which you got the tagging information. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +type GetObjectTorrentInput struct { + _ struct{} `locationName:"GetObjectTorrentRequest" type:"structure"` + + // The name of the bucket containing the object for which to get the torrent + // files. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The object key for which to get the information. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetObjectTorrentInput) SetExpectedBucketOwner(v string) *GetObjectTorrentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +func (s *GetObjectTorrentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetObjectTorrentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s GetObjectTorrentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + // A Bencoded dictionary as defined by the BitTorrent specification + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. 
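+//
+// The Body stream is owned by the caller, who must drain and close it. A
+// minimal sketch (svc is an assumed *s3.S3 client, input a validated
+// *s3.GetObjectTorrentInput):
+//
+//	out, err := svc.GetObjectTorrent(input)
+//	if err != nil {
+//		return err
+//	}
+//	defer out.Body.Close()
+//	torrent, err := io.ReadAll(out.Body) // bencoded dictionary bytes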
+func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +type GetPublicAccessBlockInput struct { + _ struct{} `locationName:"GetPublicAccessBlockRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *GetPublicAccessBlockInput) SetExpectedBucketOwner(v string) *GetPublicAccessBlockInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *GetPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *GetPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s GetPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + +// Container for S3 Glacier job parameters. +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Retrieval tier at which the restore will be processed. + // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Container for grant information. +type Grant struct { + _ struct{} `type:"structure"` + + // The person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
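+//
+// A Grant pairs a Grantee with a Permission. A minimal sketch using
+// illustrative identifiers:
+//
+//	grant := (&s3.Grant{}).
+//		SetGrantee((&s3.Grantee{}).
+//			SetType("CanonicalUser").
+//			SetID("example-canonical-user-id")).
+//		SetPermission("READ")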
+func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Container for the person being granted permissions. +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + // + // Using email addresses to specify a grantee is only supported in the following + // Amazon Web Services Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // For a list of all the Amazon S3 supported Regions and endpoints, see Regions + // and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) + // in the Amazon Web Services General Reference. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. 
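+//
+// Email grantees are accepted only in the Regions listed above; elsewhere,
+// identify the grantee by canonical user ID (ID) or group URI (URI) instead.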
+func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +type HeadBucketInput struct { + _ struct{} `locationName:"HeadBucketRequest" type:"structure"` + + // The bucket name. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Object Lambda access points - When you use this API operation with an Object + // Lambda access point, provide the alias of the Object Lambda access point + // in place of the bucket name. If the Object Lambda access point alias in a + // request is not valid, the error code InvalidAccessPointAliasError is returned. + // For more information about InvalidAccessPointAliasError, see List of Error + // Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList). + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
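+ //
+ // A minimal usage sketch (assuming an initialized *s3.S3 client named svc;
+ // not part of the generated code) that verifies existence and ownership in
+ // one call:
+ //
+ //	_, err := svc.HeadBucket(&s3.HeadBucketInput{
+ //		Bucket:              aws.String("my-bucket"),
+ //		ExpectedBucketOwner: aws.String("111122223333"),
+ //	})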
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s HeadBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *HeadBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *HeadBucketInput) SetExpectedBucketOwner(v string) *HeadBucketInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+func (s *HeadBucketInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *HeadBucketInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s HeadBucketInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type HeadBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates whether the bucket name used in the request is an access point
+ // alias.
+ //
+ // This functionality is not supported for directory buckets.
+ AccessPointAlias *bool `location:"header" locationName:"x-amz-access-point-alias" type:"boolean"`
+
+ // The name of the location where the bucket will be created.
+ //
+ // For directory buckets, the AZ ID of the Availability Zone where the bucket
+ // is created. An example AZ ID value is usw2-az1.
+ //
+ // This functionality is only supported by directory buckets.
+ BucketLocationName *string `location:"header" locationName:"x-amz-bucket-location-name" type:"string"`
+
+ // The type of location where the bucket is created.
+ //
+ // This functionality is only supported by directory buckets.
+ BucketLocationType *string `location:"header" locationName:"x-amz-bucket-location-type" type:"string" enum:"LocationType"`
+
+ // The Region where the bucket is located.
+ // + // This functionality is not supported for directory buckets. + BucketRegion *string `location:"header" locationName:"x-amz-bucket-region" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +// SetAccessPointAlias sets the AccessPointAlias field's value. +func (s *HeadBucketOutput) SetAccessPointAlias(v bool) *HeadBucketOutput { + s.AccessPointAlias = &v + return s +} + +// SetBucketLocationName sets the BucketLocationName field's value. +func (s *HeadBucketOutput) SetBucketLocationName(v string) *HeadBucketOutput { + s.BucketLocationName = &v + return s +} + +// SetBucketLocationType sets the BucketLocationType field's value. +func (s *HeadBucketOutput) SetBucketLocationType(v string) *HeadBucketOutput { + s.BucketLocationType = &v + return s +} + +// SetBucketRegion sets the BucketRegion field's value. +func (s *HeadBucketOutput) SetBucketRegion(v string) *HeadBucketOutput { + s.BucketRegion = &v + return s +} + +type HeadObjectInput struct { + _ struct{} `locationName:"HeadObjectRequest" type:"structure"` + + // The name of the bucket that contains the object. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. 
+ // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // To retrieve the checksum, this parameter must be enabled. + // + // In addition, if you enable ChecksumMode and the object is encrypted with + // Amazon Web Services Key Management Service (Amazon Web Services KMS), you + // must have permission to use the kms:Decrypt action for the request to succeed. + ChecksumMode *string `location:"header" locationName:"x-amz-checksum-mode" type:"string" enum:"ChecksumMode"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Return the object only if its entity tag (ETag) is the same as the one specified; + // otherwise, return a 412 (precondition failed) error. + // + // If both of the If-Match and If-Unmodified-Since headers are present in the + // request as follows: + // + // * If-Match condition evaluates to true, and; + // + // * If-Unmodified-Since condition evaluates to false; + // + // Then Amazon S3 returns 200 OK and the data requested. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time; + // otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified; otherwise, return a 304 (not modified) error. + // + // If both of the If-None-Match and If-Modified-Since headers are present in + // the request as follows: + // + // * If-None-Match condition evaluates to false, and; + // + // * If-Modified-Since condition evaluates to true; + // + // Then Amazon S3 returns the 304 Not Modified response code. + // + // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time; + // otherwise, return a 412 (precondition failed) error. 
+ //
+ // If both of the If-Match and If-Unmodified-Since headers are present in the
+ // request as follows:
+ //
+ // * If-Match condition evaluates to true, and;
+ //
+ // * If-Unmodified-Since condition evaluates to false;
+ //
+ // Then Amazon S3 returns 200 OK and the data requested.
+ //
+ // For more information about conditional requests, see RFC 7232 (https://tools.ietf.org/html/rfc7232).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"`
+
+ // The object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the object being read. This is a positive integer between
+ // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+ // Useful for querying the size of the part and the number of parts in this
+ // object.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+ // HeadObject returns only the metadata for an object. If the Range is satisfiable,
+ // only the ContentLength is affected in the response. If the Range is not satisfiable,
+ // S3 returns a 416 - Requested Range Not Satisfiable error.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object. For information about
+ // downloading objects from Requester Pays buckets, see Downloading Objects
+ // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
+ // in the Amazon S3 User Guide.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (for example, AES256).
+ //
+ // This functionality is not supported for directory buckets.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // S3 does not store the encryption key. The key must be appropriate for use
+ // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ //
+ // This functionality is not supported for directory buckets.
+ //
+ // SSECustomerKey is a sensitive parameter and its value will be
+ // replaced with "sensitive" in the string returned by HeadObjectInput's
+ // String and GoString methods.
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure that the
+ // encryption key was transmitted without error.
+ //
+ // This functionality is not supported for directory buckets.
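+ //
+ // A hedged sketch of deriving this digest for a 256-bit key (the key bytes
+ // below are placeholders, not a real secret):
+ //
+ //	key := bytes.Repeat([]byte{0x01}, 32) // 256-bit SSE-C key
+ //	sum := md5.Sum(key)
+ //	keyMD5 := base64.StdEncoding.EncodeToString(sum[:])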
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Version ID used to reference a specific version of the object. + // + // For directory buckets in this API operation, only the null value of the version + // ID is supported. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { + s.Bucket = &v + return s +} + +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumMode sets the ChecksumMode field's value. +func (s *HeadObjectInput) SetChecksumMode(v string) *HeadObjectInput { + s.ChecksumMode = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *HeadObjectInput) SetExpectedBucketOwner(v string) *HeadObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
+func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +func (s *HeadObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *HeadObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s HeadObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates that a range of bytes was specified. + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // The archive state of the head object. + // + // This functionality is not supported for directory buckets. + ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. 
This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Indicates what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + // + // This functionality is not supported for directory buckets. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An entity tag (ETag) is an opaque identifier assigned by a web server to + // a specific version of a resource found at a URL. 
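+ //
+ // As an illustrative sketch (assuming an initialized *s3.S3 client named
+ // svc), the ETag returned here can drive revalidation through If-None-Match
+ // on a later request:
+ //
+ //	head, _ := svc.HeadObject(&s3.HeadObjectInput{
+ //		Bucket: aws.String("my-bucket"),
+ //		Key:    aws.String("my-key"),
+ //	})
+ //	_, err := svc.HeadObject((&s3.HeadObjectInput{
+ //		Bucket: aws.String("my-bucket"),
+ //		Key:    aws.String("my-key"),
+ //	}).SetIfNoneMatch(aws.StringValue(head.ETag)))
+ //	// An unchanged object surfaces as a 304-based error, not a response.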
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PutBucketLifecycleConfiguration
+ // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)),
+ // the response includes this header. It includes the expiry-date and rule-id
+ // key-value pairs providing object expiration information. The value of the
+ // rule-id is URL-encoded.
+ //
+ // This functionality is not supported for directory buckets.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Date and time when the object was last modified.
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ //
+ // By default, unmarshaled keys are written as map keys in the following canonicalized
+ // format: the first letter and any letter following a hyphen will be capitalized,
+ // and the rest as lowercase. Set `aws.Config.LowerCaseHeaderMaps` to `true`
+ // to write unmarshaled keys to the map as lowercase.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ //
+ // This functionality is not supported for directory buckets.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ // Specifies whether a legal hold is in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectLegalHold permission.
+ // This header is not returned if the specified version of this object has never
+ // had a legal hold applied. For more information about S3 Object Lock, see
+ // Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // The Object Lock mode, if any, that's in effect for this object. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
+ // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when the Object Lock retention period expires. This header
+ // is only returned if the requester has the s3:GetObjectRetention permission.
+ //
+ // This functionality is not supported for directory buckets.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has. This value is only returned if you specify
+ // partNumber in your request and the object was uploaded as a multipart upload.
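+ //
+ // A minimal sketch (assuming an initialized *s3.S3 client named svc): a
+ // ranged HEAD for part 1 is enough to learn the total part count:
+ //
+ //	out, err := svc.HeadObject((&s3.HeadObjectInput{
+ //		Bucket: aws.String("my-bucket"),
+ //		Key:    aws.String("my-key"),
+ //	}).SetPartNumber(1))
+ //	if err == nil && out.PartsCount != nil {
+ //		fmt.Println("parts:", aws.Int64Value(out.PartsCount))
+ //	}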
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+ // Amazon S3 can return this header if your request involves a bucket that is
+ // either a source or a destination in a replication rule.
+ //
+ // In replication, you have a source bucket on which you configure replication
+ // and a destination bucket or buckets where Amazon S3 stores object replicas.
+ // When you request an object (GetObject) or object metadata (HeadObject) from
+ // these buckets, Amazon S3 will return the x-amz-replication-status header
+ // in the response as follows:
+ //
+ // * If requesting an object from the source bucket, Amazon S3 will return
+ // the x-amz-replication-status header if the object in your request is eligible
+ // for replication. For example, suppose that in your replication configuration,
+ // you specify object prefix TaxDocs requesting Amazon S3 to replicate objects
+ // with key prefix TaxDocs. Any objects you upload with this key name prefix,
+ // for example TaxDocs/document1.pdf, are eligible for replication. For any
+ // object request with this key name prefix, Amazon S3 will return the x-amz-replication-status
+ // header with value PENDING, COMPLETED or FAILED indicating object replication
+ // status.
+ //
+ // * If requesting an object from a destination bucket, Amazon S3 will return
+ // the x-amz-replication-status header with value REPLICA if the object in
+ // your request is a replica that Amazon S3 created and there is no replica
+ // modification replication in progress.
+ //
+ // * When replicating objects to multiple destination buckets, the x-amz-replication-status
+ // header acts differently. The header of the source object will only return
+ // a value of COMPLETED when replication is successful to all destinations.
+ // The header will remain at value PENDING until replication has completed
+ // for all destinations. If one or more destinations fail replication, the
+ // header will return FAILED.
+ //
+ // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+ //
+ // This functionality is not supported for directory buckets.
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If the object is an archived object (an object whose storage class is GLACIER),
+ // the response includes this header if either the archive restoration is in
+ // progress (see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html))
+ // or an archive copy is already restored.
+ //
+ // If an archive copy is already restored, the header value indicates when Amazon
+ // S3 is scheduled to delete the object copy. For example:
+ //
+ // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00
+ // GMT"
+ //
+ // If the object restoration is in progress, the header returns the value ongoing-request="true".
+ //
+ // For more information about archiving objects, see Transitioning Objects:
+ // General Considerations (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-transition-general-considerations).
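+ //
+ // A hedged sketch of consuming this header, given an out of type
+ // *s3.HeadObjectOutput (string layout as documented above; there is no
+ // dedicated SDK parser):
+ //
+ //	if out.Restore != nil &&
+ //		strings.Contains(*out.Restore, `ongoing-request="false"`) {
+ //		// The restored copy is readable until the advertised expiry-date.
+ //	}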
+ // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by HeadObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version ID of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetArchiveStatus sets the ArchiveStatus field's value. +func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { + s.ArchiveStatus = &v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *HeadObjectOutput) SetBucketKeyEnabled(v bool) *HeadObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *HeadObjectOutput) SetChecksumCRC32(v string) *HeadObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *HeadObjectOutput) SetChecksumCRC32C(v string) *HeadObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *HeadObjectOutput) SetChecksumSHA1(v string) *HeadObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *HeadObjectOutput) SetChecksumSHA256(v string) *HeadObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. 
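+//
+// Note the canonicalization described on the Metadata field; as an
+// illustrative sketch, an object uploaded with metadata key "my-key" is read
+// back from a *s3.HeadObjectOutput named out as
+//
+//	if v, ok := out.Metadata["My-Key"]; ok { // canonicalized by default
+//		fmt.Println(aws.StringValue(v))
+//	}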
+func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput {
+ s.Metadata = v
+ return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
+ s.MissingMeta = &v
+ return s
+}
+
+// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value.
+func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput {
+ s.ObjectLockLegalHoldStatus = &v
+ return s
+}
+
+// SetObjectLockMode sets the ObjectLockMode field's value.
+func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput {
+ s.ObjectLockMode = &v
+ return s
+}
+
+// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value.
+func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput {
+ s.ObjectLockRetainUntilDate = &v
+ return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
+ s.PartsCount = &v
+ return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
+ s.ReplicationStatus = &v
+ return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
+ s.RequestCharged = &v
+ return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
+ s.Restore = &v
+ return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
+ s.SSECustomerAlgorithm = &v
+ return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
+ s.SSECustomerKeyMD5 = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
+ s.StorageClass = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
+ s.WebsiteRedirectLocation = &v
+ return s
+}
+
+// Container for the Suffix element.
+type IndexDocument struct {
+ _ struct{} `type:"structure"`
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (for example, if the suffix is index.html and you make a request
+ // to samplebucket/images/, the data that is returned will be for the object
+ // with the key name images/index.html). The suffix must not be empty and must
+ // not include a slash character.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests. For more information, see XML
+ // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+ //
+ // Suffix is a required field
+ Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IndexDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s IndexDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IndexDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
+ if s.Suffix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Suffix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
+ s.Suffix = &v
+ return s
+}
+
+// Container element that identifies who initiated the multipart upload.
+type Initiator struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the Principal.
+ //
+ // This functionality is not supported for directory buckets.
+ DisplayName *string `type:"string"`
+
+ // If the principal is an Amazon Web Services account, it provides the Canonical
+ // User ID. If the principal is an IAM User, it provides a user ARN value.
+ //
+ // Directory buckets - If the principal is an Amazon Web Services account, it
+ // provides the Amazon Web Services account ID. If the principal is an IAM User,
+ // it provides a user ARN value.
+ ID *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Initiator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Initiator) GoString() string {
+ return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Initiator) SetDisplayName(v string) *Initiator {
+ s.DisplayName = &v
+ return s
+}
+
+// SetID sets the ID field's value.
+func (s *Initiator) SetID(v string) *Initiator {
+ s.ID = &v
+ return s
+}
+
+// Describes the serialization format of the object.
+type InputSerialization struct {
+ _ struct{} `type:"structure"`
+
+ // Describes the serialization of a CSV-encoded object.
+ CSV *CSVInput `type:"structure"`
+
+ // Specifies the object's compression format. Valid values: NONE, GZIP, BZIP2.
+ // Default Value: NONE.
+ CompressionType *string `type:"string" enum:"CompressionType"`
+
+ // Specifies JSON as the object's input serialization format.
+ JSON *JSONInput `type:"structure"`
+
+ // Specifies Parquet as the object's input serialization format.
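+ //
+ // For illustration only (a sketch, not generated code), a GZIP-compressed
+ // CSV input for S3 Select could be described as
+ //
+ //	input := (&s3.InputSerialization{}).
+ //		SetCompressionType(s3.CompressionTypeGzip).
+ //		SetCSV((&s3.CSVInput{}).SetFileHeaderInfo(s3.FileHeaderInfoUse))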
+ Parquet *ParquetInput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. +func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. +func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +// A container for specifying S3 Intelligent-Tiering filters. The filters determine +// the subset of objects to which the rule applies. +type IntelligentTieringAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the configuration applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the configuration + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringAndOperator) SetPrefix(v string) *IntelligentTieringAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *IntelligentTieringAndOperator) SetTags(v []*Tag) *IntelligentTieringAndOperator { + s.Tags = v + return s +} + +// Specifies the S3 Intelligent-Tiering configuration for an Amazon S3 bucket. 
+// +// For information about the S3 Intelligent-Tiering storage class, see Storage +// class for automatically optimizing frequently and infrequently accessed objects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access). +type IntelligentTieringConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a bucket filter. The configuration only includes objects that meet + // the filter's criteria. + Filter *IntelligentTieringFilter `type:"structure"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies the status of the configuration. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"IntelligentTieringStatus"` + + // Specifies the S3 Intelligent-Tiering storage class tier of the configuration. + // + // Tierings is a required field + Tierings []*Tiering `locationName:"Tiering" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Tierings == nil { + invalidParams.Add(request.NewErrParamRequired("Tierings")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Tierings != nil { + for i, v := range s.Tierings { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tierings", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *IntelligentTieringConfiguration) SetFilter(v *IntelligentTieringFilter) *IntelligentTieringConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *IntelligentTieringConfiguration) SetId(v string) *IntelligentTieringConfiguration { + s.Id = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *IntelligentTieringConfiguration) SetStatus(v string) *IntelligentTieringConfiguration { + s.Status = &v + return s +} + +// SetTierings sets the Tierings field's value. +func (s *IntelligentTieringConfiguration) SetTierings(v []*Tiering) *IntelligentTieringConfiguration { + s.Tierings = v + return s +} + +// The Filter is used to identify objects that the S3 Intelligent-Tiering configuration +// applies to. 
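+//
+// As an illustrative sketch (not generated code), a filter that narrows a
+// configuration to objects under "logs/" that also carry a given tag combines
+// the two predicates through the And operator:
+//
+//	filter := (&s3.IntelligentTieringFilter{}).
+//		SetAnd((&s3.IntelligentTieringAndOperator{}).
+//			SetPrefix("logs/").
+//			SetTags([]*s3.Tag{(&s3.Tag{}).SetKey("team").SetValue("data")}))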
+type IntelligentTieringFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *IntelligentTieringAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // A container of a key value name pair. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s IntelligentTieringFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IntelligentTieringFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IntelligentTieringFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *IntelligentTieringFilter) SetAnd(v *IntelligentTieringAndOperator) *IntelligentTieringFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *IntelligentTieringFilter) SetPrefix(v string) *IntelligentTieringFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *IntelligentTieringFilter) SetTag(v *Tag) *IntelligentTieringFilter { + s.Tag = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon S3 API Reference. +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. + Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Object versions to include in the inventory list. 
If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list" enum:"InventoryOptionalField"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v + return s +} + +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. 
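+//
+// Editor's note (illustrative sketch, not generated SDK text; values are
+// hypothetical): a complete InventoryConfiguration wires together the required
+// Destination, Id, IncludedObjectVersions, IsEnabled, and Schedule members:
+//
+//	cfg := (&InventoryConfiguration{}).
+//		SetId("weekly-report").
+//		SetIncludedObjectVersions("All").
+//		SetIsEnabled(true).
+//		SetSchedule((&InventorySchedule{}).SetFrequency("Weekly")).
+//		SetDestination((&InventoryDestination{}).SetS3BucketDestination(
+//			(&InventoryS3BucketDestination{}).
+//				SetBucket("arn:aws:s3:::example-reports-bucket").
+//				SetFormat("CSV")))
+//	err := cfg.Validate() // reports any missing required member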
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v + return s +} + +// SetOptionalFields sets the OptionalFields field's value. +func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v + return s +} + +// SetSchedule sets the Schedule field's value. +func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. +func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + +// Specifies an inventory filter. The inventory only includes objects that meet +// the filter's criteria. +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Contains the bucket name, file format, bucket owner (optional), and prefix +// (optional) where inventory results are published. +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The account ID that owns the destination S3 bucket. If no account ID is provided, + // the owner is not validated before exporting data. + // + // Although this value is optional, we strongly recommend that you set it to + // help prevent problems if the destination bucket ownership changes. + AccountId *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Specifies the schedule for generating inventory results. +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
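+//
+// Editor's note (illustrative sketch, not generated SDK text; values are
+// hypothetical): the destination above can also request server-side encryption
+// for the delivered reports, e.g.
+//
+//	dst := (&InventoryS3BucketDestination{}).
+//		SetBucket("arn:aws:s3:::example-reports-bucket").
+//		SetFormat("CSV").
+//		SetPrefix("inventory/").
+//		SetEncryption((&InventoryEncryption{}).SetSSES3(&SSES3{}))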
+func (s *InventorySchedule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"}
+	if s.Frequency == nil {
+		invalidParams.Add(request.NewErrParamRequired("Frequency"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetFrequency sets the Frequency field's value.
+func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
+	s.Frequency = &v
+	return s
+}
+
+// Specifies JSON as the object's input serialization format.
+type JSONInput struct {
+	_ struct{} `type:"structure"`
+
+	// The type of JSON. Valid values: Document, Lines.
+	Type *string `type:"string" enum:"JSONType"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JSONInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JSONInput) GoString() string {
+	return s.String()
+}
+
+// SetType sets the Type field's value.
+func (s *JSONInput) SetType(v string) *JSONInput {
+	s.Type = &v
+	return s
+}
+
+// Specifies JSON as the request's output serialization format.
+type JSONOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The value used to separate individual records in the output. If no value
+	// is specified, Amazon S3 uses a newline character ('\n').
+	RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JSONOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s JSONOutput) GoString() string {
+	return s.String()
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput {
+	s.RecordDelimiter = &v
+	return s
+}
+
+// A container for object key name prefix and suffix filtering rules.
+type KeyFilter struct {
+	_ struct{} `type:"structure"`
+
+	// A list of containers for the key-value pair that defines the criteria for
+	// the filter rule.
+	FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s KeyFilter) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
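+//
+// Editor's note (illustrative sketch, not generated SDK text): JSONInput and
+// JSONOutput above are the S3 Select serialization halves; for example,
+// line-delimited JSON in and newline-separated records out:
+//
+//	in := (&JSONInput{}).SetType("LINES")
+//	out := (&JSONOutput{}).SetRecordDelimiter("\n")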
+func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// A container for specifying the configuration for Lambda notifications. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event for which to invoke the Lambda function. For more + // information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes + // when the specified event type occurs. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +// Container for lifecycle rules. You can add as many as 1000 rules. 
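+//
+// Editor's note (illustrative sketch, not generated SDK text; values are
+// hypothetical): a minimal configuration with one rule that expires objects
+// under the "tmp/" prefix after seven days:
+//
+//	cfg := &LifecycleConfiguration{Rules: []*Rule{
+//		(&Rule{}).
+//			SetID("expire-tmp").
+//			SetPrefix("tmp/").
+//			SetStatus("Enabled").
+//			SetExpiration((&LifecycleExpiration{}).SetDays(7)),
+//	}}
+//	err := cfg.Validate() // also validates each nested rule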
+//
+// For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+type LifecycleConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies lifecycle configuration rules for an Amazon S3 bucket.
+	//
+	// Rules is a required field
+	Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+	if s.Rules == nil {
+		invalidParams.Add(request.NewErrParamRequired("Rules"))
+	}
+	if s.Rules != nil {
+		for i, v := range s.Rules {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
+	s.Rules = v
+	return s
+}
+
+// Container for the expiration for the lifecycle of the object.
+//
+// For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+type LifecycleExpiration struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates at what date the object is to be moved or deleted. The date value
+	// must conform to the ISO 8601 format. The time is always midnight UTC.
+	Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// Indicates the lifetime, in days, of the objects that are subject to the rule.
+	// The value must be a non-zero positive integer.
+	Days *int64 `type:"integer"`
+
+	// Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+	// versions. If set to true, the delete marker will be expired; if set to false,
+	// the policy takes no action. This cannot be specified with Days or Date in
+	// a Lifecycle Expiration Policy.
+	ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleExpiration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
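+//
+// Editor's note (illustrative sketch, not generated SDK text): Days, Date, and
+// ExpiredObjectDeleteMarker are mutually exclusive; set exactly one, e.g.
+//
+//	byAge := (&LifecycleExpiration{}).SetDays(30)
+//	byDate := (&LifecycleExpiration{}).SetDate(
+//		time.Date(2025, time.January, 1, 0, 0, 0, 0, time.UTC))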
+func (s LifecycleExpiration) GoString() string {
+	return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+	s.Date = &v
+	return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+	s.Days = &v
+	return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+	s.ExpiredObjectDeleteMarker = &v
+	return s
+}
+
+// A lifecycle rule for individual objects in an Amazon S3 bucket.
+//
+// For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html)
+// in the Amazon S3 User Guide.
+type LifecycleRule struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the days since the initiation of an incomplete multipart upload
+	// that Amazon S3 will wait before permanently removing all parts of the upload.
+	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+	// Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+	// in the Amazon S3 User Guide.
+	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+	// Specifies the expiration for the lifecycle of the object in the form of date,
+	// days, and whether the object has a delete marker.
+	Expiration *LifecycleExpiration `type:"structure"`
+
+	// The Filter is used to identify objects that a Lifecycle Rule applies to.
+	// A Filter must have exactly one of Prefix, Tag, or And specified. Filter is
+	// required if the LifecycleRule does not contain a Prefix element.
+	Filter *LifecycleRuleFilter `type:"structure"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+	// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+	// configuration action on a bucket that has versioning enabled (or suspended)
+	// to request that Amazon S3 delete noncurrent object versions at a specific
+	// period in the object's lifetime.
+	NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+	// Specifies the transition rule for the lifecycle rule that describes when
+	// noncurrent objects transition to a specific storage class. If your bucket
+	// is versioning-enabled (or versioning is suspended), you can set this action
+	// to request that Amazon S3 transition noncurrent object versions to a specific
+	// storage class at a set period in the object's lifetime.
+	NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
+
+	// Prefix identifying one or more objects to which the rule applies. This is
+	// no longer used; use Filter instead.
+	//
+	// Replacement must be made for object keys containing special characters (such
+	// as carriage returns) when using XML requests. For more information, see XML
+	// related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints).
+	//
+	// Deprecated: Prefix has been deprecated
+	Prefix *string `deprecated:"true" type:"string"`
+
+	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+	// is not currently being applied.
+	//
+	// Status is a required field
+	Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+	// Specifies when an Amazon S3 object transitions to a specified storage class.
+	Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s LifecycleRule) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
+	}
+	if s.Filter != nil {
+		if err := s.Filter.Validate(); err != nil {
+			invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
+	s.AbortIncompleteMultipartUpload = v
+	return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
+	s.Expiration = v
+	return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
+	s.Filter = v
+	return s
+}
+
+// SetID sets the ID field's value.
+func (s *LifecycleRule) SetID(v string) *LifecycleRule {
+	s.ID = &v
+	return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
+	s.NoncurrentVersionExpiration = v
+	return s
+}
+
+// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value.
+func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule {
+	s.NoncurrentVersionTransitions = v
+	return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule {
+	s.Prefix = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
+	s.Status = &v
+	return s
+}
+
+// SetTransitions sets the Transitions field's value.
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
+	s.Transitions = v
+	return s
+}
+
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+// more predicates. The Lifecycle Rule will apply to any object matching all
+// of the predicates configured inside the And operator.
+type LifecycleRuleAndOperator struct {
+	_ struct{} `type:"structure"`
+
+	// Minimum object size to which the rule applies.
+ ObjectSizeGreaterThan *int64 `type:"long"` + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 `type:"long"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeGreaterThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleAndOperator) SetObjectSizeLessThan(v int64) *LifecycleRuleAndOperator { + s.ObjectSizeLessThan = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter can have exactly one of Prefix, Tag, ObjectSizeGreaterThan, ObjectSizeLessThan, +// or And specified. If the Filter element is left empty, the Lifecycle Rule +// applies to all objects in the bucket. +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Minimum object size to which the rule applies. + ObjectSizeGreaterThan *int64 `type:"long"` + + // Maximum object size to which the rule applies. + ObjectSizeLessThan *int64 `type:"long"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. 
For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetObjectSizeGreaterThan sets the ObjectSizeGreaterThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeGreaterThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeGreaterThan = &v + return s +} + +// SetObjectSizeLessThan sets the ObjectSizeLessThan field's value. +func (s *LifecycleRuleFilter) SetObjectSizeLessThan(v int64) *LifecycleRuleFilter { + s.ObjectSizeLessThan = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketAnalyticsConfigurationsRequest" type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
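+//
+// Editor's note (illustrative sketch, not generated SDK text; values are
+// hypothetical): a size-banded LifecycleRuleFilter that matches only objects
+// between 128 KiB and 1 GiB:
+//
+//	f := (&LifecycleRuleFilter{}).
+//		SetObjectSizeGreaterThan(128 * 1024).
+//		SetObjectSizeLessThan(1024 * 1024 * 1024)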
+func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketAnalyticsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketAnalyticsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketAnalyticsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketAnalyticsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The marker that is used as a starting point for this analytics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. 
The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketIntelligentTieringConfigurationsInput struct { + _ struct{} `locationName:"ListBucketIntelligentTieringConfigurationsRequest" type:"structure"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
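+//
+// Editor's note (illustrative sketch, not generated SDK text; svc is a
+// hypothetical *S3 client): listings page with IsTruncated and
+// NextContinuationToken, e.g.
+//
+//	in := (&ListBucketAnalyticsConfigurationsInput{}).SetBucket("example-bucket")
+//	for {
+//		out, err := svc.ListBucketAnalyticsConfigurations(in)
+//		if err != nil {
+//			return err
+//		}
+//		// consume out.AnalyticsConfigurationList here
+//		if out.IsTruncated == nil || !*out.IsTruncated {
+//			break
+//		}
+//		in.SetContinuationToken(*out.NextContinuationToken)
+//	}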
+func (s *ListBucketIntelligentTieringConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketIntelligentTieringConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetBucket(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsInput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsInput { + s.ContinuationToken = &v + return s +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketIntelligentTieringConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketIntelligentTieringConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketIntelligentTieringConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `type:"string"` + + // The list of S3 Intelligent-Tiering configurations for a bucket. + IntelligentTieringConfigurationList []*IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketIntelligentTieringConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIntelligentTieringConfigurationList sets the IntelligentTieringConfigurationList field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIntelligentTieringConfigurationList(v []*IntelligentTieringConfiguration) *ListBucketIntelligentTieringConfigurationsOutput { + s.IntelligentTieringConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetIsTruncated(v bool) *ListBucketIntelligentTieringConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketIntelligentTieringConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketIntelligentTieringConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `locationName:"ListBucketInventoryConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
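+//
+// Editor's note (illustrative sketch, not generated SDK text): Validate returns
+// a request.ErrInvalidParams naming every missing or malformed member, e.g.
+//
+//	in := &ListBucketInventoryConfigurationsInput{} // Bucket deliberately unset
+//	if err := in.Validate(); err != nil {
+//		fmt.Println(err) // reports the missing required field Bucket
+//	}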
+func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketInventoryConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketInventoryConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketInventoryConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketInventoryConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketInventoryConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Tells whether the returned list of inventory configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // is provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `locationName:"ListBucketMetricsConfigurationsRequest" type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
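+//
+// Editor's note (illustrative sketch, not generated SDK text; values are
+// hypothetical): ExpectedBucketOwner is a guard, not a selector; when the
+// account ID does not own the bucket the request fails with 403, e.g.
+//
+//	in := (&ListBucketMetricsConfigurationsInput{}).
+//		SetBucket("example-bucket").
+//		SetExpectedBucketOwner("111122223333")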
+func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListBucketMetricsConfigurationsInput) SetExpectedBucketOwner(v string) *ListBucketMetricsConfigurationsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *ListBucketMetricsConfigurationsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListBucketMetricsConfigurationsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListBucketMetricsConfigurationsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. This value is present if it was sent in the request. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of metrics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // The list of metrics configurations for a bucket. + MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"` + + // The marker used to continue a metrics configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketMetricsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetMetricsConfigurationList sets the MetricsConfigurationList field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput { + s.MetricsConfigurationList = v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +type ListBucketsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsInput) GoString() string { + return s.String() +} + +type ListBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requester. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // The owner of the buckets listed. + Owner *Owner `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput { + s.Buckets = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +type ListDirectoryBucketsInput struct { + _ struct{} `locationName:"ListDirectoryBucketsRequest" type:"structure"` + + // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. 
You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // Maximum number of buckets to be returned in response. When the number is + // more than the count of buckets that are owned by an Amazon Web Services account, + // return all the buckets in response. + MaxDirectoryBuckets *int64 `location:"querystring" locationName:"max-directory-buckets" type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsInput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsInput) SetContinuationToken(v string) *ListDirectoryBucketsInput { + s.ContinuationToken = &v + return s +} + +// SetMaxDirectoryBuckets sets the MaxDirectoryBuckets field's value. +func (s *ListDirectoryBucketsInput) SetMaxDirectoryBuckets(v int64) *ListDirectoryBucketsInput { + s.MaxDirectoryBuckets = &v + return s +} + +type ListDirectoryBucketsOutput struct { + _ struct{} `type:"structure"` + + // The list of buckets owned by the requester. + Buckets []*Bucket `locationNameList:"Bucket" type:"list"` + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + ContinuationToken *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListDirectoryBucketsOutput) GoString() string { + return s.String() +} + +// SetBuckets sets the Buckets field's value. +func (s *ListDirectoryBucketsOutput) SetBuckets(v []*Bucket) *ListDirectoryBucketsOutput { + s.Buckets = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListDirectoryBucketsOutput) SetContinuationToken(v string) *ListDirectoryBucketsOutput { + s.ContinuationToken = &v + return s +} + +type ListMultipartUploadsInput struct { + _ struct{} `locationName:"ListMultipartUploadsRequest" type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. 
Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + // + // All keys that contain the same string between the prefix, if specified, and + // the first occurrence of the delimiter after the prefix are grouped under + // a single result element, CommonPrefixes. If you don't specify the prefix + // parameter, then the substring starts at the beginning of the key. The keys + // that are grouped under CommonPrefixes result element are not returned elsewhere + // in the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). 
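For contrast, the two bucket-level listings defined just above differ in shape: ListBuckets returns everything in one response, while ListDirectoryBuckets pages with a single ContinuationToken. A sketch of both; the stop condition for directory buckets is an assumption (that S3 omits the token on the final page, as with the other list APIs), and the helper name is invented:

```go
package s3example // hypothetical helper package

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printAllBuckets(svc *s3.S3) error {
	// General purpose buckets: a single unpaginated call.
	lb, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		return err
	}
	for _, b := range lb.Buckets {
		fmt.Println(aws.StringValue(b.Name), aws.TimeValue(b.CreationDate))
	}

	// Directory buckets: loop on the returned ContinuationToken.
	var token *string
	for {
		db, err := svc.ListDirectoryBuckets(&s3.ListDirectoryBucketsInput{
			ContinuationToken:   token,
			MaxDirectoryBuckets: aws.Int64(100),
		})
		if err != nil {
			return err
		}
		for _, b := range db.Buckets {
			fmt.Println(aws.StringValue(b.Name))
		}
		if db.ContinuationToken == nil {
			return nil // assumed: no token means no further pages
		}
		token = db.ContinuationToken
	}
}
```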
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the multipart upload after which listing should begin. + // + // * General purpose buckets - For general purpose buckets, key-marker is + // an object key. Together with upload-id-marker, this parameter specifies + // the multipart upload after which listing should begin. If upload-id-marker + // is not specified, only the keys lexicographically greater than the specified + // key-marker will be included in the list. If upload-id-marker is specified, + // any multipart uploads for a key equal to the key-marker might also be + // included, provided those multipart uploads have upload IDs lexicographically + // greater than the specified upload-id-marker. + // + // * Directory buckets - For directory buckets, key-marker is obfuscated + // and isn't a real object key. The upload-id-marker parameter isn't supported + // by directory buckets. To list the additional multipart uploads, you only + // need to set the value of key-marker to the NextKeyMarker value from the + // previous response. In the ListMultipartUploads response, the multipart + // uploads aren't sorted lexicographically based on the object keys. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. You can use prefixes to separate a bucket into different grouping + // of keys. (You can think of using prefix to make groups in the same way that + // you'd use a folder in a file system.) + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. + // + // This functionality is not supported for directory buckets. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListMultipartUploadsInput) SetExpectedBucketOwner(v string) *ListMultipartUploadsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListMultipartUploadsInput) SetRequestPayer(v string) *ListMultipartUploadsInput { + s.RequestPayer = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +func (s *ListMultipartUploadsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListMultipartUploadsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
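The getEndpointARN / hasEndpointARN / updateArnableField trio documented above is internal plumbing: when the Bucket member carries an access point (or Outposts) ARN, the client re-routes the request and backfills the parsed resource name into a copy of the input. From the caller's side this only means an ARN is a legal Bucket value. A small illustration; the ARN itself is a placeholder with an invented account ID and access point name:

```go
package s3example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/arn"
	"github.com/aws/aws-sdk-go/service/s3"
)

func arnAsBucket() {
	// Placeholder access point ARN.
	ap := "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point"

	// arn.IsARN is the same check hasEndpointARN applies to the Bucket member.
	fmt.Println(arn.IsARN(ap)) // true

	// Passing the ARN where a bucket name is expected is supported; endpoint
	// resolution then happens inside the client, not in caller code.
	_ = &s3.ListMultipartUploadsInput{Bucket: aws.String(ap)}
}
```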
+func (s ListMultipartUploadsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // The name of the bucket to which the multipart upload was initiated. Does + // not return the access point ARN or access point alias if used. + Bucket *string `type:"string"` + + // If you specify a delimiter in the request, then the result returns each distinct + // key prefix containing the delimiter in a CommonPrefixes element. The distinct + // key prefixes are returned in the Prefix child element. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Contains the delimiter you specified in the request. If you don't specify + // a delimiter in your request, this element is absent from the response. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, KeyMarker, Prefix, NextKeyMarker, Key. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + // + // This functionality is not supported for directory buckets. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. Otherwise, any multipart uploads for a key equal to the key-marker + // might be included in the list only if they have an upload ID lexicographically + // greater than the specified upload-id-marker. 
+ // + // This functionality is not supported for directory buckets. + UploadIdMarker *string `type:"string"` + + // Container for elements related to a particular multipart upload. A response + // can contain zero or more Upload elements. + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListMultipartUploadsOutput) SetRequestCharged(v string) *ListMultipartUploadsOutput { + s.RequestCharged = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. 
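The key-marker/upload-id-marker pair described above is fed back from NextKeyMarker and NextUploadIdMarker on each truncated response. The generated client also ships a Pages helper that performs this handshake (ListMultipartUploadsPages is defined elsewhere in this generated file, outside the hunk shown); a sketch with an assumed bucket name:

```go
package s3example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printInProgressUploads(svc *s3.S3, bucket string) error {
	in := &s3.ListMultipartUploadsInput{
		Bucket:     aws.String(bucket),
		MaxUploads: aws.Int64(100),
	}
	return svc.ListMultipartUploadsPages(in, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
		for _, u := range page.Uploads {
			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
		}
		return true // keep paging; the helper re-sends NextKeyMarker/NextUploadIdMarker
	})
}
```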
+func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
+	s.Uploads = v
+	return s
+}
+
+type ListObjectVersionsInput struct {
+	_ struct{} `locationName:"ListObjectVersionsRequest" type:"structure"`
+
+	// The bucket name that contains the objects.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// A delimiter is a character that you specify to group keys. All keys that
+	// contain the same string between the prefix and the first occurrence of the
+	// delimiter are grouped under a single result element in CommonPrefixes. These
+	// groups are counted as one result against the max-keys limitation. These keys
+	// are not returned elsewhere in the response.
+	Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+	// Requests Amazon S3 to encode the object keys in the response and specifies
+	// the encoding method to use. An object key can contain any Unicode character;
+	// however, the XML 1.0 parser cannot parse some characters, such as characters
+	// with an ASCII value from 0 to 10. For characters that are not supported in
+	// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+	// keys in the response.
+	EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Specifies the key to start with when listing objects in a bucket.
+	KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+	// Sets the maximum number of keys returned in the response. By default, the
+	// action returns up to 1,000 key names. The response might contain fewer keys
+	// but will never contain more. If additional keys satisfy the search criteria,
+	// but were not returned because max-keys was exceeded, the response contains
+	// <isTruncated>true</isTruncated>. To return the additional keys, see key-marker
+	// and version-id-marker.
+	MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+	// Specifies the optional fields that you want returned in the response. Fields
+	// that you do not specify are not returned.
+	OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"`
+
+	// Use this parameter to select only those keys that begin with the specified
+	// prefix. You can use prefixes to separate a bucket into different groupings
+	// of keys. (You can think of using prefix to make groups in the same way that
+	// you'd use a folder in a file system.) You can use prefix with delimiter to
+	// roll up numerous objects into a single result under CommonPrefixes.
+	Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. If either
+	// the source or destination S3 bucket has Requester Pays enabled, the requester
+	// will pay for corresponding charges to copy the object.
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectVersionsInput) SetExpectedBucketOwner(v string) *ListObjectVersionsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectVersionsInput) SetOptionalObjectAttributes(v []*string) *ListObjectVersionsInput { + s.OptionalObjectAttributes = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. 
+func (s *ListObjectVersionsInput) SetRequestPayer(v string) *ListObjectVersionsInput { + s.RequestPayer = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +func (s *ListObjectVersionsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectVersionsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectVersionsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys rolled up into a common prefix count as a single return when + // calculating the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Container for an object that is a delete marker. + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + // The delimiter grouping the included keys. A delimiter is a character that + // you specify to group keys. All keys that contain the same string between + // the prefix and the first occurrence of the delimiter are grouped under a + // single result element in CommonPrefixes. These groups are counted as one + // result against the max-keys limitation. These keys are not returned elsewhere + // in the response. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // KeyMarker, NextKeyMarker, Prefix, Key, and Delimiter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. If your results were truncated, you can make + // a follow-up paginated request by using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last key returned in a truncated response. + KeyMarker *string `type:"string"` + + // Specifies the maximum number of objects to return. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextKeyMarker + // specifies the first key not returned that satisfies the search criteria. + // Use this value for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When the number of responses exceeds the value of MaxKeys, NextVersionIdMarker + // specifies the first object version not returned that satisfies the search + // criteria. 
Use this value for the version-id-marker request parameter in a + // subsequent request. + NextVersionIdMarker *string `type:"string"` + + // Selects objects that start with the value supplied by this parameter. + Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Marks the last version of the key returned in a truncated response. + VersionIdMarker *string `type:"string"` + + // Container for version information. + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. +func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
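As the output fields above describe, a truncated version listing is resumed from NextKeyMarker and NextVersionIdMarker. The package's Pages helper (defined elsewhere in this generated file) handles that handshake; a sketch with assumed bucket and prefix arguments:

```go
package s3example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func printVersions(svc *s3.S3, bucket, prefix string) error {
	in := &s3.ListObjectVersionsInput{
		Bucket: aws.String(bucket),
		Prefix: aws.String(prefix),
	}
	return svc.ListObjectVersionsPages(in, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Printf("%s %s latest=%v\n",
				aws.StringValue(v.Key), aws.StringValue(v.VersionId), aws.BoolValue(v.IsLatest))
		}
		for _, d := range page.DeleteMarkers {
			fmt.Println("delete marker:", aws.StringValue(d.Key))
		}
		return true // keep paging on NextKeyMarker/NextVersionIdMarker
	})
}
```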
+func (s *ListObjectVersionsOutput) SetRequestCharged(v string) *ListObjectVersionsOutput { + s.RequestCharged = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +type ListObjectsInput struct { + _ struct{} `locationName:"ListObjectsRequest" type:"structure"` + + // The name of the bucket containing the objects. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character that you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key can contain any Unicode character; + // however, the XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. 
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Marker is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. Marker can be any key in the bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
+func (s *ListObjectsInput) SetExpectedBucketOwner(v string) *ListObjectsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsInput) SetOptionalObjectAttributes(v []*string) *ListObjectsInput { + s.OptionalObjectAttributes = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +func (s *ListObjectsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) rolled up in a common prefix count as a single + // return when calculating the number of returns. + // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by the delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/), as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. If + // using url, non-ASCII characters used in an object's key name will be URL + // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether Amazon S3 returned all of the results that + // satisfied the search criteria. 
+ IsTruncated *bool `type:"boolean"` + + // Indicates where in the bucket listing begins. Marker is included in the response + // if it was sent with the request. + Marker *string `type:"string"` + + // The maximum number of keys returned in the response body. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // When the response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as the marker parameter + // in the subsequent request to get the next set of objects. Amazon S3 lists + // objects in alphabetical order. + // + // This element is returned only if you have the delimiter request parameter + // specified. If the response does not include the NextMarker element and it + // is truncated, you can use the value of the last Key element in the response + // as the marker parameter in the subsequent request to get the next set of + // object keys. + NextMarker *string `type:"string"` + + // Keys that begin with the indicated prefix. + Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
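The NextMarker documentation above hides a subtlety worth spelling out: NextMarker appears only when a delimiter was sent, so a manual loop must otherwise fall back to the last returned Key. A sketch of that documented rule (the helper name and bucket argument are illustrative; ListObjectsPages offers the same behavior prepackaged):

```go
package s3example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func listAllV1(svc *s3.S3, bucket string) error {
	var marker *string
	for {
		out, err := svc.ListObjects(&s3.ListObjectsInput{
			Bucket: aws.String(bucket),
			Marker: marker,
		})
		if err != nil {
			return err
		}
		for _, o := range out.Contents {
			fmt.Println(aws.StringValue(o.Key))
		}
		if !aws.BoolValue(out.IsTruncated) {
			return nil
		}
		// Per the NextMarker doc: it is only set when a delimiter was sent,
		// so otherwise resume from the last Key in this page.
		if out.NextMarker != nil {
			marker = out.NextMarker
		} else if n := len(out.Contents); n > 0 {
			marker = out.Contents[n-1].Key
		} else {
			return nil
		}
	}
}
```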
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectsOutput) SetRequestCharged(v string) *ListObjectsOutput { + s.RequestCharged = &v + return s +} + +type ListObjectsV2Input struct { + _ struct{} `locationName:"ListObjectsV2Request" type:"structure"` + + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates to Amazon S3 that the list is being continued + // on this bucket with a token. ContinuationToken is obfuscated and is not a + // real key. You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character that you use to group keys. + // + // * Directory buckets - For directory buckets, / is the only supported delimiter. + // + // * Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. + // For more information about multipart uploads, see Multipart Upload Overview + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in + // the Amazon S3 User Guide. 
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. If + // using url, non-ASCII characters used in an object's key name will be URL + // encoded. For example, the object test_file(3).png will appear as test_file%283%29.png. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The owner field is not present in ListObjectsV2 by default. If you want to + // return the owner field with each key in the result, then set the FetchOwner + // field to true. + // + // Directory buckets - For directory buckets, the bucket owner is returned as + // the object owner for all objects. + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Specifies the optional fields that you want returned in the response. Fields + // that you do not specify are not returned. + // + // This functionality is not supported for directory buckets. + OptionalObjectAttributes []*string `location:"header" locationName:"x-amz-optional-object-attributes" type:"list" enum:"OptionalObjectAttributes"` + + // Limits the response to keys that begin with the specified prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request in V2 style. Bucket owners need not specify this parameter + // in their requests. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket. + // + // This functionality is not supported for directory buckets. + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
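+
+// Editorial sketch (hypothetical, not generated code): a Prefix plus a "/"
+// delimiter rolls keys up into CommonPrefixes, per the field docs above.
+// svc and the bucket name are assumed/illustrative:
+//
+//	out, err := svc.ListObjectsV2(&ListObjectsV2Input{
+//		Bucket:    aws.String("my-bucket"),
+//		Prefix:    aws.String("photos/"),
+//		Delimiter: aws.String("/"),
+//	})
+//	if err != nil {
+//		return err
+//	}
+//	for _, cp := range out.CommonPrefixes {
+//		fmt.Println(aws.StringValue(cp.Prefix)) // e.g. "photos/2021/"
+//	}
+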
+func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListObjectsV2Input) SetExpectedBucketOwner(v string) *ListObjectsV2Input { + s.ExpectedBucketOwner = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetOptionalObjectAttributes sets the OptionalObjectAttributes field's value. +func (s *ListObjectsV2Input) SetOptionalObjectAttributes(v []*string) *ListObjectsV2Input { + s.OptionalObjectAttributes = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +func (s *ListObjectsV2Input) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListObjectsV2Input) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s ListObjectsV2Input) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // All of the keys (up to 1,000) that share the same prefix are grouped together. + // When counting the total numbers of returns by this API operation, this group + // of keys is considered as one item. 
+ // + // A response can contain CommonPrefixes only if you specify a delimiter. + // + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by a delimiter. + // + // CommonPrefixes lists keys that act like subdirectories in the directory specified + // by Prefix. + // + // For example, if the prefix is notes/ and the delimiter is a slash (/) as + // in notes/summer/july, the common prefix is notes/summer/. All of the keys + // that roll up into a common prefix count as a single return when calculating + // the number of returns. + // + // * Directory buckets - For directory buckets, only prefixes that end in + // a delimiter (/) are supported. + // + // * Directory buckets - When you query ListObjectsV2 with a delimiter during + // in-progress multipart uploads, the CommonPrefixes response parameter contains + // the prefixes that are associated with the in-progress multipart uploads. + // For more information about multipart uploads, see Multipart Upload Overview + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html) in + // the Amazon S3 User Guide. + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // If ContinuationToken was sent with the request, it is included in the response. + // You can use the returned ContinuationToken for pagination of the list response. + // You can use this ContinuationToken for pagination of the list results. + ContinuationToken *string `type:"string"` + + // Causes keys that contain the same string between the prefix and the first + // occurrence of the delimiter to be rolled up into a single result element + // in the CommonPrefixes collection. These rolled-up keys are not returned elsewhere + // in the response. Each rolled-up result counts as only one return against + // the MaxKeys value. + // + // Directory buckets - For directory buckets, / is the only supported delimiter. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object key names in the XML response. + // + // If you specify the encoding-type request parameter, Amazon S3 includes this + // element in the response, and returns encoded key name values in the following + // response elements: + // + // Delimiter, Prefix, Key, and StartAfter. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Set to false if all of the results were returned. Set to true if more keys + // are available to return. If the number of results exceeds that specified + // by MaxKeys, all of the results might not be returned. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than or equal to the MaxKeys field. For example, if you ask + // for 50 keys, your result will include 50 keys or fewer. + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. By default, the + // action returns up to 1,000 key names. The response might contain fewer keys + // but will never contain more. + MaxKeys *int64 `type:"integer"` + + // The bucket name. + Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true, which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. 
NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Keys that begin with the indicated prefix. + // + // Directory buckets - For directory buckets, only prefixes that end in a delimiter + // (/) are supported. + Prefix *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If StartAfter was sent with the request, it is included in the response. + // + // This functionality is not supported for directory buckets. + StartAfter *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListObjectsV2Output) SetRequestCharged(v string) *ListObjectsV2Output { + s.RequestCharged = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. 
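+
+// Editorial sketch (hypothetical, not generated code): NextContinuationToken
+// above feeds the next request's ContinuationToken until IsTruncated is
+// false. svc and the bucket name are assumed/illustrative; the generated
+// ListObjectsV2Pages helper wraps the same loop:
+//
+//	var token *string
+//	for {
+//		out, err := svc.ListObjectsV2(&ListObjectsV2Input{
+//			Bucket:            aws.String("my-bucket"),
+//			ContinuationToken: token,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		// ... consume out.Contents ...
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		token = out.NextContinuationToken
+//	}
+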
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +type ListPartsInput struct { + _ struct{} `locationName:"ListPartsRequest" type:"structure"` + + // The name of the bucket to which the parts are being uploaded. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListPartsInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
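+
+// Editorial sketch (hypothetical, not generated code): Bucket, Key, and
+// UploadId are the required fields checked by Validate below. svc and the
+// names are assumed/illustrative; uploadID would come from an earlier
+// CreateMultipartUpload call:
+//
+//	in := &ListPartsInput{
+//		Bucket:   aws.String("my-bucket"),
+//		Key:      aws.String("big-object"),
+//		UploadId: aws.String(uploadID),
+//	}
+//	if err := in.Validate(); err != nil {
+//		return err // reports any missing required field
+//	}
+//	out, err := svc.ListParts(in)
+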
+func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *ListPartsInput) SetExpectedBucketOwner(v string) *ListPartsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *ListPartsInput) SetSSECustomerAlgorithm(v string) *ListPartsInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *ListPartsInput) SetSSECustomerKey(v string) *ListPartsInput { + s.SSECustomerKey = &v + return s +} + +func (s *ListPartsInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *ListPartsInput) SetSSECustomerKeyMD5(v string) *ListPartsInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +func (s *ListPartsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *ListPartsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
+func (s ListPartsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // If the bucket has a lifecycle rule configured with an action to abort incomplete + // multipart uploads and the prefix in the lifecycle rule matches the object + // name in the request, then the response includes this header indicating when + // the initiated multipart upload will become eligible for abort operation. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config). + // + // The response will also include the x-amz-abort-rule-id header that will provide + // the ID of the lifecycle configuration rule that defines this action. + // + // This functionality is not supported for directory buckets. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` + + // This header is returned along with the x-amz-abort-date header. It identifies + // applicable lifecycle configuration rule that defines the action to abort + // incomplete multipart uploads. + // + // This functionality is not supported for directory buckets. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // The name of the bucket to which the multipart upload was initiated. Does + // not return the access point ARN or access point alias if used. + Bucket *string `type:"string"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` + + // Container element that identifies who initiated the multipart upload. If + // the initiator is an Amazon Web Services account, this element provides the + // same information as the Owner element. If the initiator is an IAM User, this + // element provides the user ARN and display name. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. A true value indicates + // that the list was truncated. A list can be truncated if the number of parts + // exceeds the limit returned in the MaxParts element. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + // Container element that identifies the object owner, after the object is created. + // If multipart upload is initiated by an IAM user, this element provides the + // parent account ID and display name. + // + // Directory buckets - The bucket owner is returned as the object owner for + // all the parts. + Owner *Owner `type:"structure"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `type:"integer"` + + // Container for elements related to a particular part. A response can contain + // zero or more Part elements. 
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the uploaded object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ListPartsOutput) SetChecksumAlgorithm(v string) *ListPartsOutput { + s.ChecksumAlgorithm = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. +func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. 
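+
+// Editorial sketch (hypothetical, not generated code): when IsTruncated is
+// true, NextPartNumberMarker above becomes the next request's
+// PartNumberMarker. svc, parts, and the names are assumed/illustrative:
+//
+//	var partMarker *int64
+//	for {
+//		out, err := svc.ListParts(&ListPartsInput{
+//			Bucket:           aws.String("my-bucket"),
+//			Key:              aws.String("big-object"),
+//			UploadId:         aws.String(uploadID),
+//			PartNumberMarker: partMarker,
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		parts = append(parts, out.Parts...)
+//		if !aws.BoolValue(out.IsTruncated) {
+//			break
+//		}
+//		partMarker = out.NextPartNumberMarker
+//	}
+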
+func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Describes an Amazon S3 location that will receive the results of the restore +// request. +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Contains the type of server-side encryption used. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. 
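+
+// Editorial sketch (hypothetical, not generated code): BucketName and Prefix
+// are the required Location fields; the value is typically referenced from a
+// restore request's OutputLocation. Names are illustrative:
+//
+//	loc := &Location{
+//		BucketName:   aws.String("restore-results"),
+//		Prefix:       aws.String("restored/"),
+//		StorageClass: aws.String(StorageClassStandard),
+//	}
+//	if err := loc.Validate(); err != nil {
+//		return err
+//	}
+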
+func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Specifies the location where the bucket will be created. +// +// For directory buckets, the location type is Availability Zone. For more information +// about directory buckets, see Directory buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html) +// in the Amazon S3 User Guide. +// +// This functionality is only supported by directory buckets. +type LocationInfo struct { + _ struct{} `type:"structure"` + + // The name of the location where the bucket will be created. + // + // For directory buckets, the name of the location is the AZ ID of the Availability + // Zone where the bucket will be created. An example AZ ID value is usw2-az1. + Name *string `type:"string"` + + // The type of location where the bucket will be created. + Type *string `type:"string" enum:"LocationType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LocationInfo) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *LocationInfo) SetName(v string) *LocationInfo { + s.Name = &v + return s +} + +// SetType sets the Type field's value. +func (s *LocationInfo) SetType(v string) *LocationInfo { + s.Type = &v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon S3 API Reference. +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case, you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. 
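+
+// Editorial sketch (hypothetical, not generated code): TargetBucket and
+// TargetPrefix are the required LoggingEnabled fields checked by Validate
+// below; a distinct prefix per source bucket keeps delivered log keys
+// distinguishable. Names are illustrative, and the value is typically
+// wrapped in a BucketLoggingStatus for PutBucketLogging:
+//
+//	logging := &LoggingEnabled{
+//		TargetBucket: aws.String("log-bucket"),
+//		TargetPrefix: aws.String("logs/source-bucket/"),
+//	}
+//	if err := logging.Validate(); err != nil {
+//		return err
+//	}
+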
+ // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` + + // Container for granting information. + // + // Buckets that use the bucket owner enforced setting for Object Ownership don't + // support target grants. For more information, see Permissions for server access + // log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) + // in the Amazon S3 User Guide. + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // Amazon S3 key format for log objects. + TargetObjectKeyFormat *TargetObjectKeyFormat `type:"structure"` + + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetObjectKeyFormat sets the TargetObjectKeyFormat field's value. +func (s *LoggingEnabled) SetTargetObjectKeyFormat(v *TargetObjectKeyFormat) *LoggingEnabled { + s.TargetObjectKeyFormat = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + // Name of the object. + Name *string `type:"string"` + + // Value of the object. + Value *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + +// A container specifying replication metrics-related settings enabling replication +// metrics and events. +type Metrics struct { + _ struct{} `type:"structure"` + + // A container specifying the time threshold for emitting the s3:Replication:OperationMissedThreshold + // event. + EventThreshold *ReplicationTimeValue `type:"structure"` + + // Specifies whether the replication metrics are enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"MetricsStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Metrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Metrics) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Metrics) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Metrics"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventThreshold sets the EventThreshold field's value. +func (s *Metrics) SetEventThreshold(v *ReplicationTimeValue) *Metrics { + s.EventThreshold = v + return s +} + +// SetStatus sets the Status field's value. +func (s *Metrics) SetStatus(v string) *Metrics { + s.Status = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates, and an object +// must match all of the predicates in order for the filter to apply. +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The access point ARN used when evaluating an AND predicate. + AccessPointArn *string `type:"string"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
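+
+// Editorial sketch (hypothetical, not generated code): enabling replication
+// metrics with a 15-minute missed-threshold event, per the Metrics type
+// above. The enum constant and ReplicationTimeValue are from this package:
+//
+//	m := &Metrics{
+//		Status:         aws.String(MetricsStatusEnabled),
+//		EventThreshold: &ReplicationTimeValue{Minutes: aws.Int64(15)},
+//	}
+//	if err := m.Validate(); err != nil {
+//		return err
+//	}
+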
+func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsAndOperator) SetAccessPointArn(v string) *MetricsAndOperator { + s.AccessPointArn = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PutBucketMetricsConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html). +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, an object tag, an access point ARN, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Specifies a metrics configuration filter. The metrics configuration only +// includes objects that meet the filter's criteria. A filter must be a prefix, +// an object tag, an access point ARN, or a conjunction (MetricsAndOperator). +// For more information, see PutBucketMetricsConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html). +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // The access point ARN used when evaluating a metrics filter. + AccessPointArn *string `type:"string"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessPointArn sets the AccessPointArn field's value. +func (s *MetricsFilter) SetAccessPointArn(v string) *MetricsFilter { + s.AccessPointArn = &v + return s +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
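+
+// Editorial sketch (hypothetical, not generated code): a metrics
+// configuration whose filter limits CloudWatch request metrics to a single
+// prefix. The Id is illustrative, and the value is typically sent via
+// PutBucketMetricsConfiguration:
+//
+//	cfg := &MetricsConfiguration{
+//		Id:     aws.String("documents-metrics"),
+//		Filter: &MetricsFilter{Prefix: aws.String("documents/")},
+//	}
+//	if err := cfg.Validate(); err != nil {
+//		return err
+//	}
+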
+func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Container for the MultipartUpload for the Amazon S3 object. +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm *string `type:"string" enum:"ChecksumAlgorithm"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Specifies the owner of the object that is part of the multipart upload. + // + // Directory buckets - The bucket owner is returned as the object owner for + // all the objects. + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *MultipartUpload) SetChecksumAlgorithm(v string) *MultipartUpload { + s.ChecksumAlgorithm = &v + return s +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies how many newer noncurrent versions must exist before Amazon S3 + // can perform the associated action on a given version. 
If there are this many + // more recent noncurrent versions, Amazon S3 will take the associated action. + // For more information about noncurrent versions, see Lifecycle configuration + // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. The value must be a non-zero positive integer. + // For information about the noncurrent days calculations, see How Amazon S3 + // Calculates When an Object Became Noncurrent (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionExpiration) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionExpiration { + s.NewerNoncurrentVersions = &v + return s +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, +// GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled +// (or versioning is suspended), you can set this action to request that Amazon +// S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, +// INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at +// a specific period in the object's lifetime. +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies how many newer noncurrent versions must exist before Amazon S3 + // can perform the associated action on a given version. If there are this many + // more recent noncurrent versions, Amazon S3 will take the associated action. + // For more information about noncurrent versions, see Lifecycle configuration + // elements (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html) + // in the Amazon S3 User Guide. + NewerNoncurrentVersions *int64 `type:"integer"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates How Long an Object Has Been Noncurrent + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon S3 User Guide. + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. 
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNewerNoncurrentVersions sets the NewerNoncurrentVersions field's value. +func (s *NoncurrentVersionTransition) SetNewerNoncurrentVersions(v int64) *NoncurrentVersionTransition { + s.NewerNoncurrentVersions = &v + return s +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + // Enables delivery of events to Amazon EventBridge. + EventBridgeConfiguration *EventBridgeConfiguration `type:"structure"` + + // Describes the Lambda functions to invoke and the events for which to invoke + // them. + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + // The topic to which notifications are sent and the events for which notifications + // are generated. + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
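Since this diff vendors the generated v1 API surface verbatim, a short usage sketch may help reviewers sanity-check the `NotificationConfiguration` type above. The following is illustrative only and not part of the vendored file: it builds the configuration with the generated chainable setters and sends it with `PutBucketNotificationConfiguration`; the bucket name and topic ARN are placeholders, and the client is assumed to pick up credentials and region from the environment.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Build the configuration with the generated setters; the SDK runs
	// NotificationConfiguration.Validate before sending the request.
	cfg := (&s3.NotificationConfiguration{}).
		SetTopicConfigurations([]*s3.TopicConfiguration{
			(&s3.TopicConfiguration{}).
				SetTopicArn("arn:aws:sns:us-east-1:123456789012:example-topic"). // placeholder ARN
				SetEvents([]*string{aws.String("s3:ObjectCreated:*")}),
		})

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket:                    aws.String("example-bucket"), // placeholder bucket
		NotificationConfiguration: cfg,
	})
	if err != nil {
		log.Fatal(err)
	}
}
```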
+func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEventBridgeConfiguration sets the EventBridgeConfiguration field's value. +func (s *NotificationConfiguration) SetEventBridgeConfiguration(v *EventBridgeConfiguration) *NotificationConfiguration { + s.EventBridgeConfiguration = v + return s +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Container for specifying the Lambda notification configuration. + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + // This data type is deprecated. This data type specifies the configuration + // for publishing messages to an Amazon Simple Queue Service (Amazon SQS) queue + // when Amazon S3 detects specified events. + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + // This data type is deprecated. A container for specifying the configuration + // for publication of messages to an Amazon Simple Notification Service (Amazon + // SNS) topic when Amazon S3 detects specified events. + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring event notifications using object key name filtering +// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) +// in the Amazon S3 User Guide. +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // A container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +// An object consists of data and its descriptive metadata. +type Object struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` + + // The entity tag is a hash of the object. The ETag reflects changes only to + // the contents of an object, not its metadata. The ETag may or may not be an + // MD5 digest of the object data. Whether or not it is depends on how the object + // was created and how it is encrypted as described below: + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the Amazon Web Services Management Console, and are encrypted + // by SSE-S3 or plaintext, have ETags that are an MD5 digest of their object + // data. + // + // * Objects created by the PUT Object, POST Object, or Copy operation, or + // through the Amazon Web Services Management Console, and are encrypted + // by SSE-C or SSE-KMS, have ETags that are not an MD5 digest of their object + // data. + // + // * If an object is created by either the Multipart Upload or Part Copy + // operation, the ETag is not an MD5 digest, regardless of the method of + // encryption. 
If an object is larger than 16 MB, the Amazon Web Services + // Management Console will upload or copy that object as a Multipart Upload, + // and therefore the ETag will not be an MD5 digest. + // + // Directory buckets - MD5 is not supported by directory buckets. + ETag *string `type:"string"` + + // The name that you assign to an object. You use the object key to retrieve + // the object. + Key *string `min:"1" type:"string"` + + // Creation date of the object. + LastModified *time.Time `type:"timestamp"` + + // The owner of the object + // + // Directory buckets - The bucket owner is returned as the object owner. + Owner *Owner `type:"structure"` + + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Only the S3 Express + // One Zone storage class is supported by directory buckets to store objects. + RestoreStatus *RestoreStatus `type:"structure"` + + // Size in bytes of the object + Size *int64 `type:"long"` + + // The class of storage used to store the object. + // + // Directory buckets - Only the S3 Express One Zone storage class is supported + // by directory buckets to store objects. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Object) GoString() string { + return s.String() +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *Object) SetChecksumAlgorithm(v []*string) *Object { + s.ChecksumAlgorithm = v + return s +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *Object) SetRestoreStatus(v *RestoreStatus) *Object { + s.RestoreStatus = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +// Object Identifier is unique value to identify objects. +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object. 
+ // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Version ID for the specific version of the object to delete. + // + // This functionality is not supported for directory buckets. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// The container element for Object Lock configuration parameters. +type ObjectLockConfiguration struct { + _ struct{} `type:"structure"` + + // Indicates whether this bucket has an Object Lock configuration enabled. Enable + // ObjectLockEnabled when you apply ObjectLockConfiguration to a bucket. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` + + // Specifies the Object Lock rule for the specified object. Enable this + // rule when you apply ObjectLockConfiguration to a bucket. Bucket settings + // require both a mode and a period. The period can be either Days or Years + // but you must select one. You cannot specify Days and Years at the same time. + Rule *ObjectLockRule `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} + +// SetObjectLockEnabled sets the ObjectLockEnabled field's value.
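For reviewers unfamiliar with the Object Lock types above, here is a minimal sketch, illustrative only and not part of the vendored file, that applies a default retention rule via `PutObjectLockConfiguration`. The bucket name and the 30-day period are placeholders; `Days` and `Years` are mutually exclusive, as the doc comments state.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// A default retention rule: every new object version gets 30 days of
	// COMPLIANCE-mode retention. Specify Days or Years, never both.
	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeCompliance),
					Days: aws.Int64(30),
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```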
+func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} + +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} + +// A legal hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` + + // Indicates whether the specified object has a legal hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockLegalHold) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockLegalHold) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v + return s +} + +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this Object Lock Retention will expire. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an Object Lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default Object Lock retention mode and period that you want to apply + // to new objects placed in the specified bucket. Bucket settings require both + // a mode and a period. The period can be either Days or Years but you must + // select one. You cannot specify Days and Years at the same time. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +// A container for elements related to an individual part. +type ObjectPart struct { + _ struct{} `type:"structure"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // The part number identifying the part. This value is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` + + // The size of the uploaded part in bytes. + Size *int64 `type:"long"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectPart) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *ObjectPart) SetChecksumCRC32(v string) *ObjectPart { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *ObjectPart) SetChecksumCRC32C(v string) *ObjectPart { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *ObjectPart) SetChecksumSHA1(v string) *ObjectPart { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *ObjectPart) SetChecksumSHA256(v string) *ObjectPart { + s.ChecksumSHA256 = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *ObjectPart) SetPartNumber(v int64) *ObjectPart { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectPart) SetSize(v int64) *ObjectPart { + s.Size = &v + return s +} + +// The version of an object. +type ObjectVersion struct { + _ struct{} `type:"structure"` + + // The algorithm that was used to create a checksum of the object. + ChecksumAlgorithm []*string `type:"list" flattened:"true" enum:"ChecksumAlgorithm"` + + // The entity tag is an MD5 hash of that version of the object. + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time when the object was last modified. + LastModified *time.Time `type:"timestamp"` + + // Specifies the owner of the object. + Owner *Owner `type:"structure"` + + // Specifies the restoration status of an object. Objects in certain storage + // classes must be restored before they can be retrieved. For more information + // about these storage classes and how to work with archived objects, see Working + // with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) + // in the Amazon S3 User Guide. + RestoreStatus *RestoreStatus `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"long"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
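The `ObjectVersion` entries above are what `ListObjectVersions` returns. A minimal, illustrative sketch (not part of the vendored file; bucket name and prefix are placeholders) that pages through all versions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Walk every version of every key under a prefix; each entry in
	// page.Versions is an *s3.ObjectVersion as defined above.
	err := svc.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
		Prefix: aws.String("logs/"),          // placeholder prefix
	}, func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
		for _, v := range page.Versions {
			fmt.Printf("%s %s latest=%t\n",
				aws.StringValue(v.Key), aws.StringValue(v.VersionId), aws.BoolValue(v.IsLatest))
		}
		return true // keep paginating
	})
	if err != nil {
		log.Fatal(err)
	}
}
```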
+func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *ObjectVersion) SetChecksumAlgorithm(v []*string) *ObjectVersion { + s.ChecksumAlgorithm = v + return s +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetRestoreStatus sets the RestoreStatus field's value. +func (s *ObjectVersion) SetRestoreStatus(v *RestoreStatus) *ObjectVersion { + s.RestoreStatus = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Describes the location where the restore job's output is stored. +type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + +// Container for the owner's display name and ID. +type Owner struct { + _ struct{} `type:"structure"` + + // Container for the display name of the owner. This value is only supported + // in the following Amazon Web Services Regions: + // + // * US East (N. Virginia) + // + // * US West (N. California) + // + // * US West (Oregon) + // + // * Asia Pacific (Singapore) + // + // * Asia Pacific (Sydney) + // + // * Asia Pacific (Tokyo) + // + // * Europe (Ireland) + // + // * South America (São Paulo) + // + // This functionality is not supported for directory buckets. + DisplayName *string `type:"string"` + + // Container for the ID of the owner. + ID *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// The container element for a bucket's ownership controls. +type OwnershipControls struct { + _ struct{} `type:"structure"` + + // The container element for an ownership control rule. + // + // Rules is a required field + Rules []*OwnershipControlsRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControls) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControls) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
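A usage sketch for the `OwnershipControls` type above, illustrative only and not part of the vendored file: it enforces bucket-owner ownership via `PutBucketOwnershipControls`; the bucket name is a placeholder.

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// BucketOwnerEnforced disables ACLs entirely; Rules is a required field,
	// so OwnershipControls.Validate rejects an empty configuration client-side.
	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{{
				ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```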
+func (s *OwnershipControls) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControls"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *OwnershipControls) SetRules(v []*OwnershipControlsRule) *OwnershipControls { + s.Rules = v + return s +} + +// The container element for an ownership control rule. +type OwnershipControlsRule struct { + _ struct{} `type:"structure"` + + // The container element for object ownership for a bucket's ownership controls. + // + // BucketOwnerPreferred - Objects uploaded to the bucket change ownership to + // the bucket owner if the objects are uploaded with the bucket-owner-full-control + // canned ACL. + // + // ObjectWriter - The uploading account will own the object if the object is + // uploaded with the bucket-owner-full-control canned ACL. + // + // BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer + // affect permissions. The bucket owner automatically owns and has full control + // over every object in the bucket. The bucket only accepts PUT requests that + // don't specify an ACL or specify bucket owner full control ACLs (such as the + // predefined bucket-owner-full-control canned ACL or a custom ACL in XML format + // that grants the same permissions). + // + // By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. + // We recommend keeping ACLs disabled, except in uncommon use cases where you + // must control access for each object individually. For more information about + // S3 Object Ownership, see Controlling ownership of objects and disabling ACLs + // for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. Directory buckets + // use the bucket owner enforced setting for S3 Object Ownership. + // + // ObjectOwnership is a required field + ObjectOwnership *string `type:"string" required:"true" enum:"ObjectOwnership"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControlsRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s OwnershipControlsRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OwnershipControlsRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OwnershipControlsRule"} + if s.ObjectOwnership == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectOwnership")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjectOwnership sets the ObjectOwnership field's value. 
+func (s *OwnershipControlsRule) SetObjectOwnership(v string) *OwnershipControlsRule { + s.ObjectOwnership = &v + return s +} + +// Container for Parquet. +type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ParquetInput) GoString() string { + return s.String() +} + +// Container for elements related to a part. +type Part struct { + _ struct{} `type:"structure"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `type:"string"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. 
+ PartNumber *int64 `type:"integer"` + + // Size in bytes of the uploaded part data. + Size *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Part) GoString() string { + return s.String() +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *Part) SetChecksumCRC32(v string) *Part { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *Part) SetChecksumCRC32C(v string) *Part { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *Part) SetChecksumSHA1(v string) *Part { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *Part) SetChecksumSHA256(v string) *Part { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// Amazon S3 keys for log objects are partitioned in the following format: +// +// [DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +// +// PartitionedPrefix defaults to EventTime delivery when server access logs +// are delivered. +type PartitionedPrefix struct { + _ struct{} `locationName:"PartitionedPrefix" type:"structure"` + + // Specifies the partition date source for the partitioned prefix. PartitionDateSource + // can be EventTime or DeliveryTime. + PartitionDateSource *string `type:"string" enum:"PartitionDateSource"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PartitionedPrefix) GoString() string { + return s.String() +} + +// SetPartitionDateSource sets the PartitionDateSource field's value. +func (s *PartitionedPrefix) SetPartitionDateSource(v string) *PartitionedPrefix { + s.PartitionDateSource = &v + return s +} + +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. 
+ // FALSE indicates that the bucket is not public. + IsPublic *bool `locationName:"IsPublic" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyStatus) GoString() string { + return s.String() +} + +// SetIsPublic sets the IsPublic field's value. +func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus { + s.IsPublic = &v + return s +} + +// This data type contains information about progress of an operation. +type Progress struct { + _ struct{} `type:"structure"` + + // The current number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The current number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The current number of object bytes scanned. + BytesScanned *int64 `type:"long"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Progress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Progress) GoString() string { + return s.String() +} + +// SetBytesProcessed sets the BytesProcessed field's value. +func (s *Progress) SetBytesProcessed(v int64) *Progress { + s.BytesProcessed = &v + return s +} + +// SetBytesReturned sets the BytesReturned field's value. +func (s *Progress) SetBytesReturned(v int64) *Progress { + s.BytesReturned = &v + return s +} + +// SetBytesScanned sets the BytesScanned field's value. +func (s *Progress) SetBytesScanned(v int64) *Progress { + s.BytesScanned = &v + return s +} + +// This data type contains information about the progress event of an operation. +type ProgressEvent struct { + _ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"` + + // The Progress event details. + Details *Progress `locationName:"Details" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProgressEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProgressEvent) GoString() string { + return s.String() +} + +// SetDetails sets the Details field's value. 
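The `PolicyStatus` type above surfaces through `GetBucketPolicyStatus`. A minimal sketch, illustrative only and not part of the vendored file (the bucket name is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
	})
	if err != nil {
		log.Fatal(err)
	}
	// PolicyStatus.IsPublic is the boolean documented above: true means the
	// bucket policy makes the bucket public.
	fmt.Println("public:", aws.BoolValue(out.PolicyStatus.IsPublic))
}
```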
+func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent { + s.Details = v + return s +} + +// The ProgressEvent is an event in the SelectObjectContentEventStream group of events. +func (s *ProgressEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *ProgressEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + if err := payloadUnmarshaler.UnmarshalPayload( + bytes.NewReader(msg.Payload), s, + ); err != nil { + return err + } + return nil +} + +// MarshalEvent marshals the type into a stream event value. This method +// should only be used internally within the SDK's EventStream handling. +func (s *ProgressEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) { + msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType)) + var buf bytes.Buffer + if err = pm.MarshalPayload(&buf, s); err != nil { + return eventstream.Message{}, err + } + msg.Payload = buf.Bytes() + return msg, err +} + +// The PublicAccessBlock configuration that you want to apply to this Amazon +// S3 bucket. You can enable the configuration options in any combination. For +// more information about when Amazon S3 considers a bucket or object public, +// see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) +// in the Amazon S3 User Guide. +type PublicAccessBlockConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 should block public access control lists (ACLs) + // for this bucket and objects in this bucket. Setting this element to TRUE + // causes the following behavior: + // + // * PUT Bucket ACL and PUT Object ACL calls fail if the specified ACL is + // public. + // + // * PUT Object calls fail if the request includes a public ACL. + // + // * PUT Bucket calls fail if the request includes a public ACL. + // + // Enabling this setting doesn't affect existing policies or ACLs. + BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should block public bucket policies for this + // bucket. Setting this element to TRUE causes Amazon S3 to reject calls to + // PUT Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. + BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // Amazon Web Service principals and authorized users within this account if + // the bucket has a public policy.
+ // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() +} + +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v + return s +} + +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v + return s +} + +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} + +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} + +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `locationName:"PutBucketAccelerateConfigurationRequest" type:"structure" payload:"AccelerateConfiguration"` + + // Container for setting the transfer acceleration state. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket for which the accelerate configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request + // payload checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketAccelerateConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketAccelerateConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAccelerateConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAccelerateConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketAccelerateConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAccelerateConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
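To illustrate the input type above, a minimal sketch (not part of the vendored file; the bucket name is a placeholder, and the client is assumed to use default credentials) that enables Transfer Acceleration:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Both Bucket and AccelerateConfiguration are required; the Validate
	// method above rejects the request client-side if either is missing.
	_, err := svc.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder bucket
		AccelerateConfiguration: &s3.AccelerateConfiguration{
			Status: aws.String(s3.BucketAccelerateStatusEnabled),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```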
+func (s PutBucketAccelerateConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketAclInput struct { + _ struct{} `locationName:"PutBucketAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket to which to apply the ACL. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + + // The AWS SDK for Go v1 does not support automatically computing the request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket.
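+ //
+ // Illustrative note (grantee value assumed): the x-amz-grant-* headers take
+ // grantees in forms such as id="...", uri="...", or emailAddress="...", e.g.:
+ //
+ //	input.SetGrantFullControl(`id="79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be"`)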
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create new objects in the bucket. + // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketAclInput) SetChecksumAlgorithm(v string) *PutBucketAclInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAclInput) SetExpectedBucketOwner(v string) *PutBucketAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. 
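+//
+// All setters on this type return the receiver, so calls can be chained; a
+// minimal sketch (bucket name assumed):
+//
+//	input := (&PutBucketAclInput{}).
+//		SetBucket("example-bucket").
+//		SetACL(BucketCannedACLPrivate)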
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +func (s *PutBucketAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `locationName:"PutBucketAnalyticsConfigurationRequest" type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The name of the bucket where the analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID that identifies the analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive".
+func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketAnalyticsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +func (s *PutBucketAnalyticsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketAnalyticsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketAnalyticsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketCorsInput struct { + _ struct{} `locationName:"PutBucketCorsRequest" type:"structure" payload:"CORSConfiguration"` + + // Specifies the bucket impacted by the CORS configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // S3 User Guide. + // + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + + // The AWS SDK for Go v1 does not support automatically computing the request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive".
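+//
+// For orientation, a minimal sketch of assembling a PutBucketCorsInput
+// (bucket name and origin are assumptions):
+//
+//	input := &PutBucketCorsInput{
+//		Bucket: aws.String("example-bucket"),
+//		CORSConfiguration: &CORSConfiguration{
+//			CORSRules: []*CORSRule{{
+//				AllowedMethods: []*string{aws.String("GET")},
+//				AllowedOrigins: []*string{aws.String("https://example.com")},
+//			}},
+//		},
+//	}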
+func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. +func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketCorsInput) SetChecksumAlgorithm(v string) *PutBucketCorsInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketCorsInput) SetExpectedBucketOwner(v string) *PutBucketCorsInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketCorsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketCorsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketCorsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +type PutBucketEncryptionInput struct { + _ struct{} `locationName:"PutBucketEncryptionRequest" type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies default encryption for a bucket using server-side encryption with + // different key options. 
By default, all buckets have a default encryption + // configuration that uses server-side encryption with Amazon S3 managed keys + // (SSE-S3). You can optionally configure default encryption for a bucket by + // using server-side encryption with an Amazon Web Services KMS key (SSE-KMS) + // or a customer-provided key (SSE-C). For information about the bucket default + // encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Specifies the default server-side-encryption configuration. + // + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
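+//
+// A minimal sketch of a value that passes this validation (bucket name
+// assumed):
+//
+//	input := &PutBucketEncryptionInput{
+//		Bucket: aws.String("example-bucket"),
+//		ServerSideEncryptionConfiguration: &ServerSideEncryptionConfiguration{
+//			Rules: []*ServerSideEncryptionRule{{
+//				ApplyServerSideEncryptionByDefault: &ServerSideEncryptionByDefault{
+//					SSEAlgorithm: aws.String(ServerSideEncryptionAes256),
+//				},
+//			}},
+//		},
+//	}
+//	// input.Validate() returns nil for the value above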
+func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketEncryptionInput) SetChecksumAlgorithm(v string) *PutBucketEncryptionInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketEncryptionInput) SetExpectedBucketOwner(v string) *PutBucketEncryptionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +func (s *PutBucketEncryptionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketEncryptionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketEncryptionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +type PutBucketIntelligentTieringConfigurationInput struct { + _ struct{} `locationName:"PutBucketIntelligentTieringConfigurationRequest" type:"structure" payload:"IntelligentTieringConfiguration"` + + // The name of the Amazon S3 bucket whose configuration you want to modify or + // retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the S3 Intelligent-Tiering configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Container for S3 Intelligent-Tiering configuration. + // + // IntelligentTieringConfiguration is a required field + IntelligentTieringConfiguration *IntelligentTieringConfiguration `locationName:"IntelligentTieringConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketIntelligentTieringConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketIntelligentTieringConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IntelligentTieringConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("IntelligentTieringConfiguration")) + } + if s.IntelligentTieringConfiguration != nil { + if err := s.IntelligentTieringConfiguration.Validate(); err != nil { + invalidParams.AddNested("IntelligentTieringConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetBucket(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetId sets the Id field's value. +func (s *PutBucketIntelligentTieringConfigurationInput) SetId(v string) *PutBucketIntelligentTieringConfigurationInput { + s.Id = &v + return s +} + +// SetIntelligentTieringConfiguration sets the IntelligentTieringConfiguration field's value. 
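+//
+// An illustrative chain (identifiers assumed; the tiering values mirror the
+// S3 Intelligent-Tiering minimums):
+//
+//	input := (&PutBucketIntelligentTieringConfigurationInput{}).
+//		SetBucket("example-bucket").
+//		SetId("example-config").
+//		SetIntelligentTieringConfiguration(&IntelligentTieringConfiguration{
+//			Id:     aws.String("example-config"),
+//			Status: aws.String(IntelligentTieringStatusEnabled),
+//			Tierings: []*Tiering{{
+//				AccessTier: aws.String(IntelligentTieringAccessTierArchiveAccess),
+//				Days:       aws.Int64(90),
+//			}},
+//		})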
+func (s *PutBucketIntelligentTieringConfigurationInput) SetIntelligentTieringConfiguration(v *IntelligentTieringConfiguration) *PutBucketIntelligentTieringConfigurationInput { + s.IntelligentTieringConfiguration = v + return s +} + +func (s *PutBucketIntelligentTieringConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketIntelligentTieringConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketIntelligentTieringConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketIntelligentTieringConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketIntelligentTieringConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketInventoryConfigurationInput struct { + _ struct{} `locationName:"PutBucketInventoryConfigurationRequest" type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketInventoryConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketInventoryConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +func (s *PutBucketInventoryConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketInventoryConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketInventoryConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `locationName:"PutBucketLifecycleConfigurationRequest" type:"structure" payload:"LifecycleConfiguration"` + + // The name of the bucket for which to set the configuration. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for lifecycle rules. You can add as many as 1,000 rules. + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
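+//
+// A minimal sketch of a configuration that passes this validation (rule ID
+// and bucket name assumed):
+//
+//	input := &PutBucketLifecycleConfigurationInput{
+//		Bucket: aws.String("example-bucket"),
+//		LifecycleConfiguration: &BucketLifecycleConfiguration{
+//			Rules: []*LifecycleRule{{
+//				ID:         aws.String("expire-old-objects"),
+//				Status:     aws.String(ExpirationStatusEnabled),
+//				Expiration: &LifecycleExpiration{Days: aws.Int64(365)},
+//			}},
+//		},
+//	}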
+func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleConfigurationInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketLifecycleInput struct { + _ struct{} `locationName:"PutBucketLifecycleRequest" type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for lifecycle rules. You can add as many as 1,000 rules. + // + // For more information, see Managing your storage lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html) + // in the Amazon S3 User Guide. + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
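+//
+// PutBucketLifecycle is the older form of the lifecycle API; note that only
+// Bucket is required here, so the following sketch (bucket name assumed)
+// already passes validation:
+//
+//	input := &PutBucketLifecycleInput{Bucket: aws.String("example-bucket")}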
+func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLifecycleInput) SetChecksumAlgorithm(v string) *PutBucketLifecycleInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLifecycleInput) SetExpectedBucketOwner(v string) *PutBucketLifecycleInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. +func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +func (s *PutBucketLifecycleInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLifecycleInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLifecycleInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +type PutBucketLoggingInput struct { + _ struct{} `locationName:"PutBucketLoggingRequest" type:"structure" payload:"BucketLoggingStatus"` + + // The name of the bucket for which to set the logging parameters. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for logging status information. 
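+ //
+ // For example (target bucket and prefix assumed), enabling logging looks like:
+ //
+ //	&BucketLoggingStatus{LoggingEnabled: &LoggingEnabled{
+ //		TargetBucket: aws.String("example-log-bucket"),
+ //		TargetPrefix: aws.String("access-logs/"),
+ //	}}
+ //
+ // while an empty &BucketLoggingStatus{} turns logging off.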
+ // + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatically computing the request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value.
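+//
+// The setters return the receiver and can be chained, e.g. (names assumed):
+//
+//	input := (&PutBucketLoggingInput{}).
+//		SetBucket("example-bucket").
+//		SetBucketLoggingStatus(&BucketLoggingStatus{})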
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketLoggingInput) SetChecksumAlgorithm(v string) *PutBucketLoggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketLoggingInput) SetExpectedBucketOwner(v string) *PutBucketLoggingInput { + s.ExpectedBucketOwner = &v + return s +} + +func (s *PutBucketLoggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketLoggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketLoggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +type PutBucketMetricsConfigurationInput struct { + _ struct{} `locationName:"PutBucketMetricsConfigurationRequest" type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The ID used to identify the metrics configuration. The ID has a 64 character + // limit and can only contain letters, numbers, periods, dashes, and underscores. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
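+ //
+ // For example (ID and prefix assumed), a configuration scoped to one prefix:
+ //
+ //	&MetricsConfiguration{
+ //		Id:     aws.String("example-metrics"),
+ //		Filter: &MetricsFilter{Prefix: aws.String("photos/")},
+ //	}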
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketMetricsConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketMetricsConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +func (s *PutBucketMetricsConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketMetricsConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
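+
+// For illustration, a minimal sketch of setting a bucket-wide metrics
+// configuration with the v1 client; the bucket name and the "EntireBucket"
+// configuration ID are assumptions. Note that the ID travels twice, once in
+// the query string and once inside the body, and the two values must match.
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		Id:     aws.String("EntireBucket"),
+//		MetricsConfiguration: &s3.MetricsConfiguration{
+//			Id: aws.String("EntireBucket"), // must equal the Id above
+//		},
+//	})
+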
+func (s PutBucketMetricsConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationConfigurationInput struct { + _ struct{} `locationName:"PutBucketNotificationConfigurationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True + // or false value. + SkipDestinationValidation *bool `location:"header" locationName:"x-amz-skip-destination-validation" type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
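+
+// For illustration, a minimal sketch of pointing s3:ObjectCreated:* events at
+// an SNS topic with the v1 client. The bucket name and topic ARN are
+// assumptions; EventS3ObjectCreated is the package constant for
+// "s3:ObjectCreated:*".
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		NotificationConfiguration: &s3.NotificationConfiguration{
+//			TopicConfigurations: []*s3.TopicConfiguration{{
+//				TopicArn: aws.String("arn:aws:sns:us-east-1:111122223333:example-topic"), // hypothetical
+//				Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
+//			}},
+//		},
+//	})
+//
+// Sending an empty NotificationConfiguration turns notifications off for the
+// bucket, per the field documentation above.
+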
+func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationConfigurationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +// SetSkipDestinationValidation sets the SkipDestinationValidation field's value. +func (s *PutBucketNotificationConfigurationInput) SetSkipDestinationValidation(v bool) *PutBucketNotificationConfigurationInput { + s.SkipDestinationValidation = &v + return s +} + +func (s *PutBucketNotificationConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +type PutBucketNotificationInput struct { + _ struct{} `locationName:"PutBucketNotificationRequest" type:"structure" payload:"NotificationConfiguration"` + + // The name of the bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The container for the configuration. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketNotificationInput) SetChecksumAlgorithm(v string) *PutBucketNotificationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketNotificationInput) SetExpectedBucketOwner(v string) *PutBucketNotificationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +func (s *PutBucketNotificationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketNotificationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketNotificationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +type PutBucketOwnershipControlsInput struct { + _ struct{} `locationName:"PutBucketOwnershipControlsRequest" type:"structure" payload:"OwnershipControls"` + + // The name of the Amazon S3 bucket whose OwnershipControls you want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) + // that you want to apply to this Amazon S3 bucket. + // + // OwnershipControls is a required field + OwnershipControls *OwnershipControls `locationName:"OwnershipControls" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketOwnershipControlsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketOwnershipControlsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.OwnershipControls == nil { + invalidParams.Add(request.NewErrParamRequired("OwnershipControls")) + } + if s.OwnershipControls != nil { + if err := s.OwnershipControls.Validate(); err != nil { + invalidParams.AddNested("OwnershipControls", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketOwnershipControlsInput) SetBucket(v string) *PutBucketOwnershipControlsInput { + s.Bucket = &v + return s +} + +func (s *PutBucketOwnershipControlsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketOwnershipControlsInput) SetExpectedBucketOwner(v string) *PutBucketOwnershipControlsInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetOwnershipControls sets the OwnershipControls field's value. +func (s *PutBucketOwnershipControlsInput) SetOwnershipControls(v *OwnershipControls) *PutBucketOwnershipControlsInput { + s.OwnershipControls = v + return s +} + +func (s *PutBucketOwnershipControlsInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketOwnershipControlsInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. 
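+
+// For illustration, a minimal sketch of enforcing bucket-owner ownership
+// (which also disables ACLs on the bucket) with the v1 client; the bucket
+// name is an assumption. ObjectOwnershipBucketOwnerEnforced is the package
+// constant for "BucketOwnerEnforced".
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		OwnershipControls: &s3.OwnershipControls{
+//			Rules: []*s3.OwnershipControlsRule{{
+//				ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced),
+//			}},
+//		},
+//	})
+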
+func (s PutBucketOwnershipControlsInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketOwnershipControlsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketOwnershipControlsOutput) GoString() string { + return s.String() +} + +type PutBucketPolicyInput struct { + _ struct{} `locationName:"PutBucketPolicyRequest" type:"structure" payload:"Policy"` + + // The name of the bucket. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use path-style requests in the format https://s3express-control.region_code.amazonaws.com/bucket-name + // . Virtual-hosted-style requests aren't supported. Directory bucket names + // must be unique in the chosen Availability Zone. Bucket names must also follow + // the format bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). + // For information about bucket naming restrictions, see Directory bucket naming + // rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. 
+ // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + // + // This functionality is not supported for directory buckets. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + // + // For directory buckets, this header is not supported in this API operation. + // If you specify this header, the request fails with the HTTP status code 501 + // Not Implemented. + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The bucket policy as a JSON document. + // + // For directory buckets, the only IAM action supported in the bucket policy + // is s3express:CreateSession. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketPolicyInput) SetChecksumAlgorithm(v string) *PutBucketPolicyInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
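+
+// For illustration, a minimal sketch of attaching a public-read policy with
+// the v1 client; the bucket name and the policy document itself are
+// assumptions. The policy is sent as a plain JSON string in the request
+// payload.
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*","Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`
+//	_, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		Policy: aws.String(policy),
+//	})
+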
+func (s *PutBucketPolicyInput) SetExpectedBucketOwner(v string) *PutBucketPolicyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +func (s *PutBucketPolicyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketPolicyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketPolicyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +type PutBucketReplicationInput struct { + _ struct{} `locationName:"PutBucketReplicationRequest" type:"structure" payload:"ReplicationConfiguration"` + + // The name of the bucket + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketReplicationInput) SetChecksumAlgorithm(v string) *PutBucketReplicationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketReplicationInput) SetExpectedBucketOwner(v string) *PutBucketReplicationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// SetToken sets the Token field's value. 
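+
+// For illustration, a minimal sketch using the legacy Prefix-style rule shape
+// with the v1 client. The bucket names and IAM role ARN are assumptions, and
+// both source and destination buckets must already have versioning enabled.
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//		Bucket: aws.String("example-source-bucket"), // hypothetical
+//		ReplicationConfiguration: &s3.ReplicationConfiguration{
+//			Role: aws.String("arn:aws:iam::111122223333:role/example-replication-role"), // hypothetical
+//			Rules: []*s3.ReplicationRule{{
+//				Prefix:      aws.String(""), // replicate the whole bucket
+//				Status:      aws.String(s3.ReplicationRuleStatusEnabled),
+//				Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::example-destination-bucket")},
+//			}},
+//		},
+//	})
+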
+func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + +func (s *PutBucketReplicationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketReplicationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketReplicationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +type PutBucketRequestPaymentInput struct { + _ struct{} `locationName:"PutBucketRequestPaymentRequest" type:"structure" payload:"RequestPaymentConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. 
If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for Payer. + // + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketRequestPaymentInput) SetChecksumAlgorithm(v string) *PutBucketRequestPaymentInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketRequestPaymentInput) SetExpectedBucketOwner(v string) *PutBucketRequestPaymentInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. +func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +func (s *PutBucketRequestPaymentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketRequestPaymentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. 
This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketRequestPaymentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +type PutBucketTaggingInput struct { + _ struct{} `locationName:"PutBucketTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the TagSet and Tag elements. + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketTaggingInput) SetChecksumAlgorithm(v string) *PutBucketTaggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketTaggingInput) SetExpectedBucketOwner(v string) *PutBucketTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +func (s *PutBucketTaggingInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketTaggingInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketTaggingInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +type PutBucketVersioningInput struct { + _ struct{} `locationName:"PutBucketVersioningRequest" type:"structure" payload:"VersioningConfiguration"` + + // The bucket name. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Container for setting the versioning state. + // + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
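+
+// For illustration, a minimal sketch of enabling versioning with the v1
+// client; the bucket name is an assumption. The x-amz-mfa header is only
+// required when the configuration also changes the MFADelete state.
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		VersioningConfiguration: &s3.VersioningConfiguration{
+//			Status: aws.String(s3.BucketVersioningStatusEnabled),
+//		},
+//	})
+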
+func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketVersioningInput) SetChecksumAlgorithm(v string) *PutBucketVersioningInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketVersioningInput) SetExpectedBucketOwner(v string) *PutBucketVersioningInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +func (s *PutBucketVersioningInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketVersioningInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketVersioningInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +type PutBucketWebsiteInput struct { + _ struct{} `locationName:"PutBucketWebsiteRequest" type:"structure" payload:"WebsiteConfiguration"` + + // The bucket name. 
+ // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Container for the request. + // + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
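+
+// For illustration, a minimal sketch of configuring static website hosting
+// with the v1 client; the bucket name and document names are assumptions.
+//
+//	svc := s3.New(session.Must(session.NewSession()))
+//	_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//		Bucket: aws.String("example-bucket"), // hypothetical
+//		WebsiteConfiguration: &s3.WebsiteConfiguration{
+//			IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//			ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//		},
+//	})
+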
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutBucketWebsiteInput) SetChecksumAlgorithm(v string) *PutBucketWebsiteInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutBucketWebsiteInput) SetExpectedBucketOwner(v string) *PutBucketWebsiteInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +func (s *PutBucketWebsiteInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutBucketWebsiteInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutBucketWebsiteInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +type PutObjectAclInput struct { + _ struct{} `locationName:"PutObjectAclRequest" type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL). + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The bucket name that contains the object to which you want to attach the + // ACL. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. 
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+	// must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+	// takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+	// When you use this action with S3 on Outposts through the Amazon Web Services
+	// SDKs, you provide the Outposts access point ARN in place of the bucket name.
+	// For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when you
+	// use the SDK. This header will not provide any additional functionality if
+	// you don't use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If a
+	// value is specified for this parameter, the matching algorithm's checksum
+	// member must be populated with the algorithm's checksum of the request payload.
+	//
+	// The SDK will automatically compute the Content-MD5 checksum for this operation.
+	// The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+	// to be used.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Allows grantee the read, write, read ACP, and write ACP permissions on the
+	// bucket.
+	//
+	// This functionality is not supported for Amazon S3 on Outposts.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to list the objects in the bucket.
+	//
+	// This functionality is not supported for Amazon S3 on Outposts.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the bucket ACL.
+	//
+	// This functionality is not supported for Amazon S3 on Outposts.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to create new objects in the bucket.
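+	//
+	// As an illustrative sketch (not generated documentation), grantees for this
+	// header can be identified by canonical user ID or by predefined group URI;
+	// the ID below is a placeholder:
+	//
+	//	x-amz-grant-write: id="111122223333", uri="http://acs.amazonaws.com/groups/s3/LogDelivery"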
+ // + // For the bucket and object owners of existing objects, also allows deletions + // and overwrites of those objects. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + // + // This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key for which the PUT action was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Version ID used to reference a specific version of the object. + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectAclInput) SetChecksumAlgorithm(v string) *PutObjectAclInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectAclInput) SetExpectedBucketOwner(v string) *PutObjectAclInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +func (s *PutObjectAclInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectAclInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectAclInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +type PutObjectInput struct { + _ struct{} `locationName:"PutObjectRequest" type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. For more information, see Canned ACL + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL) + // in the Amazon S3 User Guide. + // + // When adding a new object, you can use headers to grant ACL-based permissions + // to individual Amazon Web Services accounts or to predefined groups defined + // by Amazon S3. These permissions are then added to the ACL on the object. + // By default, all objects are private. Only the owner has full access control. + // For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) + // and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html) + // in the Amazon S3 User Guide. + // + // If the bucket that you're uploading objects to uses the bucket owner enforced + // setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + // Buckets that use this setting only accept PUT requests that don't specify + // an ACL or PUT requests that specify bucket owner full control ACLs, such + // as the bucket-owner-full-control canned ACL or an equivalent form of this + // ACL expressed in the XML format. PUT requests that contain other ACLs (for + // example, custom grants to certain Amazon Web Services accounts) fail and + // return a 400 error with the error code AccessControlListNotSupported. For + // more information, see Controlling ownership of objects and disabling ACLs + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) + // in the Amazon S3 User Guide. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The bucket name to which the PUT action was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. 
The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using Key Management Service (KMS) keys (SSE-KMS). + // Setting this header to true causes Amazon S3 to use an S3 Bucket Key for + // object encryption with SSE-KMS. + // + // Specifying this header with a PUT action doesn’t affect bucket-level settings + // for S3 Bucket Key. + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Can be used to specify caching behavior along the request/reply chain. For + // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 + // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon + // S3 fails the request with the HTTP status code 400 Bad Request. + // + // For the x-amz-checksum-algorithm header, replace algorithm with the supported + // algorithm from the following list: + // + // * CRC32 + // + // * CRC32C + // + // * SHA1 + // + // * SHA256 + // + // For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If the individual checksum value you provide through x-amz-checksum-algorithm + // doesn't match the checksum algorithm you set through x-amz-sdk-checksum-algorithm, + // Amazon S3 ignores any provided ChecksumAlgorithm parameter and uses the checksum + // algorithm that matches the provided value in x-amz-checksum-algorithm . + // + // For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the + // default checksum algorithm that's used for performance. 
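+	//
+	// As an illustrative sketch (not part of the generated documentation), a
+	// caller on this SDK could precompute a SHA-256 checksum and supply it
+	// alongside the matching algorithm; the bucket and key names below are
+	// placeholders:
+	//
+	//	sum := sha256.Sum256(payload) // "crypto/sha256"
+	//	input := &s3.PutObjectInput{
+	//		Bucket:            aws.String("example-bucket"),
+	//		Key:               aws.String("example-key"),
+	//		Body:              bytes.NewReader(payload),
+	//		ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmSha256),
+	//		ChecksumSHA256:    aws.String(base64.StdEncoding.EncodeToString(sum[:])),
+	//	}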
+ // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32 checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 32-bit CRC32C checksum of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 160-bit SHA-1 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This header specifies + // the base64-encoded, 256-bit SHA-256 digest of the object. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. For more information, + // see https://www.rfc-editor.org/rfc/rfc6266#section-4 (https://www.rfc-editor.org/rfc/rfc6266#section-4). + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding + // (https://www.rfc-editor.org/rfc/rfc9110.html#field.content-encoding). + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. 
For more information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length + // (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-length). + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the message (without the headers) + // according to RFC 1864. This header can be used as a message integrity check + // to verify that the data is the same data that was originally sent. Although + // it is optional, we recommend using the Content-MD5 mechanism as an end-to-end + // integrity check. For more information about REST request authentication, + // see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html). + // + // The Content-MD5 header is required for any request to upload an object with + // a retention period configured using Amazon S3 Object Lock. For more information + // about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // A standard MIME type describing the format of the contents. For more information, + // see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type (https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type). + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The date and time at which the object is no longer cacheable. For more information, + // see https://www.rfc-editor.org/rfc/rfc7234#section-5.3 (https://www.rfc-editor.org/rfc/rfc7234#section-5.3). + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + // + // * This functionality is not supported for directory buckets. + // + // * This functionality is not supported for Amazon S3 on Outposts. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Object key for which the PUT action was initiated. 
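+	//
+	// For example (illustrative), the key "photos/2024/cat.jpg" stores the object
+	// under the photos/2024/ prefix of the bucket.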
+ // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // Specifies whether a legal hold will be applied to this object. For more information + // about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The Object Lock mode that you want to apply to this object. + // + // This functionality is not supported for directory buckets. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want this object's Object Lock to expire. Must + // be formatted as a timestamp parameter. + // + // This functionality is not supported for directory buckets. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. 
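+	//
+	// A minimal sketch of the digest described above, assuming the raw key bytes
+	// are already held in sseKey ([]byte):
+	//
+	//	digest := md5.Sum(sseKey) // "crypto/md5", "encoding/base64"
+	//	input.SSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(digest[:]))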
+	SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+	// Specifies the Amazon Web Services KMS Encryption Context to use for object
+	// encryption. The value of this header is a base64-encoded UTF-8 string holding
+	// JSON with the encryption context key-value pairs. This value is stored as
+	// object metadata and automatically gets passed on to Amazon Web Services KMS
+	// for future GetObject or CopyObject operations on this object. This value
+	// must be explicitly added during CopyObject operations.
+	//
+	// This functionality is not supported for directory buckets.
+	//
+	// SSEKMSEncryptionContext is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by PutObjectInput's
+	// String and GoString methods.
+	SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
+	// If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse,
+	// this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key Management
+	// Service (KMS) symmetric encryption customer managed key that was used for
+	// the object. If you specify x-amz-server-side-encryption:aws:kms or x-amz-server-side-encryption:aws:kms:dsse,
+	// but do not provide x-amz-server-side-encryption-aws-kms-key-id, Amazon S3
+	// uses the Amazon Web Services managed key (aws/s3) to protect the data. If
+	// the KMS key does not exist in the same account that's issuing the command,
+	// you must use the full ARN and not just the ID.
+	//
+	// This functionality is not supported for directory buckets.
+	//
+	// SSEKMSKeyId is a sensitive parameter and its value will be
+	// replaced with "sensitive" in string returned by PutObjectInput's
+	// String and GoString methods.
+	SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+	// The server-side encryption algorithm that was used when you store this object
+	// in Amazon S3 (for example, AES256, aws:kms, aws:kms:dsse).
+	//
+	// General purpose buckets - You have four mutually exclusive options to protect
+	// data using server-side encryption in Amazon S3, depending on how you choose
+	// to manage the encryption keys. Specifically, the encryption key options are
+	// Amazon S3 managed keys (SSE-S3), Amazon Web Services KMS keys (SSE-KMS or
+	// DSSE-KMS), and customer-provided keys (SSE-C). Amazon S3 encrypts data with
+	// server-side encryption by using Amazon S3 managed keys (SSE-S3) by default.
+	// You can optionally tell Amazon S3 to encrypt data at rest by using server-side
+	// encryption with other key options. For more information, see Using Server-Side
+	// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Directory buckets - For directory buckets, only the server-side encryption
+	// with Amazon S3 managed keys (SSE-S3) (AES256) value is supported.
+	ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+	// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
+	// objects. The STANDARD storage class provides high durability and high availability.
+	// Depending on performance needs, you can specify a different Storage Class.
+ // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // in the Amazon S3 User Guide. + // + // * For directory buckets, only the S3 Express One Zone storage class is + // supported to store newly created objects. + // + // * Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") + // + // This functionality is not supported for directory buckets. + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. For information about object + // metadata, see Object Key and Metadata (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html) + // in the Amazon S3 User Guide. + // + // In the following example, the request header sets the redirect to an object + // (anotherPage.html) in the same bucket: + // + // x-amz-website-redirect-location: /anotherPage.html + // + // In the following example, the request header sets the object redirect to + // another website: + // + // x-amz-website-redirect-location: http://www.example.com/ + // + // For more information about website hosting in Amazon S3, see Hosting Websites + // on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) + // and How to Configure Website Page Redirects (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. 
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectInput) SetBucketKeyEnabled(v bool) *PutObjectInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectInput) SetChecksumAlgorithm(v string) *PutObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectInput) SetChecksumCRC32(v string) *PutObjectInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectInput) SetChecksumCRC32C(v string) *PutObjectInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *PutObjectInput) SetChecksumSHA1(v string) *PutObjectInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectInput) SetChecksumSHA256(v string) *PutObjectInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectInput) SetExpectedBucketOwner(v string) *PutObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. 
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +func (s *PutObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. 
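+// For example (illustrative), calling it with v = "example-bucket" yields a
+// copy whose Bucket is "example-bucket" while the receiver keeps its original
+// ARN value.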
+// Note that original input is not modified. +func (s PutObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLegalHoldInput struct { + _ struct{} `locationName:"PutObjectLegalHoldRequest" type:"structure" payload:"LegalHold"` + + // The bucket name containing the object that you want to place a legal hold + // on. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. + // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object that you want to place a legal hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Container element for the legal hold configuration you want to apply to the + // specified object. + LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. 
If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID of the object that you want to place a legal hold on. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectLegalHoldInput) SetChecksumAlgorithm(v string) *PutObjectLegalHoldInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLegalHoldInput) SetExpectedBucketOwner(v string) *PutObjectLegalHoldInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v + return s +} + +func (s *PutObjectLegalHoldInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLegalHoldInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLegalHoldInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v + return s +} + +type PutObjectLockConfigurationInput struct { + _ struct{} `locationName:"PutObjectLockConfigurationRequest" type:"structure" payload:"ObjectLockConfiguration"` + + // The bucket whose Object Lock configuration you want to create or replace. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates the algorithm used to create the checksum for the object when you + // use the SDK. This header will not provide any additional functionality if + // you don't use the SDK. When you send this header, there must be a corresponding + // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the + // request with the HTTP status code 400 Bad Request. For more information, + // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm + // parameter. + // + // The AWS SDK for Go v1 does not support automatic computing request payload + // checksum. This feature is available in the AWS SDK for Go v2. If a value + // is specified for this parameter, the matching algorithm's checksum member + // must be populated with the algorithm's checksum of the request payload. 
+ // + // The SDK will automatically compute the Content-MD5 checksum for this operation. + // The AWS SDK for Go v2 allows you to configure alternative checksum algorithm + // to be used. + ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The Object Lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // A token to allow Object Lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
+func (s *PutObjectLockConfigurationInput) SetChecksumAlgorithm(v string) *PutObjectLockConfigurationInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectLockConfigurationInput) SetExpectedBucketOwner(v string) *PutObjectLockConfigurationInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v + return s +} + +// SetToken sets the Token field's value. +func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v + return s +} + +func (s *PutObjectLockConfigurationInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectLockConfigurationInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectLockConfigurationInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectLockConfigurationOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v + return s +} + +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
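+ //
+ // Editor's sketch (illustrative; svc and input stand in for a real *s3.S3
+ // client and request, not generated documentation):
+ //
+ //    out, err := svc.PutObject(input)
+ //    if err == nil && aws.BoolValue(out.BucketKeyEnabled) {
+ //        // the object was encrypted using an S3 Bucket Key (SSE-KMS)
+ //    }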
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Entity tag for the uploaded object. + // + // General purpose buckets - To ensure that data is not corrupted traversing + // the network, for objects where the ETag is the MD5 digest of the object, + // you can calculate the MD5 while putting an object to Amazon S3 and compare + // the returned ETag to the calculated MD5 value. + // + // Directory buckets - The ETag for the object in a directory bucket isn't the + // MD5 digest of the object. 
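+ //
+ // Editor's sketch (illustrative; out and body are assumed values): for a
+ // single-part upload to a general purpose bucket, the MD5 comparison described
+ // above might look like:
+ //
+ //    sum := md5.Sum(body) // crypto/md5
+ //    match := aws.StringValue(out.ETag) == `"`+hex.EncodeToString(sum[:])+`"`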
+ ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the expiration is configured for the object (see PutBucketLifecycleConfiguration + // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html)) + // in the Amazon S3 User Guide, the response includes this header. It includes + // the expiry-date and rule-id key-value pairs that provide information about + // object expiration. The value of the rule-id is URL-encoded. + // + // This functionality is not supported for directory buckets. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the Amazon Web Services KMS Encryption Context to use + // for object encryption. The value of this header is a base64-encoded UTF-8 + // string holding JSON with the encryption context key-value pairs. This value + // is stored as object metadata and automatically gets passed on to Amazon Web + // Services KMS for future GetObject or CopyObject operations on this object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSEncryptionContext is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse, + // this header indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by PutObjectOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms, aws:kms:dsse). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. 
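+ //
+ // Editor's note (illustrative, not generated documentation): the value can be
+ // compared against the ServerSideEncryption enum constants, e.g.
+ //
+ //    if aws.StringValue(out.ServerSideEncryption) == s3.ServerSideEncryptionAwsKms {
+ //        // the object was stored with SSE-KMS
+ //    }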
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the object. + // + // If you enable versioning for a bucket, Amazon S3 automatically generates + // a unique version ID for the object being stored. Amazon S3 returns this ID + // in the response. When you enable versioning for a bucket, if Amazon S3 receives + // multiple write requests for the same object simultaneously, it stores all + // of the objects. For more information about versioning, see Adding Objects + // to Versioning-Enabled Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html) + // in the Amazon S3 User Guide. For information about returning the versioning + // state of a bucket, see GetBucketVersioning (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). + // + // This functionality is not supported for directory buckets. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *PutObjectOutput) SetChecksumCRC32(v string) *PutObjectOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *PutObjectOutput) SetChecksumCRC32C(v string) *PutObjectOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *PutObjectOutput) SetChecksumSHA1(v string) *PutObjectOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *PutObjectOutput) SetChecksumSHA256(v string) *PutObjectOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. 
+func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput {
+ s.SSEKMSEncryptionContext = &v
+ return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
+ s.SSEKMSKeyId = &v
+ return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+ s.ServerSideEncryption = &v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+ s.VersionId = &v
+ return s
+}
+
+type PutObjectRetentionInput struct {
+ _ struct{} `locationName:"PutObjectRetentionRequest" type:"structure" payload:"Retention"`
+
+ // The bucket name that contains the object you want to apply this Object Retention
+ // configuration to.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates whether this action should bypass Governance-mode restrictions.
+ BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum
+ // member must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
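+ //
+ // Editor's sketch (the account ID below is a placeholder, not generated
+ // documentation):
+ //
+ //    input.SetExpectedBucketOwner("111122223333")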
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The container element for the Object Retention configuration. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The version ID for the object that you want to apply this Object Retention + // configuration to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { + s.Bucket = &v + return s +} + +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v + return s +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. 
+func (s *PutObjectRetentionInput) SetChecksumAlgorithm(v string) *PutObjectRetentionInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectRetentionInput) SetExpectedBucketOwner(v string) *PutObjectRetentionInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +func (s *PutObjectRetentionInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutObjectRetentionInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutObjectRetentionInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `locationName:"PutObjectTaggingRequest" type:"structure" payload:"Tagging"` + + // The bucket name containing the object. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. 
When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum
+ // member must be populated with the algorithm's checksum of the request payload.
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // Name of the object key.
+ //
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that they will be charged for the request.
+ // Bucket owners need not specify this parameter in their requests. If either
+ // the source or destination S3 bucket has Requester Pays enabled, the requester
+ // will pay for corresponding charges to copy the object.
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for the TagSet and Tag elements + // + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The versionId of the object that the tag-set will be added to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *PutObjectTaggingInput) SetChecksumAlgorithm(v string) *PutObjectTaggingInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *PutObjectTaggingInput) SetExpectedBucketOwner(v string) *PutObjectTaggingInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectTaggingInput) SetRequestPayer(v string) *PutObjectTaggingInput { + s.RequestPayer = &v + return s +} + +// SetTagging sets the Tagging field's value. 
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+ s.Tagging = v
+ return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+ s.VersionId = &v
+ return s
+}
+
+func (s *PutObjectTaggingInput) getEndpointARN() (arn.Resource, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ return parseEndpointARN(*s.Bucket)
+}
+
+func (s *PutObjectTaggingInput) hasEndpointARN() bool {
+ if s.Bucket == nil {
+ return false
+ }
+ return arn.IsARN(*s.Bucket)
+}
+
+// updateArnableField updates the value of the input field that
+// takes an ARN as an input. This method is useful to backfill
+// the parsed resource name from ARN into the input member.
+// It returns a pointer to a modified copy of input and an error.
+// Note that original input is not modified.
+func (s PutObjectTaggingInput) updateArnableField(v string) (interface{}, error) {
+ if s.Bucket == nil {
+ return nil, fmt.Errorf("member Bucket is nil")
+ }
+ s.Bucket = aws.String(v)
+ return &s, nil
+}
+
+type PutObjectTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The versionId of the object the tag-set was added to.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutObjectTaggingOutput) GoString() string {
+ return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+ s.VersionId = &v
+ return s
+}
+
+type PutPublicAccessBlockInput struct {
+ _ struct{} `locationName:"PutPublicAccessBlockRequest" type:"structure" payload:"PublicAccessBlockConfiguration"`
+
+ // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you
+ // want to set.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If a
+ // value is specified for this parameter, the matching algorithm's checksum
+ // member must be populated with the algorithm's checksum of the request payload.
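+ //
+ // Editor's sketch (an assumed, illustrative request; the bucket name and
+ // chosen values are placeholders, not generated documentation):
+ //
+ //    input := &s3.PutPublicAccessBlockInput{
+ //        Bucket:            aws.String("example-bucket"),
+ //        ChecksumAlgorithm: aws.String(s3.ChecksumAlgorithmCrc32),
+ //        PublicAccessBlockConfiguration: &s3.PublicAccessBlockConfiguration{
+ //            BlockPublicAcls:       aws.Bool(true),
+ //            BlockPublicPolicy:     aws.Bool(true),
+ //            IgnorePublicAcls:      aws.Bool(true),
+ //            RestrictPublicBuckets: aws.Bool(true),
+ //        },
+ //    }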
+ //
+ // The SDK will automatically compute the Content-MD5 checksum for this operation.
+ // The AWS SDK for Go v2 allows you to configure an alternative checksum algorithm
+ // to be used.
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // The account ID of the expected bucket owner. If the account ID that you provide
+ // does not match the actual owner of the bucket, the request fails with the
+ // HTTP status code 403 Forbidden (access denied).
+ ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+ // The PublicAccessBlock configuration that you want to apply to this Amazon
+ // S3 bucket. You can enable the configuration options in any combination. For
+ // more information about when Amazon S3 considers a bucket or object public,
+ // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status)
+ // in the Amazon S3 User Guide.
+ //
+ // PublicAccessBlockConfiguration is a required field
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutPublicAccessBlockInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s PutPublicAccessBlockInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutPublicAccessBlockInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
+ if s.PublicAccessBlockConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput {
+ s.Bucket = &v
+ return s
+}
+
+func (s *PutPublicAccessBlockInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
+// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value.
+func (s *PutPublicAccessBlockInput) SetChecksumAlgorithm(v string) *PutPublicAccessBlockInput {
+ s.ChecksumAlgorithm = &v
+ return s
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *PutPublicAccessBlockInput) SetExpectedBucketOwner(v string) *PutPublicAccessBlockInput {
+ s.ExpectedBucketOwner = &v
+ return s
+}
+
+// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value.
+func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +func (s *PutPublicAccessBlockInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *PutPublicAccessBlockInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s PutPublicAccessBlockInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PutPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // A collection of bucket events for which to send notifications + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +// This data type is deprecated. Use QueueConfiguration (https://docs.aws.amazon.com/AmazonS3/latest/API/API_QueueConfiguration.html) +// for the same purposes. This data type specifies the configuration for publishing +// messages to an Amazon Simple Queue Service (Amazon SQS) queue when Amazon +// S3 detects specified events. +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of bucket events for which to send notifications. + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + Queue *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { + s.Id = &v + return s +} + +// SetQueue sets the Queue field's value. 
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+ s.Queue = &v
+ return s
+}
+
+// The container for the records event.
+type RecordsEvent struct {
+ _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"`
+
+ // A byte array containing one or more partial result records.
+ // Payload is automatically base64 encoded/decoded by the SDK.
+ Payload []byte `type:"blob"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecordsEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s RecordsEvent) GoString() string {
+ return s.String()
+}
+
+// SetPayload sets the Payload field's value.
+func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent {
+ s.Payload = v
+ return s
+}
+
+// The RecordsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *RecordsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ s.Payload = make([]byte, len(msg.Payload))
+ copy(s.Payload, msg.Payload)
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ msg.Headers.Set(":content-type", eventstream.StringValue("application/octet-stream"))
+ msg.Payload = s.Payload
+ return msg, err
+}
+
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
+type Redirect struct {
+ _ struct{} `type:"structure"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string `type:"string"`
+
+ // Protocol to use when redirecting requests. The default is the protocol that
+ // is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+ // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+ // if one of the siblings is present. Can be present only if ReplaceKeyWith
+ // is not provided.
+ //
+ // Replacement must be made for object keys containing special characters (such
+ // as carriage returns) when using XML requests.
For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Redirect) GoString() string { + return s.String() +} + +// SetHostName sets the HostName field's value. +func (s *Redirect) SetHostName(v string) *Redirect { + s.HostName = &v + return s +} + +// SetHttpRedirectCode sets the HttpRedirectCode field's value. +func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v + return s +} + +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v + return s +} + +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RedirectAllRequestsTo) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
+ if s.HostName == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetHostName sets the HostName field's value.
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+ s.HostName = &v
+ return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+ s.Protocol = &v
+ return s
+}
+
+// A filter that you can specify to select modifications on replicas.
+// Amazon S3 doesn't replicate replica modifications by default. In the latest
+// version of replication configuration (when Filter is specified), you can
+// specify this element and set the status to Enabled to replicate modifications
+// on replicas.
+//
+// If you don't specify the Filter element, Amazon S3 assumes that the replication
+// configuration is the earlier version, V1. In the earlier version, this element
+// is not allowed.
+type ReplicaModifications struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether Amazon S3 replicates modifications on replicas.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaModifications) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ReplicaModifications) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicaModifications) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicaModifications"}
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicaModifications) SetStatus(v string) *ReplicaModifications {
+ s.Status = &v
+ return s
+}
+
+// A container for replication rules. You can add up to 1,000 rules. The maximum
+// size of a replication configuration is 2 MB.
+type ReplicationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ // role that Amazon S3 assumes when replicating objects. For more information,
+ // see How to Set Up Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Role is a required field
+ Role *string `type:"string" required:"true"`
+
+ // A container for one or more replication rules. A replication configuration
+ // must have at least one rule and can contain a maximum of 1,000 rules.
+ //
+ // Rules is a required field
+ Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation.
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Specifies which Amazon S3 objects to replicate and where to store the replicas. +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates delete markers. If you specify a Filter + // in your replication configuration, you must also include a DeleteMarkerReplication + // element. If your Filter includes a Tag element, the DeleteMarkerReplication + // Status must be set to Disabled, because Amazon S3 does not support replicating + // delete markers for tag-based rules. For an example configuration, see Basic + // Rule Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-config-min-rule-config). + // + // For more information about delete marker replication, see Basic Rule Configuration + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/delete-marker-replication.html). + // + // If you are using an earlier version of the replication configuration, Amazon + // S3 handles replication of delete markers differently. For more information, + // see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). + DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` + + // A container for information about the replication destination and its configurations + // including enabling the S3 Replication Time Control (S3 RTC). + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // Optional configuration to replicate existing source bucket objects. For more + // information, see Replicating Existing Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication) + // in the Amazon S3 User Guide. 
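+ //
+ // Editor's sketch (illustrative; rule stands in for a *s3.ReplicationRule
+ // being built): enabling it on a rule might look like:
+ //
+ //    rule.SetExistingObjectReplication(&s3.ExistingObjectReplication{
+ //        Status: aws.String(s3.ExistingObjectReplicationStatusEnabled),
+ //    })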
+ ExistingObjectReplication *ExistingObjectReplication `type:"structure"` + + // A filter that identifies the subset of objects to which the replication rule + // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + Filter *ReplicationRuleFilter `type:"structure"` + + // A unique identifier for the rule. The maximum value is 255 characters. + ID *string `type:"string"` + + // An object key name prefix that identifies the object or objects to which + // the rule applies. The maximum prefix length is 1,024 characters. To include + // all objects in a bucket, specify an empty string. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // The priority indicates which rule has precedence whenever two or more replication + // rules conflict. Amazon S3 will attempt to replicate objects according to + // all replication rules. However, if there are two or more rules with the same + // destination bucket, then objects will be replicated according to the rule + // with the highest priority. The higher the number, the higher the priority. + // + // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) + // in the Amazon S3 User Guide. + Priority *int64 `type:"integer"` + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // a customer managed key stored in Amazon Web Services Key Management Service + // (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + + // Specifies whether the rule is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.ExistingObjectReplication != nil { + if err := s.ExistingObjectReplication.Validate(); err != nil { + invalidParams.AddNested("ExistingObjectReplication", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetExistingObjectReplication sets the ExistingObjectReplication field's value. +func (s *ReplicationRule) SetExistingObjectReplication(v *ExistingObjectReplication) *ReplicationRule { + s.ExistingObjectReplication = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// A container for specifying rule filters. The filters determine the subset +// of objects to which the rule applies. This element is required only if you +// specify more than one filter. +// +// For example: +// +// - If you specify both a Prefix and a Tag filter, wrap these filters in +// an And tag. +// +// - If you specify a filter based on multiple tags, wrap the Tag elements +// in an And tag. +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + Prefix *string `type:"string"` + + // An array of tags containing key and value pairs. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation. 
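ReplicationConfiguration.Validate recurses into each rule and wraps nested failures with AddNested, so a bad destination in the first rule surfaces as Rules[0].Destination. A hedged sketch of assembling a configuration from the types above; the role ARN, bucket ARN, and prefix are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Replicate everything under "logs/" to a replica bucket (placeholders).
	rule := (&s3.ReplicationRule{}).
		SetStatus("Enabled").
		SetPriority(1).
		SetFilter((&s3.ReplicationRuleFilter{}).SetPrefix("logs/")).
		SetDeleteMarkerReplication((&s3.DeleteMarkerReplication{}).SetStatus("Disabled")).
		SetDestination((&s3.Destination{}).SetBucket("arn:aws:s3:::example-replica-bucket"))

	cfg := &s3.ReplicationConfiguration{
		Role:  aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
		Rules: []*s3.ReplicationRule{rule},
	}

	// Nested validation errors come back prefixed, e.g. "Rules[0].Destination".
	if err := cfg.Validate(); err != nil {
		fmt.Println(err)
	}
}
```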
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object key name prefix that identifies the subset of objects to which + // the rule applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +// A container specifying S3 Replication Time Control (S3 RTC) related information, +// including whether S3 RTC is enabled and the time when all objects and operations +// on objects must be replicated. Must be specified together with a Metrics +// block. +type ReplicationTime struct { + _ struct{} `type:"structure"` + + // Specifies whether the replication time is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationTimeStatus"` + + // A container specifying the time by which replication should be complete for + // all objects and operations on objects. + // + // Time is a required field + Time *ReplicationTimeValue `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTime) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTime) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationTime) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationTime"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Time == nil { + invalidParams.Add(request.NewErrParamRequired("Time")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicationTime) SetStatus(v string) *ReplicationTime { + s.Status = &v + return s +} + +// SetTime sets the Time field's value. +func (s *ReplicationTime) SetTime(v *ReplicationTimeValue) *ReplicationTime { + s.Time = v + return s +} + +// A container specifying the time value for S3 Replication Time Control (S3 +// RTC) and replication metrics EventThreshold. +type ReplicationTimeValue struct { + _ struct{} `type:"structure"` + + // Contains an integer specifying time in minutes. + // + // Valid value: 15 + Minutes *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTimeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ReplicationTimeValue) GoString() string { + return s.String() +} + +// SetMinutes sets the Minutes field's value. +func (s *ReplicationTimeValue) SetMinutes(v int64) *ReplicationTimeValue { + s.Minutes = &v + return s +} + +// Container for Payer. +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +// Container for specifying if periodic QueryProgress messages should be sent. +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `locationName:"RestoreObjectRequest" type:"structure" payload:"RestoreRequest"` + + // The bucket name containing the object to restore. 
+	//
+	// Access points - When you use this action with an access point, you must provide
+	// the alias of the access point in place of the bucket name or specify the
+	// access point ARN. When using the access point ARN, you must direct requests
+	// to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+	// When using this action with an access point through the Amazon Web Services
+	// SDKs, you provide the access point ARN in place of the bucket name. For more
+	// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+	// in the Amazon S3 User Guide.
+	//
+	// S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+	// must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+	// takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+	// When you use this action with S3 on Outposts through the Amazon Web Services
+	// SDKs, you provide the Outposts access point ARN in place of the bucket name.
+	// For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+	// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+	// in the Amazon S3 User Guide.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Indicates the algorithm used to create the checksum for the object when you
+	// use the SDK. This header will not provide any additional functionality if
+	// you don't use the SDK. When you send this header, there must be a corresponding
+	// x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+	// request with the HTTP status code 400 Bad Request. For more information,
+	// see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+	// in the Amazon S3 User Guide.
+	//
+	// If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+	// parameter.
+	//
+	// The AWS SDK for Go v1 does not support automatically computing the request
+	// payload checksum. This feature is available in the AWS SDK for Go v2. If a value
+	// is specified for this parameter, the matching algorithm's checksum member
+	// must be populated with the algorithm's checksum of the request payload.
+	ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+	// The account ID of the expected bucket owner. If the account ID that you provide
+	// does not match the actual owner of the bucket, the request fails with the
+	// HTTP status code 403 Forbidden (access denied).
+	ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
+
+	// Object key for which the action was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that they will be charged for the request.
+	// Bucket owners need not specify this parameter in their requests. If either
+	// the source or destination S3 bucket has Requester Pays enabled, the requester
+	// will pay for corresponding charges to copy the object.
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for restore job parameters. + RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *RestoreObjectInput) SetChecksumAlgorithm(v string) *RestoreObjectInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *RestoreObjectInput) SetExpectedBucketOwner(v string) *RestoreObjectInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +func (s *RestoreObjectInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *RestoreObjectInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s RestoreObjectInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. + // + // The Days element is required for regular restores, and must not be provided + // for select requests. + Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // S3 Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. 
+ Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// Specifies the restoration status of an object. Objects in certain storage +// classes must be restored before they can be retrieved. For more information +// about these storage classes and how to work with archived objects, see Working +// with archived objects (https://docs.aws.amazon.com/AmazonS3/latest/userguide/archived-objects.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Only the S3 Express +// One Zone storage class is supported by directory buckets to store objects. +type RestoreStatus struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is currently being restored. If the object restoration + // is in progress, the header returns the value TRUE. 
For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="true" + // + // If the object restoration has completed, the header returns the value FALSE. + // For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + // + // If the object hasn't been restored, there is no header response. + IsRestoreInProgress *bool `type:"boolean"` + + // Indicates when the restored copy will expire. This value is populated only + // if the object has already been restored. For example: + // + // x-amz-optional-object-attributes: IsRestoreInProgress="false", RestoreExpiryDate="2012-12-21T00:00:00.000Z" + RestoreExpiryDate *time.Time `type:"timestamp"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RestoreStatus) GoString() string { + return s.String() +} + +// SetIsRestoreInProgress sets the IsRestoreInProgress field's value. +func (s *RestoreStatus) SetIsRestoreInProgress(v bool) *RestoreStatus { + s.IsRestoreInProgress = &v + return s +} + +// SetRestoreExpiryDate sets the RestoreExpiryDate field's value. +func (s *RestoreStatus) SetRestoreExpiryDate(v time.Time) *RestoreStatus { + s.RestoreExpiryDate = &v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. For more +// information about routing rules, see Configuring advanced conditional redirects +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html#advanced-conditional-redirects) +// in the Amazon S3 User Guide. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. In the event of an error, + // you can specify a different error code to return. + // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
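Stepping back to the restore types above, a minimal RestoreObject call might look like the sketch below; the bucket, key, day count, and tier are placeholders, and error handling is reduced to log.Fatal:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Restore an archived object for two days using the Standard tier.
	out, err := svc.RestoreObject(&s3.RestoreObjectInput{
		Bucket: aws.String("example-archive-bucket"),
		Key:    aws.String("backups/2023/db.tar.gz"),
		RestoreRequest: (&s3.RestoreRequest{}).
			SetDays(2). // required for regular restores; omit for select requests
			SetGlacierJobParameters((&s3.GlacierJobParameters{}).SetTier("Standard")),
	})
	if err != nil {
		log.Fatal(err)
	}
	// out carries the x-amz-request-charged / x-amz-restore-output-path headers.
	log.Println(out)
}
```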
+func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see Put Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon S3 API Reference. For examples, see Put Bucket Lifecycle Configuration +// Examples (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples). +type Rule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon S3 User Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + // Specifies the expiration for the lifecycle of the object. + Expiration *LifecycleExpiration `type:"structure"` + + // Unique identifier for the rule. The value can't be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, + // GLACIER, or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled + // (or versioning is suspended), you can set this action to request that Amazon + // S3 transition noncurrent object versions to the STANDARD_IA, ONEZONE_IA, + // INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class at + // a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Object key prefix that identifies one or more objects to which this rule + // applies. + // + // Replacement must be made for object keys containing special characters (such + // as carriage returns) when using XML requests. For more information, see XML + // related object key constraints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html#object-key-xml-related-constraints). + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. 
+ // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an object transitions to a specified storage class. For more + // information about Amazon S3 lifecycle configuration rules, see Transitioning + // Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) + // in the Amazon S3 User Guide. + Transition *Transition `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the Key Management Service (KMS) symmetric encryption + // customer managed key to use for encrypting inventory reports. + // + // KeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SSEKMS's + // String and GoString methods. + // + // KeyId is a required field + KeyId *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. 
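Note that this legacy Rule type selects objects by Prefix alone (it has no Filter field), and both Prefix and Status are required. A small illustrative sketch; the prefix and day counts are invented:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Expire objects under "tmp/" after 30 days and abort stale multipart
	// uploads after 7 days.
	rule := (&s3.Rule{}).
		SetPrefix("tmp/").    // required; "" would match the whole bucket
		SetStatus("Enabled"). // required: "Enabled" or "Disabled"
		SetExpiration((&s3.LifecycleExpiration{}).SetDays(30)).
		SetAbortIncompleteMultipartUpload(
			(&s3.AbortIncompleteMultipartUpload{}).SetDaysAfterInitiation(7))

	if err := rule.Validate(); err != nil {
		fmt.Println(err)
	}
}
```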
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SSEKMS) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SSEKMS) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SSEKMS) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SSEKMS"}
+	if s.KeyId == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeyId"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetKeyId sets the KeyId field's value.
+func (s *SSEKMS) SetKeyId(v string) *SSEKMS {
+	s.KeyId = &v
+	return s
+}
+
+// Specifies the use of SSE-S3 to encrypt delivered inventory reports.
+type SSES3 struct {
+	_ struct{} `locationName:"SSE-S3" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SSES3) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SSES3) GoString() string {
+	return s.String()
+}
+
+// Specifies the byte range of the object to get the records from. A record
+// is processed when its first byte is contained by the range. This parameter
+// is optional, but when specified, it must not be empty. See RFC 2616, Section
+// 14.35.1 about how to specify the start and end of the range.
+type ScanRange struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the end of the byte range. This parameter is optional. Valid values:
+	// non-negative integers. The default value is one less than the size of the
+	// object being queried. If only the End parameter is supplied, it is interpreted
+	// to mean scan the last N bytes of the file. For example, <scanrange><end>50</end></scanrange>
+	// means scan the last 50 bytes.
+	End *int64 `type:"long"`
+
+	// Specifies the start of the byte range. This parameter is optional. Valid
+	// values: non-negative integers. The default value is 0. If only start is supplied,
+	// it means scan from that point to the end of the file. For example, <scanrange><start>50</start></scanrange>
+	// means scan from byte 50 until the end of the file.
+	Start *int64 `type:"long"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ScanRange) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s ScanRange) GoString() string {
+	return s.String()
+}
+
+// SetEnd sets the End field's value.
+func (s *ScanRange) SetEnd(v int64) *ScanRange {
+	s.End = &v
+	return s
+}
+
+// SetStart sets the Start field's value.
+func (s *ScanRange) SetStart(v int64) *ScanRange {
+	s.Start = &v
+	return s
+}
+
+// SelectObjectContentEventStreamEvent groups together all EventStream
+// event writes for SelectObjectContentEventStream.
+//
+// These events are:
+//
+//   - ContinuationEvent
+//   - EndEvent
+//   - ProgressEvent
+//   - RecordsEvent
+//   - StatsEvent
+type SelectObjectContentEventStreamEvent interface {
+	eventSelectObjectContentEventStream()
+	eventstreamapi.Marshaler
+	eventstreamapi.Unmarshaler
+}
+
+// SelectObjectContentEventStreamReader provides the interface for reading events
+// from the stream. The default implementation for this interface will be
+// SelectObjectContentEventStreamData.
+//
+// The reader's Close method must allow multiple concurrent calls.
+//
+// These events are:
+//
+//   - ContinuationEvent
+//   - EndEvent
+//   - ProgressEvent
+//   - RecordsEvent
+//   - StatsEvent
+//   - SelectObjectContentEventStreamUnknownEvent
+type SelectObjectContentEventStreamReader interface {
+	// Returns a channel of events as they are read from the event stream.
+	Events() <-chan SelectObjectContentEventStreamEvent
+
+	// Close will stop the reader reading events from the stream.
+	Close() error
+
+	// Returns any error that has occurred while reading from the event stream.
+	Err() error
+}
+
+type readSelectObjectContentEventStream struct {
+	eventReader *eventstreamapi.EventReader
+	stream      chan SelectObjectContentEventStreamEvent
+	err         *eventstreamapi.OnceError
+
+	done      chan struct{}
+	closeOnce sync.Once
+}
+
+func newReadSelectObjectContentEventStream(eventReader *eventstreamapi.EventReader) *readSelectObjectContentEventStream {
+	r := &readSelectObjectContentEventStream{
+		eventReader: eventReader,
+		stream:      make(chan SelectObjectContentEventStreamEvent),
+		done:        make(chan struct{}),
+		err:         eventstreamapi.NewOnceError(),
+	}
+	go r.readEventStream()
+
+	return r
+}
+
+// Close will close the underlying event stream reader.
+func (r *readSelectObjectContentEventStream) Close() error {
+	r.closeOnce.Do(r.safeClose)
+	return r.Err()
+}
+
+func (r *readSelectObjectContentEventStream) ErrorSet() <-chan struct{} {
+	return r.err.ErrorSet()
+}
+
+func (r *readSelectObjectContentEventStream) Closed() <-chan struct{} {
+	return r.done
+}
+
+func (r *readSelectObjectContentEventStream) safeClose() {
+	close(r.done)
+}
+
+func (r *readSelectObjectContentEventStream) Err() error {
+	return r.err.Err()
+}
+
+func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent {
+	return r.stream
+}
+
+func (r *readSelectObjectContentEventStream) readEventStream() {
+	defer r.Close()
+	defer close(r.stream)
+
+	for {
+		event, err := r.eventReader.ReadEvent()
+		if err != nil {
+			if err == io.EOF {
+				return
+			}
+			select {
+			case <-r.done:
+				// If closed already ignore the error
+				return
+			default:
+			}
+			if _, ok := err.(*eventstreamapi.UnknownMessageTypeError); ok {
+				continue
+			}
+			r.err.SetError(err)
+			return
+		}
+
+		select {
+		case r.stream <- event.(SelectObjectContentEventStreamEvent):
+		case <-r.done:
+			return
+		}
+	}
+}
+
+type unmarshalerForSelectObjectContentEventStreamEvent struct {
+	metadata protocol.ResponseMetadata
+}
+
+func (u unmarshalerForSelectObjectContentEventStreamEvent) UnmarshalerForEventName(eventType string) (eventstreamapi.Unmarshaler, error) {
+	switch eventType {
+	case "Cont":
+		return &ContinuationEvent{}, nil
+	case "End":
+		return &EndEvent{}, nil
+	case "Progress":
+		return &ProgressEvent{}, nil
+	case "Records":
+		return &RecordsEvent{}, nil
+	case "Stats":
+		return &StatsEvent{}, nil
+	default:
+		return &SelectObjectContentEventStreamUnknownEvent{Type: eventType}, nil
+	}
+}
+
+// SelectObjectContentEventStreamUnknownEvent provides a failsafe event for the
+// SelectObjectContentEventStream group of events when an unknown event is received.
+type SelectObjectContentEventStreamUnknownEvent struct {
+	Type    string
+	Message eventstream.Message
+}
+
+// The SelectObjectContentEventStreamUnknownEvent is an event in the SelectObjectContentEventStream
+// group of events.
+func (s *SelectObjectContentEventStreamUnknownEvent) eventSelectObjectContentEventStream() {}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) MarshalEvent(pm protocol.PayloadMarshaler) (
+	msg eventstream.Message, err error,
+) {
+	return e.Message.Clone(), nil
+}
+
+// UnmarshalEvent unmarshals the EventStream Message into the SelectObjectContentEventStreamData value.
+// This method is only used internally within the SDK's EventStream handling.
+func (e *SelectObjectContentEventStreamUnknownEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	e.Message = msg.Clone()
+	return nil
+}
+
+// Request to filter the contents of an Amazon S3 object based on a simple Structured
+// Query Language (SQL) statement. In the request, along with the SQL expression,
+// you must specify a data serialization format (JSON or CSV) of the object.
+// Amazon S3 uses this to parse object data into records. It returns only records
+// that match the specified SQL expression. You must also specify the data serialization
+// format for the response. For more information, see S3Select API Documentation
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html).
+type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the format of the data in the object that is being queried. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` + + // Specifies if periodic request progress information should be enabled. + RequestProgress *RequestProgress `type:"structure"` + + // The server-side encryption (SSE) algorithm used to encrypt the object. This + // parameter is needed only when the object was created using a checksum algorithm. + // For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The server-side encryption (SSE) customer managed key. This parameter is + // needed only when the object was created using a checksum algorithm. For more + // information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SelectObjectContentInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The MD5 server-side encryption (SSE) customer managed key. This parameter + // is needed only when the object was created using a checksum algorithm. For + // more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html) + // in the Amazon S3 User Guide. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the byte range of the object to get the records from. A record + // is processed when its first byte is contained by the range. This parameter + // is optional, but when specified, it must not be empty. 
See RFC 2616, Section
+	// 14.35.1 about how to specify the start and end of the range.
+	//
+	// ScanRange may be used in the following ways:
+	//
+	//    * <scanrange><start>50</start><end>100</end></scanrange> - process only
+	//    the records starting between the bytes 50 and 100 (inclusive, counting
+	//    from zero)
+	//
+	//    * <scanrange><start>50</start></scanrange> - process only the records
+	//    starting after the byte 50
+	//
+	//    * <scanrange><end>50</end></scanrange> - process only the records within
+	//    the last 50 bytes of the file.
+	ScanRange *ScanRange `type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SelectObjectContentInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s SelectObjectContentInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *SelectObjectContentInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Expression == nil {
+		invalidParams.Add(request.NewErrParamRequired("Expression"))
+	}
+	if s.ExpressionType == nil {
+		invalidParams.Add(request.NewErrParamRequired("ExpressionType"))
+	}
+	if s.InputSerialization == nil {
+		invalidParams.Add(request.NewErrParamRequired("InputSerialization"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.OutputSerialization == nil {
+		invalidParams.Add(request.NewErrParamRequired("OutputSerialization"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput {
+	s.Bucket = &v
+	return s
+}
+
+func (s *SelectObjectContentInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
+// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value.
+func (s *SelectObjectContentInput) SetExpectedBucketOwner(v string) *SelectObjectContentInput {
+	s.ExpectedBucketOwner = &v
+	return s
+}
+
+// SetExpression sets the Expression field's value.
+func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput {
+	s.Expression = &v
+	return s
+}
+
+// SetExpressionType sets the ExpressionType field's value.
+func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput {
+	s.ExpressionType = &v
+	return s
+}
+
+// SetInputSerialization sets the InputSerialization field's value.
+func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput {
+	s.InputSerialization = v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput {
+	s.Key = &v
+	return s
+}
+
+// SetOutputSerialization sets the OutputSerialization field's value.
+func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v + return s +} + +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s +} + +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetScanRange sets the ScanRange field's value. +func (s *SelectObjectContentInput) SetScanRange(v *ScanRange) *SelectObjectContentInput { + s.ScanRange = v + return s +} + +func (s *SelectObjectContentInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *SelectObjectContentInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s SelectObjectContentInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` + + EventStream *SelectObjectContentEventStream +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectObjectContentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectObjectContentOutput) GoString() string { + return s.String() +} + +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v + return s +} +func (s *SelectObjectContentOutput) GetEventStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// GetStream returns the type to interact with the event stream. +func (s *SelectObjectContentOutput) GetStream() *SelectObjectContentEventStream { + return s.EventStream +} + +// Describes the parameters for Select job types. 
+type SelectParameters struct { + _ struct{} `type:"structure"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the serialization format of the object. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // Describes how the results of the Select job are serialized. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SelectParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v + return s +} + +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v + return s +} + +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v + return s +} + +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v + return s +} + +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. If you don't specify a customer +// managed key at configuration, Amazon S3 automatically creates an Amazon Web +// Services KMS key in your Amazon Web Services account the first time that +// you add an object encrypted with SSE-KMS to a bucket. By default, Amazon +// S3 uses this KMS key for SSE-KMS. 
For more information, see PUT Bucket encryption +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon S3 API Reference. +type ServerSideEncryptionByDefault struct { + _ struct{} `type:"structure"` + + // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services + // KMS key ID to use for the default encryption. This parameter is allowed if + // and only if SSEAlgorithm is set to aws:kms or aws:kms:dsse. + // + // You can specify the key ID, key alias, or the Amazon Resource Name (ARN) + // of the KMS key. + // + // * Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key ARN: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // * Key Alias: alias/alias-name + // + // If you use a key ID, you can run into a LogDestination undeliverable error + // when creating a VPC flow log. + // + // If you are using encryption with cross-account or Amazon Web Services service + // operations you must use a fully qualified KMS key ARN. For more information, + // see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy). + // + // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) + // in the Amazon Web Services Key Management Service Developer Guide. + // + // KMSMasterKeyID is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ServerSideEncryptionByDefault's + // String and GoString methods. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. + // + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionByDefault) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionByDefault) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v + return s +} + +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} + +// Specifies the default server-side-encryption configuration. 
+type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` + + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v + return s +} + +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { + _ struct{} `type:"structure"` + + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon + // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon S3 User Guide. + BucketKeyEnabled *bool `type:"boolean"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ServerSideEncryptionRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
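+//
+// For example (editor's illustrative sketch, not upstream documentation), a
+// rule built as follows passes validation; the KMS key ARN is a placeholder:
+//
+//	rule := (&s3.ServerSideEncryptionRule{}).
+//		SetApplyServerSideEncryptionByDefault((&s3.ServerSideEncryptionByDefault{}).
+//			SetSSEAlgorithm(s3.ServerSideEncryptionAwsKms).
+//			SetKMSMasterKeyID("arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab")).
+//		SetBucketKeyEnabled(true)
+//	err := rule.Validate() // nil: the nested default has SSEAlgorithm set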
+func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryptionRule { + s.BucketKeyEnabled = &v + return s +} + +// The established temporary security credentials of the session. +// +// Directory buckets - These session credentials are only supported for the +// authentication and authorization of Zonal endpoint APIs on directory buckets. +type SessionCredentials struct { + _ struct{} `type:"structure"` + + // A unique identifier that's associated with a secret access key. The access + // key ID and the secret access key are used together to sign programmatic Amazon + // Web Services requests cryptographically. + // + // AccessKeyId is a required field + AccessKeyId *string `locationName:"AccessKeyId" type:"string" required:"true"` + + // Temporary security credentials expire after a specified interval. After temporary + // credentials expire, any calls that you make with those credentials will fail. + // So you must generate a new set of temporary credentials. Temporary credentials + // cannot be extended or refreshed beyond the original specified interval. + // + // Expiration is a required field + Expiration *time.Time `locationName:"Expiration" type:"timestamp" required:"true"` + + // A key that's used with the access key ID to cryptographically sign programmatic + // Amazon Web Services requests. Signing a request identifies the sender and + // prevents the request from being altered. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SecretAccessKey is a required field + SecretAccessKey *string `locationName:"SecretAccessKey" type:"string" required:"true" sensitive:"true"` + + // A part of the temporary security credentials. The session token is used to + // validate the temporary security credentials. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by SessionCredentials's + // String and GoString methods. + // + // SessionToken is a required field + SessionToken *string `locationName:"SessionToken" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s SessionCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *SessionCredentials) SetAccessKeyId(v string) *SessionCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *SessionCredentials) SetExpiration(v time.Time) *SessionCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *SessionCredentials) SetSecretAccessKey(v string) *SessionCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *SessionCredentials) SetSessionToken(v string) *SessionCredentials { + s.SessionToken = &v + return s +} + +// To use simple format for S3 keys for log objects, set SimplePrefix to an +// empty object. +// +// [DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString] +type SimplePrefix struct { + _ struct{} `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SimplePrefix) GoString() string { + return s.String() +} + +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// a customer managed key stored in Amazon Web Services Key Management Service +// (SSE-KMS). +type SourceSelectionCriteria struct { + _ struct{} `type:"structure"` + + // A filter that you can specify for selections for modifications on replicas. + // Amazon S3 doesn't replicate replica modifications by default. In the latest + // version of replication configuration (when Filter is specified), you can + // specify this element and set the status to Enabled to replicate modifications + // on replicas. + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. In the earlier version, this element + // is not allowed + ReplicaModifications *ReplicaModifications `type:"structure"` + + // A container for filter information for the selection of Amazon S3 objects + // encrypted with Amazon Web Services KMS. If you include SourceSelectionCriteria + // in the replication configuration, this element is required. + SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceSelectionCriteria) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SourceSelectionCriteria) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.ReplicaModifications != nil { + if err := s.ReplicaModifications.Validate(); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(request.ErrInvalidParams)) + } + } + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReplicaModifications sets the ReplicaModifications field's value. +func (s *SourceSelectionCriteria) SetReplicaModifications(v *ReplicaModifications) *SourceSelectionCriteria { + s.ReplicaModifications = v + return s +} + +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v + return s +} + +// A container for filter information for the selection of S3 objects encrypted +// with Amazon Web Services KMS. +type SseKmsEncryptedObjects struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using an Amazon Web Services KMS key stored in Amazon Web Services Key Management + // Service. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SseKmsEncryptedObjects) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SseKmsEncryptedObjects) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v + return s +} + +// Container for the stats details. +type Stats struct { + _ struct{} `type:"structure"` + + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` + + // The total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` + + // The total number of object bytes scanned. 
+ BytesScanned *int64 `type:"long"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Stats) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s Stats) GoString() string {
+ return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+ s.BytesProcessed = &v
+ return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+ s.BytesReturned = &v
+ return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+ s.BytesScanned = &v
+ return s
+}
+
+// Container for the Stats Event.
+type StatsEvent struct {
+ _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+ // The Stats event details.
+ Details *Stats `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StatsEvent) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+//
+// API parameter values that are decorated as "sensitive" in the API will not
+// be included in the string output. The member name will be present, but the
+// value will be replaced with "sensitive".
+func (s StatsEvent) GoString() string {
+ return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+ s.Details = v
+ return s
+}
+
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
+}
+
+// MarshalEvent marshals the type into a stream event value. This method
+// should only be used internally within the SDK's EventStream handling.
+func (s *StatsEvent) MarshalEvent(pm protocol.PayloadMarshaler) (msg eventstream.Message, err error) {
+ msg.Headers.Set(eventstreamapi.MessageTypeHeader, eventstream.StringValue(eventstreamapi.EventMessageType))
+ var buf bytes.Buffer
+ if err = pm.MarshalPayload(&buf, s); err != nil {
+ return eventstream.Message{}, err
+ }
+ msg.Payload = buf.Bytes()
+ return msg, err
+}
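+
+// Example (editor's illustrative sketch, not upstream code): Stats events are
+// delivered on the same event stream as Records events. A typical consumer
+// loop, assuming out is the *SelectObjectContentOutput from an earlier call:
+//
+//	defer out.EventStream.Close()
+//	for ev := range out.EventStream.Events() {
+//		switch e := ev.(type) {
+//		case *s3.RecordsEvent:
+//			_ = e.Payload // raw result bytes
+//		case *s3.StatsEvent:
+//			fmt.Println("bytes scanned:", aws.Int64Value(e.Details.BytesScanned))
+//		}
+//	}
+//	if err := out.EventStream.Err(); err != nil {
+//		// handle the stream error
+//	}
+
+// Specifies data related to access patterns to be collected and made available
+// to analyze the tradeoffs between different storage classes for an Amazon
+// S3 bucket.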
+type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. + DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Container for data related to the storage class analysis for an Amazon S3 +// bucket for export. +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. 
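+//
+// A complete export configuration chains the setters on this type, for example
+// (editor's illustrative sketch; dest is an assumed *AnalyticsExportDestination):
+//
+//	export := (&s3.StorageClassAnalysisDataExport{}).
+//		SetOutputSchemaVersion(s3.StorageClassAnalysisSchemaVersionV1).
+//		SetDestination(dest)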
+func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +// A container of a key value name pair. +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the object key. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Container for TagSet elements. +type Tagging struct { + _ struct{} `type:"structure"` + + // A collection for a set of tags + // + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
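+//
+// For example (editor's illustrative sketch), validation fails when a nested
+// Tag is missing its required Value:
+//
+//	t := (&s3.Tagging{}).SetTagSet([]*s3.Tag{(&s3.Tag{}).SetKey("env")})
+//	err := t.Validate() // non-nil: TagSet[0] is missing Value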
+func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Container for granting information. +// +// Buckets that use the bucket owner enforced setting for Object Ownership don't +// support target grants. For more information, see Permissions server access +// log delivery (https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general) +// in the Amazon S3 User Guide. +type TargetGrant struct { + _ struct{} `type:"structure"` + + // Container for the person being granted permissions. + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Logging permissions assigned to the grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// Amazon S3 key format for log objects. Only one format, PartitionedPrefix +// or SimplePrefix, is allowed. +type TargetObjectKeyFormat struct { + _ struct{} `type:"structure"` + + // Partitioned S3 key for log objects. + PartitionedPrefix *PartitionedPrefix `locationName:"PartitionedPrefix" type:"structure"` + + // To use the simple format for S3 keys for log objects. To specify SimplePrefix + // format, set SimplePrefix to {}. + SimplePrefix *SimplePrefix `locationName:"SimplePrefix" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s TargetObjectKeyFormat) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TargetObjectKeyFormat) GoString() string { + return s.String() +} + +// SetPartitionedPrefix sets the PartitionedPrefix field's value. +func (s *TargetObjectKeyFormat) SetPartitionedPrefix(v *PartitionedPrefix) *TargetObjectKeyFormat { + s.PartitionedPrefix = v + return s +} + +// SetSimplePrefix sets the SimplePrefix field's value. +func (s *TargetObjectKeyFormat) SetSimplePrefix(v *SimplePrefix) *TargetObjectKeyFormat { + s.SimplePrefix = v + return s +} + +// The S3 Intelligent-Tiering storage class is designed to optimize storage +// costs by automatically moving data to the most cost-effective storage access +// tier, without additional operational overhead. +type Tiering struct { + _ struct{} `type:"structure"` + + // S3 Intelligent-Tiering access tier. See Storage class for automatically optimizing + // frequently and infrequently accessed objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access) + // for a list of access tiers in the S3 Intelligent-Tiering storage class. + // + // AccessTier is a required field + AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` + + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number + // of days specified for Archive Access tier must be at least 90 days and Deep + // Archive Access tier must be at least 180 days. The maximum can be up to 2 + // years (730 days). + // + // Days is a required field + Days *int64 `type:"integer" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tiering) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tiering) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tiering) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tiering"} + if s.AccessTier == nil { + invalidParams.Add(request.NewErrParamRequired("AccessTier")) + } + if s.Days == nil { + invalidParams.Add(request.NewErrParamRequired("Days")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessTier sets the AccessTier field's value. +func (s *Tiering) SetAccessTier(v string) *Tiering { + s.AccessTier = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Tiering) SetDays(v int64) *Tiering { + s.Days = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. 
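+//
+// For example (editor's illustrative sketch; the topic ARN is a placeholder),
+// a configuration that publishes all object-created events:
+//
+//	cfg := (&s3.TopicConfiguration{}).
+//		SetTopicArn("arn:aws:sns:us-west-2:111122223333:s3-events").
+//		SetEvents([]*string{aws.String(s3.EventS3ObjectCreated)})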
+type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon S3 User Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true" enum:"Event"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring event notifications using object key name filtering + // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/notification-how-to-filtering.html) + // in the Amazon S3 User Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. This data type is deprecated. Use TopicConfiguration +// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_TopicConfiguration.html) +// instead. +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. 
+ // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + // A collection of events related to objects + Events []*string `locationName:"Event" type:"list" flattened:"true" enum:"Event"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Specifies when an object transitions to a specified storage class. For more +// information about Amazon S3 lifecycle configuration rules, see Transitioning +// Objects Using Amazon S3 Lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-transition-general-considerations.html) +// in the Amazon S3 User Guide. +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. + Days *int64 `type:"integer"` + + // The storage class to which you want the object to transition. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. 
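+//
+// Date and Days are mutually exclusive within a lifecycle rule; set one or
+// the other. For example (editor's illustrative sketch), a transition to
+// GLACIER 30 days after creation:
+//
+//	tr := (&s3.Transition{}).SetDays(30).SetStorageClass(s3.TransitionStorageClassGlacier)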
+func (s *Transition) SetDate(v time.Time) *Transition {
+ s.Date = &v
+ return s
+}
+
+// SetDays sets the Days field's value.
+func (s *Transition) SetDays(v int64) *Transition {
+ s.Days = &v
+ return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Transition) SetStorageClass(v string) *Transition {
+ s.StorageClass = &v
+ return s
+}
+
+type UploadPartCopyInput struct {
+ _ struct{} `locationName:"UploadPartCopyRequest" type:"structure"`
+
+ // The bucket name.
+ //
+ // Directory buckets - When you use this operation with a directory bucket,
+ // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com.
+ // Path-style requests are not supported. Directory bucket names must be unique
+ // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3
+ // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about
+ // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points - When you use this action with an access point, you must provide
+ // the alias of the access point in place of the bucket name or specify the
+ // access point ARN. When using the access point ARN, you must direct requests
+ // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
+ // When using this action with an access point through the Amazon Web Services
+ // SDKs, you provide the access point ARN in place of the bucket name. For more
+ // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Access points and Object Lambda access points are not supported by directory
+ // buckets.
+ //
+ // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you
+ // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname
+ // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com.
+ // When you use this action with S3 on Outposts through the Amazon Web Services
+ // SDKs, you provide the Outposts access point ARN in place of the bucket name.
+ // For more information about S3 on Outposts ARNs, see What is S3 on Outposts?
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies the source object for the copy operation. You specify the value
+ // in one of two formats, depending on whether you want to access the source
+ // object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html):
+ //
+ // * For objects not accessed through an access point, specify the name of
+ // the source bucket and key of the source object, separated by a slash (/).
+ // For example, to copy the object reports/january.pdf from the bucket awsexamplebucket,
+ // use awsexamplebucket/reports/january.pdf. The value must be URL-encoded.
+ //
+ // * For objects accessed through access points, specify the Amazon Resource
+ // Name (ARN) of the object as accessed through the access point, in the
+ // format arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key>.
+ // For example, to copy the object reports/january.pdf through access point
+ // my-access-point owned by account 123456789012 in Region us-west-2, use
+ // the URL encoding of arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf.
+ // The value must be URL encoded. Amazon S3 supports copy operations using
+ // Access points only when the source and destination buckets are in the
+ // same Amazon Web Services Region. Access points are not supported by directory
+ // buckets. Alternatively, for objects accessed through Amazon S3 on Outposts,
+ // specify the ARN of the object as accessed in the format arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key>.
+ // For example, to copy the object reports/january.pdf through outpost my-outpost
+ // owned by account 123456789012 in Region us-west-2, use the URL encoding
+ // of arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
+ // The value must be URL-encoded.
+ //
+ // If your bucket has versioning enabled, you could have multiple versions of
+ // the same object. By default, x-amz-copy-source identifies the current version
+ // of the source object to copy. To copy a specific version of the source object,
+ // append ?versionId=<version-id> to the x-amz-copy-source request
+ // header (for example, x-amz-copy-source: /awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893).
+ //
+ // If the current version is a delete marker and you don't specify a versionId
+ // in the x-amz-copy-source request header, Amazon S3 returns a 404 Not Found
+ // error, because the object does not exist. If you specify versionId in the
+ // x-amz-copy-source and the versionId is a delete marker, Amazon S3 returns
+ // an HTTP 400 Bad Request error, because you are not allowed to specify a delete
+ // marker as a version for the x-amz-copy-source.
+ //
+ // Directory buckets - S3 Versioning isn't enabled or supported for directory
+ // buckets.
+ //
+ // CopySource is a required field
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ //
+ // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-match condition evaluates to true, and;
+ //
+ // x-amz-copy-source-if-unmodified-since condition evaluates to false;
+ //
+ // Amazon S3 returns 200 OK and copies the data.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ //
+ // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since
+ // headers are present in the request as follows:
+ //
+ // x-amz-copy-source-if-none-match condition evaluates to false, and;
+ //
+ // x-amz-copy-source-if-modified-since condition evaluates to true;
+ //
+ // Amazon S3 returns 412 Precondition Failed response code.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ // + // If both of the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-none-match condition evaluates to false, and; + // + // x-amz-copy-source-if-modified-since condition evaluates to true; + // + // Amazon S3 returns 412 Precondition Failed response code. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + // + // If both of the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since + // headers are present in the request as follows: + // + // x-amz-copy-source-if-match condition evaluates to true, and; + // + // x-amz-copy-source-if-unmodified-since condition evaluates to false; + // + // Amazon S3 returns 200 OK and copies the data. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first 10 bytes of the source. You can copy a range only if the source object + // is greater than 5 MB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (for example, + // AES256). + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + // + // This functionality is not supported when the source object is in a directory + // bucket. + // + // CopySourceSSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The account ID of the expected destination bucket owner. If the account ID + // that you provide does not match the actual owner of the destination bucket, + // the request fails with the HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // The account ID of the expected source bucket owner. 
If the account ID that + // you provide does not match the actual owner of the source bucket, the request + // fails with the HTTP status code 403 Forbidden (access denied). + ExpectedSourceBucketOwner *string `location:"header" locationName:"x-amz-source-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported when the destination bucket is a directory + // bucket. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. 
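+//
+// An editorial usage sketch (not generated code): assembling an
+// UploadPartCopyInput with this and the related setters. The bucket and key
+// names are placeholders, uploadID and rawKey are assumed inputs, and
+// crypto/md5 and encoding/base64 are assumed imports. URL-encode the copy
+// source if the key contains special characters, and append ?versionId=<version-id>
+// to copy a specific version:
+//
+//	sum := md5.Sum(rawKey) // SSE-C key integrity digest
+//	input := (&UploadPartCopyInput{}).
+//		SetBucket("amzn-s3-demo-dest-bucket").
+//		SetKey("reports/copy.pdf").
+//		SetUploadId(uploadID).
+//		SetPartNumber(1).
+//		SetCopySource("amzn-s3-demo-src-bucket/reports/january.pdf").
+//		SetCopySourceRange("bytes=0-5242879"). // the first 5 MiB
+//		SetCopySourceSSECustomerAlgorithm("AES256").
+//		SetCopySourceSSECustomerKey(string(rawKey)).
+//		SetCopySourceSSECustomerKeyMD5(base64.StdEncoding.EncodeToString(sum[:]))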
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetExpectedSourceBucketOwner sets the ExpectedSourceBucketOwner field's value. +func (s *UploadPartCopyInput) SetExpectedSourceBucketOwner(v string) *UploadPartCopyInput { + s.ExpectedSourceBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartCopyInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +func (s *UploadPartCopyInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartCopyInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. 
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // Container for all response elements. + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + // + // This functionality is not supported when the source object is in a directory + // bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. + // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartCopyOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartCopyOutput) SetBucketKeyEnabled(v bool) *UploadPartCopyOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetCopyPartResult sets the CopyPartResult field's value. 
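+//
+// Editorial sketch (not generated code): the copied part's ETag is carried in
+// the nested CopyPartResult rather than in a response header, and it is what a
+// later CompleteMultipartUpload needs; "out" is an assumed *UploadPartCopyOutput:
+//
+//	completed := &CompletedPart{
+//		ETag:       out.CopyPartResult.ETag,
+//		PartNumber: aws.Int64(1),
+//	}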
+func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +type UploadPartInput struct { + _ struct{} `locationName:"UploadPartRequest" type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // The name of the bucket to which the multipart upload was initiated. + // + // Directory buckets - When you use this operation with a directory bucket, + // you must use virtual-hosted-style requests in the format Bucket_name.s3express-az_id.region.amazonaws.com. + // Path-style requests are not supported. Directory bucket names must be unique + // in the chosen Availability Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 + // (for example, DOC-EXAMPLE-BUCKET--usw2-az1--x-s3). For information about + // bucket naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html) + // in the Amazon S3 User Guide. + // + // Access points - When you use this action with an access point, you must provide + // the alias of the access point in place of the bucket name or specify the + // access point ARN. When using the access point ARN, you must direct requests + // to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. + // When using this action with an access point through the Amazon Web Services + // SDKs, you provide the access point ARN in place of the bucket name. For more + // information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html) + // in the Amazon S3 User Guide. + // + // Access points and Object Lambda access points are not supported by directory + // buckets. + // + // S3 on Outposts - When you use this action with Amazon S3 on Outposts, you + // must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname + // takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. + // When you use this action with S3 on Outposts through the Amazon Web Services + // SDKs, you provide the Outposts access point ARN in place of the bucket name. + // For more information about S3 on Outposts ARNs, see What is S3 on Outposts? 
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
+ // in the Amazon S3 User Guide.
+ //
+ // Bucket is a required field
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Indicates the algorithm used to create the checksum for the object when you
+ // use the SDK. This header will not provide any additional functionality if
+ // you don't use the SDK. When you send this header, there must be a corresponding
+ // x-amz-checksum or x-amz-trailer header sent. Otherwise, Amazon S3 fails the
+ // request with the HTTP status code 400 Bad Request. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ //
+ // If you provide an individual checksum, Amazon S3 ignores any provided ChecksumAlgorithm
+ // parameter.
+ //
+ // This checksum algorithm must be the same for all parts and must match the
+ // checksum value supplied in the CreateMultipartUpload request.
+ //
+ // The AWS SDK for Go v1 does not support automatically computing the request
+ // payload checksum. This feature is available in the AWS SDK for Go v2. If
+ // a value is specified for this parameter, the matching algorithm's checksum
+ // member must be populated with the algorithm's checksum of the request payload.
+ // (An editorial sketch of precomputing a CRC32 appears with the setter methods
+ // below.)
+ ChecksumAlgorithm *string `location:"header" locationName:"x-amz-sdk-checksum-algorithm" type:"string" enum:"ChecksumAlgorithm"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32 checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 32-bit CRC32C checksum of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 160-bit SHA-1 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This header specifies
+ // the base64-encoded, 256-bit SHA-256 digest of the object. For more information,
+ // see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"`
+
+ // Size of the body in bytes.
This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The base64-encoded 128-bit MD5 digest of the part data. This parameter is + // auto-populated when using the command from the CLI. This parameter is required + // if object lock parameters are specified. + // + // This functionality is not supported for directory buckets. + ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"` + + // The account ID of the expected bucket owner. If the account ID that you provide + // does not match the actual owner of the bucket, the request fails with the + // HTTP status code 403 Forbidden (access denied). + ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that they will be charged for the request. + // Bucket owners need not specify this parameter in their requests. If either + // the source or destination S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects + // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // in the Amazon S3 User Guide. + // + // This functionality is not supported for directory buckets. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use when encrypting the object (for example, AES256). + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // S3 does not store the encryption key. The key must be appropriate for use + // with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + // + // This functionality is not supported for directory buckets. + // + // SSECustomerKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartInput's + // String and GoString methods. + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure that the + // encryption key was transmitted without error. + // + // This functionality is not supported for directory buckets. 
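+ //
+ // Editorial note: as with the copy-source key above, this digest is simply
+ // the base64 encoding of the MD5 of the raw key bytes, e.g.
+ //
+ //	sum := md5.Sum(rawKey)
+ //	keyMD5 := base64.StdEncoding.EncodeToString(sum[:])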
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +func (s *UploadPartInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetChecksumAlgorithm sets the ChecksumAlgorithm field's value. +func (s *UploadPartInput) SetChecksumAlgorithm(v string) *UploadPartInput { + s.ChecksumAlgorithm = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *UploadPartInput) SetChecksumCRC32(v string) *UploadPartInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *UploadPartInput) SetChecksumCRC32C(v string) *UploadPartInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *UploadPartInput) SetChecksumSHA1(v string) *UploadPartInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *UploadPartInput) SetChecksumSHA256(v string) *UploadPartInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + +// SetExpectedBucketOwner sets the ExpectedBucketOwner field's value. 
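+//
+// An editorial usage sketch (not generated code): assembling an UploadPartInput
+// with the setters on this type. Names are placeholders; hash/crc32,
+// encoding/binary, encoding/base64, and bytes are assumed imports. The CRC32
+// is precomputed because the SDK for Go v1 does not compute request checksums
+// automatically:
+//
+//	crc := crc32.ChecksumIEEE(partBytes)
+//	buf := make([]byte, 4)
+//	binary.BigEndian.PutUint32(buf, crc)
+//	input := (&UploadPartInput{}).
+//		SetBucket("amzn-s3-demo-bucket").
+//		SetKey("reports/january.pdf").
+//		SetUploadId(uploadID).
+//		SetPartNumber(1).
+//		SetBody(bytes.NewReader(partBytes)).
+//		SetChecksumAlgorithm(ChecksumAlgorithmCrc32).
+//		SetChecksumCRC32(base64.StdEncoding.EncodeToString(buf)).
+//		SetExpectedBucketOwner("111122223333")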
+func (s *UploadPartInput) SetExpectedBucketOwner(v string) *UploadPartInput { + s.ExpectedBucketOwner = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +func (s *UploadPartInput) getEndpointARN() (arn.Resource, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + return parseEndpointARN(*s.Bucket) +} + +func (s *UploadPartInput) hasEndpointARN() bool { + if s.Bucket == nil { + return false + } + return arn.IsARN(*s.Bucket) +} + +// updateArnableField updates the value of the input field that +// takes an ARN as an input. This method is useful to backfill +// the parsed resource name from ARN into the input member. +// It returns a pointer to a modified copy of input and an error. +// Note that original input is not modified. +func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { + if s.Bucket == nil { + return nil, fmt.Errorf("member Bucket is nil") + } + s.Bucket = aws.String(v) + return &s, nil +} + +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with Key Management Service (KMS) keys (SSE-KMS). + // + // This functionality is not supported for directory buckets. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + + // The base64-encoded, 32-bit CRC32 checksum of the object. This will only be + // present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-checksum-crc32" type:"string"` + + // The base64-encoded, 32-bit CRC32C checksum of the object. This will only + // be present if it was uploaded with the object. 
When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-checksum-crc32c" type:"string"` + + // The base64-encoded, 160-bit SHA-1 digest of the object. This will only be + // present if it was uploaded with the object. When you use the API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-checksum-sha1" type:"string"` + + // The base64-encoded, 256-bit SHA-256 digest of the object. This will only + // be present if it was uploaded with the object. When you use an API operation + // on an object that was uploaded using multipart uploads, this value may not + // be a direct checksum value of the full object. Instead, it's a calculation + // based on the checksum values of each individual part. For more information + // about how checksums are calculated with multipart uploads, see Checking object + // integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums) + // in the Amazon S3 User Guide. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-checksum-sha256" type:"string"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + // + // This functionality is not supported for directory buckets. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to confirm the encryption algorithm + // that's used. + // + // This functionality is not supported for directory buckets. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide the round-trip message integrity + // verification of the customer-provided encryption key. + // + // This functionality is not supported for directory buckets. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, indicates the ID of the Key Management Service (KMS) symmetric + // encryption customer managed key that was used for the object. + // + // This functionality is not supported for directory buckets. 
+ // + // SSEKMSKeyId is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by UploadPartOutput's + // String and GoString methods. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The server-side encryption algorithm used when you store this object in Amazon + // S3 (for example, AES256, aws:kms). + // + // For directory buckets, only server-side encryption with Amazon S3 managed + // keys (SSE-S3) (AES256) is supported. + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UploadPartOutput) GoString() string { + return s.String() +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput { + s.BucketKeyEnabled = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *UploadPartOutput) SetChecksumCRC32(v string) *UploadPartOutput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *UploadPartOutput) SetChecksumCRC32C(v string) *UploadPartOutput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *UploadPartOutput) SetChecksumSHA1(v string) *UploadPartOutput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *UploadPartOutput) SetChecksumSHA256(v string) *UploadPartOutput { + s.ChecksumSHA256 = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon S3 API Reference. 
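+//
+// Editorial sketch (not generated code): enabling versioning, where svc is an
+// assumed *S3 client and the bucket name is a placeholder:
+//
+//	_, err := svc.PutBucketVersioning(&PutBucketVersioningInput{
+//		Bucket: aws.String("amzn-s3-demo-bucket"),
+//		VersioningConfiguration: &VersioningConfiguration{
+//			Status: aws.String(BucketVersioningStatusEnabled),
+//		},
+//	})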
+type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Specifies website configuration parameters for an Amazon S3 bucket. +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + // The name of the error document for the website. + ErrorDocument *ErrorDocument `type:"structure"` + + // The name of the index document for the website. + IndexDocument *IndexDocument `type:"structure"` + + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + // Rules that define when a redirect is applied and the redirect behavior. + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
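+//
+// Editorial sketch (not generated code): the service client runs this
+// validation automatically before sending, but it can also be invoked
+// directly, e.g.
+//
+//	cfg := &WebsiteConfiguration{
+//		IndexDocument: &IndexDocument{Suffix: aws.String("index.html")},
+//		ErrorDocument: &ErrorDocument{Key: aws.String("error.html")},
+//	}
+//	if err := cfg.Validate(); err != nil {
+//		// reject the configuration before calling PutBucketWebsite
+//	}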
+func (s *WebsiteConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+ if s.ErrorDocument != nil {
+ if err := s.ErrorDocument.Validate(); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.IndexDocument != nil {
+ if err := s.IndexDocument.Validate(); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RedirectAllRequestsTo != nil {
+ if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RoutingRules != nil {
+ for i, v := range s.RoutingRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+ s.ErrorDocument = v
+ return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+ s.IndexDocument = v
+ return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+ s.RedirectAllRequestsTo = v
+ return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+ s.RoutingRules = v
+ return s
+}
+
+type WriteGetObjectResponseInput struct {
+ _ struct{} `locationName:"WriteGetObjectResponseRequest" type:"structure" payload:"Body"`
+
+ // Indicates that a range of bytes was specified.
+ AcceptRanges *string `location:"header" locationName:"x-amz-fwd-header-accept-ranges" type:"string"`
+
+ // The object data.
+ //
+ // To use a non-seekable io.Reader for this request, wrap the io.Reader with
+ // "aws.ReadSeekCloser". The SDK will not retry request errors for non-seekable
+ // readers. This will allow the SDK to send the reader's payload as chunked
+ // transfer encoding.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
+ // server-side encryption with Amazon Web Services KMS (SSE-KMS).
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"x-amz-fwd-header-Cache-Control" type:"string"`
+
+ // This header can be used as a data integrity check to verify that the data
+ // received is the same data that was originally sent. This specifies the base64-encoded,
+ // 32-bit CRC32 checksum of the object returned by the Object Lambda function.
+ // This may not match the checksum for the object stored in Amazon S3. Amazon
+ // S3 will perform validation of the checksum values only when the original
+ // GetObject request required checksum validation. For more information about
+ // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
+ // in the Amazon S3 User Guide.
+ // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumCRC32 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 32-bit CRC32C checksum of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumCRC32C *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-crc32c" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 160-bit SHA-1 digest of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumSHA1 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha1" type:"string"` + + // This header can be used as a data integrity check to verify that the data + // received is the same data that was originally sent. This specifies the base64-encoded, + // 256-bit SHA-256 digest of the object returned by the Object Lambda function. + // This may not match the checksum for the object stored in Amazon S3. Amazon + // S3 will perform validation of the checksum values only when the original + // GetObject request required checksum validation. For more information about + // checksums, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html) + // in the Amazon S3 User Guide. + // + // Only one checksum header can be specified at a time. If you supply multiple + // checksum headers, this request will fail. + ChecksumSHA256 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-checksum-sha256" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"x-amz-fwd-header-Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"x-amz-fwd-header-Content-Encoding" type:"string"` + + // The language the content is in. 
+ ContentLanguage *string `location:"header" locationName:"x-amz-fwd-header-Content-Language" type:"string"`
+
+ // The size of the content body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"x-amz-fwd-header-Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"x-amz-fwd-header-Content-Type" type:"string"`
+
+ // Specifies whether an object stored in Amazon S3 is (true) or is not (false)
+ // a delete marker.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-fwd-header-x-amz-delete-marker" type:"boolean"`
+
+ // An opaque identifier assigned by a web server to a specific version of a
+ // resource found at a URL.
+ ETag *string `location:"header" locationName:"x-amz-fwd-header-ETag" type:"string"`
+
+ // A string that uniquely identifies an error condition. Returned in the <Code>
+ // tag of the error XML response for a corresponding GetObject call. Cannot
+ // be used with a successful StatusCode header or when the transformed object
+ // is provided in the body. All error codes from S3 are sentence-cased. The
+ // regular expression (regex) value is "^[A-Z][a-zA-Z]+$".
+ ErrorCode *string `location:"header" locationName:"x-amz-fwd-error-code" type:"string"`
+
+ // Contains a generic description of the error condition. Returned in the <Message>
+ // tag of the error XML response for a corresponding GetObject call. Cannot
+ // be used with a successful StatusCode header or when the transformed object
+ // is provided in the body.
+ ErrorMessage *string `location:"header" locationName:"x-amz-fwd-error-message" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key-value pairs
+ // that provide the object expiration information. The value of the rule-id
+ // is URL-encoded.
+ Expiration *string `location:"header" locationName:"x-amz-fwd-header-x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"x-amz-fwd-header-Expires" type:"timestamp"`
+
+ // The date and time that the object was last modified.
+ LastModified *time.Time `location:"header" locationName:"x-amz-fwd-header-Last-Modified" type:"timestamp"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Set to the number of metadata entries not returned in x-amz-meta headers.
+ // This can happen if you create metadata using an API like SOAP that supports
+ // more flexible metadata than the REST API. For example, using SOAP, you can
+ // create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-missing-meta" type:"integer"`
+
+ // Indicates whether an object stored in Amazon S3 has an active legal hold.
+ ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+ // Indicates whether an object stored in Amazon S3 has Object Lock enabled.
+ // For more information about S3 Object Lock, see Object Lock (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock.html).
+ ObjectLockMode *string `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+ // The date and time when Object Lock is configured to expire.
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-fwd-header-x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
+ // The count of parts this object has.
+ PartsCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-mp-parts-count" type:"integer"`
+
+ // Indicates if the request involves a bucket that is either a source or destination
+ // in a Replication rule. For more information about S3 Replication, see Replication
+ // (https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication.html).
+ ReplicationStatus *string `location:"header" locationName:"x-amz-fwd-header-x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ //
+ // This functionality is not supported for directory buckets.
+ RequestCharged *string `location:"header" locationName:"x-amz-fwd-header-x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Route prefix to the HTTP URL generated.
+ //
+ // RequestRoute is a required field
+ RequestRoute *string `location:"header" locationName:"x-amz-request-route" type:"string" required:"true"`
+
+ // A single-use encrypted token that maps WriteGetObjectResponse to the end
+ // user GetObject request.
+ //
+ // RequestToken is a required field
+ RequestToken *string `location:"header" locationName:"x-amz-request-token" type:"string" required:"true"`
+
+ // Provides information about the object restoration operation and the expiration
+ // time of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-fwd-header-x-amz-restore" type:"string"`
+
+ // Encryption algorithm used if server-side encryption with a customer-provided
+ // encryption key was specified for the object stored in Amazon S3.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // 128-bit MD5 digest of the customer-provided encryption key used in Amazon S3
+ // to encrypt data stored in S3. For more information, see Protecting data using
+ // server-side encryption with customer-provided encryption keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html).
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon
+ // Web Services Key Management Service (Amazon Web Services KMS) symmetric encryption
+ // customer managed key that was used for the object stored in Amazon S3.
+ //
+ // SSEKMSKeyId is a sensitive parameter and its value will be
+ // replaced with "sensitive" in string returned by WriteGetObjectResponseInput's
+ // String and GoString methods.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
+
+ // The server-side encryption algorithm used when storing the requested object
+ // in Amazon S3 (for example, AES256, aws:kms).
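+ //
+ // Editorial sketch: an Object Lambda function forwarding the upstream object's
+ // encryption algorithm, where getResp is an assumed *GetObjectOutput:
+ //
+ //	input.SetServerSideEncryption(aws.StringValue(getResp.ServerSideEncryption))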
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-fwd-header-x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The integer status code for an HTTP response of a corresponding GetObject + // request. The following is a list of status codes. + // + // * 200 - OK + // + // * 206 - Partial Content + // + // * 304 - Not Modified + // + // * 400 - Bad Request + // + // * 401 - Unauthorized + // + // * 403 - Forbidden + // + // * 404 - Not Found + // + // * 405 - Method Not Allowed + // + // * 409 - Conflict + // + // * 411 - Length Required + // + // * 412 - Precondition Failed + // + // * 416 - Range Not Satisfiable + // + // * 500 - Internal Server Error + // + // * 503 - Service Unavailable + StatusCode *int64 `location:"header" locationName:"x-amz-fwd-status" type:"integer"` + + // Provides storage class information of the object. Amazon S3 returns this + // header for all objects except for S3 Standard storage class objects. + // + // For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html). + StorageClass *string `location:"header" locationName:"x-amz-fwd-header-x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-fwd-header-x-amz-tagging-count" type:"integer"` + + // An ID used to reference a specific version of the object. + VersionId *string `location:"header" locationName:"x-amz-fwd-header-x-amz-version-id" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WriteGetObjectResponseInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WriteGetObjectResponseInput"} + if s.RequestRoute == nil { + invalidParams.Add(request.NewErrParamRequired("RequestRoute")) + } + if s.RequestRoute != nil && len(*s.RequestRoute) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestRoute", 1)) + } + if s.RequestToken == nil { + invalidParams.Add(request.NewErrParamRequired("RequestToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *WriteGetObjectResponseInput) SetAcceptRanges(v string) *WriteGetObjectResponseInput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *WriteGetObjectResponseInput) SetBody(v io.ReadSeeker) *WriteGetObjectResponseInput { + s.Body = v + return s +} + +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *WriteGetObjectResponseInput) SetBucketKeyEnabled(v bool) *WriteGetObjectResponseInput { + s.BucketKeyEnabled = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. 
+func (s *WriteGetObjectResponseInput) SetCacheControl(v string) *WriteGetObjectResponseInput { + s.CacheControl = &v + return s +} + +// SetChecksumCRC32 sets the ChecksumCRC32 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32 = &v + return s +} + +// SetChecksumCRC32C sets the ChecksumCRC32C field's value. +func (s *WriteGetObjectResponseInput) SetChecksumCRC32C(v string) *WriteGetObjectResponseInput { + s.ChecksumCRC32C = &v + return s +} + +// SetChecksumSHA1 sets the ChecksumSHA1 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA1(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA1 = &v + return s +} + +// SetChecksumSHA256 sets the ChecksumSHA256 field's value. +func (s *WriteGetObjectResponseInput) SetChecksumSHA256(v string) *WriteGetObjectResponseInput { + s.ChecksumSHA256 = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *WriteGetObjectResponseInput) SetContentDisposition(v string) *WriteGetObjectResponseInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *WriteGetObjectResponseInput) SetContentEncoding(v string) *WriteGetObjectResponseInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *WriteGetObjectResponseInput) SetContentLanguage(v string) *WriteGetObjectResponseInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *WriteGetObjectResponseInput) SetContentLength(v int64) *WriteGetObjectResponseInput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *WriteGetObjectResponseInput) SetContentRange(v string) *WriteGetObjectResponseInput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *WriteGetObjectResponseInput) SetContentType(v string) *WriteGetObjectResponseInput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *WriteGetObjectResponseInput) SetDeleteMarker(v bool) *WriteGetObjectResponseInput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *WriteGetObjectResponseInput) SetETag(v string) *WriteGetObjectResponseInput { + s.ETag = &v + return s +} + +// SetErrorCode sets the ErrorCode field's value. +func (s *WriteGetObjectResponseInput) SetErrorCode(v string) *WriteGetObjectResponseInput { + s.ErrorCode = &v + return s +} + +// SetErrorMessage sets the ErrorMessage field's value. +func (s *WriteGetObjectResponseInput) SetErrorMessage(v string) *WriteGetObjectResponseInput { + s.ErrorMessage = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *WriteGetObjectResponseInput) SetExpiration(v string) *WriteGetObjectResponseInput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *WriteGetObjectResponseInput) SetExpires(v time.Time) *WriteGetObjectResponseInput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *WriteGetObjectResponseInput) SetLastModified(v time.Time) *WriteGetObjectResponseInput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. 
+func (s *WriteGetObjectResponseInput) SetMetadata(v map[string]*string) *WriteGetObjectResponseInput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *WriteGetObjectResponseInput) SetMissingMeta(v int64) *WriteGetObjectResponseInput { + s.MissingMeta = &v + return s +} + +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockLegalHoldStatus(v string) *WriteGetObjectResponseInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockMode(v string) *WriteGetObjectResponseInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *WriteGetObjectResponseInput) SetObjectLockRetainUntilDate(v time.Time) *WriteGetObjectResponseInput { + s.ObjectLockRetainUntilDate = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *WriteGetObjectResponseInput) SetPartsCount(v int64) *WriteGetObjectResponseInput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *WriteGetObjectResponseInput) SetReplicationStatus(v string) *WriteGetObjectResponseInput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *WriteGetObjectResponseInput) SetRequestCharged(v string) *WriteGetObjectResponseInput { + s.RequestCharged = &v + return s +} + +// SetRequestRoute sets the RequestRoute field's value. +func (s *WriteGetObjectResponseInput) SetRequestRoute(v string) *WriteGetObjectResponseInput { + s.RequestRoute = &v + return s +} + +// SetRequestToken sets the RequestToken field's value. +func (s *WriteGetObjectResponseInput) SetRequestToken(v string) *WriteGetObjectResponseInput { + s.RequestToken = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *WriteGetObjectResponseInput) SetRestore(v string) *WriteGetObjectResponseInput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerAlgorithm(v string) *WriteGetObjectResponseInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *WriteGetObjectResponseInput) SetSSECustomerKeyMD5(v string) *WriteGetObjectResponseInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *WriteGetObjectResponseInput) SetSSEKMSKeyId(v string) *WriteGetObjectResponseInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *WriteGetObjectResponseInput) SetServerSideEncryption(v string) *WriteGetObjectResponseInput { + s.ServerSideEncryption = &v + return s +} + +// SetStatusCode sets the StatusCode field's value. +func (s *WriteGetObjectResponseInput) SetStatusCode(v int64) *WriteGetObjectResponseInput { + s.StatusCode = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *WriteGetObjectResponseInput) SetStorageClass(v string) *WriteGetObjectResponseInput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. 
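These fluent setters chain naturally when building a WriteGetObjectResponse call, typically from an S3 Object Lambda function. A minimal sketch, assuming the route and token arrive from the triggering GetObject event; the `respond` function and its variable names are illustrative, not part of the SDK:

```go
package example

import (
	"bytes"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// respond streams a transformed object back through S3 Object Lambda.
// route and token would normally be read from the GetObject event payload.
func respond(route, token string, body []byte) error {
	svc := s3.New(session.Must(session.NewSession()))

	input := (&s3.WriteGetObjectResponseInput{}).
		SetRequestRoute(route). // required; also used as a host label
		SetRequestToken(token). // required; single-use token from the event
		SetStatusCode(200).
		SetContentLength(int64(len(body))).
		SetContentType("text/plain").
		SetBody(bytes.NewReader(body)) // any io.ReadSeeker works

	// Validate mirrors the SDK's own pre-send check: a missing RequestRoute
	// or RequestToken surfaces as an ErrInvalidParams error.
	if err := input.Validate(); err != nil {
		return err
	}

	_, err := svc.WriteGetObjectResponse(input)
	return err
}
```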
+func (s *WriteGetObjectResponseInput) SetTagCount(v int64) *WriteGetObjectResponseInput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *WriteGetObjectResponseInput) SetVersionId(v string) *WriteGetObjectResponseInput { + s.VersionId = &v + return s +} + +func (s *WriteGetObjectResponseInput) hostLabels() map[string]string { + return map[string]string{ + "RequestRoute": aws.StringValue(s.RequestRoute), + } +} + +type WriteGetObjectResponseOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s WriteGetObjectResponseOutput) GoString() string { + return s.String() +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +// AnalyticsS3ExportFileFormat_Values returns all elements of the AnalyticsS3ExportFileFormat enum +func AnalyticsS3ExportFileFormat_Values() []string { + return []string{ + AnalyticsS3ExportFileFormatCsv, + } +} + +const ( + // ArchiveStatusArchiveAccess is a ArchiveStatus enum value + ArchiveStatusArchiveAccess = "ARCHIVE_ACCESS" + + // ArchiveStatusDeepArchiveAccess is a ArchiveStatus enum value + ArchiveStatusDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// ArchiveStatus_Values returns all elements of the ArchiveStatus enum +func ArchiveStatus_Values() []string { + return []string{ + ArchiveStatusArchiveAccess, + ArchiveStatusDeepArchiveAccess, + } +} + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +// BucketAccelerateStatus_Values returns all elements of the BucketAccelerateStatus enum +func BucketAccelerateStatus_Values() []string { + return []string{ + BucketAccelerateStatusEnabled, + BucketAccelerateStatusSuspended, + } +} + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +// BucketCannedACL_Values returns all elements of the BucketCannedACL enum +func BucketCannedACL_Values() []string { + return []string{ + BucketCannedACLPrivate, + BucketCannedACLPublicRead, + BucketCannedACLPublicReadWrite, + BucketCannedACLAuthenticatedRead, + } +} + +const ( + // BucketLocationConstraintAfSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintAfSouth1 = "af-south-1" + + // BucketLocationConstraintApEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApEast1 = "ap-east-1" + + // 
BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintApNortheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast2 = "ap-northeast-2" + + // BucketLocationConstraintApNortheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast3 = "ap-northeast-3" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth2 = "ap-south-2" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintApSoutheast3 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast3 = "ap-southeast-3" + + // BucketLocationConstraintCaCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintCaCentral1 = "ca-central-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintCnNorthwest1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorthwest1 = "cn-northwest-1" + + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" + + // BucketLocationConstraintEuNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuNorth1 = "eu-north-1" + + // BucketLocationConstraintEuSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth1 = "eu-south-1" + + // BucketLocationConstraintEuSouth2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuSouth2 = "eu-south-2" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintEuWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest2 = "eu-west-2" + + // BucketLocationConstraintEuWest3 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest3 = "eu-west-3" + + // BucketLocationConstraintMeSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintMeSouth1 = "me-south-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintUsEast2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsEast2 = "us-east-2" + + // BucketLocationConstraintUsGovEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovEast1 = "us-gov-east-1" + + // BucketLocationConstraintUsGovWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsGovWest1 = "us-gov-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" +) + +// BucketLocationConstraint_Values returns all elements of the 
BucketLocationConstraint enum +func BucketLocationConstraint_Values() []string { + return []string{ + BucketLocationConstraintAfSouth1, + BucketLocationConstraintApEast1, + BucketLocationConstraintApNortheast1, + BucketLocationConstraintApNortheast2, + BucketLocationConstraintApNortheast3, + BucketLocationConstraintApSouth1, + BucketLocationConstraintApSouth2, + BucketLocationConstraintApSoutheast1, + BucketLocationConstraintApSoutheast2, + BucketLocationConstraintApSoutheast3, + BucketLocationConstraintCaCentral1, + BucketLocationConstraintCnNorth1, + BucketLocationConstraintCnNorthwest1, + BucketLocationConstraintEu, + BucketLocationConstraintEuCentral1, + BucketLocationConstraintEuNorth1, + BucketLocationConstraintEuSouth1, + BucketLocationConstraintEuSouth2, + BucketLocationConstraintEuWest1, + BucketLocationConstraintEuWest2, + BucketLocationConstraintEuWest3, + BucketLocationConstraintMeSouth1, + BucketLocationConstraintSaEast1, + BucketLocationConstraintUsEast2, + BucketLocationConstraintUsGovEast1, + BucketLocationConstraintUsGovWest1, + BucketLocationConstraintUsWest1, + BucketLocationConstraintUsWest2, + } +} + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +// BucketLogsPermission_Values returns all elements of the BucketLogsPermission enum +func BucketLogsPermission_Values() []string { + return []string{ + BucketLogsPermissionFullControl, + BucketLogsPermissionRead, + BucketLogsPermissionWrite, + } +} + +const ( + // BucketTypeDirectory is a BucketType enum value + BucketTypeDirectory = "Directory" +) + +// BucketType_Values returns all elements of the BucketType enum +func BucketType_Values() []string { + return []string{ + BucketTypeDirectory, + } +} + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +// BucketVersioningStatus_Values returns all elements of the BucketVersioningStatus enum +func BucketVersioningStatus_Values() []string { + return []string{ + BucketVersioningStatusEnabled, + BucketVersioningStatusSuspended, + } +} + +const ( + // ChecksumAlgorithmCrc32 is a ChecksumAlgorithm enum value + ChecksumAlgorithmCrc32 = "CRC32" + + // ChecksumAlgorithmCrc32c is a ChecksumAlgorithm enum value + ChecksumAlgorithmCrc32c = "CRC32C" + + // ChecksumAlgorithmSha1 is a ChecksumAlgorithm enum value + ChecksumAlgorithmSha1 = "SHA1" + + // ChecksumAlgorithmSha256 is a ChecksumAlgorithm enum value + ChecksumAlgorithmSha256 = "SHA256" +) + +// ChecksumAlgorithm_Values returns all elements of the ChecksumAlgorithm enum +func ChecksumAlgorithm_Values() []string { + return []string{ + ChecksumAlgorithmCrc32, + ChecksumAlgorithmCrc32c, + ChecksumAlgorithmSha1, + ChecksumAlgorithmSha256, + } +} + +const ( + // ChecksumModeEnabled is a ChecksumMode enum value + ChecksumModeEnabled = "ENABLED" +) + +// ChecksumMode_Values returns all elements of the ChecksumMode enum +func ChecksumMode_Values() []string { + return []string{ + ChecksumModeEnabled, + } +} + +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a 
CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + +// CompressionType_Values returns all elements of the CompressionType enum +func CompressionType_Values() []string { + return []string{ + CompressionTypeNone, + CompressionTypeGzip, + CompressionTypeBzip2, + } +} + +const ( + // DataRedundancySingleAvailabilityZone is a DataRedundancy enum value + DataRedundancySingleAvailabilityZone = "SingleAvailabilityZone" +) + +// DataRedundancy_Values returns all elements of the DataRedundancy enum +func DataRedundancy_Values() []string { + return []string{ + DataRedundancySingleAvailabilityZone, + } +} + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + +// DeleteMarkerReplicationStatus_Values returns all elements of the DeleteMarkerReplicationStatus enum +func DeleteMarkerReplicationStatus_Values() []string { + return []string{ + DeleteMarkerReplicationStatusEnabled, + DeleteMarkerReplicationStatusDisabled, + } +} + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key can contain any Unicode character; +// however, the XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. +const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// EncodingType_Values returns all elements of the EncodingType enum +func EncodingType_Values() []string { + return []string{ + EncodingTypeUrl, + } +} + +// The bucket event for which to send notifications. 
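The EncodingType documentation above has a practical consequence: when object keys may contain characters that XML 1.0 cannot carry, request URL encoding and decode the keys on receipt. A hedged sketch, assuming a configured *s3.S3 client; the `listKeys` helper is illustrative:

```go
package example

import (
	"net/url"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func listKeys(svc *s3.S3, bucket string) ([]string, error) {
	out, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket:       aws.String(bucket),
		EncodingType: aws.String(s3.EncodingTypeUrl), // "url"
	})
	if err != nil {
		return nil, err
	}

	keys := make([]string, 0, len(out.Contents))
	for _, obj := range out.Contents {
		// Keys come back url-encoded, so decode before use.
		k, err := url.QueryUnescape(aws.StringValue(obj.Key))
		if err != nil {
			return nil, err
		}
		keys = append(keys, k)
	}
	return keys, nil
}
```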
+const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestore is a Event enum value + EventS3ObjectRestore = "s3:ObjectRestore:*" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" + + // EventS3Replication is a Event enum value + EventS3Replication = "s3:Replication:*" + + // EventS3ReplicationOperationFailedReplication is a Event enum value + EventS3ReplicationOperationFailedReplication = "s3:Replication:OperationFailedReplication" + + // EventS3ReplicationOperationNotTracked is a Event enum value + EventS3ReplicationOperationNotTracked = "s3:Replication:OperationNotTracked" + + // EventS3ReplicationOperationMissedThreshold is a Event enum value + EventS3ReplicationOperationMissedThreshold = "s3:Replication:OperationMissedThreshold" + + // EventS3ReplicationOperationReplicatedAfterThreshold is a Event enum value + EventS3ReplicationOperationReplicatedAfterThreshold = "s3:Replication:OperationReplicatedAfterThreshold" + + // EventS3ObjectRestoreDelete is a Event enum value + EventS3ObjectRestoreDelete = "s3:ObjectRestore:Delete" + + // EventS3LifecycleTransition is a Event enum value + EventS3LifecycleTransition = "s3:LifecycleTransition" + + // EventS3IntelligentTiering is a Event enum value + EventS3IntelligentTiering = "s3:IntelligentTiering" + + // EventS3ObjectAclPut is a Event enum value + EventS3ObjectAclPut = "s3:ObjectAcl:Put" + + // EventS3LifecycleExpiration is a Event enum value + EventS3LifecycleExpiration = "s3:LifecycleExpiration:*" + + // EventS3LifecycleExpirationDelete is a Event enum value + EventS3LifecycleExpirationDelete = "s3:LifecycleExpiration:Delete" + + // EventS3LifecycleExpirationDeleteMarkerCreated is a Event enum value + EventS3LifecycleExpirationDeleteMarkerCreated = "s3:LifecycleExpiration:DeleteMarkerCreated" + + // EventS3ObjectTagging is a Event enum value + EventS3ObjectTagging = "s3:ObjectTagging:*" + + // EventS3ObjectTaggingPut is a Event enum value + EventS3ObjectTaggingPut = "s3:ObjectTagging:Put" + + // EventS3ObjectTaggingDelete is a Event enum value + EventS3ObjectTaggingDelete = "s3:ObjectTagging:Delete" +) + +// Event_Values returns all elements of the Event enum +func Event_Values() []string { + return []string{ + EventS3ReducedRedundancyLostObject, + EventS3ObjectCreated, + EventS3ObjectCreatedPut, + EventS3ObjectCreatedPost, + 
EventS3ObjectCreatedCopy, + EventS3ObjectCreatedCompleteMultipartUpload, + EventS3ObjectRemoved, + EventS3ObjectRemovedDelete, + EventS3ObjectRemovedDeleteMarkerCreated, + EventS3ObjectRestore, + EventS3ObjectRestorePost, + EventS3ObjectRestoreCompleted, + EventS3Replication, + EventS3ReplicationOperationFailedReplication, + EventS3ReplicationOperationNotTracked, + EventS3ReplicationOperationMissedThreshold, + EventS3ReplicationOperationReplicatedAfterThreshold, + EventS3ObjectRestoreDelete, + EventS3LifecycleTransition, + EventS3IntelligentTiering, + EventS3ObjectAclPut, + EventS3LifecycleExpiration, + EventS3LifecycleExpirationDelete, + EventS3LifecycleExpirationDeleteMarkerCreated, + EventS3ObjectTagging, + EventS3ObjectTaggingPut, + EventS3ObjectTaggingDelete, + } +} + +const ( + // ExistingObjectReplicationStatusEnabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusEnabled = "Enabled" + + // ExistingObjectReplicationStatusDisabled is a ExistingObjectReplicationStatus enum value + ExistingObjectReplicationStatusDisabled = "Disabled" +) + +// ExistingObjectReplicationStatus_Values returns all elements of the ExistingObjectReplicationStatus enum +func ExistingObjectReplicationStatus_Values() []string { + return []string{ + ExistingObjectReplicationStatusEnabled, + ExistingObjectReplicationStatusDisabled, + } +} + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +// ExpirationStatus_Values returns all elements of the ExpirationStatus enum +func ExpirationStatus_Values() []string { + return []string{ + ExpirationStatusEnabled, + ExpirationStatusDisabled, + } +} + +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +// ExpressionType_Values returns all elements of the ExpressionType enum +func ExpressionType_Values() []string { + return []string{ + ExpressionTypeSql, + } +} + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + +// FileHeaderInfo_Values returns all elements of the FileHeaderInfo enum +func FileHeaderInfo_Values() []string { + return []string{ + FileHeaderInfoUse, + FileHeaderInfoIgnore, + FileHeaderInfoNone, + } +} + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +// FilterRuleName_Values returns all elements of the FilterRuleName enum +func FilterRuleName_Values() []string { + return []string{ + FilterRuleNamePrefix, + FilterRuleNameSuffix, + } +} + +const ( + // IntelligentTieringAccessTierArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierArchiveAccess = "ARCHIVE_ACCESS" + + // IntelligentTieringAccessTierDeepArchiveAccess is a IntelligentTieringAccessTier enum value + IntelligentTieringAccessTierDeepArchiveAccess = "DEEP_ARCHIVE_ACCESS" +) + +// IntelligentTieringAccessTier_Values returns all elements of the IntelligentTieringAccessTier enum +func IntelligentTieringAccessTier_Values() []string { + return []string{ + IntelligentTieringAccessTierArchiveAccess, + IntelligentTieringAccessTierDeepArchiveAccess, + } +} + +const 
( + // IntelligentTieringStatusEnabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusEnabled = "Enabled" + + // IntelligentTieringStatusDisabled is a IntelligentTieringStatus enum value + IntelligentTieringStatusDisabled = "Disabled" +) + +// IntelligentTieringStatus_Values returns all elements of the IntelligentTieringStatus enum +func IntelligentTieringStatus_Values() []string { + return []string{ + IntelligentTieringStatusEnabled, + IntelligentTieringStatusDisabled, + } +} + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" +) + +// InventoryFormat_Values returns all elements of the InventoryFormat enum +func InventoryFormat_Values() []string { + return []string{ + InventoryFormatCsv, + InventoryFormatOrc, + InventoryFormatParquet, + } +} + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +// InventoryFrequency_Values returns all elements of the InventoryFrequency enum +func InventoryFrequency_Values() []string { + return []string{ + InventoryFrequencyDaily, + InventoryFrequencyWeekly, + } +} + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +// InventoryIncludedObjectVersions_Values returns all elements of the InventoryIncludedObjectVersions enum +func InventoryIncludedObjectVersions_Values() []string { + return []string{ + InventoryIncludedObjectVersionsAll, + InventoryIncludedObjectVersionsCurrent, + } +} + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" + + // InventoryOptionalFieldIntelligentTieringAccessTier is a InventoryOptionalField enum value + 
InventoryOptionalFieldIntelligentTieringAccessTier = "IntelligentTieringAccessTier" + + // InventoryOptionalFieldBucketKeyStatus is a InventoryOptionalField enum value + InventoryOptionalFieldBucketKeyStatus = "BucketKeyStatus" + + // InventoryOptionalFieldChecksumAlgorithm is a InventoryOptionalField enum value + InventoryOptionalFieldChecksumAlgorithm = "ChecksumAlgorithm" + + // InventoryOptionalFieldObjectAccessControlList is a InventoryOptionalField enum value + InventoryOptionalFieldObjectAccessControlList = "ObjectAccessControlList" + + // InventoryOptionalFieldObjectOwner is a InventoryOptionalField enum value + InventoryOptionalFieldObjectOwner = "ObjectOwner" +) + +// InventoryOptionalField_Values returns all elements of the InventoryOptionalField enum +func InventoryOptionalField_Values() []string { + return []string{ + InventoryOptionalFieldSize, + InventoryOptionalFieldLastModifiedDate, + InventoryOptionalFieldStorageClass, + InventoryOptionalFieldEtag, + InventoryOptionalFieldIsMultipartUploaded, + InventoryOptionalFieldReplicationStatus, + InventoryOptionalFieldEncryptionStatus, + InventoryOptionalFieldObjectLockRetainUntilDate, + InventoryOptionalFieldObjectLockMode, + InventoryOptionalFieldObjectLockLegalHoldStatus, + InventoryOptionalFieldIntelligentTieringAccessTier, + InventoryOptionalFieldBucketKeyStatus, + InventoryOptionalFieldChecksumAlgorithm, + InventoryOptionalFieldObjectAccessControlList, + InventoryOptionalFieldObjectOwner, + } +} + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" +) + +// JSONType_Values returns all elements of the JSONType enum +func JSONType_Values() []string { + return []string{ + JSONTypeDocument, + JSONTypeLines, + } +} + +const ( + // LocationTypeAvailabilityZone is a LocationType enum value + LocationTypeAvailabilityZone = "AvailabilityZone" +) + +// LocationType_Values returns all elements of the LocationType enum +func LocationType_Values() []string { + return []string{ + LocationTypeAvailabilityZone, + } +} + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +// MFADelete_Values returns all elements of the MFADelete enum +func MFADelete_Values() []string { + return []string{ + MFADeleteEnabled, + MFADeleteDisabled, + } +} + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +// MFADeleteStatus_Values returns all elements of the MFADeleteStatus enum +func MFADeleteStatus_Values() []string { + return []string{ + MFADeleteStatusEnabled, + MFADeleteStatusDisabled, + } +} + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +// MetadataDirective_Values returns all elements of the MetadataDirective enum +func MetadataDirective_Values() []string { + return []string{ + MetadataDirectiveCopy, + MetadataDirectiveReplace, + } +} + +const ( + // MetricsStatusEnabled is a MetricsStatus enum value + MetricsStatusEnabled = "Enabled" + + // MetricsStatusDisabled is a MetricsStatus enum value + MetricsStatusDisabled = "Disabled" +) + +// MetricsStatus_Values returns all elements of the MetricsStatus 
enum +func MetricsStatus_Values() []string { + return []string{ + MetricsStatusEnabled, + MetricsStatusDisabled, + } +} + +const ( + // ObjectAttributesEtag is a ObjectAttributes enum value + ObjectAttributesEtag = "ETag" + + // ObjectAttributesChecksum is a ObjectAttributes enum value + ObjectAttributesChecksum = "Checksum" + + // ObjectAttributesObjectParts is a ObjectAttributes enum value + ObjectAttributesObjectParts = "ObjectParts" + + // ObjectAttributesStorageClass is a ObjectAttributes enum value + ObjectAttributesStorageClass = "StorageClass" + + // ObjectAttributesObjectSize is a ObjectAttributes enum value + ObjectAttributesObjectSize = "ObjectSize" +) + +// ObjectAttributes_Values returns all elements of the ObjectAttributes enum +func ObjectAttributes_Values() []string { + return []string{ + ObjectAttributesEtag, + ObjectAttributesChecksum, + ObjectAttributesObjectParts, + ObjectAttributesStorageClass, + ObjectAttributesObjectSize, + } +} + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +// ObjectCannedACL_Values returns all elements of the ObjectCannedACL enum +func ObjectCannedACL_Values() []string { + return []string{ + ObjectCannedACLPrivate, + ObjectCannedACLPublicRead, + ObjectCannedACLPublicReadWrite, + ObjectCannedACLAuthenticatedRead, + ObjectCannedACLAwsExecRead, + ObjectCannedACLBucketOwnerRead, + ObjectCannedACLBucketOwnerFullControl, + } +} + +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +// ObjectLockEnabled_Values returns all elements of the ObjectLockEnabled enum +func ObjectLockEnabled_Values() []string { + return []string{ + ObjectLockEnabledEnabled, + } +} + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +// ObjectLockLegalHoldStatus_Values returns all elements of the ObjectLockLegalHoldStatus enum +func ObjectLockLegalHoldStatus_Values() []string { + return []string{ + ObjectLockLegalHoldStatusOn, + ObjectLockLegalHoldStatusOff, + } +} + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +// ObjectLockMode_Values returns all elements of the ObjectLockMode enum +func ObjectLockMode_Values() []string { + return []string{ + ObjectLockModeGovernance, + ObjectLockModeCompliance, + } +} + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // 
ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + +// ObjectLockRetentionMode_Values returns all elements of the ObjectLockRetentionMode enum +func ObjectLockRetentionMode_Values() []string { + return []string{ + ObjectLockRetentionModeGovernance, + ObjectLockRetentionModeCompliance, + } +} + +// The container element for object ownership for a bucket's ownership controls. +// +// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to +// the bucket owner if the objects are uploaded with the bucket-owner-full-control +// canned ACL. +// +// ObjectWriter - The uploading account will own the object if the object is +// uploaded with the bucket-owner-full-control canned ACL. +// +// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer +// affect permissions. The bucket owner automatically owns and has full control +// over every object in the bucket. The bucket only accepts PUT requests that +// don't specify an ACL or specify bucket owner full control ACLs (such as the +// predefined bucket-owner-full-control canned ACL or a custom ACL in XML format +// that grants the same permissions). +// +// By default, ObjectOwnership is set to BucketOwnerEnforced and ACLs are disabled. +// We recommend keeping ACLs disabled, except in uncommon use cases where you +// must control access for each object individually. For more information about +// S3 Object Ownership, see Controlling ownership of objects and disabling ACLs +// for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. Directory buckets +// use the bucket owner enforced setting for S3 Object Ownership. 
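For illustration, pinning a bucket to the BucketOwnerEnforced setting described above amounts to a single PutBucketOwnershipControls call. A sketch, assuming default credentials; the bucket name and function name are illustrative:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enforceBucketOwner disables ACLs on the bucket so the bucket owner
// automatically owns every object in it.
func enforceBucketOwner(bucket string) error {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketOwnershipControls(&s3.PutBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
		OwnershipControls: &s3.OwnershipControls{
			Rules: []*s3.OwnershipControlsRule{
				{ObjectOwnership: aws.String(s3.ObjectOwnershipBucketOwnerEnforced)},
			},
		},
	})
	return err
}
```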
+const ( + // ObjectOwnershipBucketOwnerPreferred is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerPreferred = "BucketOwnerPreferred" + + // ObjectOwnershipObjectWriter is a ObjectOwnership enum value + ObjectOwnershipObjectWriter = "ObjectWriter" + + // ObjectOwnershipBucketOwnerEnforced is a ObjectOwnership enum value + ObjectOwnershipBucketOwnerEnforced = "BucketOwnerEnforced" +) + +// ObjectOwnership_Values returns all elements of the ObjectOwnership enum +func ObjectOwnership_Values() []string { + return []string{ + ObjectOwnershipBucketOwnerPreferred, + ObjectOwnershipObjectWriter, + ObjectOwnershipBucketOwnerEnforced, + } +} + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" + + // ObjectStorageClassOutposts is a ObjectStorageClass enum value + ObjectStorageClassOutposts = "OUTPOSTS" + + // ObjectStorageClassGlacierIr is a ObjectStorageClass enum value + ObjectStorageClassGlacierIr = "GLACIER_IR" + + // ObjectStorageClassSnow is a ObjectStorageClass enum value + ObjectStorageClassSnow = "SNOW" + + // ObjectStorageClassExpressOnezone is a ObjectStorageClass enum value + ObjectStorageClassExpressOnezone = "EXPRESS_ONEZONE" +) + +// ObjectStorageClass_Values returns all elements of the ObjectStorageClass enum +func ObjectStorageClass_Values() []string { + return []string{ + ObjectStorageClassStandard, + ObjectStorageClassReducedRedundancy, + ObjectStorageClassGlacier, + ObjectStorageClassStandardIa, + ObjectStorageClassOnezoneIa, + ObjectStorageClassIntelligentTiering, + ObjectStorageClassDeepArchive, + ObjectStorageClassOutposts, + ObjectStorageClassGlacierIr, + ObjectStorageClassSnow, + ObjectStorageClassExpressOnezone, + } +} + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +// ObjectVersionStorageClass_Values returns all elements of the ObjectVersionStorageClass enum +func ObjectVersionStorageClass_Values() []string { + return []string{ + ObjectVersionStorageClassStandard, + } +} + +const ( + // OptionalObjectAttributesRestoreStatus is a OptionalObjectAttributes enum value + OptionalObjectAttributesRestoreStatus = "RestoreStatus" +) + +// OptionalObjectAttributes_Values returns all elements of the OptionalObjectAttributes enum +func OptionalObjectAttributes_Values() []string { + return []string{ + OptionalObjectAttributesRestoreStatus, + } +} + +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + +// OwnerOverride_Values returns all elements of the OwnerOverride enum +func OwnerOverride_Values() []string { + return []string{ + OwnerOverrideDestination, + } +} + +const ( + // PartitionDateSourceEventTime is a 
PartitionDateSource enum value + PartitionDateSourceEventTime = "EventTime" + + // PartitionDateSourceDeliveryTime is a PartitionDateSource enum value + PartitionDateSourceDeliveryTime = "DeliveryTime" +) + +// PartitionDateSource_Values returns all elements of the PartitionDateSource enum +func PartitionDateSource_Values() []string { + return []string{ + PartitionDateSourceEventTime, + PartitionDateSourceDeliveryTime, + } +} + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +// Payer_Values returns all elements of the Payer enum +func Payer_Values() []string { + return []string{ + PayerRequester, + PayerBucketOwner, + } +} + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +// Permission_Values returns all elements of the Permission enum +func Permission_Values() []string { + return []string{ + PermissionFullControl, + PermissionWrite, + PermissionWriteAcp, + PermissionRead, + PermissionReadAcp, + } +} + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +// Protocol_Values returns all elements of the Protocol enum +func Protocol_Values() []string { + return []string{ + ProtocolHttp, + ProtocolHttps, + } +} + +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + +// QuoteFields_Values returns all elements of the QuoteFields enum +func QuoteFields_Values() []string { + return []string{ + QuoteFieldsAlways, + QuoteFieldsAsneeded, + } +} + +const ( + // ReplicaModificationsStatusEnabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusEnabled = "Enabled" + + // ReplicaModificationsStatusDisabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusDisabled = "Disabled" +) + +// ReplicaModificationsStatus_Values returns all elements of the ReplicaModificationsStatus enum +func ReplicaModificationsStatus_Values() []string { + return []string{ + ReplicaModificationsStatusEnabled, + ReplicaModificationsStatusDisabled, + } +} + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +// ReplicationRuleStatus_Values returns all elements of the ReplicationRuleStatus enum +func ReplicationRuleStatus_Values() []string { + return []string{ + ReplicationRuleStatusEnabled, + ReplicationRuleStatusDisabled, + } +} + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" + + // 
ReplicationStatusCompleted is a ReplicationStatus enum value + ReplicationStatusCompleted = "COMPLETED" +) + +// ReplicationStatus_Values returns all elements of the ReplicationStatus enum +func ReplicationStatus_Values() []string { + return []string{ + ReplicationStatusComplete, + ReplicationStatusPending, + ReplicationStatusFailed, + ReplicationStatusReplica, + ReplicationStatusCompleted, + } +} + +const ( + // ReplicationTimeStatusEnabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusEnabled = "Enabled" + + // ReplicationTimeStatusDisabled is a ReplicationTimeStatus enum value + ReplicationTimeStatusDisabled = "Disabled" +) + +// ReplicationTimeStatus_Values returns all elements of the ReplicationTimeStatus enum +func ReplicationTimeStatus_Values() []string { + return []string{ + ReplicationTimeStatusEnabled, + ReplicationTimeStatusDisabled, + } +} + +// If present, indicates that the requester was successfully charged for the +// request. +// +// This functionality is not supported for directory buckets. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// RequestCharged_Values returns all elements of the RequestCharged enum +func RequestCharged_Values() []string { + return []string{ + RequestChargedRequester, + } +} + +// Confirms that the requester knows that they will be charged for the request. +// Bucket owners need not specify this parameter in their requests. If either +// the source or destination S3 bucket has Requester Pays enabled, the requester +// will pay for corresponding charges to copy the object. For information about +// downloading objects from Requester Pays buckets, see Downloading Objects +// in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) +// in the Amazon S3 User Guide. +// +// This functionality is not supported for directory buckets. 
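The Requester Pays behavior described above is opted into per request by sending this value on the input. A minimal sketch, assuming default credentials; names are illustrative:

```go
package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func getFromRequesterPaysBucket(bucket, key string) (*s3.GetObjectOutput, error) {
	svc := s3.New(session.Must(session.NewSession()))

	// RequestPayer confirms we accept the data-transfer charges; without it,
	// a GetObject against a Requester Pays bucket is rejected with 403.
	return svc.GetObject(&s3.GetObjectInput{
		Bucket:       aws.String(bucket),
		Key:          aws.String(key),
		RequestPayer: aws.String(s3.RequestPayerRequester),
	})
}
```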
+const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +// RequestPayer_Values returns all elements of the RequestPayer enum +func RequestPayer_Values() []string { + return []string{ + RequestPayerRequester, + } +} + +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + +// RestoreRequestType_Values returns all elements of the RestoreRequestType enum +func RestoreRequestType_Values() []string { + return []string{ + RestoreRequestTypeSelect, + } +} + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" + + // ServerSideEncryptionAwsKmsDsse is a ServerSideEncryption enum value + ServerSideEncryptionAwsKmsDsse = "aws:kms:dsse" +) + +// ServerSideEncryption_Values returns all elements of the ServerSideEncryption enum +func ServerSideEncryption_Values() []string { + return []string{ + ServerSideEncryptionAes256, + ServerSideEncryptionAwsKms, + ServerSideEncryptionAwsKmsDsse, + } +} + +const ( + // SessionModeReadOnly is a SessionMode enum value + SessionModeReadOnly = "ReadOnly" + + // SessionModeReadWrite is a SessionMode enum value + SessionModeReadWrite = "ReadWrite" +) + +// SessionMode_Values returns all elements of the SessionMode enum +func SessionMode_Values() []string { + return []string{ + SessionModeReadOnly, + SessionModeReadWrite, + } +} + +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + +// SseKmsEncryptedObjectsStatus_Values returns all elements of the SseKmsEncryptedObjectsStatus enum +func SseKmsEncryptedObjectsStatus_Values() []string { + return []string{ + SseKmsEncryptedObjectsStatusEnabled, + SseKmsEncryptedObjectsStatusDisabled, + } +} + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" + + // StorageClassOutposts is a StorageClass enum value + StorageClassOutposts = "OUTPOSTS" + + // StorageClassGlacierIr is a StorageClass enum value + StorageClassGlacierIr = "GLACIER_IR" + + // StorageClassSnow is a StorageClass enum value + StorageClassSnow = "SNOW" + + // StorageClassExpressOnezone is a StorageClass enum value + StorageClassExpressOnezone = "EXPRESS_ONEZONE" +) + +// StorageClass_Values returns all elements of the StorageClass enum +func StorageClass_Values() []string { + return []string{ + StorageClassStandard, + StorageClassReducedRedundancy, + StorageClassStandardIa, + StorageClassOnezoneIa, + StorageClassIntelligentTiering, + StorageClassGlacier, + 
StorageClassDeepArchive, + StorageClassOutposts, + StorageClassGlacierIr, + StorageClassSnow, + StorageClassExpressOnezone, + } +} + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +// StorageClassAnalysisSchemaVersion_Values returns all elements of the StorageClassAnalysisSchemaVersion enum +func StorageClassAnalysisSchemaVersion_Values() []string { + return []string{ + StorageClassAnalysisSchemaVersionV1, + } +} + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +// TaggingDirective_Values returns all elements of the TaggingDirective enum +func TaggingDirective_Values() []string { + return []string{ + TaggingDirectiveCopy, + TaggingDirectiveReplace, + } +} + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +// Tier_Values returns all elements of the Tier enum +func Tier_Values() []string { + return []string{ + TierStandard, + TierBulk, + TierExpedited, + } +} + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" + + // TransitionStorageClassGlacierIr is a TransitionStorageClass enum value + TransitionStorageClassGlacierIr = "GLACIER_IR" +) + +// TransitionStorageClass_Values returns all elements of the TransitionStorageClass enum +func TransitionStorageClass_Values() []string { + return []string{ + TransitionStorageClassGlacier, + TransitionStorageClassStandardIa, + TransitionStorageClassOnezoneIa, + TransitionStorageClassIntelligentTiering, + TransitionStorageClassDeepArchive, + TransitionStorageClassGlacierIr, + } +} + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) + +// Type_Values returns all elements of the Type enum +func Type_Values() []string { + return []string{ + TypeCanonicalUser, + TypeAmazonCustomerByEmail, + TypeGroup, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 00000000000..407f06b6ede --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,202 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +const ( + contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + 
amzTeHeader = "X-Amz-Te"
+	amzTxEncodingHeader = "X-Amz-Transfer-Encoding"
+
+	appendMD5TxEncoding = "append-md5"
+)
+
+// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the
+// request. If the body is not seekable or S3DisableContentMD5Validation is
+// set, this handler will be ignored.
+func computeBodyHashes(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	if r.Error != nil || !aws.IsReaderSeekable(r.Body) {
+		return
+	}
+
+	var md5Hash, sha256Hash hash.Hash
+	hashers := make([]io.Writer, 0, 2)
+
+	// Determine upfront which hashes can be set without overriding user-provided
+	// header data.
+	if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 {
+		md5Hash = md5.New()
+		hashers = append(hashers, md5Hash)
+	}
+
+	if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 {
+		sha256Hash = sha256.New()
+		hashers = append(hashers, sha256Hash)
+	}
+
+	// Create the destination writer based on the hashes that are not already
+	// provided by the user.
+	var dst io.Writer
+	switch len(hashers) {
+	case 0:
+		return
+	case 1:
+		dst = hashers[0]
+	default:
+		dst = io.MultiWriter(hashers...)
+	}
+
+	if _, err := aws.CopySeekableBody(dst, r.Body); err != nil {
+		r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err)
+		return
+	}
+
+	// For the hashes created, set the associated headers that the user did not
+	// already provide.
+	if md5Hash != nil {
+		sum := make([]byte, md5.Size)
+		encoded := make([]byte, md5Base64EncLen)
+
+		base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)}
+	}
+
+	if sha256Hash != nil {
+		encoded := make([]byte, sha256HexEncLen)
+		sum := make([]byte, sha256.Size)
+
+		hex.Encode(encoded, sha256Hash.Sum(sum[0:0]))
+		r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)}
+	}
+}
+
+const (
+	md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen
+	sha256HexEncLen = sha256.Size * 2        // hex.EncodedLen
+)
+
+// askForTxEncodingAppendMD5 adds the x-amz-te: append-md5 header to the request.
+// This requests that the service respond with a trailing MD5 checksum.
+//
+// Will not ask for append MD5 if it is disabled, the request is presigned, or
+// the API operation does not support content MD5 validation.
+func askForTxEncodingAppendMD5(r *request.Request) {
+	if aws.BoolValue(r.Config.S3DisableContentMD5Validation) {
+		return
+	}
+	if r.IsPresigned() {
+		return
+	}
+	r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding)
+}
+
+func useMD5ValidationReader(r *request.Request) {
+	if r.Error != nil {
+		return
+	}
+
+	if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding {
+		return
+	}
+
+	var bodyReader *io.ReadCloser
+	var contentLen int64
+	switch tv := r.Data.(type) {
+	case *GetObjectOutput:
+		bodyReader = &tv.Body
+		contentLen = aws.Int64Value(tv.ContentLength)
+		// Update ContentLength to hide the trailing MD5 checksum.
+ tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader. + *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 00000000000..20828387ea2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,107 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
+// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 00000000000..229606b708a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,89 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/internal/s3shared/s3err" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + if c.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateUnset { + if aws.BoolValue(c.Config.UseDualStack) { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateEnabled + } else { + c.Config.UseDualStackEndpoint = endpoints.DualStackEndpointStateDisabled + } + } + + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(endpointHandler) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + 
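+	// Note: the failure wrapper is pushed after unmarshalError so the
+	// request ID and host ID from the response are attached to the
+	// unmarshaled service error.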
c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler())
+}
+
+func defaultInitRequestFn(r *request.Request) {
+	// Add request handlers for specific platforms.
+	// e.g. 100-continue support for PUT requests using Go 1.6
+	platformRequestHandlers(r)
+
+	switch r.Operation.Name {
+	case opGetBucketLocation:
+		// GetBucketLocation has custom parsing logic
+		r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+	case opCreateBucket:
+		// Auto-populate LocationConstraint with current region
+		r.Handlers.Validate.PushFront(populateLocationConstraint)
+	case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+		r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
+		r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler())
+	case opPutObject, opUploadPart:
+		r.Handlers.Build.PushBack(computeBodyHashes)
+		// Disabled until #1837 root issue is resolved.
+		// case opGetObject:
+		// 	r.Handlers.Build.PushBack(askForTxEncodingAppendMD5)
+		// 	r.Handlers.Unmarshal.PushBack(useMD5ValidationReader)
+	case opWriteGetObjectResponse:
+		r.Handlers.Build.PushFront(buildWriteGetObjectResponseEndpoint)
+	}
+}
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+	getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+	getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+	getCopySourceSSECustomerKey() string
+}
+
+// endpointARNGetter is an accessor interface to grab the
+// field corresponding to an endpoint ARN input.
+type endpointARNGetter interface {
+	getEndpointARN() (arn.Resource, error)
+	hasEndpointARN() bool
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 00000000000..c148f757ee1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// # Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 00000000000..2e8244f8f64
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
+// # Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader also supports both
+// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+//	// The session the S3 Uploader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// Create an uploader with the session and default options
+//	uploader := s3manager.NewUploader(sess)
+//
+//	f, err := os.Open(filename)
+//	if err != nil {
+//		return fmt.Errorf("failed to open file %q, %v", filename, err)
+//	}
+//
+//	// Upload the file to S3.
+//	result, err := uploader.Upload(&s3manager.UploadInput{
+//		Bucket: aws.String(myBucket),
+//		Key:    aws.String(myString),
+//		Body:   f,
+//	})
+//	if err != nil {
+//		return fmt.Errorf("failed to upload file, %v", err)
+//	}
+//	fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// # Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+//	// The session the S3 Downloader will use
+//	sess := session.Must(session.NewSession())
+//
+//	// Create a downloader with the session and default options
+//	downloader := s3manager.NewDownloader(sess)
+//
+//	// Create a file to write the S3 Object contents to.
+//	f, err := os.Create(filename)
+//	if err != nil {
+//		return fmt.Errorf("failed to create file %q, %v", filename, err)
+//	}
+//
+//	// Write the contents of S3 Object to the file
+//	n, err := downloader.Download(f, &s3.GetObjectInput{
+//		Bucket: aws.String(myBucket),
+//		Key:    aws.String(myString),
+//	})
+//	if err != nil {
+//		return fmt.Errorf("failed to download file, %v", err)
+//	}
+//	fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// # Automatic URI cleaning
+//
+// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
+// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
+// used by the service client.
+//
+//	svc := s3.New(sess, &aws.Config{
+//		DisableRestProtocolURICleaning: aws.Bool(true),
+//	})
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucketname"),
+//		Key:    aws.String("//foo//bar//moo"),
+//	})
+//
+// # Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+//	sess := session.Must(session.NewSession())
+//
+//	bucket := "my-bucket"
+//	region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//	if err != nil {
+//		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//			fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//		}
+//		return err
+//	}
+//	fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// # S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
new file mode 100644
index 00000000000..71b43869264
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint.go
@@ -0,0 +1,298 @@
+package s3
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	awsarn "github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/s3shared"
+	"github.com/aws/aws-sdk-go/internal/s3shared/arn"
+)
+
+const (
+	s3Namespace              = "s3"
+	s3AccessPointNamespace   = "s3-accesspoint"
+	s3ObjectsLambdaNamespace = "s3-object-lambda"
+	s3OutpostsNamespace      = "s3-outposts"
+)
+
+// Used by shapes with members decorated as endpoint ARN.
+func parseEndpointARN(v string) (arn.Resource, error) {
+	return arn.ParseResource(v, accessPointResourceParser)
+}
+
+func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
+	resParts := arn.SplitResource(a.Resource)
+	switch resParts[0] {
+	case "accesspoint":
+		switch a.Service {
+		case s3Namespace:
+			return arn.ParseAccessPointResource(a, resParts[1:])
+		case s3ObjectsLambdaNamespace:
+			return parseS3ObjectLambdaAccessPointResource(a, resParts)
+		default:
+			return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s or %s", s3Namespace, s3ObjectsLambdaNamespace)}
+		}
+	case "outpost":
+		if a.Service != "s3-outposts" {
+			return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
+		}
+		return parseOutpostAccessPointResource(a, resParts[1:])
+	default:
+		return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
+	}
+}
+
+// parseOutpostAccessPointResource attempts to parse the ARN's resource as an
+// outpost access-point resource.
+// +// Supported Outpost AccessPoint ARN format: +// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName} +// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint +func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) { + // outpost accesspoint arn is only valid if service is s3-outposts + if a.Service != "s3-outposts" { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"} + } + + if len(resParts) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + if len(resParts) < 3 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ + ARN: a, Reason: "access-point resource not set in Outpost ARN", + } + } + + resID := strings.TrimSpace(resParts[0]) + if len(resID) == 0 { + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"} + } + + var outpostAccessPointARN = arn.OutpostAccessPointARN{} + switch resParts[1] { + case "accesspoint": + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:]) + if err != nil { + return arn.OutpostAccessPointARN{}, err + } + // set access-point arn + outpostAccessPointARN.AccessPointARN = accessPointARN + default: + return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"} + } + + // set outpost id + outpostAccessPointARN.OutpostID = resID + return outpostAccessPointARN, nil +} + +func parseS3ObjectLambdaAccessPointResource(a awsarn.ARN, resParts []string) (arn.S3ObjectLambdaAccessPointARN, error) { + if a.Service != s3ObjectsLambdaNamespace { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("service is not %s", s3ObjectsLambdaNamespace)} + } + + accessPointARN, err := arn.ParseAccessPointResource(a, resParts[1:]) + if err != nil { + return arn.S3ObjectLambdaAccessPointARN{}, err + } + + if len(accessPointARN.Region) == 0 { + return arn.S3ObjectLambdaAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: fmt.Sprintf("%s region not set", s3ObjectsLambdaNamespace)} + } + + return arn.S3ObjectLambdaAccessPointARN{ + AccessPointARN: accessPointARN, + }, nil +} + +func endpointHandler(req *request.Request) { + endpoint, ok := req.Params.(endpointARNGetter) + if !ok || !endpoint.hasEndpointARN() { + updateBucketEndpointFromParams(req) + return + } + + resource, err := endpoint.getEndpointARN() + if err != nil { + req.Error = s3shared.NewInvalidARNError(nil, err) + return + } + + resReq := s3shared.ResourceRequest{ + Resource: resource, + Request: req, + } + + if len(resReq.Request.ClientInfo.PartitionID) != 0 && resReq.IsCrossPartition() { + req.Error = s3shared.NewClientPartitionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() { + req.Error = s3shared.NewClientRegionMismatchError(resource, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + return + } + + switch tv := resource.(type) { + case arn.AccessPointARN: + err = updateRequestAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.S3ObjectLambdaAccessPointARN: + err = updateRequestS3ObjectLambdaAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + case arn.OutpostAccessPointARN: + // 
outposts does not support FIPS regions + if req.Config.UseFIPSEndpoint == endpoints.FIPSEndpointStateEnabled { + req.Error = s3shared.NewFIPSConfigurationError(resource, req.ClientInfo.PartitionID, + aws.StringValue(req.Config.Region), nil) + return + } + + err = updateRequestOutpostAccessPointEndpoint(req, tv) + if err != nil { + req.Error = err + } + default: + req.Error = s3shared.NewInvalidARNError(resource, nil) + } +} + +func updateBucketEndpointFromParams(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucket name was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + updateEndpointForS3Config(r, bucket) +} + +func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestS3ObjectLambdaAccessPointEndpoint(req *request.Request, accessPoint arn.S3ObjectLambdaAccessPointARN) error { + // DualStack not supported + if isUseDualStackEndpoint(req) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := s3ObjectLambdaAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + + return nil +} + +func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error { + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + return s3shared.NewClientConfiguredForAccelerateError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Dualstack not supported + if isUseDualStackEndpoint(req) { + return s3shared.NewClientConfiguredForDualStackError(accessPoint, + req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil) + } + + // Ignore the disable host prefix for access points + req.Config.DisableEndpointHostPrefix = aws.Bool(false) + + if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil { + return err + } + + removeBucketFromPath(req.HTTPRequest.URL) + return nil +} + +func removeBucketFromPath(u *url.URL) { + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} + +func buildWriteGetObjectResponseEndpoint(req *request.Request) { + // DualStack not supported + if isUseDualStackEndpoint(req) { + req.Error = awserr.New("ConfigurationError", "client configured for dualstack but not supported for operation", nil) + return + } + + // Accelerate not supported + if aws.BoolValue(req.Config.S3UseAccelerate) { + req.Error = 
awserr.New("ConfigurationError", "client configured for accelerate but not supported for operation", nil) + return + } + + signingName := s3ObjectsLambdaNamespace + signingRegion := req.ClientInfo.SigningRegion + + if !hasCustomEndpoint(req) { + endpoint, err := resolveRegionalEndpoint(req, aws.StringValue(req.Config.Region), req.ClientInfo.ResolvedRegion, EndpointsID) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed to resolve endpoint", err) + return + } + signingRegion = endpoint.SigningRegion + + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + req.Error = err + return + } + updateS3HostPrefixForS3ObjectLambda(req) + } + + redirectSigner(req, signingName, signingRegion) +} + +func isUseDualStackEndpoint(req *request.Request) bool { + if req.Config.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset { + return req.Config.UseDualStackEndpoint == endpoints.DualStackEndpointStateEnabled + } + return aws.BoolValue(req.Config.UseDualStack) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go new file mode 100644 index 00000000000..7ae18ef5481 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/endpoint_builder.go @@ -0,0 +1,239 @@ +package s3 + +import ( + "net/url" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3shared" + "github.com/aws/aws-sdk-go/internal/s3shared/arn" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + accessPointPrefixLabel = "accesspoint" + accountIDPrefixLabel = "accountID" + accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}." + + outpostPrefixLabel = "outpost" + outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}." +) + +// hasCustomEndpoint returns true if endpoint is a custom endpoint +func hasCustomEndpoint(r *request.Request) bool { + return len(aws.StringValue(r.Config.Endpoint)) > 0 +} + +// accessPointEndpointBuilder represents the endpoint builder for access point arn +type accessPointEndpointBuilder arn.AccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3" as signing name. 
+func (a accessPointEndpointBuilder) build(req *request.Request) error { + resolveService := arn.AccessPointARN(a).Service + resolveRegion := arn.AccessPointARN(a).Region + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", resolveService) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a), + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + // dual stack provided by endpoint resolver + updateS3HostForS3AccessPoint(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err) + } + + return nil +} + +func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.AccessPointARN(a).AccountID, + } +} + +// s3ObjectLambdaAccessPointEndpointBuilder represents the endpoint builder for an s3 object lambda access point arn +type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN + +// build builds the endpoint for corresponding access point arn +// +// For building an endpoint from access point arn, format used is: +// - Access point endpoint format : {accesspointName}-{accountId}.s3-object-lambda.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com +// +// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name. +func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", EndpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(arn.S3ObjectLambdaAccessPointARN(a), + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + endpoint.SigningName = s3ObjectsLambdaNamespace + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + + updateS3HostPrefixForS3ObjectLambda(req) + } + + protocol.HostPrefixBuilder{ + Prefix: accessPointPrefixTemplate, + LabelsFn: a.hostPrefixLabelValues, + }.Build(req) + + // signer redirection + redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(arn.S3ObjectLambdaAccessPointARN(a), err) + } + + return nil +} + +func (a s3ObjectLambdaAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccessPointName, + accountIDPrefixLabel: arn.S3ObjectLambdaAccessPointARN(a).AccountID, + } +} + +// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn. 
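+//
+// A Bucket field carrying an Outpost access point ARN, for example
+// arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-01234567890123456/accesspoint/myaccesspoint,
+// is routed to this builder by endpointHandler.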
+type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN + +// build builds an endpoint corresponding to the outpost access point arn. +// +// For building an endpoint from outpost access point arn, format used is: +// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix} +// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com +// +// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name. +func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error { + resolveRegion := o.Region + resolveService := o.Service + + endpointsID := resolveService + if resolveService == s3OutpostsNamespace { + endpointsID = "s3" + } + + endpoint, err := resolveRegionalEndpoint(req, resolveRegion, "", endpointsID) + if err != nil { + return s3shared.NewFailedToResolveEndpointError(o, + req.ClientInfo.PartitionID, resolveRegion, err) + } + + endpoint.URL = endpoints.AddScheme(endpoint.URL, aws.BoolValue(req.Config.DisableSSL)) + + if !hasCustomEndpoint(req) { + if err = updateRequestEndpoint(req, endpoint.URL); err != nil { + return err + } + updateHostPrefix(req, endpointsID, resolveService) + } + + protocol.HostPrefixBuilder{ + Prefix: outpostAccessPointPrefixTemplate, + LabelsFn: o.hostPrefixLabelValues, + }.Build(req) + + // set the signing region, name to resolved names from ARN + redirectSigner(req, resolveService, resolveRegion) + + err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host) + if err != nil { + return s3shared.NewInvalidARNError(o, err) + } + + return nil +} + +func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string { + return map[string]string{ + accessPointPrefixLabel: o.AccessPointName, + accountIDPrefixLabel: o.AccountID, + outpostPrefixLabel: o.OutpostID, + } +} + +func resolveRegionalEndpoint(r *request.Request, region, resolvedRegion, endpointsID string) (endpoints.ResolvedEndpoint, error) { + return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) { + opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL) + opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack) + opts.UseDualStackEndpoint = r.Config.UseDualStackEndpoint + opts.UseFIPSEndpoint = r.Config.UseFIPSEndpoint + opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint + opts.ResolvedRegion = resolvedRegion + opts.Logger = r.Config.Logger + opts.LogDeprecated = r.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) + }) +} + +func updateRequestEndpoint(r *request.Request, endpoint string) (err error) { + r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath) + if err != nil { + return awserr.New(request.ErrCodeSerialization, + "failed to parse endpoint URL", err) + } + + return nil +} + +// redirectSigner sets signing name, signing region for a request +func redirectSigner(req *request.Request, signingName string, signingRegion string) { + req.ClientInfo.SigningName = signingName + req.ClientInfo.SigningRegion = signingRegion +} + +func updateS3HostForS3AccessPoint(req *request.Request) { + updateHostPrefix(req, "s3", s3AccessPointNamespace) +} + +func updateS3HostPrefixForS3ObjectLambda(req *request.Request) { + updateHostPrefix(req, "s3", s3ObjectsLambdaNamespace) +} + +func updateHostPrefix(req *request.Request, oldEndpointPrefix, newEndpointPrefix string) { + host := req.HTTPRequest.URL.Host + if strings.HasPrefix(host, oldEndpointPrefix) { + // 
replace the service hostlabel oldEndpointPrefix with newEndpointPrefix
+		req.HTTPRequest.URL.Host = newEndpointPrefix + host[len(oldEndpointPrefix):]
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644
index 00000000000..8a67333ab26
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -0,0 +1,69 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+	// ErrCodeBucketAlreadyExists for service response error code
+	// "BucketAlreadyExists".
+	//
+	// The requested bucket name is not available. The bucket namespace is shared
+	// by all users of the system. Select a different name and try again.
+	ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+	// ErrCodeBucketAlreadyOwnedByYou for service response error code
+	// "BucketAlreadyOwnedByYou".
+	//
+	// The bucket you tried to create already exists, and you own it. Amazon S3
+	// returns this error in all Amazon Web Services Regions except in the North
+	// Virginia Region. For legacy compatibility, if you re-create an existing bucket
+	// that you already own in the North Virginia Region, Amazon S3 returns 200
+	// OK and resets the bucket access control lists (ACLs).
+	ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+	// ErrCodeInvalidObjectState for service response error code
+	// "InvalidObjectState".
+	//
+	// Object is archived and inaccessible until restored.
+	//
+	// If the object you are retrieving is stored in the S3 Glacier Flexible Retrieval
+	// storage class, the S3 Glacier Deep Archive storage class, the S3 Intelligent-Tiering
+	// Archive Access tier, or the S3 Intelligent-Tiering Deep Archive Access tier,
+	// before you can retrieve the object you must first restore a copy using RestoreObject
+	// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html).
+	// Otherwise, this operation returns an InvalidObjectState error. For information
+	// about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html)
+	// in the Amazon S3 User Guide.
+	ErrCodeInvalidObjectState = "InvalidObjectState"
+
+	// ErrCodeNoSuchBucket for service response error code
+	// "NoSuchBucket".
+	//
+	// The specified bucket does not exist.
+	ErrCodeNoSuchBucket = "NoSuchBucket"
+
+	// ErrCodeNoSuchKey for service response error code
+	// "NoSuchKey".
+	//
+	// The specified key does not exist.
+	ErrCodeNoSuchKey = "NoSuchKey"
+
+	// ErrCodeNoSuchUpload for service response error code
+	// "NoSuchUpload".
+	//
+	// The specified multipart upload does not exist.
+	ErrCodeNoSuchUpload = "NoSuchUpload"
+
+	// ErrCodeObjectAlreadyInActiveTierError for service response error code
+	// "ObjectAlreadyInActiveTierError".
+	//
+	// This action is not allowed against this storage tier.
+	ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+	// ErrCodeObjectNotInActiveTierError for service response error code
+	// "ObjectNotInActiveTierError".
+	//
+	// The source object of the COPY action is not in the active tier and is only
+	// stored in Amazon S3 Glacier.
+	ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 00000000000..81cdec1ae75
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,136 @@
+package s3
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// an operationBlacklist is a list of operation names that a request
+// handler should not be executed with.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+	for i := 0; i < len(b); i++ {
+		if b[i] == r.Operation.Name {
+			return false
+		}
+	}
+	return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+	opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Automatically add the bucket name to the endpoint domain
+// if possible. This style of addressing is valid for all bucket names that
+// are DNS compatible and do not contain "."
+func updateEndpointForS3Config(r *request.Request, bucketName string) {
+	forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+	accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+	if accelerate && accelerateOpBlacklist.Continue(r) {
+		if forceHostStyle {
+			if r.Config.Logger != nil {
+				r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+			}
+		}
+		updateEndpointForAccelerate(r, bucketName)
+	} else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+		updateEndpointForHostStyle(r, bucketName)
+	}
+}
+
+func updateEndpointForHostStyle(r *request.Request, bucketName string) {
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+		// bucket name must be valid to put into the host
+		return
+	}
+
+	moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
+
+var (
+	accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request, bucketName string) {
+	if !hostCompatibleBucketName(r.HTTPRequest.URL, bucketName) {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucketName),
+			nil)
+		return
+	}
+
+	parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+	if len(parts) < 3 {
+		r.Error = awserr.New("InvalidParameterException",
+			fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+				r.HTTPRequest.URL.Host), nil)
+		return
+	}
+
+	if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+		parts[0] = "s3-accelerate"
+	}
+	for i := 1; i+1 < len(parts); i++ {
+		if parts[i] == aws.StringValue(r.Config.Region) {
+			parts = append(parts[:i], parts[i+1:]...)
+			break
+		}
+	}
+
+	r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+	moveBucketToHost(r.HTTPRequest.URL, bucketName)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
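+//
+// For example, a *GetObjectInput whose Bucket is set to "mybucket" (an
+// illustrative value) satisfies bucketGetter and yields ("mybucket", true).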
+func bucketNameFromReqParams(params interface{}) (string, bool) { + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." + u.Host + removeBucketFromPath(u) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 00000000000..308b7d473e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,9 @@ +//go:build !go1.6 +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 00000000000..70feffab752 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,29 @@ +//go:build go1.6 +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-continue") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 00000000000..3e75d0e9427 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,108 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+
+package s3
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
type S3 struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "s3"        // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "S3"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create an S3 client from just a session.
+//	svc := s3.New(mySession)
+//
+//	// Create an S3 client with additional configuration
+//	svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "s3"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *S3 {
+	svc := &S3{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2006-03-01",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) {
+		s.DisableURIPathEscaping = true
+	}))
+	svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+	svc.Handlers.BuildStream.PushBackNamed(restxml.BuildHandler)
+	svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for an S3 operation and runs any
+// custom request initialization.
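+// Every generated operation constructor (for example GetObjectRequest) calls
+// newRequest, which is how the per-operation customizations installed by
+// defaultInitRequestFn in customizations.go are applied.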
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 00000000000..57a0bd92ca3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,84 @@
+package s3
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+	if r.HTTPRequest.URL.Scheme == "https" {
+		return
+	}
+
+	if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+		if len(iface.getSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+
+	if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		if len(iface.getCopySourceSSECustomerKey()) > 0 {
+			r.Error = errSSERequiresSSL
+			return
+		}
+	}
+}
+
+const (
+	sseKeyHeader    = "x-amz-server-side-encryption-customer-key"
+	sseKeyMD5Header = sseKeyHeader + "-md5"
+)
+
+func computeSSEKeyMD5(r *request.Request) {
+	var key string
+	if g, ok := r.Params.(sseCustomerKeyGetter); ok {
+		key = g.getSSECustomerKey()
+	}
+
+	computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
+}
+
+const (
+	copySrcSSEKeyHeader    = "x-amz-copy-source-server-side-encryption-customer-key"
+	copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
+)
+
+func computeCopySourceSSEKeyMD5(r *request.Request) {
+	var key string
+	if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+		key = g.getCopySourceSSECustomerKey()
+	}
+
+	computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
+}
+
+func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
+	if len(key) == 0 {
+		// Backwards compatibility where the user just set the header value
+		// instead of using the API parameter, or set the header value for an
+		// operation without the parameter modeled.
+		key = r.Header.Get(keyHeader)
+		if len(key) == 0 {
+			return
+		}
+
+		// In the backwards compatible case, the header's value is not base64
+		// encoded, and needs to be encoded and updated by the SDK's customizations.
+		b64Key := base64.StdEncoding.EncodeToString([]byte(key))
+		r.Header.Set(keyHeader, b64Key)
+	}
+
+	// Only update Key's MD5 if not already set.
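+	// S3 expects this -md5 header to hold the base64-encoded MD5 digest of
+	// the raw key bytes, and uses it to detect corruption of the key in
+	// transit.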
+ if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 00000000000..096adc091dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,47 @@ +package s3 + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +func copyMultipartStatusOKUnmarshalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + // Note, some middleware later in the stack like restxml.Unmarshal expect a valid, non-closed Body + // even in case of an error, so we replace it with an empty Reader. + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewBuffer(nil)) + return + } + + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, sdkio.SeekStart) + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == request.ErrCodeSerialization && + err.OrigErr() != io.EOF { + r.Error = nil + return + } + // if empty payload + if err.OrigErr() == io.EOF { + r.HTTPResponse.StatusCode = http.StatusInternalServerError + } else { + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 00000000000..6eecf669107 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,114 @@ +package s3 + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + // Bucket exists in a different region, and request needs + // to be made to the correct region. 
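+	// S3 returns 301 for this case without an XML error body, so the region
+	// is recovered from the x-amz-bucket-region response header when present
+	// rather than parsed from the payload.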
+	if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+		msg := fmt.Sprintf(
+			"incorrect region, the bucket is not in '%s' region at endpoint '%s'",
+			aws.StringValue(r.Config.Region),
+			aws.StringValue(r.Config.Endpoint),
+		)
+		if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 {
+			msg += fmt.Sprintf(", bucket is in '%s' region", v)
+		}
+		r.Error = awserr.NewRequestFailure(
+			awserr.New("BucketRegionError", msg, nil),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Attempt to parse error from body if it is known
+	var errResp xmlErrorResponse
+	var err error
+	if r.HTTPResponse.StatusCode >= 200 && r.HTTPResponse.StatusCode < 300 {
+		err = s3unmarshalXMLError(&errResp, r.HTTPResponse.Body)
+	} else {
+		err = xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+	}
+
+	if err != nil {
+		var errorMsg string
+		if err == io.EOF {
+			errorMsg = "empty response payload"
+		} else {
+			errorMsg = "failed to unmarshal error message"
+		}
+
+		r.Error = awserr.NewRequestFailure(
+			awserr.New(request.ErrCodeSerialization,
+				errorMsg, err),
+			r.HTTPResponse.StatusCode,
+			r.RequestID,
+		)
+		return
+	}
+
+	// Fallback to status code converted to message if still no error code
+	if len(errResp.Code) == 0 {
+		statusText := http.StatusText(r.HTTPResponse.StatusCode)
+		errResp.Code = strings.Replace(statusText, " ", "", -1)
+		errResp.Message = statusText
+	}
+
+	r.Error = awserr.NewRequestFailure(
+		awserr.New(errResp.Code, errResp.Message, err),
+		r.HTTPResponse.StatusCode,
+		r.RequestID,
+	)
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+	awserr.RequestFailure
+
+	// HostID is the S3 Host ID needed for debugging, and contacting support
+	HostID() string
+}
+
+// s3unmarshalXMLError is the S3-specific XML error unmarshaler
+// for 200 OK errors and response payloads.
+// This function differs from the xmlutil.UnmarshalXMLError
+// function in that it does not ignore the EOF error and passes it up.
+// Related to bug fix for `s3 200 OK response with empty payload`
+func s3unmarshalXMLError(v interface{}, stream io.Reader) error {
+	var errBuf bytes.Buffer
+	body := io.TeeReader(stream, &errBuf)
+
+	err := xml.NewDecoder(body).Decode(v)
+	if err != nil && err != io.EOF {
+		return awserr.NewUnmarshalError(err,
+			"failed to unmarshal error message", errBuf.Bytes())
+	}
+
+	return err
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 00000000000..2596c694b50
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window, an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+	return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. 
+// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go new file mode 100644 index 00000000000..b8f590f71d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go @@ -0,0 +1,1367 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package sso + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restjson" +) + +const opGetRoleCredentials = "GetRoleCredentials" + +// GetRoleCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the GetRoleCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRoleCredentials for more information on using the GetRoleCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetRoleCredentialsRequest method. +// req, resp := client.GetRoleCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) { + op := &request.Operation{ + Name: opGetRoleCredentials, + HTTPMethod: "GET", + HTTPPath: "/federation/credentials", + } + + if input == nil { + input = &GetRoleCredentialsInput{} + } + + output = &GetRoleCredentialsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// GetRoleCredentials API operation for AWS Single Sign-On. +// +// Returns the STS short-term credentials for a given role name that is assigned +// to the user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation GetRoleCredentials for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials +func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + return out, req.Send() +} + +// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See GetRoleCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) { + req, out := c.GetRoleCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccountRoles = "ListAccountRoles" + +// ListAccountRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccountRoles for more information on using the ListAccountRoles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListAccountRolesRequest method. +// req, resp := client.ListAccountRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) { + op := &request.Operation{ + Name: opListAccountRoles, + HTTPMethod: "GET", + HTTPPath: "/assignment/roles", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountRolesInput{} + } + + output = &ListAccountRolesOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccountRoles API operation for AWS Single Sign-On. +// +// Lists all roles that are assigned to the user for a given AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccountRoles for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles +func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + return out, req.Send() +} + +// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) { + req, out := c.ListAccountRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountRoles operation. +// pageNum := 0 +// err := client.ListAccountRolesPages(params, +// func(page *sso.ListAccountRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error { + return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountRolesPagesWithContext same as ListAccountRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListAccounts = "ListAccounts" + +// ListAccountsRequest generates a "aws/request.Request" representing the +// client's request for the ListAccounts operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccounts for more information on using the ListAccounts +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the ListAccountsRequest method. +// req, resp := client.ListAccountsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) { + op := &request.Operation{ + Name: opListAccounts, + HTTPMethod: "GET", + HTTPPath: "/assignment/accounts", + Paginator: &request.Paginator{ + InputTokens: []string{"nextToken"}, + OutputTokens: []string{"nextToken"}, + LimitToken: "maxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListAccountsInput{} + } + + output = &ListAccountsOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// ListAccounts API operation for AWS Single Sign-On. +// +// Lists all AWS accounts assigned to the user. These AWS accounts are assigned +// by the administrator of the account. For more information, see Assign User +// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers) +// in the IAM Identity Center User Guide. This operation returns a paginated +// response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation ListAccounts for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// - ResourceNotFoundException +// The specified resource doesn't exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts +func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + return out, req.Send() +} + +// ListAccountsWithContext is the same as ListAccounts with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccounts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) { + req, out := c.ListAccountsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountsPages iterates over the pages of a ListAccounts operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListAccounts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccounts operation. +// pageNum := 0 +// err := client.ListAccountsPages(params, +// func(page *sso.ListAccountsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error { + return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountsPagesWithContext same as ListAccountsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opLogout = "Logout" + +// LogoutRequest generates a "aws/request.Request" representing the +// client's request for the Logout operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See Logout for more information on using the Logout +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the LogoutRequest method. +// req, resp := client.LogoutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) { + op := &request.Operation{ + Name: opLogout, + HTTPMethod: "POST", + HTTPPath: "/logout", + } + + if input == nil { + input = &LogoutInput{} + } + + output = &LogoutOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// Logout API operation for AWS Single Sign-On. +// +// Removes the locally stored SSO tokens from the client-side cache and sends +// an API call to the IAM Identity Center service to invalidate the corresponding +// server-side IAM Identity Center sign in session. 
+// +// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM +// Identity Center sign in session is used to obtain an IAM session, as specified +// in the corresponding IAM Identity Center permission set. More specifically, +// IAM Identity Center assumes an IAM role in the target account on behalf of +// the user, and the corresponding temporary AWS credentials are returned to +// the client. +// +// After user logout, any existing IAM role sessions that were created by using +// IAM Identity Center permission sets continue based on the duration configured +// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) +// in the IAM Identity Center User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Single Sign-On's +// API operation Logout for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - UnauthorizedException +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +// +// - TooManyRequestsException +// Indicates that the request is being made too frequently and is more than +// what the server can handle. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout +func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + return out, req.Send() +} + +// LogoutWithContext is the same as Logout with the addition of +// the ability to pass a context and additional request options. +// +// See Logout for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) { + req, out := c.LogoutRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Provides information about your AWS account. +type AccountInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account that is assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The display name of the AWS account that is assigned to the user. + AccountName *string `locationName:"accountName" type:"string"` + + // The email address of the AWS account that is assigned to the user. + EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccountInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *AccountInfo) SetAccountId(v string) *AccountInfo { + s.AccountId = &v + return s +} + +// SetAccountName sets the AccountName field's value. +func (s *AccountInfo) SetAccountName(v string) *AccountInfo { + s.AccountName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. +func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo { + s.EmailAddress = &v + return s +} + +type GetRoleCredentialsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by GetRoleCredentialsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The friendly name of the role that is assigned to the user. + // + // RoleName is a required field + RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
+func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput { + s.RoleName = &v + return s +} + +type GetRoleCredentialsOutput struct { + _ struct{} `type:"structure"` + + // The credentials for the role that is assigned to the user. + RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetRoleCredentialsOutput) GoString() string { + return s.String() +} + +// SetRoleCredentials sets the RoleCredentials field's value. +func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput { + s.RoleCredentials = v + return s +} + +// Indicates that a problem occurred with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +type ListAccountRolesInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. 
For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountRolesInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // The identifier for the AWS account that is assigned to the user. + // + // AccountId is a required field + AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"` + + // The number of items that clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // The page token from the previous response output when you request subsequent + // pages. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.AccountId == nil { + invalidParams.Add(request.NewErrParamRequired("AccountId")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput { + s.AccessToken = &v + return s +} + +// SetAccountId sets the AccountId field's value. +func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput { + s.AccountId = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput { + s.NextToken = &v + return s +} + +type ListAccountRolesOutput struct { + _ struct{} `type:"structure"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` + + // A paginated response with the list of roles and the next token if more results + // are available. + RoleList []*RoleInfo `locationName:"roleList" type:"list"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountRolesOutput) GoString() string { + return s.String() +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput { + s.NextToken = &v + return s +} + +// SetRoleList sets the RoleList field's value. +func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput { + s.RoleList = v + return s +} + +type ListAccountsInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by ListAccountsInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` + + // This is the number of items clients can request per page. + MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"` + + // (Optional) When requesting subsequent pages, this is the page token from + // the previous response output. + NextToken *string `location:"querystring" locationName:"next_token" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + if s.MaxResults != nil && *s.MaxResults < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput { + s.AccessToken = &v + return s +} + +// SetMaxResults sets the MaxResults field's value. +func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput { + s.MaxResults = &v + return s +} + +// SetNextToken sets the NextToken field's value. 
+func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput { + s.NextToken = &v + return s +} + +type ListAccountsOutput struct { + _ struct{} `type:"structure"` + + // A paginated response with the list of account information and the next token + // if more results are available. + AccountList []*AccountInfo `locationName:"accountList" type:"list"` + + // The page token client that is used to retrieve the list of accounts. + NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ListAccountsOutput) GoString() string { + return s.String() +} + +// SetAccountList sets the AccountList field's value. +func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput { + s.AccountList = v + return s +} + +// SetNextToken sets the NextToken field's value. +func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput { + s.NextToken = &v + return s +} + +type LogoutInput struct { + _ struct{} `type:"structure" nopayload:"true"` + + // The token issued by the CreateToken API call. For more information, see CreateToken + // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html) + // in the IAM Identity Center OIDC API Reference Guide. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by LogoutInput's + // String and GoString methods. + // + // AccessToken is a required field + AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LogoutInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LogoutInput"} + if s.AccessToken == nil { + invalidParams.Add(request.NewErrParamRequired("AccessToken")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessToken sets the AccessToken field's value. +func (s *LogoutInput) SetAccessToken(v string) *LogoutInput { + s.AccessToken = &v + return s +} + +type LogoutOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s LogoutOutput) GoString() string { + return s.String() +} + +// The specified resource doesn't exist. +type ResourceNotFoundException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ResourceNotFoundException) GoString() string { + return s.String() +} + +func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error { + return &ResourceNotFoundException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ResourceNotFoundException) Code() string { + return "ResourceNotFoundException" +} + +// Message returns the exception's message. +func (s *ResourceNotFoundException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ResourceNotFoundException) OrigErr() error { + return nil +} + +func (s *ResourceNotFoundException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ResourceNotFoundException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ResourceNotFoundException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Provides information about the role credentials that are assigned to the +// user. +type RoleCredentials struct { + _ struct{} `type:"structure"` + + // The identifier used for the temporary security credentials. For more information, + // see Using Temporary Security Credentials to Request Access to AWS Resources + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + AccessKeyId *string `locationName:"accessKeyId" type:"string"` + + // The date on which temporary security credentials expire. + Expiration *int64 `locationName:"expiration" type:"long"` + + // The key that is used to sign the request. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. 
+ // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. + SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` + + // The token used for temporary credentials. For more information, see Using + // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html) + // in the AWS IAM User Guide. + // + // SessionToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RoleCredentials's + // String and GoString methods. + SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleCredentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleCredentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials { + s.SessionToken = &v + return s +} + +// Provides information about the role that is assigned to the user. +type RoleInfo struct { + _ struct{} `type:"structure"` + + // The identifier of the AWS account assigned to the user. + AccountId *string `locationName:"accountId" type:"string"` + + // The friendly name of the role that is assigned to the user. + RoleName *string `locationName:"roleName" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RoleInfo) GoString() string { + return s.String() +} + +// SetAccountId sets the AccountId field's value. +func (s *RoleInfo) SetAccountId(v string) *RoleInfo { + s.AccountId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RoleInfo) SetRoleName(v string) *RoleInfo { + s.RoleName = &v + return s +} + +// Indicates that the request is being made too frequently and is more than +// what the server can handle. 
+type TooManyRequestsException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyRequestsException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s TooManyRequestsException) GoString() string { + return s.String() +} + +func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error { + return &TooManyRequestsException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *TooManyRequestsException) Code() string { + return "TooManyRequestsException" +} + +// Message returns the exception's message. +func (s *TooManyRequestsException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *TooManyRequestsException) OrigErr() error { + return nil +} + +func (s *TooManyRequestsException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *TooManyRequestsException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *TooManyRequestsException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the request is not authorized. This can happen due to an invalid +// access token in the request. +type UnauthorizedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedException(v protocol.ResponseMetadata) error { + return &UnauthorizedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedException) Code() string { + return "UnauthorizedException" +} + +// Message returns the exception's message. +func (s *UnauthorizedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
+func (s *UnauthorizedException) OrigErr() error {
+	return nil
+}
+
+func (s *UnauthorizedException) Error() string {
+	return fmt.Sprintf("%s: %s", s.Code(), s.Message())
+}
+
+// StatusCode returns the HTTP status code for the request's response error.
+func (s *UnauthorizedException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnauthorizedException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
new file mode 100644
index 00000000000..15e61a32282
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
@@ -0,0 +1,45 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sso provides the client and types for making API
+// requests to AWS Single Sign-On.
+//
+// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
+// service that makes it easy for you to assign user access to IAM Identity
+// Center resources such as the AWS access portal. Users can get AWS account
+// applications and roles assigned to them and get federated into the application.
+//
+// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
+// will continue to retain their original name for backward compatibility purposes.
+// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
+//
+// This reference guide describes the IAM Identity Center Portal operations
+// that you can call programmatically and includes detailed information on data
+// types and errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
+// provide a convenient way to create programmatic access to IAM Identity Center
+// and other AWS services. For more information about the AWS SDKs, including
+// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
+//
+// See sso package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
+//
+// # Using the Client
+//
+// To contact AWS Single Sign-On with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Single Sign-On client SSO for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
+package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
new file mode 100644
index 00000000000..77a6792e352
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
@@ -0,0 +1,44 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+const (
+
+	// ErrCodeInvalidRequestException for service response error code
+	// "InvalidRequestException".
+	//
+	// Indicates that a problem occurred with the input to the request. For example,
+	// a required parameter might be missing or out of range.
+	ErrCodeInvalidRequestException = "InvalidRequestException"
+
+	// ErrCodeResourceNotFoundException for service response error code
+	// "ResourceNotFoundException".
+	//
+	// The specified resource doesn't exist.
+	ErrCodeResourceNotFoundException = "ResourceNotFoundException"
+
+	// ErrCodeTooManyRequestsException for service response error code
+	// "TooManyRequestsException".
+	//
+	// Indicates that the request is being made too frequently and is more than
+	// what the server can handle.
+	ErrCodeTooManyRequestsException = "TooManyRequestsException"
+
+	// ErrCodeUnauthorizedException for service response error code
+	// "UnauthorizedException".
+	//
+	// Indicates that the request is not authorized. This can happen due to an invalid
+	// access token in the request.
+	ErrCodeUnauthorizedException = "UnauthorizedException"
+)
+
+var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
+	"InvalidRequestException":   newErrorInvalidRequestException,
+	"ResourceNotFoundException": newErrorResourceNotFoundException,
+	"TooManyRequestsException":  newErrorTooManyRequestsException,
+	"UnauthorizedException":     newErrorUnauthorizedException,
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
new file mode 100644
index 00000000000..7094cfe4130
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
@@ -0,0 +1,106 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sso
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSO provides the API operation methods for making requests to
+// AWS Single Sign-On. See this package's package overview docs
+// for details on the service.
+//
+// SSO methods are safe to use concurrently. It is not safe to
+// modify any of the struct's properties, though.
+type SSO struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO"        // Name of service.
+	EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
+	ServiceID   = "SSO"        // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSO client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create a SSO client from just a session.
+//	svc := sso.New(mySession)
+//
+//	// Create a SSO client with additional configuration
+//	svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+ if c.SigningNameDerived || len(c.SigningName) == 0 { + c.SigningName = "awsssoportal" + } + return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO { + svc := &SSO{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + PartitionID: partitionID, + Endpoint: endpoint, + APIVersion: "2019-06-10", + ResolvedRegion: resolvedRegion, + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed( + protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(), + ) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SSO operation and runs any +// custom request initialization. +func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go new file mode 100644 index 00000000000..818cab7cda9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go @@ -0,0 +1,86 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package ssoiface + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/service/sso" +) + +// SSOAPI provides an interface to enable mocking the +// sso.SSO service client's API operations, +// paginators, and waiters. This makes unit testing your code that calls out +// to the SDK's service client's calls easier. +// +// The best way to use this interface is so the SDK's service client's calls +// can be stubbed out for unit testing your code with the SDK without needing +// to inject custom request handlers into the SDK's request pipeline. +// +// // myFunc uses an SDK service client to make a request to +// // AWS Single Sign-On. +// func myFunc(svc ssoiface.SSOAPI) bool { +// // Make svc.GetRoleCredentials request +// } +// +// func main() { +// sess := session.New() +// svc := sso.New(sess) +// +// myFunc(svc) +// } +// +// In your _test.go file: +// +// // Define a mock struct to be used in your unit tests of myFunc.
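+// (Embedding ssoiface.SSOAPI means the mock has to implement only the +// methods the test actually exercises; calling any other method on the +// mock will panic, which makes unintended SDK usage visible.)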
+// type mockSSOClient struct { +// ssoiface.SSOAPI +// } +// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) { +// // mock response/functionality +// } +// +// func TestMyFunc(t *testing.T) { +// // Setup Test +// mockSvc := &mockSSOClient{} +// +// myFunc(mockSvc) +// +// // Verify myFunc's functionality +// } +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. It's suggested to use the pattern above for testing, or to use +// tooling to generate mocks to satisfy the interfaces. +type SSOAPI interface { + GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error) + GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput) + + ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error) + ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error) + ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput) + + ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error + ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error + + ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error) + ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error) + ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput) + + ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error + ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error + + Logout(*sso.LogoutInput) (*sso.LogoutOutput, error) + LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error) + LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput) +} + +var _ SSOAPI = (*sso.SSO)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go new file mode 100644 index 00000000000..04f6c811b63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go @@ -0,0 +1,2252 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" +) + +const opCreateToken = "CreateToken" + +// CreateTokenRequest generates a "aws/request.Request" representing the +// client's request for the CreateToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateToken for more information on using the CreateToken +// API call, and error handling.
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenRequest method. +// req, resp := client.CreateTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) { + op := &request.Operation{ + Name: opCreateToken, + HTTPMethod: "POST", + HTTPPath: "/token", + } + + if input == nil { + input = &CreateTokenInput{} + } + + output = &CreateTokenOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// CreateToken API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients that are authenticated +// using client secrets. The access token can be used to fetch short-term credentials +// for the assigned AWS accounts or to access application APIs using bearer +// authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateToken for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken +func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + return out, req.Send() +} + +// CreateTokenWithContext is the same as CreateToken with the addition of +// the ability to pass a context and additional request options. 
+// +// See CreateToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) { + req, out := c.CreateTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTokenWithIAM = "CreateTokenWithIAM" + +// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the +// client's request for the CreateTokenWithIAM operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the CreateTokenWithIAMRequest method. +// req, resp := client.CreateTokenWithIAMRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) { + op := &request.Operation{ + Name: opCreateTokenWithIAM, + HTTPMethod: "POST", + HTTPPath: "/token?aws_iam=t", + } + + if input == nil { + input = &CreateTokenWithIAMInput{} + } + + output = &CreateTokenWithIAMOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateTokenWithIAM API operation for AWS SSO OIDC. +// +// Creates and returns access and refresh tokens for clients and applications +// that are authenticated using IAM entities. The access token can be used to +// fetch short-term credentials for the assigned AWS accounts or to access application +// APIs using bearer authentication. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation CreateTokenWithIAM for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - InvalidGrantException +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. 
+// +// - UnsupportedGrantTypeException +// Indicates that the grant type in the request is not supported by the service. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - AuthorizationPendingException +// Indicates that a request to authorize a client with an access user session +// token is pending. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - AccessDeniedException +// You do not have sufficient access to perform this action. +// +// - ExpiredTokenException +// Indicates that the token issued by the service is expired and is no longer +// valid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// - InvalidRequestRegionException +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM +func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + return out, req.Send() +} + +// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTokenWithIAM for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) { + req, out := c.CreateTokenWithIAMRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRegisterClient = "RegisterClient" + +// RegisterClientRequest generates a "aws/request.Request" representing the +// client's request for the RegisterClient operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RegisterClient for more information on using the RegisterClient +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the RegisterClientRequest method. 
+// req, resp := client.RegisterClientRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) { + op := &request.Operation{ + Name: opRegisterClient, + HTTPMethod: "POST", + HTTPPath: "/client/register", + } + + if input == nil { + input = &RegisterClientInput{} + } + + output = &RegisterClientOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// RegisterClient API operation for AWS SSO OIDC. +// +// Registers a client with IAM Identity Center. This allows clients to initiate +// device authorization. The output should be persisted for reuse through many +// authentication requests. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation RegisterClient for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidScopeException +// Indicates that the scope provided in the request is invalid. +// +// - InvalidClientMetadataException +// Indicates that the client information sent in the request during registration +// is invalid. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient +func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + return out, req.Send() +} + +// RegisterClientWithContext is the same as RegisterClient with the addition of +// the ability to pass a context and additional request options. +// +// See RegisterClient for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) { + req, out := c.RegisterClientRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opStartDeviceAuthorization = "StartDeviceAuthorization" + +// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the +// client's request for the StartDeviceAuthorization operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization +// API call, and error handling. 
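+// +// In the device authorization flow, this operation typically follows RegisterClient, +// and the verification codes it returns are then exchanged for tokens by calling +// CreateToken with the urn:ietf:params:oauth:grant-type:device_code grant type.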
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the StartDeviceAuthorizationRequest method. +// req, resp := client.StartDeviceAuthorizationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) { + op := &request.Operation{ + Name: opStartDeviceAuthorization, + HTTPMethod: "POST", + HTTPPath: "/device_authorization", + } + + if input == nil { + input = &StartDeviceAuthorizationInput{} + } + + output = &StartDeviceAuthorizationOutput{} + req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials + return +} + +// StartDeviceAuthorization API operation for AWS SSO OIDC. +// +// Initiates device authorization by requesting a pair of verification codes +// from the authorization service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS SSO OIDC's +// API operation StartDeviceAuthorization for usage and error information. +// +// Returned Error Types: +// +// - InvalidRequestException +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +// +// - InvalidClientException +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +// +// - UnauthorizedClientException +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +// +// - SlowDownException +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +// +// - InternalServerException +// Indicates that an error from the service occurred while trying to process +// a request. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization +func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + return out, req.Send() +} + +// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of +// the ability to pass a context and additional request options. +// +// See StartDeviceAuthorization for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) { + req, out := c.StartDeviceAuthorizationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
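+ + // The request was bound to ctx above, so cancelling the context or hitting + // its deadline aborts the in-flight call made by Send.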
+ return out, req.Send() +} + +// You do not have sufficient access to perform this action. +type AccessDeniedException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be access_denied. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AccessDeniedException) GoString() string { + return s.String() +} + +func newErrorAccessDeniedException(v protocol.ResponseMetadata) error { + return &AccessDeniedException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AccessDeniedException) Code() string { + return "AccessDeniedException" +} + +// Message returns the exception's message. +func (s *AccessDeniedException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AccessDeniedException) OrigErr() error { + return nil +} + +func (s *AccessDeniedException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AccessDeniedException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AccessDeniedException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request to authorize a client with an access user session +// token is pending. +type AuthorizationPendingException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be authorization_pending. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s AuthorizationPendingException) GoString() string { + return s.String() +} + +func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error { + return &AuthorizationPendingException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *AuthorizationPendingException) Code() string { + return "AuthorizationPendingException" +} + +// Message returns the exception's message. +func (s *AuthorizationPendingException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *AuthorizationPendingException) OrigErr() error { + return nil +} + +func (s *AuthorizationPendingException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *AuthorizationPendingException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *AuthorizationPendingException) RequestID() string { + return s.RespMetadata.RequestID +} + +type CreateTokenInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client or application. This value comes + // from the result of the RegisterClient API. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string generated for the client. This value should come from the + // persisted result of the RegisterClient API. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` + + // Used only when calling this API for the Authorization Code grant type. The + // short-term code is used to identify this authorization request. This grant + // type is currently unsupported for the CreateToken API. + Code *string `locationName:"code" type:"string"` + + // Used only when calling this API for the Device Code grant type. This short-term + // code is used to identify this authorization request. This comes from the + // result of the StartDeviceAuthorization API. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Supports the following OAuth grant types: Device Code and Refresh Token. + // Specify either of the following values, depending on the grant type that + // you want: + // + // * Device Code - urn:ietf:params:oauth:grant-type:device_code + // + // * Refresh Token - refresh_token + // + // For information about how to obtain the device code, see the StartDeviceAuthorization + // topic. + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. 
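+ // + // The value to supply here is the RefreshToken returned in a previous + // CreateTokenOutput, when one was issued.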
+ // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If this value is + // not specified, IAM Identity Center authorizes all scopes that are configured + // for the client during the call to RegisterClient. + Scope []*string `locationName:"scope" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput { + s.ClientSecret = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput { + s.Code = &v + return s +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput { + s.DeviceCode = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput { + s.Scope = v + return s +} + +type CreateTokenOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access AWS accounts and applications assigned to a user. 
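+ // For example, this is the value passed as AccessToken to the sso package's + // GetRoleCredentials, ListAccounts, and ListAccountRoles operations.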
+ // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // The idToken is not implemented or supported. For more information about the + // features and limitations of the current IAM Identity Center OIDC implementation, + // see Considerations for Using this Guide in the IAM Identity Center OIDC API + // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // A JSON Web Token (JWT) that identifies who is associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used to notify the client that the returned token is an access token. The + // supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput { + s.IdToken = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput { + s.RefreshToken = &v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput { + s.TokenType = &v + return s +} + +type CreateTokenWithIAMInput struct { + _ struct{} `type:"structure"` + + // Used only when calling this API for the JWT Bearer grant type. 
This value + // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize + // a trusted token issuer, configure the JWT Bearer GrantOptions for the application. + // + // Assertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + Assertion *string `locationName:"assertion" type:"string" sensitive:"true"` + + // The unique identifier string for the client or application. This value is + // an application ARN that has OAuth grants configured. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // short-term code is used to identify this authorization request. The code + // is obtained through a redirect from IAM Identity Center to a redirect URI + // persisted in the Authorization Code GrantOptions for the application. + Code *string `locationName:"code" type:"string"` + + // Supports the following OAuth grant types: Authorization Code, Refresh Token, + // JWT Bearer, and Token Exchange. Specify one of the following values, depending + // on the grant type that you want: + // + // * Authorization Code - authorization_code + // + // * Refresh Token - refresh_token + // + // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer + // + // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange + // + // GrantType is a required field + GrantType *string `locationName:"grantType" type:"string" required:"true"` + + // Used only when calling this API for the Authorization Code grant type. This + // value specifies the location of the client or application that has registered + // to receive the authorization code. + RedirectUri *string `locationName:"redirectUri" type:"string"` + + // Used only when calling this API for the Refresh Token grant type. This token + // is used to refresh short-term tokens, such as the access token, that might + // expire. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that the requester can receive. The following + // values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + RequestedTokenType *string `locationName:"requestedTokenType" type:"string"` + + // The list of scopes for which authorization is requested. The access token + // that is issued is limited to the scopes that are granted. If the value is + // not specified, IAM Identity Center authorizes all scopes configured for the + // application, including the following default scopes: openid, aws, sts:identity_context. + Scope []*string `locationName:"scope" type:"list"` + + // Used only when calling this API for the Token Exchange grant type. 
This value + // specifies the subject of the exchange. The value of the subject token must + // be an access token issued by IAM Identity Center to a different client or + // application. The access token must have authorized scopes that indicate the + // requested application as a target audience. + // + // SubjectToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's + // String and GoString methods. + SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"` + + // Used only when calling this API for the Token Exchange grant type. This value + // specifies the type of token that is passed as the subject of the exchange. + // The following value is supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + SubjectTokenType *string `locationName:"subjectTokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateTokenWithIAMInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.GrantType == nil { + invalidParams.Add(request.NewErrParamRequired("GrantType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssertion sets the Assertion field's value. +func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput { + s.Assertion = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput { + s.ClientId = &v + return s +} + +// SetCode sets the Code field's value. +func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput { + s.Code = &v + return s +} + +// SetGrantType sets the GrantType field's value. +func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput { + s.GrantType = &v + return s +} + +// SetRedirectUri sets the RedirectUri field's value. +func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput { + s.RedirectUri = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput { + s.RefreshToken = &v + return s +} + +// SetRequestedTokenType sets the RequestedTokenType field's value. +func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput { + s.RequestedTokenType = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput { + s.Scope = v + return s +} + +// SetSubjectToken sets the SubjectToken field's value. 
+func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput { + s.SubjectToken = &v + return s +} + +// SetSubjectTokenType sets the SubjectTokenType field's value. +func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput { + s.SubjectTokenType = &v + return s +} + +type CreateTokenWithIAMOutput struct { + _ struct{} `type:"structure"` + + // A bearer token to access AWS accounts and applications assigned to a user. + // + // AccessToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"` + + // Indicates the time in seconds when an access token will expire. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // A JSON Web Token (JWT) that identifies the user associated with the issued + // access token. + // + // IdToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + IdToken *string `locationName:"idToken" type:"string" sensitive:"true"` + + // Indicates the type of tokens that are issued by IAM Identity Center. The + // following values are supported: + // + // * Access Token - urn:ietf:params:oauth:token-type:access_token + // + // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token + IssuedTokenType *string `locationName:"issuedTokenType" type:"string"` + + // A token that, if present, can be used to refresh a previously issued access + // token that might have expired. + // + // For more information about the features and limitations of the current IAM + // Identity Center OIDC implementation, see Considerations for Using this Guide + // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html). + // + // RefreshToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's + // String and GoString methods. + RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"` + + // The list of scopes for which authorization is granted. The access token that + // is issued is limited to the scopes that are granted. + Scope []*string `locationName:"scope" type:"list"` + + // Used to notify the requester that the returned token is an access token. + // The supported token type is Bearer. + TokenType *string `locationName:"tokenType" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s CreateTokenWithIAMOutput) GoString() string { + return s.String() +} + +// SetAccessToken sets the AccessToken field's value. +func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput { + s.AccessToken = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. 
+func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput { + s.ExpiresIn = &v + return s +} + +// SetIdToken sets the IdToken field's value. +func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput { + s.IdToken = &v + return s +} + +// SetIssuedTokenType sets the IssuedTokenType field's value. +func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput { + s.IssuedTokenType = &v + return s +} + +// SetRefreshToken sets the RefreshToken field's value. +func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput { + s.RefreshToken = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput { + s.Scope = v + return s +} + +// SetTokenType sets the TokenType field's value. +func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput { + s.TokenType = &v + return s +} + +// Indicates that the token issued by the service is expired and is no longer +// valid. +type ExpiredTokenException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be expired_token. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ExpiredTokenException) GoString() string { + return s.String() +} + +func newErrorExpiredTokenException(v protocol.ResponseMetadata) error { + return &ExpiredTokenException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *ExpiredTokenException) Code() string { + return "ExpiredTokenException" +} + +// Message returns the exception's message. +func (s *ExpiredTokenException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *ExpiredTokenException) OrigErr() error { + return nil +} + +func (s *ExpiredTokenException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *ExpiredTokenException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *ExpiredTokenException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that an error from the service occurred while trying to process +// a request. 
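+// (The originating HTTP status remains available through this type's +// StatusCode method.)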
+type InternalServerException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be server_error. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InternalServerException) GoString() string { + return s.String() +} + +func newErrorInternalServerException(v protocol.ResponseMetadata) error { + return &InternalServerException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InternalServerException) Code() string { + return "InternalServerException" +} + +// Message returns the exception's message. +func (s *InternalServerException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InternalServerException) OrigErr() error { + return nil +} + +func (s *InternalServerException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InternalServerException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InternalServerException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the clientId or clientSecret in the request is invalid. For +// example, this can occur when a client sends an incorrect clientId or an expired +// clientSecret. +type InvalidClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_client. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. 
The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientException) GoString() string { + return s.String() +} + +func newErrorInvalidClientException(v protocol.ResponseMetadata) error { + return &InvalidClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientException) Code() string { + return "InvalidClientException" +} + +// Message returns the exception's message. +func (s *InvalidClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientException) OrigErr() error { + return nil +} + +func (s *InvalidClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the client information sent in the request during registration +// is invalid. +type InvalidClientMetadataException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_client_metadata. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidClientMetadataException) GoString() string { + return s.String() +} + +func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error { + return &InvalidClientMetadataException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidClientMetadataException) Code() string { + return "InvalidClientMetadataException" +} + +// Message returns the exception's message. +func (s *InvalidClientMetadataException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidClientMetadataException) OrigErr() error { + return nil +} + +func (s *InvalidClientMetadataException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidClientMetadataException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. 
+func (s *InvalidClientMetadataException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a request contains an invalid grant. This can occur if a client +// makes a CreateToken request with an invalid grant type. +type InvalidGrantException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_grant. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidGrantException) GoString() string { + return s.String() +} + +func newErrorInvalidGrantException(v protocol.ResponseMetadata) error { + return &InvalidGrantException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidGrantException) Code() string { + return "InvalidGrantException" +} + +// Message returns the exception's message. +func (s *InvalidGrantException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidGrantException) OrigErr() error { + return nil +} + +func (s *InvalidGrantException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidGrantException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidGrantException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that something is wrong with the input to the request. For example, +// a required parameter might be missing or out of range. +type InvalidRequestException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_request. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s InvalidRequestException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestException(v protocol.ResponseMetadata) error { + return &InvalidRequestException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestException) Code() string { + return "InvalidRequestException" +} + +// Message returns the exception's message. +func (s *InvalidRequestException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestException) OrigErr() error { + return nil +} + +func (s *InvalidRequestException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that a token provided as input to the request was issued by and +// is only usable by calling IAM Identity Center endpoints in another region. +type InvalidRequestRegionException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Indicates the IAM Identity Center endpoint which the requester may call with + // this token. + Endpoint *string `locationName:"endpoint" type:"string"` + + // Single error code. For this exception the value will be invalid_request. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` + + // Indicates the region which the requester may call with this token. + Region *string `locationName:"region" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidRequestRegionException) GoString() string { + return s.String() +} + +func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error { + return &InvalidRequestRegionException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidRequestRegionException) Code() string { + return "InvalidRequestRegionException" +} + +// Message returns the exception's message. 
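+//
+// A handling sketch (err is assumed to come from a prior client call): the
+// Endpoint and Region fields on this exception indicate where the token is
+// actually usable, so a caller can redirect the request accordingly.
+//
+//	var regionErr *InvalidRequestRegionException
+//	if errors.As(err, &regionErr) {
+//		// Retry against the endpoint and region the service indicated.
+//		log.Println(aws.StringValue(regionErr.Endpoint), aws.StringValue(regionErr.Region))
+//	}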
+func (s *InvalidRequestRegionException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidRequestRegionException) OrigErr() error { + return nil +} + +func (s *InvalidRequestRegionException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidRequestRegionException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidRequestRegionException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the scope provided in the request is invalid. +type InvalidScopeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be invalid_scope. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s InvalidScopeException) GoString() string { + return s.String() +} + +func newErrorInvalidScopeException(v protocol.ResponseMetadata) error { + return &InvalidScopeException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *InvalidScopeException) Code() string { + return "InvalidScopeException" +} + +// Message returns the exception's message. +func (s *InvalidScopeException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *InvalidScopeException) OrigErr() error { + return nil +} + +func (s *InvalidScopeException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *InvalidScopeException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *InvalidScopeException) RequestID() string { + return s.RespMetadata.RequestID +} + +type RegisterClientInput struct { + _ struct{} `type:"structure"` + + // The friendly name of the client. + // + // ClientName is a required field + ClientName *string `locationName:"clientName" type:"string" required:"true"` + + // The type of client. The service supports only public as a client type. Anything + // other than public will be rejected by the service. 
+ // + // ClientType is a required field + ClientType *string `locationName:"clientType" type:"string" required:"true"` + + // The list of scopes that are defined by the client. Upon authorization, this + // list is used to restrict permissions when granting an access token. + Scopes []*string `locationName:"scopes" type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterClientInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"} + if s.ClientName == nil { + invalidParams.Add(request.NewErrParamRequired("ClientName")) + } + if s.ClientType == nil { + invalidParams.Add(request.NewErrParamRequired("ClientType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientName sets the ClientName field's value. +func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput { + s.ClientName = &v + return s +} + +// SetClientType sets the ClientType field's value. +func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput { + s.ClientType = &v + return s +} + +// SetScopes sets the Scopes field's value. +func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput { + s.Scopes = v + return s +} + +type RegisterClientOutput struct { + _ struct{} `type:"structure"` + + // An endpoint that the client can use to request authorization. + AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"` + + // The unique identifier string for each client. This client uses this identifier + // to get authenticated by the service in subsequent calls. + ClientId *string `locationName:"clientId" type:"string"` + + // Indicates the time at which the clientId and clientSecret were issued. + ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"` + + // A secret string generated for the client. The client will use this string + // to get authenticated by the service in subsequent calls. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by RegisterClientOutput's + // String and GoString methods. + ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"` + + // Indicates the time at which the clientId and clientSecret will become invalid. + ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"` + + // An endpoint that the client can use to create tokens. + TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
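+//
+// A registration sketch (mySession is assumed to be a valid
+// *session.Session): the service accepts only "public" as the client type,
+// and the returned ClientId and ClientSecret should be persisted for the
+// later StartDeviceAuthorization and CreateToken calls.
+//
+//	svc := ssooidc.New(mySession)
+//	reg, err := svc.RegisterClient(&ssooidc.RegisterClientInput{
+//		ClientName: aws.String("example-cli"),
+//		ClientType: aws.String("public"),
+//	})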
+func (s RegisterClientOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s RegisterClientOutput) GoString() string { + return s.String() +} + +// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value. +func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput { + s.AuthorizationEndpoint = &v + return s +} + +// SetClientId sets the ClientId field's value. +func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput { + s.ClientId = &v + return s +} + +// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value. +func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput { + s.ClientIdIssuedAt = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput { + s.ClientSecret = &v + return s +} + +// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value. +func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput { + s.ClientSecretExpiresAt = &v + return s +} + +// SetTokenEndpoint sets the TokenEndpoint field's value. +func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput { + s.TokenEndpoint = &v + return s +} + +// Indicates that the client is making the request too frequently and is more +// than the service can handle. +type SlowDownException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be slow_down. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s SlowDownException) GoString() string { + return s.String() +} + +func newErrorSlowDownException(v protocol.ResponseMetadata) error { + return &SlowDownException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *SlowDownException) Code() string { + return "SlowDownException" +} + +// Message returns the exception's message. +func (s *SlowDownException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. 
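+//
+// A backoff sketch (auth is a hypothetical StartDeviceAuthorizationOutput
+// from the device flow, defined later in this file): per RFC 8628, a client
+// that receives slow_down should add 5 seconds to its polling interval
+// before retrying.
+//
+//	interval := time.Duration(aws.Int64Value(auth.Interval)) * time.Second
+//	var slow *SlowDownException
+//	if errors.As(err, &slow) {
+//		interval += 5 * time.Second
+//	}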
+func (s *SlowDownException) OrigErr() error { + return nil +} + +func (s *SlowDownException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *SlowDownException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *SlowDownException) RequestID() string { + return s.RespMetadata.RequestID +} + +type StartDeviceAuthorizationInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the client that is registered with IAM Identity + // Center. This value should come from the persisted result of the RegisterClient + // API operation. + // + // ClientId is a required field + ClientId *string `locationName:"clientId" type:"string" required:"true"` + + // A secret string that is generated for the client. This value should come + // from the persisted result of the RegisterClient API operation. + // + // ClientSecret is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's + // String and GoString methods. + // + // ClientSecret is a required field + ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"` + + // The URL for the Amazon Web Services access portal. For more information, + // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html) + // in the IAM Identity Center User Guide. + // + // StartUrl is a required field + StartUrl *string `locationName:"startUrl" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StartDeviceAuthorizationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"} + if s.ClientId == nil { + invalidParams.Add(request.NewErrParamRequired("ClientId")) + } + if s.ClientSecret == nil { + invalidParams.Add(request.NewErrParamRequired("ClientSecret")) + } + if s.StartUrl == nil { + invalidParams.Add(request.NewErrParamRequired("StartUrl")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientId sets the ClientId field's value. +func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput { + s.ClientId = &v + return s +} + +// SetClientSecret sets the ClientSecret field's value. +func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput { + s.ClientSecret = &v + return s +} + +// SetStartUrl sets the StartUrl field's value. 
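+//
+// A device-flow sketch (svc and reg are hypothetical values from a prior
+// RegisterClient call, and the start URL is a placeholder for a real Amazon
+// Web Services access portal URL):
+//
+//	auth, err := svc.StartDeviceAuthorization(&ssooidc.StartDeviceAuthorizationInput{
+//		ClientId:     reg.ClientId,
+//		ClientSecret: reg.ClientSecret,
+//		StartUrl:     aws.String("https://example.awsapps.com/start"),
+//	})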
+func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput { + s.StartUrl = &v + return s +} + +type StartDeviceAuthorizationOutput struct { + _ struct{} `type:"structure"` + + // The short-lived code that is used by the device when polling for a session + // token. + DeviceCode *string `locationName:"deviceCode" type:"string"` + + // Indicates the number of seconds in which the verification code will become + // invalid. + ExpiresIn *int64 `locationName:"expiresIn" type:"integer"` + + // Indicates the number of seconds the client must wait between attempts when + // polling for a session. + Interval *int64 `locationName:"interval" type:"integer"` + + // A one-time user verification code. This is needed to authorize an in-use + // device. + UserCode *string `locationName:"userCode" type:"string"` + + // The URI of the verification page that takes the userCode to authorize the + // device. + VerificationUri *string `locationName:"verificationUri" type:"string"` + + // An alternate URL that the client can use to automatically launch a browser. + // This process skips the manual step in which the user visits the verification + // page and enters their code. + VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s StartDeviceAuthorizationOutput) GoString() string { + return s.String() +} + +// SetDeviceCode sets the DeviceCode field's value. +func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput { + s.DeviceCode = &v + return s +} + +// SetExpiresIn sets the ExpiresIn field's value. +func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput { + s.ExpiresIn = &v + return s +} + +// SetInterval sets the Interval field's value. +func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput { + s.Interval = &v + return s +} + +// SetUserCode sets the UserCode field's value. +func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput { + s.UserCode = &v + return s +} + +// SetVerificationUri sets the VerificationUri field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput { + s.VerificationUri = &v + return s +} + +// SetVerificationUriComplete sets the VerificationUriComplete field's value. +func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput { + s.VerificationUriComplete = &v + return s +} + +// Indicates that the client is not currently authorized to make the request. +// This can happen when a clientId is not issued for a public client. +type UnauthorizedClientException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be unauthorized_client. 
+ Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnauthorizedClientException) GoString() string { + return s.String() +} + +func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error { + return &UnauthorizedClientException{ + RespMetadata: v, + } +} + +// Code returns the exception type name. +func (s *UnauthorizedClientException) Code() string { + return "UnauthorizedClientException" +} + +// Message returns the exception's message. +func (s *UnauthorizedClientException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} + +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *UnauthorizedClientException) OrigErr() error { + return nil +} + +func (s *UnauthorizedClientException) Error() string { + return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *UnauthorizedClientException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *UnauthorizedClientException) RequestID() string { + return s.RespMetadata.RequestID +} + +// Indicates that the grant type in the request is not supported by the service. +type UnsupportedGrantTypeException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + // Single error code. For this exception the value will be unsupported_grant_type. + Error_ *string `locationName:"error" type:"string"` + + // Human-readable text providing additional information, used to assist the + // client developer in understanding the error that occurred. + Error_description *string `locationName:"error_description" type:"string"` + + Message_ *string `locationName:"message" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s UnsupportedGrantTypeException) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
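+//
+// A token-exchange sketch (svc, reg, and auth are hypothetical values from
+// the earlier RegisterClient and StartDeviceAuthorization calls): passing
+// the RFC 8628 device-code grant type avoids this error in the device flow.
+//
+//	tok, err := svc.CreateToken(&ssooidc.CreateTokenInput{
+//		ClientId:     reg.ClientId,
+//		ClientSecret: reg.ClientSecret,
+//		DeviceCode:   auth.DeviceCode,
+//		GrantType:    aws.String("urn:ietf:params:oauth:grant-type:device_code"),
+//	})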
+func (s UnsupportedGrantTypeException) GoString() string {
+	return s.String()
+}
+
+func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error {
+	return &UnsupportedGrantTypeException{
+		RespMetadata: v,
+	}
+}
+
+// Code returns the exception type name.
+func (s *UnsupportedGrantTypeException) Code() string {
+	return "UnsupportedGrantTypeException"
+}
+
+// Message returns the exception's message.
+func (s *UnsupportedGrantTypeException) Message() string {
+	if s.Message_ != nil {
+		return *s.Message_
+	}
+	return ""
+}
+
+// OrigErr always returns nil, satisfies awserr.Error interface.
+func (s *UnsupportedGrantTypeException) OrigErr() error {
+	return nil
+}
+
+func (s *UnsupportedGrantTypeException) Error() string {
+	return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
+}
+
+// Status code returns the HTTP status code for the request's response error.
+func (s *UnsupportedGrantTypeException) StatusCode() int {
+	return s.RespMetadata.StatusCode
+}
+
+// RequestID returns the service's response RequestID for request.
+func (s *UnsupportedGrantTypeException) RequestID() string {
+	return s.RespMetadata.RequestID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
new file mode 100644
index 00000000000..083568c616f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
@@ -0,0 +1,67 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package ssooidc provides the client and types for making API
+// requests to AWS SSO OIDC.
+//
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access
+// token upon successful authentication and authorization with IAM Identity
+// Center.
+//
+// IAM Identity Center uses the sso and identitystore API namespaces.
+//
+// # Considerations for Using This Guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+// - The IAM Identity Center OIDC service currently implements only the portions
+// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628)
+// that are necessary to enable single sign-on authentication with the CLI.
+//
+// - With older versions of the CLI, the service only emits OIDC access tokens,
+// so to obtain a new token, users must explicitly re-authenticate. To access
+// the OIDC flow that supports token refresh and doesn’t require re-authentication,
+// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI
+// V2) with support for OIDC token refresh and configurable IAM Identity
+// Center session durations. For more information, see Configure Amazon Web
+// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html).
+//
+// - The access tokens provided by this service grant access to all Amazon
+// Web Services account entitlements assigned to an IAM Identity Center user,
+// not just a particular application.
+// +// - The documentation in this guide does not describe the mechanism to convert +// the access token into Amazon Web Services Auth (“sigv4”) credentials +// for use with IAM-protected Amazon Web Services service endpoints. For +// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html) +// in the IAM Identity Center Portal API Reference Guide. +// +// For general information about IAM Identity Center, see What is IAM Identity +// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) +// in the IAM Identity Center User Guide. +// +// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service. +// +// See ssooidc package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/ +// +// # Using the Client +// +// To contact AWS SSO OIDC with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS SSO OIDC client SSOOIDC for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New +package ssooidc diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go new file mode 100644 index 00000000000..e6242e4928d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go @@ -0,0 +1,115 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package ssooidc + +import ( + "github.com/aws/aws-sdk-go/private/protocol" +) + +const ( + + // ErrCodeAccessDeniedException for service response error code + // "AccessDeniedException". + // + // You do not have sufficient access to perform this action. + ErrCodeAccessDeniedException = "AccessDeniedException" + + // ErrCodeAuthorizationPendingException for service response error code + // "AuthorizationPendingException". + // + // Indicates that a request to authorize a client with an access user session + // token is pending. + ErrCodeAuthorizationPendingException = "AuthorizationPendingException" + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // Indicates that the token issued by the service is expired and is no longer + // valid. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeInternalServerException for service response error code + // "InternalServerException". + // + // Indicates that an error from the service occurred while trying to process + // a request. + ErrCodeInternalServerException = "InternalServerException" + + // ErrCodeInvalidClientException for service response error code + // "InvalidClientException". + // + // Indicates that the clientId or clientSecret in the request is invalid. For + // example, this can occur when a client sends an incorrect clientId or an expired + // clientSecret. + ErrCodeInvalidClientException = "InvalidClientException" + + // ErrCodeInvalidClientMetadataException for service response error code + // "InvalidClientMetadataException". 
+ // + // Indicates that the client information sent in the request during registration + // is invalid. + ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException" + + // ErrCodeInvalidGrantException for service response error code + // "InvalidGrantException". + // + // Indicates that a request contains an invalid grant. This can occur if a client + // makes a CreateToken request with an invalid grant type. + ErrCodeInvalidGrantException = "InvalidGrantException" + + // ErrCodeInvalidRequestException for service response error code + // "InvalidRequestException". + // + // Indicates that something is wrong with the input to the request. For example, + // a required parameter might be missing or out of range. + ErrCodeInvalidRequestException = "InvalidRequestException" + + // ErrCodeInvalidRequestRegionException for service response error code + // "InvalidRequestRegionException". + // + // Indicates that a token provided as input to the request was issued by and + // is only usable by calling IAM Identity Center endpoints in another region. + ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException" + + // ErrCodeInvalidScopeException for service response error code + // "InvalidScopeException". + // + // Indicates that the scope provided in the request is invalid. + ErrCodeInvalidScopeException = "InvalidScopeException" + + // ErrCodeSlowDownException for service response error code + // "SlowDownException". + // + // Indicates that the client is making the request too frequently and is more + // than the service can handle. + ErrCodeSlowDownException = "SlowDownException" + + // ErrCodeUnauthorizedClientException for service response error code + // "UnauthorizedClientException". + // + // Indicates that the client is not currently authorized to make the request. + // This can happen when a clientId is not issued for a public client. + ErrCodeUnauthorizedClientException = "UnauthorizedClientException" + + // ErrCodeUnsupportedGrantTypeException for service response error code + // "UnsupportedGrantTypeException". + // + // Indicates that the grant type in the request is not supported by the service. + ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException" +) + +var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ + "AccessDeniedException": newErrorAccessDeniedException, + "AuthorizationPendingException": newErrorAuthorizationPendingException, + "ExpiredTokenException": newErrorExpiredTokenException, + "InternalServerException": newErrorInternalServerException, + "InvalidClientException": newErrorInvalidClientException, + "InvalidClientMetadataException": newErrorInvalidClientMetadataException, + "InvalidGrantException": newErrorInvalidGrantException, + "InvalidRequestException": newErrorInvalidRequestException, + "InvalidRequestRegionException": newErrorInvalidRequestRegionException, + "InvalidScopeException": newErrorInvalidScopeException, + "SlowDownException": newErrorSlowDownException, + "UnauthorizedClientException": newErrorUnauthorizedClientException, + "UnsupportedGrantTypeException": newErrorUnsupportedGrantTypeException, +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go new file mode 100644 index 00000000000..782bae3692d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go @@ -0,0 +1,106 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+
+package ssooidc
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/restjson"
+)
+
+// SSOOIDC provides the API operation methods for making requests to
+// AWS SSO OIDC. See this package's package overview docs
+// for details on the service.
+//
+// SSOOIDC methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type SSOOIDC struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "SSO OIDC" // Name of service.
+	EndpointsID = "oidc"     // ID to lookup a service endpoint with.
+	ServiceID   = "SSO OIDC" // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the SSOOIDC client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create a SSOOIDC client from just a session.
+//	svc := ssooidc.New(mySession)
+//
+//	// Create a SSOOIDC client with additional configuration
+//	svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = "sso-oauth"
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC {
+	svc := &SSOOIDC{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2019-06-10",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(
+		protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
+	)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a SSOOIDC operation and runs any
+// custom request initialization.
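+//
+// A customization sketch (svc is a hypothetical *SSOOIDC client): besides
+// the package-level initRequest hook, callers can attach their own handlers
+// to a client, for example to log each operation before it is sent.
+//
+//	svc.Handlers.Send.PushFront(func(r *request.Request) {
+//		log.Println("sending", r.Operation.Name)
+//	})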
+func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644
index 00000000000..2c395f5f673
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -0,0 +1,3553 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the AssumeRoleRequest method.
+// req, resp := client.AssumeRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRole,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleInput{}
+	}
+
+	output = &AssumeRoleOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials that you can use to access
+// Amazon Web Services resources. These temporary credentials consist of an
+// access key ID, a secret access key, and a security token. Typically, you
+// use AssumeRole within your account or for cross-account access. For a comparison
+// of AssumeRole with other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any Amazon Web Services service with the following exception:
+// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy.
You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// When you create a role, you create two policies: a role trust policy that +// specifies who can assume the role, and a permissions policy that specifies +// what can be done with the role. You specify the trusted principal that is +// allowed to assume the role in the role trust policy. +// +// To assume a role from a different account, your Amazon Web Services account +// must be trusted by the role. The trust relationship is defined in the role's +// trust policy when the role is created. That trust policy states which accounts +// are allowed to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the account administrator. The administrator must +// attach a policy that allows the user to call AssumeRole for the ARN of the +// role in the other account. +// +// To allow a user to assume a role in the same account, you can do either of +// the following: +// +// - Attach a policy to the user that allows the user to call AssumeRole +// (as long as the role's trust policy trusts the account). +// +// - Add the user as a principal directly in the role's trust policy. +// +// You can do either because the role’s trust policy acts as an IAM resource-based +// policy. When a resource-based policy grants access to a principal in the +// same account, no additional identity-based policy is required. For more information +// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These tags are +// called session tags. For more information about session tags, see Passing +// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. 
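+//
+// A tagging sketch (the role ARN is a placeholder; svc is a hypothetical
+// *STS client):
+//
+//	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//		RoleArn:           aws.String("arn:aws:iam::123456789012:role/example"),
+//		RoleSessionName:   aws.String("example-session"),
+//		Tags:              []*sts.Tag{{Key: aws.String("Project"), Value: aws.String("demo")}},
+//		TransitiveTagKeys: []*string{aws.String("Project")},
+//	})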
+//
+// # Using MFA with AssumeRole
+//
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an Amazon
+// Web Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+//
+// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+// The request was rejected because the policy document was malformed. The error
+// message describes the specific error.
+//
+// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+// The request was rejected because the total packed size of the session policies
+// and session tags combined was too large. An Amazon Web Services conversion
+// compresses the session policy document, session policy ARNs, and session
+// tags into a packed binary format that has a separate limit. The error message
+// indicates by percentage how close the policies and tags are to the upper
+// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
+// in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see IAM and STS Entity Character
+// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
+// in the IAM User Guide.
+//
+// - ErrCodeRegionDisabledException "RegionDisabledException"
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see Activating
+// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// - ErrCodeExpiredTokenException "ExpiredTokenException"
+// The web identity token that was passed is expired or is not valid. Get a
+// new identity token from the identity provider and then retry the request.
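+//
+// A handling sketch (err is assumed to come from an AssumeRole call): with
+// the v1 SDK these codes are matched through awserr.
+//
+//	if aerr, ok := err.(awserr.Error); ok {
+//		switch aerr.Code() {
+//		case sts.ErrCodeMalformedPolicyDocumentException:
+//			// Fix the session policy document.
+//		case sts.ErrCodeRegionDisabledException:
+//			// Activate STS in the region, or call a region where it is active.
+//		}
+//	}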
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+	req, out := c.AssumeRoleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+// // Example sending a request using the AssumeRoleWithSAMLRequest method.
+// req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithSAML,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithSAMLInput{}
+	}
+
+	output = &AssumeRoleWithSAMLOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based Amazon Web
+// Services access without user-specific credentials or configuration. For a
+// comparison of AssumeRoleWithSAML with the other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token.
Applications +// can use these temporary security credentials to sign calls to Amazon Web +// Services services. +// +// # Session Duration +// +// By default, the temporary security credentials created by AssumeRoleWithSAML +// last for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. Your role session lasts for the +// duration that you specify, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. You can provide +// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session +// duration setting for the role. This setting can have a value from 1 hour +// to 12 hours. To learn how to view the maximum value for your role, see View +// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) +// in the IAM User Guide. +// +// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining) +// limits your CLI or Amazon Web Services API role session to a maximum of one +// hour. When you use the AssumeRole API operation to assume a role, you can +// specify the duration of your role session with the DurationSeconds parameter. +// You can specify a parameter value of up to 43200 seconds (12 hours), depending +// on the maximum session duration setting for your role. However, if you assume +// a role using role chaining and provide a DurationSeconds parameter value +// greater than one hour, the operation fails. +// +// # Permissions +// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any Amazon Web Services service with the following exception: +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services +// security credentials. 
The identity of the caller is validated by using keys +// in the metadata document that is uploaded for the SAML provider entity for +// your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. +// The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the persistent +// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your SAML assertion +// as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, session tags override the role's tags with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// # SAML Configuration +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by Amazon Web Services. +// Additionally, you must use Identity and Access Management (IAM) to create +// a SAML provider entity in your Amazon Web Services account that represents +// your identity provider. You must also create an IAM role that specifies this +// SAML provider in its trust policy. +// +// For more information, see the following resources: +// +// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. 
+// +// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by Amazon Web +// Services. Get a new identity token from the identity provider and then retry +// the request. +// +// - ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
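+//
+// As a minimal sketch, a one-shot call might look like the following; the
+// ARNs are placeholders and samlAssertion is an assumed variable holding the
+// base64-encoded SAML response:
+//
+//	svc := sts.New(session.Must(session.NewSession()))
+//	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//	    PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/ExampleIdP"), // placeholder
+//	    RoleArn:       aws.String("arn:aws:iam::123456789012:role/ExampleRole"),         // placeholder
+//	    SAMLAssertion: aws.String(samlAssertion), // assumed variable
+//	})
+//	if err == nil {
+//	    fmt.Println(out.Credentials)
+//	}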
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+	req, out := c.AssumeRoleWithSAMLRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+	req, out := c.AssumeRoleWithSAMLRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+//	req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+	op := &request.Operation{
+		Name:       opAssumeRoleWithWebIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &AssumeRoleWithWebIdentityInput{}
+	}
+
+	output = &AssumeRoleWithWebIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	req.Config.Credentials = credentials.AnonymousCredentials
+	return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider. Example providers
+// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID
+// Connect-compatible identity provider such as Google or Amazon Cognito federated
+// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html).
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the Amazon Web Services SDK for iOS
+// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android
+// (http://aws.amazon.com/sdkforandroid/) to uniquely identify a user. You can
+// also supply the user with a consistent identity throughout the lifetime of
+// an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)
+// in the Amazon Cognito Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application
+// (for example, on mobile devices) that requests temporary security credentials
+// without including long-term Amazon Web Services credentials in the application.
+// You also don't need to deploy server-based proxy services that use long-term
+// Amazon Web Services credentials. Instead, the identity of the caller is validated
+// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to Amazon Web Services service
+// API operations.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any Amazon Web Services service with the following
+// exception: you cannot call the STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policy Amazon
+// Resource Names (ARNs) to use as managed session policies.
The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// Passing policies to this operation returns new temporary credentials. The +// resulting session's permissions are the intersection of the role's identity-based +// policy and the session policies. You can use the role's temporary credentials +// in subsequent Amazon Web Services API calls to access resources in the account +// that owns the role. You cannot use session policies to grant more permissions +// than those allowed by the identity-based policy of the role that is being +// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. +// +// # Tags +// +// (Optional) You can configure your IdP to pass attributes into your web identity +// token as session tags. Each session tag consists of a key name and an associated +// value. For more information about session tags, see Passing Session Tags +// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can pass up to 50 session tags. The plaintext session tag keys can’t +// exceed 128 characters and the values can’t exceed 256 characters. For these +// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// An Amazon Web Services conversion compresses the passed inline session policy, +// managed policy ARNs, and session tags into a packed binary format that has +// a separate limit. Your request can fail for this limit even if your plaintext +// meets the other requirements. The PackedPolicySize response element indicates +// by percentage how close the policies and tags for your request are to the +// upper size limit. +// +// You can pass a session tag with the same key as a tag that is attached to +// the role. When you do, the session tag overrides the role tag with the same +// key. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// You can set the session tags as transitive. Transitive tags persist during +// role chaining. For more information, see Chaining Roles with Session Tags +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) +// in the IAM User Guide. +// +// # Identities +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided web identity token. 
We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to Amazon Web Services. +// +// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers. The toolkits then show how to use the information from these +// providers to get and use temporary security credentials. +// +// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). +// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. 
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it
+// can also mean that the claim has expired or has been explicitly revoked.
+//
+//   - ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+//     The request could not be fulfilled because the identity provider (IdP) that
+//     was asked to verify the incoming identity token could not be reached. This
+//     is often a transient error caused by network conditions. Retry the request
+//     a limited number of times so that you don't exceed the request rate. If the
+//     error persists, the identity provider might be down or not responding.
+//
+//   - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+//     The web identity token that was passed could not be validated by Amazon Web
+//     Services. Get a new identity token from the identity provider and then retry
+//     the request.
+//
+//   - ErrCodeExpiredTokenException "ExpiredTokenException"
+//     The web identity token that was passed is expired or is not valid. Get a
+//     new identity token from the identity provider and then retry the request.
+//
+//   - ErrCodeRegionDisabledException "RegionDisabledException"
+//     STS is not activated in the requested region for the account that is being
+//     asked to generate credentials. The account administrator must use the IAM
+//     console to activate STS in that region. For more information, see Activating
+//     and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+//     (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//     in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+	req, out := c.AssumeRoleWithWebIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
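+//
+// For instance, a minimal sketch of a caller-supplied request.Option that
+// injects a custom header before Send (the header name and value are
+// placeholders):
+//
+//	withTraceHeader := func(r *request.Request) {
+//	    // Set a custom header on the underlying *http.Request.
+//	    r.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc-123") // placeholder
+//	}
+//	req, resp := client.DecodeAuthorizationMessageRequest(params)
+//	req.ApplyOptions(withTraceHeader)
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}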
+//
+//	// Example sending a request using the DecodeAuthorizationMessageRequest method.
+//	req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+	op := &request.Operation{
+		Name:       opDecodeAuthorizationMessage,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DecodeAuthorizationMessageInput{}
+	}
+
+	output = &DecodeAuthorizationMessageOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an Amazon Web Services request.
+//
+// For example, if a user is not authorized to perform an operation that he
+// or she has requested, the request returns a Client.UnauthorizedOperation
+// response (an HTTP 403 response). Some Amazon Web Services operations additionally
+// return an encoded message that can provide details about this authorization
+// failure.
+//
+// Only certain Amazon Web Services operations return an encoded authorization
+// message. The documentation for an individual operation indicates whether
+// that operation returns an encoded message in addition to returning an HTTP
+// code.
+//
+// The message is encoded because the details of the authorization status can
+// contain privileged information that the user who requested the operation
+// should not see. To decode an authorization status message, a user must be
+// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage)
+// action.
+//
+// The decoded message includes the following types of information:
+//
+//   - Whether the request was denied due to an explicit deny or due to the
+//     absence of an explicit allow. For more information, see Determining Whether
+//     a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//     in the IAM User Guide.
+//
+//   - The principal who made the request.
+//
+//   - The requested action.
+//
+//   - The requested resource.
+//
+//   - The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+//   - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+//     The error returned if the message passed to DecodeAuthorizationMessage was
+//     invalid. This can happen if the token contains invalid characters, such as
+//     line breaks.
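+//
+// A minimal usage sketch, assuming an *sts.STS client named svc and an
+// encodedMessage variable holding the encoded message returned alongside a
+// Client.UnauthorizedOperation response:
+//
+//	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
+//	    EncodedMessage: aws.String(encodedMessage), // assumed variable
+//	})
+//	if err == nil {
+//	    fmt.Println(aws.StringValue(out.DecodedMessage))
+//	}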
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+	req, out := c.DecodeAuthorizationMessageRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetAccessKeyInfo = "GetAccessKeyInfo"
+
+// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
+// client's request for the GetAccessKeyInfo operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the GetAccessKeyInfoRequest method.
+//	req, resp := client.GetAccessKeyInfoRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
+	op := &request.Operation{
+		Name:       opGetAccessKeyInfo,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetAccessKeyInfoInput{}
+	}
+
+	output = &GetAccessKeyInfoOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetAccessKeyInfo API operation for AWS Security Token Service.
+//
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
+// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
+// For more information about access keys, see Managing Access Keys for IAM
+// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs
+// to you, you can sign in as the root user and review your root user access
+// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
+// in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might
+// be active, inactive, or deleted. Active keys might not have permissions to
+// perform an operation. Providing a deleted access key might return an error
+// that the key doesn't exist.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetAccessKeyInfo for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	return out, req.Send()
+}
+
+// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAccessKeyInfo for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+	req, out := c.GetAccessKeyInfoRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//	// Example sending a request using the GetCallerIdentityRequest method.
+//	req, resp := client.GetCallerIdentityRequest(params)
+//
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+	op := &request.Operation{
+		Name:       opGetCallerIdentity,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetCallerIdentityInput{}
+	}
+
+	output = &GetCallerIdentityOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM user or role whose credentials are used to
+// call the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity
+// action, you can still perform this operation. Permissions are not required
+// because the same information is returned when access is denied. To view an
+// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+	req, out := c.GetCallerIdentityRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
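+//
+// For instance, a minimal sketch that attaches a deadline through the Request
+// form before sending (the five-second timeout is an arbitrary choice):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	req, resp := client.GetFederationTokenRequest(params)
+//	req.SetContext(ctx)
+//	err := req.Send()
+//	if err == nil { // resp is now filled
+//	    fmt.Println(resp)
+//	}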
+// +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { + op := &request.Operation{ + Name: opGetFederationToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetFederationTokenInput{} + } + + output = &GetFederationTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetFederationToken API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) for a user. A typical +// use is in a proxy application that gets temporary security credentials on +// behalf of distributed applications inside a corporate network. +// +// You must call the GetFederationToken operation using the long-term security +// credentials of an IAM user. As a result, this call is appropriate in contexts +// where those credentials can be safeguarded, usually in a server-based application. +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// Although it is possible to call GetFederationToken using the security credentials +// of an Amazon Web Services account root user rather than an IAM user that +// you create for the purpose of a proxy application, we do not recommend it. +// For more information, see Safeguard your root user credentials and don't +// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// # Session duration +// +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// session duration is 43,200 seconds (12 hours). Temporary credentials obtained +// by using the root user credentials have a maximum duration of 3,600 seconds +// (1 hour). +// +// # Permissions +// +// You can use the temporary credentials created by GetFederationToken in any +// Amazon Web Services service with the following exceptions: +// +// - You cannot call any IAM operations using the CLI or the Amazon Web Services +// API. This limitation does not apply to console sessions. 
+// +// - You cannot call any STS operations except GetCallerIdentity. +// +// You can use temporary credentials for single sign-on (SSO) to the console. +// +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policy Amazon +// Resource Names (ARNs) to use as managed session policies. The plaintext that +// you use for both inline and managed session policies can't exceed 2,048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. When you pass +// session policies, the session permissions are the intersection of the IAM +// user policies and the session policies that you pass. This gives you a way +// to further restrict the permissions for a federated user. You cannot use +// session policies to grant more permissions than those that are defined in +// the permissions policy of the IAM user. For more information, see Session +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// You can use the credentials to access a resource that has a resource-based +// policy. If that policy specifically references the federated user session +// in the Principal element of the policy, the session has the permissions allowed +// by the policy. These permissions are granted in addition to the permissions +// granted by the session policies. +// +// # Tags +// +// (Optional) You can pass tag key-value pairs to your session. These are called +// session tags. For more information about session tags, see Passing Session +// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You can create a mobile-based or browser-based app that can authenticate +// users using a web identity provider like Login with Amazon, Facebook, Google, +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// For more information, see Federation Through a Web-based Identity Provider +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity) +// in the IAM User Guide. +// +// An administrator must grant you the permissions necessary to pass session +// tags. The administrator can also create granular permissions to allow you +// to pass only specific session tags. For more information, see Tutorial: Using +// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html) +// in the IAM User Guide. +// +// Tag key–value pairs are not case sensitive, but case is preserved. This +// means that you cannot have separate Department and department tag keys. Assume +// that the user that you are federating has the Department=Marketing tag and +// you pass the department=engineering session tag. 
Department and department +// are not saved as separate tags, and the session tag passed in the request +// takes precedence over the user tag. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// +// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the total packed size of the session policies +// and session tags combined was too large. An Amazon Web Services conversion +// compresses the session policy document, session policy ARNs, and session +// tags into a packed binary format that has a separate limit. The error message +// indicates by percentage how close the policies and tags are to the upper +// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +// +// You could receive this error even though you meet other defined session policy +// and session tag limits. For more information, see IAM and STS Entity Character +// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) +// in the IAM User Guide. +// +// - ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating Amazon Web Services STS in an Amazon Web Services Region +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + output = &GetSessionTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an Amazon Web Services account +// or IAM user. The credentials consist of an access key ID, a secret access +// key, and a security token. Typically, you use GetSessionToken if you want +// to use MFA to protect programmatic calls to specific Amazon Web Services +// API operations like Amazon EC2 StopInstances. +// +// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that the call returns, IAM users can then make programmatic calls to API +// operations that require MFA authentication. An incorrect MFA code causes +// the API to return an access denied error. For a comparison of GetSessionToken +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// No permissions are required for users to perform this operation. The purpose +// of the sts:GetSessionToken operation is to authenticate the user using MFA. +// You cannot use policies to control authentication operations. For more information, +// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html) +// in the IAM User Guide. +// +// # Session Duration +// +// The GetSessionToken operation must be called by using the long-term Amazon +// Web Services security credentials of an IAM user. Credentials that are created +// by IAM users are valid for the duration that you specify. This duration can +// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 +// hours), with a default of 43,200 seconds (12 hours). Credentials based on +// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds +// (1 hour), with a default of 1 hour. 
+//
+// # Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any Amazon Web Services service with the following exceptions:
+//
+//   - You cannot call any IAM API operations unless MFA authentication information
+//     is included in the request.
+//
+//   - You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// The credentials that GetSessionToken returns are based on permissions associated
+// with the IAM user whose credentials were used to call the operation. The
+// temporary credentials have the same permissions as the IAM user.
+//
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do
+// not recommend it. If GetSessionToken is called using root user credentials,
+// the temporary credentials have root user permissions. For more information,
+// see Safeguard your root user credentials and don't use them for everyday
+// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
+// in the IAM User Guide.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+//   - ErrCodeRegionDisabledException "RegionDisabledException"
+//     STS is not activated in the requested region for the account that is being
+//     asked to generate credentials. The account administrator must use the IAM
+//     console to activate STS in that region. For more information, see Activating
+//     and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+//     (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//     in the IAM User Guide.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+type AssumeRoleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session.
The value specified can range + // from 900 seconds (15 minutes) up to the maximum session duration set for + // the role. The maximum session duration setting can have a value from 1 hour + // to 12 hours. If you specify a value higher than this setting or the administrator + // setting (whichever is lower), the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. + // + // Role chaining limits your Amazon Web Services CLI or Amazon Web Services + // API role session to a maximum of one hour. When you use the AssumeRole API + // operation to assume a role, you can specify the duration of your role session + // with the DurationSeconds parameter. You can specify a parameter value of + // up to 43200 seconds (12 hours), depending on the maximum session duration + // setting for your role. However, if you assume a role using role chaining + // and provide a DurationSeconds parameter value greater than one hour, the + // operation fails. To learn how to view the maximum value for your role, see + // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your Amazon Web + // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + ExternalId *string `min:"2" type:"string"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. 
You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of previously acquired trusted context assertions in the format of + // a JSON array. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + // + // The following is an example of a ProvidedContext value that includes a single + // trusted context assertion and the ARN of the context provider from which + // the trusted context assertion was generated. 
+ // + // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}] + ProvidedContexts []*ProvidedContext `type:"list"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests that use the temporary security credentials will expose the role + // session name to the external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@-. You cannot + // use a value that begins with the text aws:. This prefix is reserved for Amazon + // Web Services internal use. + SourceIdentity *string `min:"2" type:"string"` + + // A list of session tags that you want to pass. Each session tag consists of + // a key name and an associated value. For more information about session tags, + // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. 
+ // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters, and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the role. When you do, session tags override a role tag with the same + // key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + // + // Additionally, if you used temporary credentials to perform this operation, + // the new session inherits any transitive session tags from the calling session. + // If you pass a session tag with the same key as an inherited tag, the operation + // fails. To view the inherited tags for a session, see the CloudTrail logs. + // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs) + // in the IAM User Guide. + Tags []*Tag `type:"list"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA. (In other words, if the policy includes a condition + // that tests for MFA). If the role being assumed requires MFA and if the TokenCode + // value is missing or expired, the AssumeRole call returns an "access denied" + // error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` + + // A list of keys for session tags that you want to set as transitive. If you + // set a tag key as transitive, the corresponding key and value passes to subsequent + // sessions in a role chain. For more information, see Chaining Roles with Session + // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining) + // in the IAM User Guide. + // + // This parameter is optional. When you set session tags as transitive, the + // session policy and session tags packed binary limit is not affected. + // + // If you choose not to specify a transitive tag key, then no tags are passed + // from this session to any subsequent sessions. + TransitiveTagKeys []*string `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 { + invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvidedContexts != nil { + for i, v := range s.ProvidedContexts { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + +// SetProvidedContexts sets the ProvidedContexts field's value. +func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput { + s.ProvidedContexts = v + return s +} + +// SetRoleArn sets the RoleArn field's value. 
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput { + s.SourceIdentity = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput { + s.Tags = v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// SetTransitiveTagKeys sets the TransitiveTagKeys field's value. +func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput { + s.TransitiveTagKeys = v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// Amazon Web Services credentials that can be used to make Amazon Web Services +// requests. +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The source identity specified by the principal that is calling the AssumeRole + // operation. + // + // You can require users to specify a source identity when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. You can use source identity information in CloudTrail logs to determine + // who took actions with a role. You can use the aws:SourceIdentity condition + // key to further control access to Amazon Web Services resources based on the + // value of source identity. For more information about using source identity, + // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
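With the input type, its setters, and Validate now in view, a hedged end-to-end sketch of AssumeRole could look like this. The role ARN, external ID, and tag values are placeholders, and DurationSeconds must stay within the role's configured maximum session duration.

    package main

    import (
        "context"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := sts.New(sess)

        // Placeholder values throughout.
        input := &sts.AssumeRoleInput{
            RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
            RoleSessionName: aws.String("example-session"),
            ExternalId:      aws.String("example-external-id"),
            DurationSeconds: aws.Int64(3600),
            Tags: []*sts.Tag{{
                Key:   aws.String("Department"),
                Value: aws.String("Engineering"),
            }},
            TransitiveTagKeys: []*string{aws.String("Department")},
        }

        // Client-side validation mirrors the minimum-length and required-field
        // constraints enforced in Validate above.
        if err := input.Validate(); err != nil {
            log.Fatal(err)
        }

        out, err := svc.AssumeRoleWithContext(context.Background(), input)
        if err != nil {
            log.Fatal(err)
        }
        log.Println("assumed role:", aws.StringValue(out.AssumedRoleUser.Arn))
    }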
You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput { + s.SourceIdentity = &v + return s +} + +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. 
You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes + // the IdP. + // + // PrincipalArn is a required field + PrincipalArn *string `min:"20" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. 
+ // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // The base64 encoded SAML authentication response provided by the IdP. + // + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. + // + // SAMLAssertion is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's + // String and GoString methods. + // + // SAMLAssertion is a required field + SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithSAMLInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.SAMLAssertion == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLAssertion")) + } + if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. 
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the following: + // + // * The Issuer response value. + // + // * The Amazon Web Services account ID. + // + // * The friendly name (the last part of the ARN) of the SAML provider in + // IAM. + // + // The combination of NameQualifier and Subject can be used to uniquely identify + // a user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value in the SourceIdentity attribute in the SAML assertion. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your SAML identity provider to use an attribute + // associated with your users, like user name or email, as the source identity + // when calling AssumeRoleWithSAML. You do this by adding an attribute to the + // SAML assertion. For more information about using source identity, see Monitor + // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. 
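The NameQualifier pseudocode above translates directly into Go; this sketch reuses the example inputs from the comment.

    package main

    import (
        "crypto/sha1"
        "encoding/base64"
        "fmt"
    )

    func main() {
        // BASE64(SHA1(Issuer + account ID + "/" + SAML provider name)),
        // using the documented example inputs.
        sum := sha1.Sum([]byte("https://example.com/saml" + "123456789012" + "/MySAMLIdP"))
        fmt.Println(base64.StdEncoding.EncodeToString(sum[:]))
    }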
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. + SubjectType *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput { + s.SourceIdentity = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) up to the maximum session duration setting for the role. 
+ // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent Amazon Web Services API calls + // to access resources in the account that owns the role. You cannot use session + // policies to grant more permissions than those allowed by the identity-based + // policy of the role that is being assumed. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plaintext that you use for both inline and managed session policies + // can't exceed 2,048 characters. For more information about ARNs, see Amazon + // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. 
+ // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent Amazon Web Services API calls to access resources in the account + // that owns the role. You cannot use session policies to grant more permissions + // than those allowed by the identity-based policy of the role that is being + // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // The fully qualified host component of the domain name of the OAuth 2.0 identity + // provider. Do not specify this value for an OpenID Connect identity provider. + // + // Currently www.amazon.com and graph.facebook.com are the only supported identity + // providers for OAuth 2.0 access tokens. Do not include URL schemes and port + // numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. Only tokens with + // RSA algorithms (RS256) are supported. + // + // WebIdentityToken is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's + // String and GoString methods. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. 
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary Amazon Web Services credentials that can be used to make +// Amazon Web Services requests. +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The value of the source identity that is returned in the JSON web token (JWT) + // from the identity provider. + // + // You can require users to set a source identity value when they assume a role. + // You do this by using the sts:SourceIdentity condition key in a role trust + // policy. That way, actions that are taken with the role are associated with + // that user. After the source identity is set, the value cannot be changed. + // It is present in the request for all actions that are taken by the role and + // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining) + // sessions. You can configure your identity provider to use an attribute associated + // with your users, like user name or email, as the source identity when calling + // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web + // token. To learn more about OIDC tokens and claims, see Using Tokens with + // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html) + // in the Amazon Cognito Developer Guide. For more information about using source + // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html) + // in the IAM User Guide. 
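Tying the AssumeRoleWithWebIdentity input and output types together, a hedged sketch follows. The role ARN and token path are placeholders, and anonymous credentials are used because this operation does not require signed requests.

    package main

    import (
        "log"
        "os"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        // Anonymous credentials: AssumeRoleWithWebIdentity is called unsigned.
        sess := session.Must(session.NewSession())
        svc := sts.New(sess, &aws.Config{Credentials: credentials.AnonymousCredentials})

        // Placeholder path; the token normally comes from your OIDC provider.
        token, err := os.ReadFile("/var/run/secrets/oidc/token")
        if err != nil {
            log.Fatal(err)
        }

        out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
            RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
            RoleSessionName:  aws.String("example-session"),
            WebIdentityToken: aws.String(string(token)),
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Println("token subject:", aws.StringValue(out.SubjectFromWebIdentityToken))
    }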
+ // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SourceIdentity *string `min:"2" type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSourceIdentity sets the SourceIdentity field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput { + s.SourceIdentity = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. For more information about ARNs and how to use them in + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. 
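Whichever operation produced them, the returned temporary Credentials can seed a fresh session. Below is a sketch using static credentials; real code usually prefers the SDK's stscreds helpers, which refresh expiring credentials automatically.

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{})
        if err != nil {
            log.Fatal(err)
        }

        // A second session backed by the temporary credentials. Note that they
        // expire at out.Credentials.Expiration and are not refreshed here.
        tmp := session.Must(session.NewSession(&aws.Config{
            Credentials: credentials.NewStaticCredentials(
                aws.StringValue(out.Credentials.AccessKeyId),
                aws.StringValue(out.Credentials.SecretAccessKey),
                aws.StringValue(out.Credentials.SessionToken),
            ),
        }))
        _ = tmp // hand tmp to any service client constructor
    }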
+ // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by Amazon Web Services + // when the role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// Amazon Web Services credentials for API authentication. +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a sensitive parameter and its value will be + // replaced with "sensitive" in string returned by Credentials's + // String and GoString methods. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. 
+func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. + // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an Amazon +// Web Services request. +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // The API returns a response with the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. 
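A short sketch of DecodeAuthorizationMessage, which turns an encoded authorization-failure message into readable JSON; the encoded string here is a placeholder, and the caller needs the sts:DecodeAuthorizationMessage permission.

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))

        // Placeholder: normally taken from an "access denied" error response.
        encoded := "AQoDYXdzEPT//////////wEXAMPLE"

        out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
            EncodedMessage: aws.String(encoded),
        })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(aws.StringValue(out.DecodedMessage))
    }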
+ // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. +func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +type GetAccessKeyInfoInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern) a string of characters + // that can consist of any upper- or lowercase letter or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyInfoInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput { + s.AccessKeyId = &v + return s +} + +type GetAccessKeyInfoOutput struct { + _ struct{} `type:"structure"` + + // The number used to identify the Amazon Web Services account. + Account *string `type:"string"` +} + +// String returns the string representation. 
+// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetAccessKeyInfoOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput { + s.Account = &v + return s +} + +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Web Services account ID number of the account that owns or contains + // the calling entity. + Account *string `type:"string"` + + // The Amazon Web Services ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. 
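A hedged end-to-end sketch of the caller-identity round trip described above, assuming a standard v1 session (none of this is in the vendored file itself):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := sts.New(sess)

	// GetCallerIdentityInput has no settable fields; an empty value suffices.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatalf("GetCallerIdentity failed: %v", err)
	}

	// aws.StringValue safely dereferences the *string members.
	fmt.Println(aws.StringValue(out.Account), aws.StringValue(out.Arn), aws.StringValue(out.UserId))
}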
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using root user credentials are restricted to a maximum of 3,600 seconds + // (one hour). If the specified duration is longer than one hour, the session + // obtained by using root user credentials defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // The plaintext that you use for both inline and managed session policies can't + // exceed 2,048 characters. The JSON policy characters can be any ASCII character + // from the space character to the end of the valid character list (\u0020 through + // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage + // return (\u000D) characters. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. 
The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policy Amazon + // Resource Names (ARNs) to use as managed session policies. The plaintext that + // you use for both inline and managed session policies can't exceed 2,048 characters. + // You can provide up to 10 managed policy ARNs. For more information about + // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces + // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The resulting credentials can be used to access a resource that has a resource-based + // policy. If that policy specifically references the federated user session + // in the Principal element of the policy, the session has the permissions allowed + // by the policy. These permissions are granted in addition to the permissions + // that are granted by the session policies. + // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` + + // A list of session tags. Each session tag consists of a key name and an associated + // value. For more information about session tags, see Passing Session Tags + // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // This parameter is optional. You can pass up to 50 session tags. The plaintext + // session tag keys can’t exceed 128 characters and the values can’t exceed + // 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. 
+ // + // An Amazon Web Services conversion compresses the passed inline session policy, + // managed policy ARNs, and session tags into a packed binary format that has + // a separate limit. Your request can fail for this limit even if your plaintext + // meets the other requirements. The PackedPolicySize response element indicates + // by percentage how close the policies and tags for your request are to the + // upper size limit. + // + // You can pass a session tag with the same key as a tag that is already attached + // to the user you are federating. When you do, session tags override a user + // tag with the same key. + // + // Tag key–value pairs are not case sensitive, but case is preserved. This + // means that you cannot have separate Department and department tag keys. Assume + // that the role has the Department=Marketing tag and you pass the department=engineering + // session tag. Department and department are not saved as separate tags, and + // the session tag passed in the request takes precedence over the role tag. + Tags []*Tag `type:"list"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// SetPolicyArns sets the PolicyArns field's value. 
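To make the constraints above concrete, a hedged sketch of assembling a GetFederationTokenInput with the generated setters and running the client-side Validate; every value here is a placeholder:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	input := (&sts.GetFederationTokenInput{}).
		SetName("Bob").
		SetDurationSeconds(3600).
		SetPolicyArns([]*sts.PolicyDescriptorType{
			(&sts.PolicyDescriptorType{}).SetArn("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"),
		}).
		SetTags([]*sts.Tag{
			(&sts.Tag{}).SetKey("Department").SetValue("Engineering"),
		})

	// Validate enforces the required-field and min-length constraints
	// documented on the struct before any request is sent.
	if err := input.Validate(); err != nil {
		log.Fatalf("invalid input: %v", err)
	}
}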
+func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + +// SetTags sets the Tags field's value. +func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput { + s.Tags = v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value that indicates the packed size of the session policies + // and session tags combined passed in the request. The request fails if the + // packed size is greater than 100 percent, which means the policies and tags + // exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for Amazon Web Services account owners are restricted to a maximum of 3,600 + // seconds (one hour). If the duration is longer than one hour, the session + // for Amazon Web Services account owners defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. 
Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the Amazon Web Services + // Management Console and viewing the user's security credentials. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@:/- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if MFA is required. If any policy requires + // the IAM user to submit an MFA code, specify this value. If MFA authentication + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. + TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSessionTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput { + s.DurationSeconds = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful GetSessionToken request, including +// temporary Amazon Web Services credentials that can be used to make Amazon +// Web Services requests. +type GetSessionTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. 
+ // + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. + Credentials *Credentials `type:"structure"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s GetSessionTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the Amazon Web Services General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} + +// Contains information about the provided context. This includes the signed +// and encrypted trusted context assertion and the context provider ARN from +// which the trusted context assertion was generated. +type ProvidedContext struct { + _ struct{} `type:"structure"` + + // The signed and encrypted trusted context assertion generated by the context + // provider. The trusted context assertion is signed and encrypted by Amazon + // Web Services STS. + ContextAssertion *string `min:"4" type:"string"` + + // The context provider ARN from which the trusted context assertion was generated. 
+ ProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s ProvidedContext) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvidedContext) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"} + if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4)) + } + if s.ProviderArn != nil && len(*s.ProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextAssertion sets the ContextAssertion field's value. +func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext { + s.ContextAssertion = &v + return s +} + +// SetProviderArn sets the ProviderArn field's value. +func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext { + s.ProviderArn = &v + return s +} + +// You can pass custom key-value pair attributes when you assume a role or federate +// a user. These are called session tags. You can then use the session tags +// to control access to resources. For more information, see Tagging Amazon +// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag keys can’t + // exceed 128 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value for a session tag. + // + // You can pass up to 50 session tags. The plain text session tag values can’t + // exceed 256 characters. For these and additional limits, see IAM and STS Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +// +// API parameter values that are decorated as "sensitive" in the API will not +// be included in the string output. The member name will be present, but the +// value will be replaced with "sensitive". 
+func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 00000000000..d5307fcaa0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,11 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = customizeRequest +} + +func customizeRequest(r *request.Request) { + r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 00000000000..ea1d9eb0ccf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,31 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// Security Token Service (STS) enables you to request temporary, limited-privilege +// credentials for users. This guide provides descriptions of the STS API. For +// more information about using this service, see Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// # Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 00000000000..b680bbd5d70 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,84 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". 
+ // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the identity provider (IDP) that + // was asked to verify the incoming identity token could not be reached. This + // is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by Amazon Web + // Services. Get a new identity token from the identity provider and then retry + // the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the total packed size of the session policies + // and session tags combined was too large. An Amazon Web Services conversion + // compresses the session policy document, session policy ARNs, and session + // tags into a packed binary format that has a separate limit. The error message + // indicates by percentage how close the policies and tags are to the upper + // size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) + // in the IAM User Guide. + // + // You could receive this error even though you meet other defined session policy + // and session tag limits. For more information, see IAM and STS Entity Character + // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length) + // in the IAM User Guide. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". 
+	//
+	// STS is not activated in the requested region for the account that is being
+	// asked to generate credentials. The account administrator must use the IAM
+	// console to activate STS in that region. For more information, see Activating
+	// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+	// in the IAM User Guide.
+	ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 00000000000..12327d05332
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,104 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties, though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "STS"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//
+//	mySession := session.Must(session.NewSession())
+//
+//	// Create a STS client from just a session.
+//	svc := sts.New(mySession)
+//
+//	// Create a STS client with additional configuration
+//	svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	if c.SigningNameDerived || len(c.SigningName) == 0 {
+		c.SigningName = EndpointsID
+		// No Fallback
+	}
+	return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:    ServiceName,
+				ServiceID:      ServiceID,
+				SigningName:    signingName,
+				SigningRegion:  signingRegion,
+				PartitionID:    partitionID,
+				Endpoint:       endpoint,
+				APIVersion:     "2011-06-15",
+				ResolvedRegion: resolvedRegion,
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644
index 00000000000..bf06b2e7d08
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operations,
+// paginators, and waiters. This makes unit testing your code that calls out
+// to the SDK's service client easier.
+//
+// The best way to use this interface is so the SDK's service client's calls
+// can be stubbed out for unit testing your code with the SDK without needing
+// to inject custom request handlers into the SDK's request pipeline.
+//
+//	// myFunc uses an SDK service client to make a request to
+//	// AWS Security Token Service.
+//	func myFunc(svc stsiface.STSAPI) bool {
+//	    // Make svc.AssumeRole request
+//	}
+//
+//	func main() {
+//	    sess := session.New()
+//	    svc := sts.New(sess)
+//
+//	    myFunc(svc)
+//	}
+//
+// In your _test.go file:
+//
+//	// Define a mock struct to be used in your unit tests of myFunc.
+//	type mockSTSClient struct {
+//	    stsiface.STSAPI
+//	}
+//	func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//	    // mock response/functionality
+//	}
+//
+//	func TestMyFunc(t *testing.T) {
+//	    // Setup Test
+//	    mockSvc := &mockSTSClient{}
+//
+//	    myFunc(mockSvc)
+//
+//	    // Verify myFunc's functionality
+//	}
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type STSAPI interface {
+	AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+	AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+	AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+	AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+	AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+	AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+	AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+	DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+	DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+	GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+	GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+	GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+	GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+	GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+	GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+	GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+	GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
+var _ STSAPI = (*sts.STS)(nil)
diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md
index b8d6561a4e1..39ffae99938 100644
--- a/vendor/github.com/aws/smithy-go/CHANGELOG.md
+++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md
@@ -1,3 +1,7 @@
+# Release (2024-03-29)
+
+* No change notes available for this release.
+
 # Release (2024-02-21)
 
 ## Module Highlights
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
index 341392e10f8..a6b22f353d3 100644
--- a/vendor/github.com/aws/smithy-go/go_module_metadata.go
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -3,4 +3,4 @@
 package smithy
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.20.1"
+const goModuleVersion = "1.20.2"
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 00000000000..50d95c548b6
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 00000000000..89b81799655
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 00000000000..9433004a280
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,30 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The retries increase exponentially and stop increasing when a certain threshold is met.
+
+## Usage
+
+The import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
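Since the README's Usage section defers to pkg.go.dev, here is a small, hedged usage sketch against the v4 API added elsewhere in this patch; the flaky `operation` is a placeholder:

package main

import (
	"log"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Placeholder for work that can fail transiently (a network call, etc.).
	operation := func() error {
		return nil
	}

	// Retry with exponential backoff until the operation succeeds or the
	// default MaxElapsedTime (15 minutes) is exceeded.
	if err := backoff.Retry(operation, backoff.NewExponentialBackOff()); err != nil {
		log.Fatalf("operation did not succeed: %v", err)
	}
}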
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 00000000000..3676ee405d8
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use the Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also a Ticker type similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See the Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+	// NextBackOff returns the duration to wait before retrying the operation,
+	// or backoff.Stop to indicate that no more retries should be made.
+	//
+	// Example usage:
+	//
+	//	duration := backoff.NextBackOff()
+	//	if duration == backoff.Stop {
+	//		// Do not retry operation.
+	//	} else {
+	//		// Sleep for duration and retry operation.
+	//	}
+	//
+	NextBackOff() time.Duration
+
+	// Reset to initial state.
+	Reset()
+}
+
+// Stop indicates that no more retries should be made for use in NextBackOff().
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+	Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset() {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+// NewConstantBackOff returns a ConstantBackOff with the given interval.
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+	return &ConstantBackOff{Interval: d}
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 00000000000..48482330eb7
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+	"context"
+	"time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint + BackOff + Context() context.Context +} + +type backOffContext struct { + BackOff + ctx context.Context +} + +// WithContext returns a BackOffContext with context ctx +// +// ctx must not be nil +func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint + if ctx == nil { + panic("nil context") + } + + if b, ok := b.(*backOffContext); ok { + return &backOffContext{ + BackOff: b.BackOff, + ctx: ctx, + } + } + + return &backOffContext{ + BackOff: b, + ctx: ctx, + } +} + +func getContext(b BackOff) context.Context { + if cb, ok := b.(BackOffContext); ok { + return cb.Context() + } + if tb, ok := b.(*backOffTries); ok { + return getContext(tb.delegate) + } + return context.Background() +} + +func (b *backOffContext) Context() context.Context { + return b.ctx +} + +func (b *backOffContext) NextBackOff() time.Duration { + select { + case <-b.ctx.Done(): + return Stop + default: + return b.BackOff.NextBackOff() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go new file mode 100644 index 00000000000..aac99f196ad --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -0,0 +1,216 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff returns Stop. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Stop time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options. 
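As a hedged sketch of the functional-options pattern that the definitions just below implement; every value here is illustrative, not a recommendation:

package main

import (
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	// Compose options with the variadic constructor defined below.
	b := backoff.NewExponentialBackOff(
		backoff.WithInitialInterval(100*time.Millisecond),
		backoff.WithMultiplier(2.0),
		backoff.WithMaxInterval(5*time.Second),
		backoff.WithMaxElapsedTime(time.Minute),
	)

	// With the default 0.5 randomization factor, the first delay falls
	// roughly in [50ms, 150ms].
	fmt.Println(b.NextBackOff())
}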
+type ExponentialBackOffOpts func(*ExponentialBackOff) + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Stop: Stop, + Clock: SystemClock, + } + for _, fn := range opts { + fn(b) + } + b.Reset() + return b +} + +// WithInitialInterval sets the initial interval between retries. +func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.InitialInterval = duration + } +} + +// WithRandomizationFactor sets the randomization factor to add jitter to intervals. +func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.RandomizationFactor = randomizationFactor + } +} + +// WithMultiplier sets the multiplier for increasing the interval after each retry. +func WithMultiplier(multiplier float64) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Multiplier = multiplier + } +} + +// WithMaxInterval sets the maximum interval between retries. +func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxInterval = duration + } +} + +// WithMaxElapsedTime sets the maximum total time for retries. +func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.MaxElapsedTime = duration + } +} + +// WithRetryStopDuration sets the duration after which retries should stop. +func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Stop = duration + } +} + +// WithClockProvider sets the clock used to measure time. +func WithClockProvider(clock Clock) ExponentialBackOffOpts { + return func(ebo *ExponentialBackOff) { + ebo.Clock = clock + } +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +// Reset must be called before using b. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval * (1 ± RandomizationFactor) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + elapsed := b.GetElapsedTime() + next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) + b.incrementCurrentInterval() + if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime { + return b.Stop + } + return next +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). 
It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+	return b.Clock.Now().Sub(b.startTime)
+}
+
+// incrementCurrentInterval increments the current interval by multiplying it with the multiplier.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+	// Check for overflow; if overflow is detected, set the current interval to the max interval.
+	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+		b.currentInterval = b.MaxInterval
+	} else {
+		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+	}
+}
+
+// getRandomValueFromInterval returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+	if randomizationFactor == 0 {
+		return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+	}
+	var delta = randomizationFactor * float64(currentInterval)
+	var minInterval = float64(currentInterval) - delta
+	var maxInterval = float64(currentInterval) + delta
+
+	// Get a random value from the range [minInterval, maxInterval].
+	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+	// we want a 33% chance for selecting either 1, 2 or 3.
+	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 00000000000..b9c0c51cd75
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+	"errors"
+	"time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+	return func() (struct{}, error) {
+		return struct{}{}, o()
+	}
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy signals that retrying should stop,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return an error or BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+	return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+	return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleep.
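+//
+// A minimal sketch (op is a caller-supplied Operation; log is the standard
+// library logger):
+//
+//	notify := func(err error, next time.Duration) {
+//		log.Printf("retrying in %s after error: %v", next, err)
+//	}
+//	err := backoff.RetryNotify(op, backoff.NewExponentialBackOff(), notify)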
+func RetryNotify(operation Operation, b BackOff, notify Notify) error { + return RetryNotifyWithTimer(operation, b, notify, nil) +} + +// RetryNotifyWithData is like RetryNotify but returns data in the response too. +func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + +// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer +// for each failed attempt before sleep. +// A default timer that uses system timer is used when nil is passed. +func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) + if t == nil { + t = &defaultTimer{} + } + + defer func() { + t.Stop() + }() + + ctx := getContext(b) + + b.Reset() + for { + res, err = operation() + if err == nil { + return res, nil + } + + var permanent *PermanentError + if errors.As(err, &permanent) { + return res, permanent.Err + } + + if next = b.NextBackOff(); next == Stop { + if cerr := ctx.Err(); cerr != nil { + return res, cerr + } + + return res, err + } + + if notify != nil { + notify(err, next) + } + + t.Start(next) + + select { + case <-ctx.Done(): + return res, ctx.Err() + case <-t.C(): + } + } +} + +// PermanentError signals that the operation should not be retried. +type PermanentError struct { + Err error +} + +func (e *PermanentError) Error() string { + return e.Err.Error() +} + +func (e *PermanentError) Unwrap() error { + return e.Err +} + +func (e *PermanentError) Is(target error) bool { + _, ok := target.(*PermanentError) + return ok +} + +// Permanent wraps the given err in a *PermanentError. +func Permanent(err error) error { + if err == nil { + return nil + } + return &PermanentError{ + Err: err, + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go new file mode 100644 index 00000000000..df9d68bce52 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go @@ -0,0 +1,97 @@ +package backoff + +import ( + "context" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + ctx context.Context + timer Timer + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send +// the time at times specified by the BackOff argument. Ticker is +// guaranteed to tick at least once. The channel is closed when Stop +// method is called or BackOff stops. It is not safe to manipulate the +// provided backoff policy (notably calling NextBackOff or Reset) +// while the ticker is running. +func NewTicker(b BackOff) *Ticker { + return NewTickerWithTimer(b, &defaultTimer{}) +} + +// NewTickerWithTimer returns a new Ticker with a custom timer. 
+// A default timer that uses system timer is used when nil is passed. +func NewTickerWithTimer(b BackOff, timer Timer) *Ticker { + if timer == nil { + timer = &defaultTimer{} + } + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + ctx: getContext(b), + timer: timer, + stop: make(chan struct{}), + } + t.b.Reset() + go t.run() + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + case <-t.ctx.Done(): + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + t.timer.Start(next) + return t.timer.C() +} diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go new file mode 100644 index 00000000000..8120d0213c5 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/timer.go @@ -0,0 +1,35 @@ +package backoff + +import "time" + +type Timer interface { + Start(duration time.Duration) + Stop() + C() <-chan time.Time +} + +// defaultTimer implements Timer interface using time.Timer +type defaultTimer struct { + timer *time.Timer +} + +// C returns the timers channel which receives the current time when the timer fires. +func (t *defaultTimer) C() <-chan time.Time { + return t.timer.C +} + +// Start starts the timer to fire after the given duration +func (t *defaultTimer) Start(duration time.Duration) { + if t.timer == nil { + t.timer = time.NewTimer(duration) + } else { + t.timer.Reset(duration) + } +} + +// Stop is called when the timer is not used anymore and resources may be freed. +func (t *defaultTimer) Stop() { + if t.timer != nil { + t.timer.Stop() + } +} diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go new file mode 100644 index 00000000000..28d58ca37c6 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/v4/tries.go @@ -0,0 +1,38 @@ +package backoff + +import "time" + +/* +WithMaxRetries creates a wrapper around another BackOff, which will +return Stop if NextBackOff() has been called too many times since +the last time Reset() was called + +Note: Implementation is not thread-safe. +*/ +func WithMaxRetries(b BackOff, max uint64) BackOff { + return &backOffTries{delegate: b, maxTries: max} +} + +type backOffTries struct { + delegate BackOff + maxTries uint64 + numTries uint64 +} + +func (b *backOffTries) NextBackOff() time.Duration { + if b.maxTries == 0 { + return Stop + } + if b.maxTries > 0 { + if b.maxTries <= b.numTries { + return Stop + } + b.numTries++ + } + return b.delegate.NextBackOff() +} + +func (b *backOffTries) Reset() { + b.numTries = 0 + b.delegate.Reset() +} diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 00000000000..23a0ada2fbb --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 00000000000..eb9fb7ff2d8 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. 
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
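+	// For example, per the semver spec, 1.0.0-alpha < 1.0.0-alpha.1.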
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 00000000000..e256b41a5dd --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index e8c65d0f672..9e790390b62 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,13 @@ # Change history of go-restful + +## [v3.12.0] - 2024-03-11 +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 +- better not have 2 tags on one commit + ## [v3.11.1, v3.11.2] - 2024-01-09 - fix by restoring custom JSON handler functions (Mike Beaumont #540) diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index 95a05a08944..7234604e47b 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,7 +2,6 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99fe..80adf55fdfe 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. 
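+//
+// A hedged usage sketch for streaming handlers, where w is any
+// http.ResponseWriter (possibly this compressing writer):
+//
+//	if f, ok := w.(http.Flusher); ok {
+//		f.Flush() // push buffered, compressed bytes to the client
+//	}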
+func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e942..a9b3faaa81f 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md index c1f280d4d0b..ff0c22c8969 100644 --- a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md +++ b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md @@ -27,14 +27,14 @@ compatibility status with go-git. ## Branching and merging -| Feature | Sub-feature | Status | Notes | Examples | -| ----------- | ----------- | ------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- | -| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | -| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | -| `merge` | | ❌ | | | -| `mergetool` | | ❌ | | | -| `stash` | | ❌ | | | -| `tag` | | ✅ | | - [tag](_examples/tag/main.go)
- [tag create and push](_examples/tag-create-push/main.go) | +| Feature | Sub-feature | Status | Notes | Examples | +| ----------- | ----------- | ------------ | --------------------------------------- | ----------------------------------------------------------------------------------------------- | +| `branch` | | ✅ | | - [branch](_examples/branch/main.go) | +| `checkout` | | ✅ | Basic usages of checkout are supported. | - [checkout](_examples/checkout/main.go) | +| `merge` | | ⚠️ (partial) | Fast-forward only | | +| `mergetool` | | ❌ | | | +| `stash` | | ❌ | | | +| `tag` | | ✅ | | - [tag](_examples/tag/main.go)
- [tag create and push](_examples/tag-create-push/main.go) | ## Sharing and updating projects diff --git a/vendor/github.com/go-git/go-git/v5/Makefile b/vendor/github.com/go-git/go-git/v5/Makefile index 1e103967478..3d5b54f7e65 100644 --- a/vendor/github.com/go-git/go-git/v5/Makefile +++ b/vendor/github.com/go-git/go-git/v5/Makefile @@ -28,6 +28,7 @@ build-git: test: @echo "running against `git version`"; \ $(GOTEST) -race ./... + $(GOTEST) -v _examples/common_test.go _examples/common.go --examples TEMP_REPO := $(shell mktemp) test-sha256: diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go index 8902b7e3e45..d7776dad5e3 100644 --- a/vendor/github.com/go-git/go-git/v5/options.go +++ b/vendor/github.com/go-git/go-git/v5/options.go @@ -89,6 +89,25 @@ type CloneOptions struct { Shared bool } +// MergeOptions describes how a merge should be performed. +type MergeOptions struct { + // Strategy defines the merge strategy to be used. + Strategy MergeStrategy +} + +// MergeStrategy represents the different types of merge strategies. +type MergeStrategy int8 + +const ( + // FastForwardMerge represents a Git merge strategy where the current + // branch can be simply updated to point to the HEAD of the branch being + // merged. This is only possible if the history of the branch being merged + // is a linear descendant of the current branch, with no conflicting commits. + // + // This is the default option. + FastForwardMerge MergeStrategy = iota +) + // Validate validates the fields and sets the default values. func (o *CloneOptions) Validate() error { if o.URL == "" { @@ -166,7 +185,7 @@ const ( // AllTags fetch all tags from the remote (i.e., fetch remote tags // refs/tags/* into local tags with the same name) AllTags - //NoTags fetch no tags from the remote at all + // NoTags fetch no tags from the remote at all NoTags ) @@ -198,6 +217,9 @@ type FetchOptions struct { CABundle []byte // ProxyOptions provides info required for connecting to a proxy. ProxyOptions transport.ProxyOptions + // Prune specify that local refs that match given RefSpecs and that do + // not exist remotely will be removed. + Prune bool } // Validate validates the fields and sets the default values. @@ -324,9 +346,9 @@ var ( // CheckoutOptions describes how a checkout operation should be performed. type CheckoutOptions struct { - // Hash is the hash of the commit to be checked out. If used, HEAD will be - // in detached mode. If Create is not used, Branch and Hash are mutually - // exclusive. + // Hash is the hash of a commit or tag to be checked out. If used, HEAD + // will be in detached mode. If Create is not used, Branch and Hash are + // mutually exclusive. Hash plumbing.Hash // Branch to be checked out, if Branch and Hash are empty is set to `master`. Branch plumbing.ReferenceName @@ -405,6 +427,11 @@ func (o *ResetOptions) Validate(r *Repository) error { } o.Commit = ref.Hash() + } else { + _, err := r.CommitObject(o.Commit) + if err != nil { + return fmt.Errorf("invalid reset option: %w", err) + } } return nil @@ -474,6 +501,11 @@ type AddOptions struct { // Glob adds all paths, matching pattern, to the index. If pattern matches a // directory path, all directory contents are added to the index recursively. Glob string + // SkipStatus adds the path with no status check. This option is relevant only + // when the `Path` option is specified and does not apply when the `All` option is used. + // Notice that when passing an ignored path it will be added anyway. 
+ // When true it can speed up adding files to the worktree in very large repositories. + SkipStatus bool } // Validate validates the fields and sets the default values. @@ -507,6 +539,10 @@ type CommitOptions struct { // commit will not be signed. The private key must be present and already // decrypted. SignKey *openpgp.Entity + // Signer denotes a cryptographic signer to sign the commit with. + // A nil value here means the commit will not be signed. + // Takes precedence over SignKey. + Signer Signer // Amend will create a new commit object and replace the commit that HEAD currently // points to. Cannot be used with All nor Parents. Amend bool diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go index d8fb30c1664..aca5d0dbd23 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go @@ -116,7 +116,7 @@ func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) { return } -// LoadGlobalPatterns loads gitignore patterns from from the gitignore file +// LoadGlobalPatterns loads gitignore patterns from the gitignore file // declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not // exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by @@ -132,7 +132,7 @@ func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) { return loadPatterns(fs, fs.Join(home, gitconfigFile)) } -// LoadSystemPatterns loads gitignore patterns from from the gitignore file +// LoadSystemPatterns loads gitignore patterns from the gitignore file // declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does // not exist the function will return nil. If the core.excludesfile property // is not declared, the function will return nil. If the file pointed to by diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go index ceed5d01e70..3d096e18b80 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -27,7 +27,7 @@ const ( // the commit with the "mergetag" header. 
headermergetag string = "mergetag" - defaultUtf8CommitMesageEncoding MessageEncoding = "UTF-8" + defaultUtf8CommitMessageEncoding MessageEncoding = "UTF-8" ) // Hash represents the hash of an object @@ -189,7 +189,7 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { } c.Hash = o.Hash() - c.Encoding = defaultUtf8CommitMesageEncoding + c.Encoding = defaultUtf8CommitMessageEncoding reader, err := o.Reader() if err != nil { @@ -335,7 +335,7 @@ func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { } } - if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMesageEncoding { + if string(c.Encoding) != "" && c.Encoding != defaultUtf8CommitMessageEncoding { if _, err = fmt.Fprintf(w, "\n%s %s", headerencoding, c.Encoding); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go index aa0ca15fd0b..c1ec8ba7ae1 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go @@ -57,6 +57,8 @@ func (c *commitPathIter) Next() (*Commit, error) { } func (c *commitPathIter) getNextFileCommit() (*Commit, error) { + var parentTree, currentTree *Tree + for { // Parent-commit can be nil if the current-commit is the initial commit parentCommit, parentCommitErr := c.sourceIter.Next() @@ -68,13 +70,17 @@ func (c *commitPathIter) getNextFileCommit() (*Commit, error) { parentCommit = nil } - // Fetch the trees of the current and parent commits - currentTree, currTreeErr := c.currentCommit.Tree() - if currTreeErr != nil { - return nil, currTreeErr + if parentTree == nil { + var currTreeErr error + currentTree, currTreeErr = c.currentCommit.Tree() + if currTreeErr != nil { + return nil, currTreeErr + } + } else { + currentTree = parentTree + parentTree = nil } - var parentTree *Tree if parentCommit != nil { var parentTreeErr error parentTree, parentTreeErr = parentCommit.Tree() @@ -115,7 +121,8 @@ func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool { // filename matches, now check if source iterator contains all commits (from all refs) if c.checkParent { - if parent != nil && isParentHash(parent.Hash, c.currentCommit) { + // Check if parent is beyond the initial commit + if parent == nil || isParentHash(parent.Hash, c.currentCommit) { return true } continue diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go index dd8fef44783..3c61f626abb 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go @@ -6,7 +6,7 @@ import ( "errors" "fmt" "io" - "math" + "strconv" "strings" "github.com/go-git/go-git/v5/plumbing" @@ -234,69 +234,56 @@ func (fileStats FileStats) String() string { return printStat(fileStats) } +// printStat prints the stats of changes in content of files. +// Original implementation: https://github.com/git/git/blob/1a87c842ece327d03d08096395969aca5e0a6996/diff.c#L2615 +// Parts of the output: +// |<+++/---> +// example: " main.go | 10 +++++++--- " func printStat(fileStats []FileStat) string { - padLength := float64(len(" ")) - newlineLength := float64(len("\n")) - separatorLength := float64(len("|")) - // Soft line length limit. The text length calculation below excludes - // length of the change number. 
Adding that would take it closer to 80, - // but probably not more than 80, until it's a huge number. - lineLength := 72.0 - - // Get the longest filename and longest total change. - var longestLength float64 - var longestTotalChange float64 - for _, fs := range fileStats { - if int(longestLength) < len(fs.Name) { - longestLength = float64(len(fs.Name)) - } - totalChange := fs.Addition + fs.Deletion - if int(longestTotalChange) < totalChange { - longestTotalChange = float64(totalChange) - } - } - - // Parts of the output: - // |<+++/---> - // example: " main.go | 10 +++++++--- " - - // - leftTextLength := padLength + longestLength + padLength - - // <+++++/-----> - // Excluding number length here. - rightTextLength := padLength + padLength + newlineLength + maxGraphWidth := uint(53) + maxNameLen := 0 + maxChangeLen := 0 - totalTextArea := leftTextLength + separatorLength + rightTextLength - heightOfHistogram := lineLength - totalTextArea + scaleLinear := func(it, width, max uint) uint { + if it == 0 || max == 0 { + return 0 + } - // Scale the histogram. - var scaleFactor float64 - if longestTotalChange > heightOfHistogram { - // Scale down to heightOfHistogram. - scaleFactor = longestTotalChange / heightOfHistogram - } else { - scaleFactor = 1.0 + return 1 + (it * (width - 1) / max) } - finalOutput := "" for _, fs := range fileStats { - addn := float64(fs.Addition) - deln := float64(fs.Deletion) - addc := int(math.Floor(addn/scaleFactor)) - delc := int(math.Floor(deln/scaleFactor)) - if addc < 0 { - addc = 0 + if len(fs.Name) > maxNameLen { + maxNameLen = len(fs.Name) } - if delc < 0 { - delc = 0 + + changes := strconv.Itoa(fs.Addition + fs.Deletion) + if len(changes) > maxChangeLen { + maxChangeLen = len(changes) } - adds := strings.Repeat("+", addc) - dels := strings.Repeat("-", delc) - finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) } - return finalOutput + result := "" + for _, fs := range fileStats { + add := uint(fs.Addition) + del := uint(fs.Deletion) + np := maxNameLen - len(fs.Name) + cp := maxChangeLen - len(strconv.Itoa(fs.Addition+fs.Deletion)) + + total := add + del + if total > maxGraphWidth { + add = scaleLinear(add, maxGraphWidth, total) + del = scaleLinear(del, maxGraphWidth, total) + } + + adds := strings.Repeat("+", int(add)) + dels := strings.Repeat("-", int(del)) + namePad := strings.Repeat(" ", np) + changePad := strings.Repeat(" ", cp) + + result += fmt.Sprintf(" %s%s | %s%d %s%s\n", fs.Name, namePad, changePad, total, adds, dels) + } + return result } func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go index e9f7666b838..0fd0e51398f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go @@ -7,6 +7,7 @@ import ( "io" "path" "path/filepath" + "sort" "strings" "github.com/go-git/go-git/v5/plumbing" @@ -27,6 +28,7 @@ var ( ErrFileNotFound = errors.New("file not found") ErrDirectoryNotFound = errors.New("directory not found") ErrEntryNotFound = errors.New("entry not found") + ErrEntriesNotSorted = errors.New("entries in tree are not sorted") ) // Tree is basically like a directory - it references a bunch of other trees @@ -270,6 +272,28 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { return nil } +type TreeEntrySorter []TreeEntry + +func (s TreeEntrySorter) Len() int { + 
return len(s) +} + +func (s TreeEntrySorter) Less(i, j int) bool { + name1 := s[i].Name + name2 := s[j].Name + if s[i].Mode == filemode.Dir { + name1 += "/" + } + if s[j].Mode == filemode.Dir { + name2 += "/" + } + return name1 < name2 +} + +func (s TreeEntrySorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + // Encode transforms a Tree into a plumbing.EncodedObject. func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { o.SetType(plumbing.TreeObject) @@ -279,7 +303,15 @@ func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { } defer ioutil.CheckClose(w, &err) + + if !sort.IsSorted(TreeEntrySorter(t.Entries)) { + return ErrEntriesNotSorted + } + for _, entry := range t.Entries { + if strings.IndexByte(entry.Name, 0) != -1 { + return fmt.Errorf("malformed filename %q", entry.Name) + } if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { return err } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go index 6e7b334cbd1..2adb6452880 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go @@ -88,7 +88,9 @@ func (t *treeNoder) Children() ([]noder.Noder, error) { } } - return transformChildren(parent) + var err error + t.children, err = transformChildren(parent) + return t.children, err } // Returns the children of a tree as treenoders. diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go index 54126febf48..1c4ceee68d0 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go @@ -91,9 +91,9 @@ func advertisedReferences(ctx context.Context, s *session, serviceName string) ( } type client struct { - c *http.Client + client *http.Client transports *lru.Cache - m sync.RWMutex + mutex sync.RWMutex } // ClientOptions holds user configurable options for the client. @@ -147,7 +147,7 @@ func NewClientWithOptions(c *http.Client, opts *ClientOptions) transport.Transpo } } cl := &client{ - c: c, + client: c, } if opts != nil { @@ -234,10 +234,10 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* // if the client wasn't configured to have a cache for transports then just configure // the transport and use it directly, otherwise try to use the cache. 
if c.transports == nil { - tr, ok := c.c.Transport.(*http.Transport) + tr, ok := c.client.Transport.(*http.Transport) if !ok { return nil, fmt.Errorf("expected underlying client transport to be of type: %s; got: %s", - reflect.TypeOf(transport), reflect.TypeOf(c.c.Transport)) + reflect.TypeOf(transport), reflect.TypeOf(c.client.Transport)) } transport = tr.Clone() @@ -258,7 +258,7 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* transport, found = c.fetchTransport(transportOpts) if !found { - transport = c.c.Transport.(*http.Transport).Clone() + transport = c.client.Transport.(*http.Transport).Clone() configureTransport(transport, ep) c.addTransport(transportOpts, transport) } @@ -266,12 +266,12 @@ func newSession(c *client, ep *transport.Endpoint, auth transport.AuthMethod) (* httpClient = &http.Client{ Transport: transport, - CheckRedirect: c.c.CheckRedirect, - Jar: c.c.Jar, - Timeout: c.c.Timeout, + CheckRedirect: c.client.CheckRedirect, + Jar: c.client.Jar, + Timeout: c.client.Timeout, } } else { - httpClient = c.c + httpClient = c.client } s := &session{ diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go index 052f3c8e284..c8db389204a 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/http/transport.go @@ -14,21 +14,21 @@ type transportOptions struct { } func (c *client) addTransport(opts transportOptions, transport *http.Transport) { - c.m.Lock() + c.mutex.Lock() c.transports.Add(opts, transport) - c.m.Unlock() + c.mutex.Unlock() } func (c *client) removeTransport(opts transportOptions) { - c.m.Lock() + c.mutex.Lock() c.transports.Remove(opts) - c.m.Unlock() + c.mutex.Unlock() } func (c *client) fetchTransport(opts transportOptions) (*http.Transport, bool) { - c.m.RLock() + c.mutex.RLock() t, ok := c.transports.Get(opts) - c.m.RUnlock() + c.mutex.RUnlock() if !ok { return nil, false } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go index 46fda73fa41..05dea448f8f 100644 --- a/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go +++ b/vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go @@ -49,7 +49,9 @@ type runner struct { func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { c := &command{command: cmd, endpoint: ep, config: r.config} if auth != nil { - c.setAuth(auth) + if err := c.setAuth(auth); err != nil { + return nil, err + } } if err := c.connect(); err != nil { diff --git a/vendor/github.com/go-git/go-git/v5/remote.go b/vendor/github.com/go-git/go-git/v5/remote.go index 0cb70bc0093..7cc0db9b7db 100644 --- a/vendor/github.com/go-git/go-git/v5/remote.go +++ b/vendor/github.com/go-git/go-git/v5/remote.go @@ -470,6 +470,14 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } + var updatedPrune bool + if o.Prune { + updatedPrune, err = r.pruneRemotes(o.RefSpecs, localRefs, remoteRefs) + if err != nil { + return nil, err + } + } + updated, err := r.updateLocalReferenceStorage(o.RefSpecs, refs, remoteRefs, specToRefs, o.Tags, o.Force) if err != nil { return nil, err @@ -482,7 +490,7 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen } } - if !updated { + if !updated && !updatedPrune { return 
remoteRefs, NoErrAlreadyUpToDate } @@ -574,6 +582,27 @@ func (r *Remote) fetchPack(ctx context.Context, o *FetchOptions, s transport.Upl return err } +func (r *Remote) pruneRemotes(specs []config.RefSpec, localRefs []*plumbing.Reference, remoteRefs memory.ReferenceStorage) (bool, error) { + var updatedPrune bool + for _, spec := range specs { + rev := spec.Reverse() + for _, ref := range localRefs { + if !rev.Match(ref.Name()) { + continue + } + _, err := remoteRefs.Reference(rev.Dst(ref.Name())) + if errors.Is(err, plumbing.ErrReferenceNotFound) { + updatedPrune = true + err := r.s.RemoveReference(ref.Name()) + if err != nil { + return false, err + } + } + } + } + return updatedPrune, nil +} + func (r *Remote) addReferencesToUpdate( refspecs []config.RefSpec, localRefs []*plumbing.Reference, @@ -1099,7 +1128,7 @@ func isFastForward(s storer.EncodedObjectStorer, old, new plumbing.Hash, earlies } found := false - // stop iterating at the earlist shallow commit, ignoring its parents + // stop iterating at the earliest shallow commit, ignoring its parents // note: when pull depth is smaller than the number of new changes on the remote, this fails due to missing parents. // as far as i can tell, without the commits in-between the shallow pull and the earliest shallow, there's no // real way of telling whether it will be a fast-forward merge. diff --git a/vendor/github.com/go-git/go-git/v5/repository.go b/vendor/github.com/go-git/go-git/v5/repository.go index 1524a691305..a57c7141f8d 100644 --- a/vendor/github.com/go-git/go-git/v5/repository.go +++ b/vendor/github.com/go-git/go-git/v5/repository.go @@ -51,19 +51,21 @@ var ( // ErrFetching is returned when the packfile could not be downloaded ErrFetching = errors.New("unable to fetch packfile") - ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") - ErrRepositoryNotExists = errors.New("repository does not exist") - ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") - ErrRepositoryAlreadyExists = errors.New("repository already exists") - ErrRemoteNotFound = errors.New("remote not found") - ErrRemoteExists = errors.New("remote already exists") - ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") - ErrWorktreeNotProvided = errors.New("worktree should be provided") - ErrIsBareRepository = errors.New("worktree not available in a bare repository") - ErrUnableToResolveCommit = errors.New("unable to resolve commit") - ErrPackedObjectsNotSupported = errors.New("packed objects not supported") - ErrSHA256NotSupported = errors.New("go-git was not compiled with SHA256 support") - ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") + ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") + ErrRepositoryNotExists = errors.New("repository does not exist") + ErrRepositoryIncomplete = errors.New("repository's commondir path does not exist") + ErrRepositoryAlreadyExists = errors.New("repository already exists") + ErrRemoteNotFound = errors.New("remote not found") + ErrRemoteExists = errors.New("remote already exists") + ErrAnonymousRemoteName = errors.New("anonymous remote name must be 'anonymous'") + ErrWorktreeNotProvided = errors.New("worktree should be provided") + ErrIsBareRepository = errors.New("worktree not available in a bare repository") + ErrUnableToResolveCommit = errors.New("unable to resolve commit") + ErrPackedObjectsNotSupported = errors.New("packed objects not supported") + ErrSHA256NotSupported 
= errors.New("go-git was not compiled with SHA256 support") + ErrAlternatePathNotSupported = errors.New("alternate path must use the file scheme") + ErrUnsupportedMergeStrategy = errors.New("unsupported merge strategy") + ErrFastForwardMergeNotPossible = errors.New("not possible to fast-forward merge changes") ) // Repository represents a git repository @@ -1769,8 +1771,43 @@ func (r *Repository) RepackObjects(cfg *RepackConfig) (err error) { return nil } +// Merge merges the reference branch into the current branch. +// +// If the merge is not possible (or supported) returns an error without changing +// the HEAD for the current branch. Possible errors include: +// - The merge strategy is not supported. +// - The specific strategy cannot be used (e.g. using FastForwardMerge when one is not possible). +func (r *Repository) Merge(ref plumbing.Reference, opts MergeOptions) error { + if opts.Strategy != FastForwardMerge { + return ErrUnsupportedMergeStrategy + } + + // Ignore error as not having a shallow list is optional here. + shallowList, _ := r.Storer.Shallow() + var earliestShallow *plumbing.Hash + if len(shallowList) > 0 { + earliestShallow = &shallowList[0] + } + + head, err := r.Head() + if err != nil { + return err + } + + ff, err := isFastForward(r.Storer, head.Hash(), ref.Hash(), earliestShallow) + if err != nil { + return err + } + + if !ff { + return ErrFastForwardMergeNotPossible + } + + return r.Storer.SetReference(plumbing.NewHashReference(head.Name(), ref.Hash())) +} + // createNewObjectPack is a helper for RepackObjects taking care -// of creating a new pack. It is used so the the PackfileWriter +// of creating a new pack. It is used so the PackfileWriter // deferred close has the right scope. func (r *Repository) createNewObjectPack(cfg *RepackConfig) (h plumbing.Hash, err error) { ow := newObjectWalker(r.Storer) diff --git a/vendor/github.com/go-git/go-git/v5/signer.go b/vendor/github.com/go-git/go-git/v5/signer.go new file mode 100644 index 00000000000..e3ef7ebd31d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/signer.go @@ -0,0 +1,33 @@ +package git + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" +) + +// signableObject is an object which can be signed. +type signableObject interface { + EncodeWithoutSignature(o plumbing.EncodedObject) error +} + +// Signer is an interface for signing git objects. +// message is a reader containing the encoded object to be signed. +// Implementors should return the encoded signature and an error if any. +// See https://git-scm.com/docs/gitformat-signature for more information. +type Signer interface { + Sign(message io.Reader) ([]byte, error) +} + +func signObject(signer Signer, obj signableObject) ([]byte, error) { + encoded := &plumbing.MemoryObject{} + if err := obj.EncodeWithoutSignature(encoded); err != nil { + return nil, err + } + r, err := encoded.Reader() + if err != nil { + return nil, err + } + + return signer.Sign(r) +} diff --git a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go index 7bba0d03e31..33800627de7 100644 --- a/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go +++ b/vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go @@ -29,6 +29,8 @@ type node struct { hash []byte children []noder.Noder isDir bool + mode os.FileMode + size int64 } // NewRootNode returns the root node based on a given billy.Filesystem. 
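The repository.go hunk above introduces go-git's first merge support: a fast-forward-only `Merge` method guarded by `ErrUnsupportedMergeStrategy` and `ErrFastForwardMergeNotPossible`. A minimal consumer-side sketch of the new API follows; the repository handle, remote name, and branch name are illustrative, not part of the diff:

```go
package main

import (
	"errors"
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing"
)

// fastForward advances the current branch to an already-fetched
// remote-tracking ref using the new fast-forward-only strategy.
func fastForward(repo *git.Repository) error {
	// Resolve the remote-tracking ref (remote/branch names are illustrative).
	ref, err := repo.Reference(plumbing.NewRemoteReferenceName("origin", "main"), true)
	if err != nil {
		return err
	}
	err = repo.Merge(*ref, git.MergeOptions{Strategy: git.FastForwardMerge})
	if errors.Is(err, git.ErrFastForwardMergeNotPossible) {
		return fmt.Errorf("histories diverged, only fast-forward is supported: %w", err)
	}
	return err
}
```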
@@ -48,8 +50,15 @@ func NewRootNode( // difftree algorithm will detect changes in the contents of files and also in // their mode. // +// Please note that the hash is calculated on first invocation of Hash(), +// meaning that it will not update when the underlying file changes +// between invocations. +// // The hash of a directory is always a 24-bytes slice of zero values func (n *node) Hash() []byte { + if n.hash == nil { + n.calculateHash() + } return n.hash } @@ -121,81 +130,74 @@ func (n *node) calculateChildren() error { func (n *node) newChildNode(file os.FileInfo) (*node, error) { path := path.Join(n.path, file.Name()) - hash, err := n.calculateHash(path, file) - if err != nil { - return nil, err - } - node := &node{ fs: n.fs, submodules: n.submodules, path: path, - hash: hash, isDir: file.IsDir(), + size: file.Size(), + mode: file.Mode(), } - if hash, isSubmodule := n.submodules[path]; isSubmodule { - node.hash = append(hash[:], filemode.Submodule.Bytes()...) + if _, isSubmodule := n.submodules[path]; isSubmodule { node.isDir = false } return node, nil } -func (n *node) calculateHash(path string, file os.FileInfo) ([]byte, error) { - if file.IsDir() { - return make([]byte, 24), nil - } - - var hash plumbing.Hash - var err error - if file.Mode()&os.ModeSymlink != 0 { - hash, err = n.doCalculateHashForSymlink(path, file) - } else { - hash, err = n.doCalculateHashForRegular(path, file) +func (n *node) calculateHash() { + if n.isDir { + n.hash = make([]byte, 24) + return } - + mode, err := filemode.NewFromOSFileMode(n.mode) if err != nil { - return nil, err + n.hash = plumbing.ZeroHash[:] + return } - - mode, err := filemode.NewFromOSFileMode(file.Mode()) - if err != nil { - return nil, err + if submoduleHash, isSubmodule := n.submodules[n.path]; isSubmodule { + n.hash = append(submoduleHash[:], filemode.Submodule.Bytes()...) + return } - - return append(hash[:], mode.Bytes()...), nil + var hash plumbing.Hash + if n.mode&os.ModeSymlink != 0 { + hash = n.doCalculateHashForSymlink() + } else { + hash = n.doCalculateHashForRegular() + } + n.hash = append(hash[:], mode.Bytes()...) 
} -func (n *node) doCalculateHashForRegular(path string, file os.FileInfo) (plumbing.Hash, error) { - f, err := n.fs.Open(path) +func (n *node) doCalculateHashForRegular() plumbing.Hash { + f, err := n.fs.Open(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } defer f.Close() - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := io.Copy(h, f); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } -func (n *node) doCalculateHashForSymlink(path string, file os.FileInfo) (plumbing.Hash, error) { - target, err := n.fs.Readlink(path) +func (n *node) doCalculateHashForSymlink() plumbing.Hash { + target, err := n.fs.Readlink(n.path) if err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - h := plumbing.NewHasher(plumbing.BlobObject, file.Size()) + h := plumbing.NewHasher(plumbing.BlobObject, n.size) if _, err := h.Write([]byte(target)); err != nil { - return plumbing.ZeroHash, err + return plumbing.ZeroHash } - return h.Sum(), nil + return h.Sum() } func (n *node) String() string { diff --git a/vendor/github.com/go-git/go-git/v5/worktree.go b/vendor/github.com/go-git/go-git/v5/worktree.go index ad525c1a494..ab11d42db83 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree.go +++ b/vendor/github.com/go-git/go-git/v5/worktree.go @@ -227,20 +227,17 @@ func (w *Worktree) createBranch(opts *CheckoutOptions) error { } func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing.Hash, error) { - if !opts.Hash.IsZero() { - return opts.Hash, nil - } - - b, err := w.r.Reference(opts.Branch, true) - if err != nil { - return plumbing.ZeroHash, err - } + hash := opts.Hash + if hash.IsZero() { + b, err := w.r.Reference(opts.Branch, true) + if err != nil { + return plumbing.ZeroHash, err + } - if !b.Name().IsTag() { - return b.Hash(), nil + hash = b.Hash() } - o, err := w.r.Object(plumbing.AnyObject, b.Hash()) + o, err := w.r.Object(plumbing.AnyObject, hash) if err != nil { return plumbing.ZeroHash, err } @@ -248,7 +245,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing switch o := o.(type) { case *object.Tag: if o.TargetType != plumbing.CommitObject { - return plumbing.ZeroHash, fmt.Errorf("unsupported tag object target %q", o.TargetType) + return plumbing.ZeroHash, fmt.Errorf("%w: tag target %q", object.ErrUnsupportedObject, o.TargetType) } return o.Target, nil @@ -256,7 +253,7 @@ func (w *Worktree) getCommitFromCheckoutOptions(opts *CheckoutOptions) (plumbing return o.Hash, nil } - return plumbing.ZeroHash, fmt.Errorf("unsupported tag target %q", o.Type()) + return plumbing.ZeroHash, fmt.Errorf("%w: %q", object.ErrUnsupportedObject, o.Type()) } func (w *Worktree) setHEADToCommit(commit plumbing.Hash) error { @@ -431,6 +428,10 @@ var worktreeDeny = map[string]struct{}{ func validPath(paths ...string) error { for _, p := range paths { parts := strings.FieldsFunc(p, func(r rune) bool { return (r == '\\' || r == '/') }) + if len(parts) == 0 { + return fmt.Errorf("invalid path: %q", p) + } + if _, denied := worktreeDeny[strings.ToLower(parts[0])]; denied { return fmt.Errorf("invalid path prefix: %q", p) } diff --git a/vendor/github.com/go-git/go-git/v5/worktree_commit.go b/vendor/github.com/go-git/go-git/v5/worktree_commit.go index eaa21c3f191..f62054bcb44 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_commit.go +++ 
b/vendor/github.com/go-git/go-git/v5/worktree_commit.go @@ -3,6 +3,7 @@ package git import ( "bytes" "errors" + "io" "path" "sort" "strings" @@ -14,6 +15,7 @@ import ( "github.com/go-git/go-git/v5/storage" "github.com/ProtonMail/go-crypto/openpgp" + "github.com/ProtonMail/go-crypto/openpgp/packet" "github.com/go-git/go-billy/v5" ) @@ -43,29 +45,30 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error if err != nil { return plumbing.ZeroHash, err } - - t, err := w.r.getTreeFromCommitHash(head.Hash()) + headCommit, err := w.r.CommitObject(head.Hash()) if err != nil { return plumbing.ZeroHash, err } - treeHash = t.Hash - opts.Parents = []plumbing.Hash{head.Hash()} - } else { - idx, err := w.r.Storer.Index() - if err != nil { - return plumbing.ZeroHash, err + opts.Parents = nil + if len(headCommit.ParentHashes) != 0 { + opts.Parents = []plumbing.Hash{headCommit.ParentHashes[0]} } + } - h := &buildTreeHelper{ - fs: w.Filesystem, - s: w.r.Storer, - } + idx, err := w.r.Storer.Index() + if err != nil { + return plumbing.ZeroHash, err + } - treeHash, err = h.BuildTree(idx, opts) - if err != nil { - return plumbing.ZeroHash, err - } + h := &buildTreeHelper{ + fs: w.Filesystem, + s: w.r.Storer, + } + + treeHash, err = h.BuildTree(idx, opts) + if err != nil { + return plumbing.ZeroHash, err } commit, err := w.buildCommitObject(msg, opts, treeHash) @@ -125,12 +128,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb ParentHashes: opts.Parents, } - if opts.SignKey != nil { - sig, err := w.buildCommitSignature(commit, opts.SignKey) + // Convert SignKey into a Signer if set. Existing Signer should take priority. + signer := opts.Signer + if signer == nil && opts.SignKey != nil { + signer = &gpgSigner{key: opts.SignKey} + } + if signer != nil { + sig, err := signObject(signer, commit) if err != nil { return plumbing.ZeroHash, err } - commit.PGPSignature = sig + commit.PGPSignature = string(sig) } obj := w.r.Storer.NewEncodedObject() @@ -140,20 +148,17 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb return w.r.Storer.SetEncodedObject(obj) } -func (w *Worktree) buildCommitSignature(commit *object.Commit, signKey *openpgp.Entity) (string, error) { - encoded := &plumbing.MemoryObject{} - if err := commit.Encode(encoded); err != nil { - return "", err - } - r, err := encoded.Reader() - if err != nil { - return "", err - } +type gpgSigner struct { + key *openpgp.Entity + cfg *packet.Config +} + +func (s *gpgSigner) Sign(message io.Reader) ([]byte, error) { var b bytes.Buffer - if err := openpgp.ArmoredDetachSign(&b, signKey, r, nil); err != nil { - return "", err + if err := openpgp.ArmoredDetachSign(&b, s.key, message, s.cfg); err != nil { + return nil, err } - return b.String(), nil + return b.Bytes(), nil } // buildTreeHelper converts a given index.Index file into multiple git objects @@ -263,4 +268,4 @@ func (h *buildTreeHelper) copyTreeToStorageRecursive(parent string, t *object.Tr return hash, nil } return h.s.SetEncodedObject(o) -} \ No newline at end of file +} diff --git a/vendor/github.com/go-git/go-git/v5/worktree_status.go b/vendor/github.com/go-git/go-git/v5/worktree_status.go index 730108754b9..dd9b2439cfd 100644 --- a/vendor/github.com/go-git/go-git/v5/worktree_status.go +++ b/vendor/github.com/go-git/go-git/v5/worktree_status.go @@ -271,7 +271,7 @@ func diffTreeIsEquals(a, b noder.Hasher) bool { // no error is returned. When path is a file, the blob.Hash is returned. 
func (w *Worktree) Add(path string) (plumbing.Hash, error) { // TODO(mcuadros): deprecate in favor of AddWithOption in v6. - return w.doAdd(path, make([]gitignore.Pattern, 0)) + return w.doAdd(path, make([]gitignore.Pattern, 0), false) } func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) { @@ -321,7 +321,7 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { } if opts.All { - _, err := w.doAdd(".", w.Excludes) + _, err := w.doAdd(".", w.Excludes, false) return err } @@ -329,16 +329,11 @@ func (w *Worktree) AddWithOptions(opts *AddOptions) error { return w.AddGlob(opts.Glob) } - _, err := w.Add(opts.Path) + _, err := w.doAdd(opts.Path, make([]gitignore.Pattern, 0), opts.SkipStatus) return err } -func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbing.Hash, error) { - s, err := w.Status() - if err != nil { - return plumbing.ZeroHash, err - } - +func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern, skipStatus bool) (plumbing.Hash, error) { idx, err := w.r.Storer.Index() if err != nil { return plumbing.ZeroHash, err @@ -348,6 +343,17 @@ func (w *Worktree) doAdd(path string, ignorePattern []gitignore.Pattern) (plumbi var added bool fi, err := w.Filesystem.Lstat(path) + + // status is required for doAddDirectory + var s Status + var err2 error + if !skipStatus || fi == nil || fi.IsDir() { + s, err2 = w.Status() + if err2 != nil { + return plumbing.ZeroHash, err2 + } + } + if err != nil || !fi.IsDir() { added, h, err = w.doAddFile(idx, s, path, ignorePattern) } else { @@ -421,8 +427,9 @@ func (w *Worktree) AddGlob(pattern string) error { // doAddFile create a new blob from path and update the index, added is true if // the file added is different from the index. +// if the s status is nil, the status check is skipped and the index is updated anyway func (w *Worktree) doAddFile(idx *index.Index, s Status, path string, ignorePattern []gitignore.Pattern) (added bool, h plumbing.Hash, err error) { - if s.File(path).Worktree == Unmodified { + if s != nil && s.File(path).Worktree == Unmodified { return false, h, nil } if len(ignorePattern) > 0 { diff --git a/vendor/github.com/go-openapi/runtime/client/request.go b/vendor/github.com/go-openapi/runtime/client/request.go index c238953f384..c4a891d0bc5 100644 --- a/vendor/github.com/go-openapi/runtime/client/request.go +++ b/vendor/github.com/go-openapi/runtime/client/request.go @@ -36,7 +36,7 @@ import ( ) // NewRequest creates a new swagger http client request -func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) (*request, error) { +func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) *request { return &request{ pathPattern: pathPattern, method: method, @@ -45,7 +45,7 @@ func newRequest(method, pathPattern string, writer runtime.ClientRequestWriter) query: make(url.Values), timeout: DefaultTimeout, getBody: getRequestBuffer, - }, nil + } } // Request represents a swagger client request.
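Before moving on from the go-git worktree changes above: `AddOptions` gains a `SkipStatus` flag that `AddWithOptions` now threads through `doAdd`, letting single-file adds avoid the full status walk. A short sketch of a caller using it; the worktree handle and file name are illustrative:

```go
package main

import git "github.com/go-git/go-git/v5"

// stageWithoutStatus stages one file while skipping the full worktree
// Status() computation, per the new SkipStatus option above.
func stageWithoutStatus(w *git.Worktree) error {
	return w.AddWithOptions(&git.AddOptions{
		Path:       "CHANGELOG.md", // illustrative path
		SkipStatus: true,           // skip the per-file status check; stage unconditionally
	})
}
```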
diff --git a/vendor/github.com/go-openapi/runtime/client/runtime.go b/vendor/github.com/go-openapi/runtime/client/runtime.go index fdf97176bf8..5bd4d75d906 100644 --- a/vendor/github.com/go-openapi/runtime/client/runtime.go +++ b/vendor/github.com/go-openapi/runtime/client/runtime.go @@ -32,12 +32,13 @@ import ( "sync" "time" + "github.com/go-openapi/strfmt" + "github.com/opentracing/opentracing-go" + "github.com/go-openapi/runtime" "github.com/go-openapi/runtime/logger" "github.com/go-openapi/runtime/middleware" "github.com/go-openapi/runtime/yamlpc" - "github.com/go-openapi/strfmt" - "github.com/opentracing/opentracing-go" ) const ( @@ -379,14 +380,11 @@ func (r *Runtime) EnableConnectionReuse() { func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*request, *http.Request, error) { //nolint:revive,stylecheck params, _, auth := operation.Params, operation.Reader, operation.AuthInfo - request, err := newRequest(operation.Method, operation.PathPattern, params) - if err != nil { - return nil, nil, err - } + request := newRequest(operation.Method, operation.PathPattern, params) var accept []string accept = append(accept, operation.ProducesMediaTypes...) - if err = request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil { + if err := request.SetHeaderParam(runtime.HeaderAccept, accept...); err != nil { return nil, nil, err } diff --git a/vendor/github.com/go-openapi/runtime/csv.go b/vendor/github.com/go-openapi/runtime/csv.go index b1640eaff9c..c9597bcd6e0 100644 --- a/vendor/github.com/go-openapi/runtime/csv.go +++ b/vendor/github.com/go-openapi/runtime/csv.go @@ -125,15 +125,10 @@ func CSVConsumer(opts ...CSVOpt) Consumer { return err } - /* - // with go1.20: - v.Grow(len(csvWriter.records)) - v.SetCap(len(csvWriter.records)) // in case Grow was unnessary, trim down the capacity - v.SetLen(len(csvWriter.records)) - reflect.Copy(v, reflect.ValueOf(csvWriter.records)) - */ - v.SetLen(0) - v.Set(reflect.AppendSlice(v, reflect.ValueOf(csvWriter.records))) + v.Grow(len(csvWriter.records)) + v.SetCap(len(csvWriter.records)) // in case Grow was unnecessary, trim down the capacity + v.SetLen(len(csvWriter.records)) + reflect.Copy(v, reflect.ValueOf(csvWriter.records)) return nil diff --git a/vendor/github.com/go-openapi/swag/string_bytes.go b/vendor/github.com/go-openapi/swag/string_bytes.go index c52d6bf7194..90745d5ca9f 100644 --- a/vendor/github.com/go-openapi/swag/string_bytes.go +++ b/vendor/github.com/go-openapi/swag/string_bytes.go @@ -2,21 +2,7 @@ package swag import "unsafe" -type internalString struct { - Data unsafe.Pointer - Len int -} - // hackStringBytes returns the (unsafe) underlying bytes slice of a string. -func hackStringBytes(str string) []byte { - p := (*internalString)(unsafe.Pointer(&str)).Data - return unsafe.Slice((*byte)(p), len(str)) -} - -/* - * go1.20 version (for when go mod moves to a go1.20 requirement): - func hackStringBytes(str string) []byte { return unsafe.Slice(unsafe.StringData(str), len(str)) } -*/ diff --git a/vendor/github.com/gocql/gocql/conn.go b/vendor/github.com/gocql/gocql/conn.go index 9a223f80a32..0f687aaa75e 100644 --- a/vendor/github.com/gocql/gocql/conn.go +++ b/vendor/github.com/gocql/gocql/conn.go @@ -208,7 +208,8 @@ type Conn struct { timeouts int64 - logger StdLogger + logger StdLogger + tabletsRoutingV1 bool } // connect establishes a connection to a Cassandra node using session's connection config.
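The `swag` hunk above deletes the pre-go1.20 pointer-cast implementation together with the comment that carried its replacement. Assuming the package now uses the go1.20 form spelled out in that removed comment, the whole helper reduces to a one-liner:

```go
package swag

import "unsafe"

// hackStringBytes returns a zero-copy view of a string's bytes via
// unsafe.StringData (go1.20+). The slice aliases the string's backing
// memory and must never be written to.
func hackStringBytes(str string) []byte {
	return unsafe.Slice(unsafe.StringData(str), len(str))
}
```

The `csv.go` change is the same go1.20 migration on the `reflect` side: `Value.Grow`/`SetCap`/`SetLen` plus `reflect.Copy` replace the old truncate-and-append workaround.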
@@ -724,6 +725,9 @@ func (c *Conn) recv(ctx context.Context) error { } else if head.stream == -1 { // TODO: handle cassandra event frames, we shouldnt get any currently framer := newFramerWithExts(c.compressor, c.version, c.cqlProtoExts) + c.mu.Lock() + c.tabletsRoutingV1 = framer.tabletsRoutingV1 + c.mu.Unlock() if err := framer.readFrame(c, &head); err != nil { return err } @@ -733,6 +737,9 @@ func (c *Conn) recv(ctx context.Context) error { // reserved stream that we dont use, probably due to a protocol error // or a bug in Cassandra, this should be an error, parse it and return. framer := newFramerWithExts(c.compressor, c.version, c.cqlProtoExts) + c.mu.Lock() + c.tabletsRoutingV1 = framer.tabletsRoutingV1 + c.mu.Unlock() if err := framer.readFrame(c, &head); err != nil { return err } @@ -1069,6 +1076,9 @@ func (c *Conn) exec(ctx context.Context, req frameBuilder, tracer Tracer) (*fram // resp is basically a waiting semaphore protecting the framer framer := newFramerWithExts(c.compressor, c.version, c.cqlProtoExts) + c.mu.Lock() + c.tabletsRoutingV1 = framer.tabletsRoutingV1 + c.mu.Unlock() call := &callReq{ timeout: make(chan struct{}), @@ -1453,6 +1463,63 @@ func (c *Conn) executeQuery(ctx context.Context, qry *Query) *Iter { return &Iter{err: err} } + if len(framer.customPayload) > 0 { + if tabletInfo, ok := framer.customPayload["tablets-routing-v1"]; ok { + var firstToken string + var lastToken string + var replicas [][]interface{} + tabletInfoValue := []interface{}{&firstToken, &lastToken, &replicas} + Unmarshal(TupleTypeInfo{ + NativeType: NativeType{proto: c.version, typ: TypeTuple}, + Elems: []TypeInfo{ + NativeType{typ: TypeBigInt}, + NativeType{typ: TypeBigInt}, + CollectionType{ + NativeType: NativeType{proto: c.version, typ: TypeList}, + Elem: TupleTypeInfo{ + NativeType: NativeType{proto: c.version, typ: TypeTuple}, + Elems: []TypeInfo{ + NativeType{proto: c.version, typ: TypeUUID}, + NativeType{proto: c.version, typ: TypeInt}, + }}, + }, + }, + }, tabletInfo, tabletInfoValue) + + tablet := TabletInfo{} + tablet.firstToken, err = strconv.ParseInt(firstToken, 10, 64) + if err != nil { + return &Iter{err: err} + } + tablet.lastToken, err = strconv.ParseInt(lastToken, 10, 64) + if err != nil { + return &Iter{err: err} + } + + tabletReplicas := make([]ReplicaInfo, 0, len(replicas)) + for _, replica := range replicas { + if len(replica) != 2 { + return &Iter{err: err} + } + if hostId, ok := replica[0].(UUID); ok { + if shardId, ok := replica[1].(int); ok { + repInfo := ReplicaInfo{hostId, shardId} + tabletReplicas = append(tabletReplicas, repInfo) + } else { + return &Iter{err: err} + } + } else { + return &Iter{err: err} + } + } + tablet.replicas = tabletReplicas + tablet.keyspaceName = qry.routingInfo.keyspace + tablet.tableName = qry.routingInfo.table + + addTablet(c.session.hostSource, &tablet) + } + } + if len(framer.traceID) > 0 && qry.trace != nil { qry.trace.Trace(framer.traceID) } diff --git a/vendor/github.com/gocql/gocql/connectionpool.go b/vendor/github.com/gocql/gocql/connectionpool.go index d207fa0aaca..4e61f306298 100644 --- a/vendor/github.com/gocql/gocql/connectionpool.go +++ b/vendor/github.com/gocql/gocql/connectionpool.go @@ -26,6 +26,12 @@ type SetPartitioner interface { SetPartitioner(partitioner string) } +// interface to implement to receive the tablets value +// Experimental, this interface and use may change +type SetTablets interface { + SetTablets(tablets []*TabletInfo) +} + func setupTLSConfig(sslOpts *SslOptions) (*tls.Config, error) { // 
Config.InsecureSkipVerify | EnableHostVerification | Result // Config is nil | true | verify host @@ -312,7 +318,7 @@ func newHostConnPool(session *Session, host *HostInfo, port, size int, } // Pick a connection from this connection pool for the given query. -func (pool *hostConnPool) Pick(token token) *Conn { +func (pool *hostConnPool) Pick(token token, keyspace string, table string) *Conn { pool.mu.RLock() defer pool.mu.RUnlock() @@ -330,7 +336,7 @@ func (pool *hostConnPool) Pick(token token) *Conn { } } - return pool.connPicker.Pick(token) + return pool.connPicker.Pick(token, keyspace, table) } // Size returns the number of connections currently active in the pool diff --git a/vendor/github.com/gocql/gocql/connpicker.go b/vendor/github.com/gocql/gocql/connpicker.go index 66adcdc8e94..af43d35c035 100644 --- a/vendor/github.com/gocql/gocql/connpicker.go +++ b/vendor/github.com/gocql/gocql/connpicker.go @@ -7,7 +7,7 @@ import ( ) type ConnPicker interface { - Pick(token) *Conn + Pick(token, string, string) *Conn Put(*Conn) Remove(conn *Conn) Size() (int, int) @@ -65,7 +65,7 @@ func (p *defaultConnPicker) Size() (int, int) { return size, p.size - size } -func (p *defaultConnPicker) Pick(token) *Conn { +func (p *defaultConnPicker) Pick(token, string, string) *Conn { pos := int(atomic.AddUint32(&p.pos, 1) - 1) size := len(p.conns) @@ -104,7 +104,7 @@ func (*defaultConnPicker) NextShard() (shardID, nrShards int) { // to the point where we have first connection. type nopConnPicker struct{} -func (nopConnPicker) Pick(token) *Conn { +func (nopConnPicker) Pick(token, string, string) *Conn { return nil } diff --git a/vendor/github.com/gocql/gocql/docker-compose.yml b/vendor/github.com/gocql/gocql/docker-compose.yml index 9e7490c7d5e..8090eb7e219 100644 --- a/vendor/github.com/gocql/gocql/docker-compose.yml +++ b/vendor/github.com/gocql/gocql/docker-compose.yml @@ -32,6 +32,60 @@ services: interval: 5s timeout: 5s retries: 18 + node_2: + image: scylladb/scylla-nightly + command: | + --experimental-features consistent-topology-changes + --experimental-features tablets + --smp 2 + --memory 1G + --seeds 192.168.100.12 + networks: + public: + ipv4_address: 192.168.100.12 + healthcheck: + test: [ "CMD", "cqlsh", "192.168.100.12", "-e", "select * from system.local" ] + interval: 5s + timeout: 5s + retries: 18 + node_3: + image: scylladb/scylla-nightly + command: | + --experimental-features consistent-topology-changes + --experimental-features tablets + --smp 2 + --memory 1G + --seeds 192.168.100.12 + networks: + public: + ipv4_address: 192.168.100.13 + healthcheck: + test: [ "CMD", "cqlsh", "192.168.100.13", "-e", "select * from system.local" ] + interval: 5s + timeout: 5s + retries: 18 + depends_on: + node_2: + condition: service_healthy + node_4: + image: scylladb/scylla-nightly + command: | + --experimental-features consistent-topology-changes + --experimental-features tablets + --smp 2 + --memory 1G + --seeds 192.168.100.12 + networks: + public: + ipv4_address: 192.168.100.14 + healthcheck: + test: [ "CMD", "cqlsh", "192.168.100.14", "-e", "select * from system.local" ] + interval: 5s + timeout: 5s + retries: 18 + depends_on: + node_3: + condition: service_healthy networks: public: driver: bridge diff --git a/vendor/github.com/gocql/gocql/frame.go b/vendor/github.com/gocql/gocql/frame.go index caf00eb34e4..d7e6b8dc4a1 100644 --- a/vendor/github.com/gocql/gocql/frame.go +++ b/vendor/github.com/gocql/gocql/frame.go @@ -367,6 +367,7 @@ type framer struct { flagLWT int rateLimitingErrorCode int + 
tabletsRoutingV1 bool } func newFramer(compressor Compressor, version byte) *framer { @@ -398,6 +399,8 @@ f.header = nil f.traceID = nil + f.tabletsRoutingV1 = false + return f } @@ -427,6 +430,17 @@ func newFramerWithExts(compressor Compressor, version byte, cqlProtoExts []cqlPr f.rateLimitingErrorCode = castedExt.rateLimitErrorCode } + if tabletsExt := findCQLProtoExtByName(cqlProtoExts, tabletsRoutingV1); tabletsExt != nil { + _, ok := tabletsExt.(*tabletsRoutingV1Ext) + if !ok { + Logger.Println( + fmt.Errorf("Failed to cast CQL protocol extension identified by name %s to type %T", + tabletsRoutingV1, tabletsRoutingV1Ext{})) + return f + } + f.tabletsRoutingV1 = true + } + return f } diff --git a/vendor/github.com/gocql/gocql/host_source.go b/vendor/github.com/gocql/gocql/host_source.go index ae0de33b5f1..31132e38f0d 100644 --- a/vendor/github.com/gocql/gocql/host_source.go +++ b/vendor/github.com/gocql/gocql/host_source.go @@ -472,12 +472,151 @@ func (h *HostInfo) ScyllaShardAwarePortTLS() uint16 { return h.scyllaShardAwarePortTLS } +// Experimental, this interface and use may change +type ReplicaInfo struct { + hostId UUID + shardId int +} + +// Experimental, this interface and use may change +type TabletInfo struct { + mu sync.RWMutex + keyspaceName string + tableName string + firstToken int64 + lastToken int64 + replicas []ReplicaInfo +} + +func (t *TabletInfo) KeyspaceName() string { + t.mu.RLock() + defer t.mu.RUnlock() + return t.keyspaceName +} + +func (t *TabletInfo) FirstToken() int64 { + t.mu.RLock() + defer t.mu.RUnlock() + return t.firstToken +} + +func (t *TabletInfo) LastToken() int64 { + t.mu.RLock() + defer t.mu.RUnlock() + return t.lastToken +} + +func (t *TabletInfo) TableName() string { + t.mu.RLock() + defer t.mu.RUnlock() + return t.tableName +} + +func (t *TabletInfo) Replicas() []ReplicaInfo { + t.mu.RLock() + defer t.mu.RUnlock() + return t.replicas +} + +// Search for place in tablets table with specific Keyspace and Table name +func findTablets(tablets []*TabletInfo, k string, t string) (int, int) { + l := -1 + r := -1 + for i, tablet := range tablets { + if tablet.KeyspaceName() == k && tablet.TableName() == t { + if l == -1 { + l = i + } + r = i + } else if l != -1 { + break + } + } + + return l, r +} + +func addTabletToTabletsList(tablets []*TabletInfo, tablet *TabletInfo) []*TabletInfo { + l, r := findTablets(tablets, tablet.keyspaceName, tablet.tableName) + if l == -1 && r == -1 { + l = 0 + r = 0 + } else { + r = r + 1 + } + + l1, r1 := l, r + l2, r2 := l1, r1 + + // find first overlapping range + for l1 < r1 { + mid := (l1 + r1) / 2 + if tablets[mid].FirstToken() < tablet.FirstToken() { + l1 = mid + 1 + } else { + r1 = mid + } + } + start := l1 + + if start > l && tablets[start-1].LastToken() > tablet.FirstToken() { + start = start - 1 + } + + // find last overlapping range + for l2 < r2 { + mid := (l2 + r2) / 2 + if tablets[mid].LastToken() < tablet.LastToken() { + l2 = mid + 1 + } else { + r2 = mid + } + } + end := l2 + if end < r && tablets[end].FirstToken() >= tablet.LastToken() { + end = end - 1 + } + if end == len(tablets) { + end = end - 1 + } + + updated_tablets := tablets + if start <= end { + // Delete elements from index start to end + updated_tablets = append(tablets[:start], tablets[end+1:]...) + } + // Insert tablet element at index start + updated_tablets2 := append(updated_tablets[:start], append([]*TabletInfo{tablet}, updated_tablets[start:]...)...)
+ return updated_tablets2 +} + +// Search for place in tablets table for token starting from index l to index r +func findTabletForToken(tablets []*TabletInfo, token token, l int, r int) *TabletInfo { + for l < r { + var m int + if r*l > 0 { + m = l + (r-l)/2 + } else { + m = (r + l) / 2 + } + if int64Token(tablets[m].LastToken()).Less(token) { + l = m + 1 + } else { + r = m + } + } + + return tablets[l] +} + // Polls system.peers at a specific interval to find new hosts type ringDescriber struct { session *Session mu sync.Mutex prevHosts []*HostInfo prevPartitioner string + // Experimental, this interface and use may change + prevTablets []*TabletInfo } // Returns true if we are using system_schema.keyspaces instead of system.schema_keyspaces @@ -835,6 +974,23 @@ func refreshRing(r *ringDescriber) error { r.session.metadata.setPartitioner(partitioner) r.session.policy.SetPartitioner(partitioner) + + return nil +} + +// Experimental, this interface and use may change +func addTablet(r *ringDescriber, tablet *TabletInfo) error { + r.mu.Lock() + defer r.mu.Unlock() + + tablets := r.session.getTablets() + tablets = addTabletToTabletsList(tablets, tablet) + + r.session.ring.setTablets(tablets) + r.session.policy.SetTablets(tablets) + + r.session.schemaDescriber.refreshTabletsSchema() + return nil } diff --git a/vendor/github.com/gocql/gocql/integration.sh b/vendor/github.com/gocql/gocql/integration.sh index 5c29615e957..6598599d10e 100644 --- a/vendor/github.com/gocql/gocql/integration.sh +++ b/vendor/github.com/gocql/gocql/integration.sh @@ -28,10 +28,25 @@ function scylla_restart() { scylla_restart readonly clusterSize=1 +readonly multiNodeClusterSize=3 readonly scylla_liveset="192.168.100.11" +readonly scylla_tablet_liveset="192.168.100.12" readonly cversion="3.11.4" readonly proto=4 readonly args="-gocql.timeout=60s -proto=${proto} -rf=${clusterSize} -clusterSize=${clusterSize} -autowait=2000ms -compressor=snappy -gocql.cversion=${cversion} -cluster=${scylla_liveset}" - -echo "==> Running $* tests with args: ${args}" -go test -timeout=5m -race -tags="$*" ${args} ./... +readonly tabletArgs="-gocql.timeout=60s -proto=${proto} -rf=1 -clusterSize=${multiNodeClusterSize} -autowait=2000ms -compressor=snappy -gocql.cversion=${cversion} -multiCluster=${scylla_tablet_liveset}" + +if [[ "$*" == *"tablet"* ]]; +then + echo "==> Running tablet tests with args: ${tabletArgs}" + go test -timeout=5m -race -tags="tablet" ${tabletArgs} ./... +fi + +TAGS=$* +TAGS=${TAGS//"tablet"/} + +if [ ! -z "$TAGS" ]; +then + echo "==> Running ${TAGS} tests with args: ${args}" + go test -timeout=5m -race -tags="$TAGS" ${args} ./... +fi diff --git a/vendor/github.com/gocql/gocql/metadata_scylla.go b/vendor/github.com/gocql/gocql/metadata_scylla.go index 7efdeb9b414..c413d97c066 100644 --- a/vendor/github.com/gocql/gocql/metadata_scylla.go +++ b/vendor/github.com/gocql/gocql/metadata_scylla.go @@ -1,3 +1,4 @@ +//go:build !cassandra || scylla // +build !cassandra scylla // Copyright (c) 2015 The gocql Authors. All rights reserved. 
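To make the bookkeeping above easier to follow: `addTabletToTabletsList` keeps each per-(keyspace, table) run of tablets sorted and non-overlapping by token range, and `findTabletForToken` is then a plain binary search for the first tablet whose last token is not below the query token. A simplified, self-contained restatement of that lookup; the `Tablet` struct here is illustrative, not the driver's `TabletInfo`:

```go
package main

// Tablet is a simplified stand-in for the driver's TabletInfo.
type Tablet struct {
	firstToken, lastToken int64
}

// findForToken mirrors findTabletForToken above: binary search within
// tablets[l:r] (sorted by token range) for the first tablet whose
// lastToken >= token.
func findForToken(tablets []*Tablet, token int64, l, r int) *Tablet {
	for l < r {
		m := l + (r-l)/2 // overflow-safe midpoint
		if tablets[m].lastToken < token {
			l = m + 1
		} else {
			r = m
		}
	}
	return tablets[l]
}
```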
@@ -132,6 +133,29 @@ type IndexMetadata struct { Options map[string]string } +// TabletsMetadata holds metadata for the tablet list +// Experimental, this interface and use may change +type TabletsMetadata struct { + Tablets []*TabletMetadata +} + +// TabletMetadata holds metadata for a single tablet +// Experimental, this interface and use may change +type TabletMetadata struct { + KeyspaceName string + TableName string + FirstToken int64 + LastToken int64 + Replicas []ReplicaMetadata +} + +// ReplicaMetadata holds metadata for a single replica +// Experimental, this interface and use may change +type ReplicaMetadata struct { + HostId UUID + ShardId int +} + const ( IndexKindCustom = "CUSTOM" ) @@ -215,20 +239,24 @@ func columnKindFromSchema(kind string) (ColumnKind, error) { } } -// queries the cluster for schema information for a specific keyspace +// queries the cluster for schema information for a specific keyspace and for tablets type schemaDescriber struct { session *Session mu sync.Mutex - cache map[string]*KeyspaceMetadata + cache map[string]*KeyspaceMetadata + + // Experimental, this interface and use may change + tabletsCache *TabletsMetadata } // creates a session bound schema describer which will query and cache -// keyspace metadata +// keyspace metadata and tablets metadata func newSchemaDescriber(session *Session) *schemaDescriber { return &schemaDescriber{ - session: session, - cache: map[string]*KeyspaceMetadata{}, + session: session, + cache: map[string]*KeyspaceMetadata{}, + tabletsCache: &TabletsMetadata{}, } } @@ -252,6 +280,36 @@ func (s *schemaDescriber) getSchema(keyspaceName string) (*KeyspaceMetadata, err return metadata, nil } +// Experimental, this interface and use may change +func (s *schemaDescriber) getTabletsSchema() *TabletsMetadata { + s.mu.Lock() + defer s.mu.Unlock() + + metadata := s.tabletsCache + + return metadata +} + +// Experimental, this interface and use may change +func (s *schemaDescriber) refreshTabletsSchema() { + tablets := s.session.getTablets() + s.tabletsCache.Tablets = []*TabletMetadata{} + + for _, tablet := range tablets { + t := &TabletMetadata{} + t.KeyspaceName = tablet.KeyspaceName() + t.TableName = tablet.TableName() + t.FirstToken = tablet.FirstToken() + t.LastToken = tablet.LastToken() + t.Replicas = []ReplicaMetadata{} + for _, replica := range tablet.Replicas() { + t.Replicas = append(t.Replicas, ReplicaMetadata{replica.hostId, replica.shardId}) + } + + s.tabletsCache.Tablets = append(s.tabletsCache.Tablets, t) + } +} + // clears the already cached keyspace metadata func (s *schemaDescriber) clearSchema(keyspaceName string) { s.mu.Lock() diff --git a/vendor/github.com/gocql/gocql/policies.go b/vendor/github.com/gocql/gocql/policies.go index 6373a2c7c91..70ea00164a8 100644 --- a/vendor/github.com/gocql/gocql/policies.go +++ b/vendor/github.com/gocql/gocql/policies.go @@ -95,6 +95,34 @@ func (c *cowHostList) remove(ip net.IP) bool { return true } +// cowTabletList implements a copy-on-write tablet list; its equivalent type is []*TabletInfo +// Experimental, this interface and use may change +type cowTabletList struct { + list atomic.Value + mu sync.Mutex +} + +func (c *cowTabletList) get() []*TabletInfo { + l, ok := c.list.Load().(*[]*TabletInfo) + if !ok { + return nil + } + return *l +} + +func (c *cowTabletList) set(tablets []*TabletInfo) { + c.mu.Lock() + defer c.mu.Unlock() + + n := len(tablets) + l := make([]*TabletInfo, n) + for i := 0; i < n; i++ { + l[i] = tablets[i] + } + + c.list.Store(&l) +} + // RetryableQuery is an
interface that represents a query or batch statement that // exposes the correct functions for the retry policy logic to evaluate correctly. type RetryableQuery interface { @@ -279,6 +307,8 @@ type HostTierer interface { type HostSelectionPolicy interface { HostStateNotifier SetPartitioner + // Experimental, this interface and use may change + SetTablets KeyspaceChanged(KeyspaceUpdateEvent) Init(*Session) IsLocal(host *HostInfo) bool @@ -331,6 +361,9 @@ func (r *roundRobinHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {} func (r *roundRobinHostPolicy) SetPartitioner(partitioner string) {} func (r *roundRobinHostPolicy) Init(*Session) {} +// Experimental, this interface and use may change +func (r *roundRobinHostPolicy) SetTablets(tablets []*TabletInfo) {} + func (r *roundRobinHostPolicy) Pick(qry ExecutableQuery) NextHost { nextStartOffset := atomic.AddUint64(&r.lastUsedHostIdx, 1) return roundRobbin(int(nextStartOffset), r.hosts.get()) @@ -407,6 +440,9 @@ type tokenAwareHostPolicy struct { metadata atomic.Value // *clusterMeta logger StdLogger + + // Experimental, this interface and use may change + tablets cowTabletList } func (t *tokenAwareHostPolicy) Init(s *Session) { @@ -473,6 +509,14 @@ func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) { } } +// Experimental, this interface and use may change +func (t *tokenAwareHostPolicy) SetTablets(tablets []*TabletInfo) { + t.mu.Lock() + defer t.mu.Unlock() + + t.tablets.set(tablets) +} + func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) { t.mu.Lock() if t.hosts.add(host) { @@ -589,17 +633,58 @@ func (t *tokenAwareHostPolicy) Pick(qry ExecutableQuery) NextHost { } token := partitioner.Hash(routingKey) - ht := meta.replicas[qry.Keyspace()].replicasFor(token) var replicas []*HostInfo - if ht == nil { - host, _ := meta.tokenRing.GetHostForToken(token) - replicas = []*HostInfo{host} - } else { - replicas = ht.hosts + + if qry.GetSession() != nil && qry.GetSession().tabletsRoutingV1 { + t.tablets.mu.Lock() + tablets := t.tablets.get() + + // Search for tablets with Keyspace and Table from the Query + l, r := findTablets(tablets, qry.Keyspace(), qry.Table()) + if l != -1 { + tablet := findTabletForToken(tablets, token, l, r) + + replicas = []*HostInfo{} + for _, replica := range tablet.Replicas() { + t.hosts.mu.Lock() + hosts := t.hosts.get() + for _, host := range hosts { + if host.hostId == replica.hostId.String() { + replicas = append(replicas, host) + break + } + } + t.hosts.mu.Unlock() + } + } else { + ht := meta.replicas[qry.Keyspace()].replicasFor(token) + + if ht == nil { + host, _ := meta.tokenRing.GetHostForToken(token) + replicas = []*HostInfo{host} + } else { + replicas = ht.hosts + } + } + if t.shuffleReplicas && !qry.IsLWT() { replicas = shuffleHosts(replicas) } + + t.tablets.mu.Unlock() + } else { + ht := meta.replicas[qry.Keyspace()].replicasFor(token) + + if ht == nil { + host, _ := meta.tokenRing.GetHostForToken(token) + replicas = []*HostInfo{host} + } else { + replicas = ht.hosts + if t.shuffleReplicas && !qry.IsLWT() { + replicas = shuffleHosts(replicas) + } + } } var ( @@ -711,6 +796,9 @@ func (r *hostPoolHostPolicy) KeyspaceChanged(KeyspaceUpdateEvent) {} func (r *hostPoolHostPolicy) SetPartitioner(string) {} func (r *hostPoolHostPolicy) IsLocal(*HostInfo) bool { return true } +// Experimental, this interface and use may change +func (r *hostPoolHostPolicy) SetTablets(tablets []*TabletInfo) {} + func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) { peers := make([]string, len(hosts)) hostMap := 
make(map[string]*HostInfo, len(hosts)) @@ -850,6 +938,9 @@ func (d *dcAwareRR) IsLocal(host *HostInfo) bool { return host.DataCenter() == d.local } +// Experimental, this interface and use may change +func (d *dcAwareRR) SetTablets(tablets []*TabletInfo) {} + func (d *dcAwareRR) AddHost(host *HostInfo) { if d.IsLocal(host) { d.localHosts.add(host) @@ -943,6 +1034,9 @@ func (d *rackAwareRR) MaxHostTier() uint { return 2 } +// Experimental, this interface and use may change +func (d *rackAwareRR) SetTablets(tablets []*TabletInfo) {} + func (d *rackAwareRR) HostTier(host *HostInfo) uint { if host.DataCenter() == d.localDC { if host.Rack() == d.localRack { diff --git a/vendor/github.com/gocql/gocql/query_executor.go b/vendor/github.com/gocql/gocql/query_executor.go index e4dbed9cdc8..f0d4e761f75 100644 --- a/vendor/github.com/gocql/gocql/query_executor.go +++ b/vendor/github.com/gocql/gocql/query_executor.go @@ -23,6 +23,8 @@ type ExecutableQuery interface { withContext(context.Context) ExecutableQuery RetryableQuery + + GetSession() *Session } type queryExecutor struct { @@ -123,7 +125,7 @@ func (q *queryExecutor) do(ctx context.Context, qry ExecutableQuery, hostIter Ne continue } - conn := pool.Pick(selectedHost.Token()) + conn := pool.Pick(selectedHost.Token(), qry.Keyspace(), qry.Table()) if conn == nil { selectedHost = hostIter() continue } diff --git a/vendor/github.com/gocql/gocql/ring.go b/vendor/github.com/gocql/gocql/ring.go index 5b77370a160..86970a7669e 100644 --- a/vendor/github.com/gocql/gocql/ring.go +++ b/vendor/github.com/gocql/gocql/ring.go @@ -22,6 +22,9 @@ type ring struct { hostList []*HostInfo pos uint32 + // Experimental, this interface and use may change + tabletList []*TabletInfo + // TODO: we should store the ring metadata here also. } @@ -141,3 +144,11 @@ func (c *clusterMetadata) setPartitioner(partitioner string) { c.partitioner = partitioner } } + +// Experimental, this interface and use may change +func (r *ring) setTablets(newTablets []*TabletInfo) { + r.mu.Lock() + defer r.mu.Unlock() + + r.tabletList = newTablets +} diff --git a/vendor/github.com/gocql/gocql/scylla.go b/vendor/github.com/gocql/gocql/scylla.go index 7790a26eeb1..7dece242a32 100644 --- a/vendor/github.com/gocql/gocql/scylla.go +++ b/vendor/github.com/gocql/gocql/scylla.go @@ -51,8 +51,38 @@ func findCQLProtoExtByName(exts []cqlProtocolExtension, name string) cqlProtocol const ( lwtAddMetadataMarkKey = "SCYLLA_LWT_ADD_METADATA_MARK" rateLimitError = "SCYLLA_RATE_LIMIT_ERROR" + tabletsRoutingV1 = "TABLETS_ROUTING_V1" ) +// "tabletsRoutingV1" CQL Protocol Extension. +// This extension, if enabled (properly negotiated), allows the Scylla server +// to send tablet information in `custom_payload`. +// +// Implements cqlProtocolExtension interface. +type tabletsRoutingV1Ext struct { +} + +var _ cqlProtocolExtension = &tabletsRoutingV1Ext{} + +// Factory function to deserialize and create a `tabletsRoutingV1Ext` instance +// from SUPPORTED message payload. +func newTabletsRoutingV1Ext(supported map[string][]string) *tabletsRoutingV1Ext { + if _, found := supported[tabletsRoutingV1]; found { + return &tabletsRoutingV1Ext{} + } + return nil +} + +func (ext *tabletsRoutingV1Ext) serialize() map[string]string { + return map[string]string{ + tabletsRoutingV1: "", + } +} + +func (ext *tabletsRoutingV1Ext) name() string { + return tabletsRoutingV1 +} + // "Rate limit" CQL Protocol Extension. // This extension, if enabled (properly negotiated), allows Scylla server // to send a special kind of error.
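Note that `SetTablets` above is added to the exported `HostSelectionPolicy` interface, so this is a breaking change for out-of-tree policies: every implementation must now provide the method. A sketch of the minimal no-op a non-tablet-aware custom policy would need, mirroring the built-in round-robin and DC-aware policies; the embedding pattern is illustrative:

```go
package main

import "github.com/gocql/gocql"

// wrappedPolicy embeds an existing policy and satisfies the new
// (experimental) SetTablets requirement with a no-op, as the built-in
// non-token-aware policies above do.
type wrappedPolicy struct {
	gocql.HostSelectionPolicy
}

// SetTablets ignores tablet metadata; only token-aware routing uses it.
func (p *wrappedPolicy) SetTablets(tablets []*gocql.TabletInfo) {}
```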
@@ -243,6 +273,11 @@ func parseCQLProtocolExtensions(supported map[string][]string) []cqlProtocolExte exts = append(exts, rateLimitExt) } + tabletsExt := newTabletsRoutingV1Ext(supported) + if tabletsExt != nil { + exts = append(exts, tabletsExt) + } + return exts } @@ -265,6 +300,7 @@ func isScyllaConn(conn *Conn) bool { // in a round-robin fashion. type scyllaConnPicker struct { address string + hostId string shardAwareAddress string conns []*Conn excessConns []*Conn @@ -281,6 +317,7 @@ type scyllaConnPicker struct { func newScyllaConnPicker(conn *Conn) *scyllaConnPicker { addr := conn.Address() + hostId := conn.host.hostId if conn.scyllaSupported.nrShards == 0 { panic(fmt.Sprintf("scylla: %s not a sharded connection", addr)) @@ -305,6 +342,7 @@ func newScyllaConnPicker(conn *Conn) *scyllaConnPicker { return &scyllaConnPicker{ address: addr, + hostId: hostId, shardAwareAddress: shardAwareAddress, nrShards: conn.scyllaSupported.nrShards, msbIgnore: conn.scyllaSupported.msbIgnore, @@ -315,7 +353,7 @@ func newScyllaConnPicker(conn *Conn) *scyllaConnPicker { } } -func (p *scyllaConnPicker) Pick(t token) *Conn { +func (p *scyllaConnPicker) Pick(t token, keyspace string, table string) *Conn { if len(p.conns) == 0 { return nil } @@ -330,7 +368,39 @@ func (p *scyllaConnPicker) Pick(t token) *Conn { return nil } - idx := p.shardOf(mmt) + idx := -1 + + for _, conn := range p.conns { + if conn == nil { + continue + } + + conn.mu.Lock() + if conn.tabletsRoutingV1 { + tablets := conn.session.getTablets() + + // Search for tablets with Keyspace and Table from the Query + l, r := findTablets(tablets, keyspace, table) + + if l != -1 { + tablet := findTabletForToken(tablets, mmt, l, r) + + for _, replica := range tablet.replicas { + if replica.hostId.String() == p.hostId { + idx = replica.shardId + } + } + } + } + conn.mu.Unlock() + + break + } + + if idx == -1 { + idx = p.shardOf(mmt) + } + if c := p.conns[idx]; c != nil { // We have this shard's connection // so let's give it to the caller. 
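The shard-picking change above only pays off when the query path is token-aware end to end: the `TABLETS_ROUTING_V1` extension is negotiated automatically on connect, but replica selection consults the tablet list through the token-aware policy. A sketch of the corresponding cluster setup; the contact point reuses an address from the docker-compose file earlier in this diff and is otherwise illustrative:

```go
package main

import "github.com/gocql/gocql"

// newTabletAwareSession configures the token-aware policy that the
// tablet routing above hooks into.
func newTabletAwareSession() (*gocql.Session, error) {
	cluster := gocql.NewCluster("192.168.100.12") // illustrative contact point
	cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(gocql.RoundRobinHostPolicy())
	return cluster.CreateSession()
}
```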
diff --git a/vendor/github.com/gocql/gocql/session.go b/vendor/github.com/gocql/gocql/session.go index f3058669e3b..6bdfb88732b 100644 --- a/vendor/github.com/gocql/gocql/session.go +++ b/vendor/github.com/gocql/gocql/session.go @@ -83,6 +83,8 @@ type Session struct { isInitialized bool logger StdLogger + + tabletsRoutingV1 bool } var queryPool = &sync.Pool{ @@ -227,6 +229,9 @@ func (s *Session) init() error { if err := s.control.connect(hosts); err != nil { return err } + s.control.getConn().conn.mu.Lock() + s.tabletsRoutingV1 = s.control.getConn().conn.tabletsRoutingV1 + s.control.getConn().conn.mu.Unlock() if !s.cfg.DisableInitialHostLookup { var partitioner string @@ -243,6 +248,12 @@ func (s *Session) init() error { } hosts = filteredHosts + + if s.tabletsRoutingV1 { + tablets := []*TabletInfo{} + s.ring.setTablets(tablets) + s.policy.SetTablets(tablets) + } } } @@ -566,6 +577,19 @@ func (s *Session) KeyspaceMetadata(keyspace string) (*KeyspaceMetadata, error) { return s.schemaDescriber.getSchema(keyspace) } +// TabletsMetadata returns the metadata about tablets +// Experimental, this interface and use may change +func (s *Session) TabletsMetadata() (*TabletsMetadata, error) { + // fail fast + if s.Closed() { + return nil, ErrSessionClosed + } else if !s.tabletsRoutingV1 { + return nil, ErrTabletsNotUsed + } + + return s.schemaDescriber.getTabletsSchema(), nil +} + func (s *Session) getConn() *Conn { hosts := s.ring.allHosts() for _, host := range hosts { @@ -576,7 +600,7 @@ func (s *Session) getConn() *Conn { pool, ok := s.pool.getPool(host) if !ok { continue - } else if conn := pool.Pick(nil); conn != nil { + } else if conn := pool.Pick(nil, "", ""); conn != nil { return conn } } @@ -584,6 +608,14 @@ func (s *Session) getConn() *Conn { return nil } +// Experimental, this interface and use may change +func (s *Session) getTablets() []*TabletInfo { + s.ring.mu.Lock() + defer s.ring.mu.Unlock() + + return s.ring.tabletList +} + // returns routing key indexes and type info func (s *Session) routingKeyInfo(ctx context.Context, stmt string) (*routingKeyInfo, error) { s.routingKeyInfoCache.mu.Lock() @@ -1183,6 +1215,10 @@ func (q *Query) Table() string { return q.routingInfo.table } +func (q *Query) GetSession() *Session { + return q.session +} + // GetRoutingKey gets the routing key to use for routing this query. If // a routing key has not been explicitly set, then the routing key will // be constructed if possible using the keyspace's schema and the query @@ -1843,6 +1879,10 @@ func (b *Batch) Table() string { return b.routingInfo.table } +func (b *Batch) GetSession() *Session { + return b.session +} + // Attempts returns the number of attempts made to execute the batch. 
func (b *Batch) Attempts() int { return b.metrics.attempts() @@ -2347,6 +2387,7 @@ var ( ErrNoKeyspace = errors.New("no keyspace provided") ErrKeyspaceDoesNotExist = errors.New("keyspace does not exist") ErrNoMetadata = errors.New("no metadata available") + ErrTabletsNotUsed = errors.New("tablets not used") ) type ErrProtocol struct{ error } diff --git a/vendor/github.com/golang-jwt/jwt/v4/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore new file mode 100644 index 00000000000..09573e0169c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin +.idea/ + diff --git a/vendor/github.com/golang-jwt/jwt/v4/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE new file mode 100644 index 00000000000..35dbc252041 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md new file mode 100644 index 00000000000..32966f59818 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md @@ -0,0 +1,22 @@ +## Migration Guide (v4.0.0) + +Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be: + + "github.com/golang-jwt/jwt/v4" + +The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as +`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement; if you're having +trouble migrating, please open an issue. + +You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`. + +And then you'd typically run: + +``` +go get github.com/golang-jwt/jwt/v4 +go mod tidy +``` + +## Older releases (before v3.2.0) + +The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
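Returning to the gocql session changes above: `TabletsMetadata` is the read side of the tablet cache, and it fails fast with `ErrTabletsNotUsed` when the extension was never negotiated. A consumer-side sketch; the session handle is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/gocql/gocql"
)

// dumpTablets prints the experimental tablets metadata exposed above.
func dumpTablets(session *gocql.Session) error {
	meta, err := session.TabletsMetadata()
	if errors.Is(err, gocql.ErrTabletsNotUsed) {
		return nil // server does not route by tablets; nothing to print
	}
	if err != nil {
		return err
	}
	for _, t := range meta.Tablets {
		fmt.Printf("%s.%s tokens [%d, %d] on %d replicas\n",
			t.KeyspaceName, t.TableName, t.FirstToken, t.LastToken, len(t.Replicas))
	}
	return nil
}
```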
diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md new file mode 100644 index 00000000000..30f2f2a6f70 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -0,0 +1,138 @@ +# jwt-go + +[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). + +Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. +See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. + +> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. + + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least 1.15. See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +### Supported Go versions + +Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). +So we will support a major version of Go until there are two newer major releases. +We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities +which will not be fixed. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which signing method was used and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Currently supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Installation Guidelines + +1. 
To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed; then you can use the command below to add `jwt-go` as a dependency in your Go program. + +```sh +go get -u github.com/golang-jwt/jwt/v4 +``` + +2. Import it in your code: + +```go +import "github.com/golang-jwt/jwt/v4" +``` + +## Examples + +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage: + +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`, or provide a `jwt.Keyfunc`. + +A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs), or implementing additional standards. + +| Extension | Purpose | Repo | +| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ | +| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go | +| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms | +| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc | + +*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered a primary offering of any of the mentioned cloud providers. + +## Compliance + +This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). + +**BREAKING CHANGES:** +A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author.
This tells you exactly two things about the data: + +* The author of the token was in possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. The companion project https://github.com/golang-jwt/jwe aims to provide a (very) experimental implementation of the JWE standard. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster computationally, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt/v4#SigningMethodEd25519) (`EdDSA`) expects `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, because JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
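Tying together the key-type list above and the `alg` check urged by the security notice, here is a hedged sketch (the PEM input and the RS256 expectation are assumptions for illustration, not vendored code) of verifying a token with the matching key type:

```go
package main

import (
	"fmt"

	"github.com/golang-jwt/jwt/v4"
)

// verifyRS256 parses tokenString using an *rsa.PublicKey, rejecting any token
// whose alg header is not an RSA method before the key is ever used.
func verifyRS256(tokenString string, publicPEM []byte) (*jwt.Token, error) {
	key, err := jwt.ParseRSAPublicKeyFromPEM(publicPEM) // *rsa.PublicKey, as RS256 expects
	if err != nil {
		return nil, err
	}
	return jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodRSA); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return key, nil
	})
}
```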
### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the section above on signing methods and key types. + +## More + +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v4). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing, as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. + +[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt). diff --git a/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md new file mode 100644 index 00000000000..b08402c3427 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +As of February 2022 (and until this document is updated), the latest version `v4` is supported. + +## Reporting a Vulnerability + +If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or to one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit and describe steps to reproduce the security issue with code example(s). + +You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem. + +## Public Discussions + +Please avoid publicly discussing a potential security vulnerability. + +Let's take this offline and find a solution first; this limits the potential impact as much as possible. + +We appreciate your help! diff --git a/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md new file mode 100644 index 00000000000..afbfc4e408d --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md @@ -0,0 +1,135 @@ +## `jwt-go` Version History + +#### 4.0.0 + +* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`. + +#### 3.2.2 + +* Starting from this release, we are adopting the policy of supporting the two most recent major versions of Go currently available. At the time of this release, that is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). +* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and to @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). +* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). +* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
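As a hedged illustration of the EdDSA support introduced in 3.2.2 (key generation is inlined for brevity; the claims are placeholders, and this snippet is not part of the vendored files):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"

	"github.com/golang-jwt/jwt/v4"
)

func eddsaRoundTrip() (bool, error) {
	// Generate a throwaway Ed25519 key pair for the demonstration.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return false, err
	}

	// ed25519.PrivateKey satisfies crypto.Signer, which Sign expects.
	token := jwt.NewWithClaims(jwt.SigningMethodEdDSA, jwt.RegisteredClaims{Subject: "demo"})
	signed, err := token.SignedString(priv)
	if err != nil {
		return false, err
	}

	// Verification takes the ed25519.PublicKey.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return pub, nil
	})
	return parsed != nil && parsed.Valid, err
}
```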
+ +#### 3.2.1 + +* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code + * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` +* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160 + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. The initial set includes `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify the API in the future. + +#### 3.1.0 + +* Improvements to the `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to the `request` subpackage and its usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias for `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims`, which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to the error type bitmask + * Moved examples from the README to executable example files + * Signing method registry is now thread safe + * Added a new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or the json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation +* Added a more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added a new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. The result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved the sample private key for HMAC tests from an inline value to a file on disk. The value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed a bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in the RS256 implementation. No functional changes
+ +#### 1.0.1 + +* Fixed panic if the RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go new file mode 100644 index 00000000000..364cec8773c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -0,0 +1,269 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// Claims must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// RegisteredClaims are a structured version of the JWT Claims Set, +// restricted to Registered Claim Names, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 + +// +// This type can be used on its own, but then additional private and +// public claims embedded in the JWT will not be parsed. The typical use case +// therefore is to embed this in a user-defined claim type. +// +// See examples for how to use this with your own claim types. +type RegisteredClaims struct { + // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 + Issuer string `json:"iss,omitempty"` + + // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 + Subject string `json:"sub,omitempty"` + + // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 + Audience ClaimStrings `json:"aud,omitempty"` + + // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 + ExpiresAt *NumericDate `json:"exp,omitempty"` + + // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 + NotBefore *NumericDate `json:"nbf,omitempty"` + + // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c RegisteredClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := now.Sub(c.ExpiresAt.Time) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp).
+// If req is false, it will return true, if exp is unset. +func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { + if c.ExpiresAt == nil { + return verifyExp(nil, cmp, req) + } + + return verifyExp(&c.ExpiresAt.Time, cmp, req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { + if c.IssuedAt == nil { + return verifyIat(nil, cmp, req) + } + + return verifyIat(&c.IssuedAt.Time, cmp, req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { + if c.NotBefore == nil { + return verifyNbf(nil, cmp, req) + } + + return verifyNbf(&c.NotBefore.Time, cmp, req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// StandardClaims are a structured version of the JWT Claims Set, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the +// specification exactly, since they were based on an earlier draft of the +// specification and not updated. The main difference is that they only +// support integer-based date fields and singular audiences. This might lead to +// incompatibilities with other JWT implementations. The use of this is discouraged, instead +// the newer RegisteredClaims struct should be used. +// +// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = ErrTokenUsedBeforeIssued + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = ErrTokenNotValidYet + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp < exp). +// If req is false, it will return true, if exp is unset. 
+func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + if c.ExpiresAt == 0 { + return verifyExp(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.ExpiresAt, 0) + return verifyExp(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + if c.IssuedAt == 0 { + return verifyIat(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.IssuedAt, 0) + return verifyIat(&t, time.Unix(cmp, 0), req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + if c.NotBefore == 0 { + return verifyNbf(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.NotBefore, 0) + return verifyNbf(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp *time.Time, now time.Time, required bool) bool { + if exp == nil { + return !required + } + return now.Before(*exp) +} + +func verifyIat(iat *time.Time, now time.Time, required bool) bool { + if iat == nil { + return !required + } + return now.After(*iat) || now.Equal(*iat) +} + +func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { + if nbf == nil { + return !required + } + return now.After(*nbf) || now.Equal(*nbf) +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + return subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go new file mode 100644 index 00000000000..a86dc1a3b34 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go new file mode 100644 index 00000000000..eac023fc6c8 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go @@ -0,0 +1,142 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// SigningMethodECDSA implements the ECDSA family of signing methods. 
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { + return nil + } + + return ErrECDSAVerification +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. + out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. 
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go new file mode 100644 index 00000000000..5700636d35b --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") +) + +// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go new file mode 100644 index 00000000000..07d3aacd631 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go @@ -0,0 +1,85 @@ +package jwt + +import ( + "errors" + + "crypto" + "crypto/ed25519" + "crypto/rand" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// SigningMethodEd25519 implements the EdDSA family. +// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key crypto.Signer + var ok bool + + if ed25519Key, ok = key.(crypto.Signer); !ok { + return "", ErrInvalidKeyType + } + + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return "", err + } + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go new file mode 100644 index 00000000000..cdb5e68e876 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") +) + +// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key +func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go new file mode 100644 index 00000000000..10ac8835cc8 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -0,0 +1,112 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is 
invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") + + ErrTokenMalformed = errors.New("token is malformed") + ErrTokenUnverifiable = errors.New("token is unverifiable") + ErrTokenSignatureInvalid = errors.New("token signature is invalid") + + ErrTokenInvalidAudience = errors.New("token has invalid audience") + ErrTokenExpired = errors.New("token is expired") + ErrTokenUsedBeforeIssued = errors.New("token used before issued") + ErrTokenInvalidIssuer = errors.New("token has invalid issuer") + ErrTokenNotValidYet = errors.New("token is not valid yet") + ErrTokenInvalidId = errors.New("token has invalid id") + ErrTokenInvalidClaims = errors.New("token has invalid claims") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// NewValidationError is a helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// ValidationError represents an error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Error is the implementation of the err interface. +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// Unwrap gives errors.Is and errors.As access to the inner error. +func (e *ValidationError) Unwrap() error { + return e.Inner +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} + +// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message +// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use +// custom error messages (mainly for backwards compatability) and still leverage errors.Is using the global error variables. 
+func (e *ValidationError) Is(err error) bool { + // Check, if our inner error is a direct match + if errors.Is(errors.Unwrap(e), err) { + return true + } + + // Otherwise, we need to match using our error flags + switch err { + case ErrTokenMalformed: + return e.Errors&ValidationErrorMalformed != 0 + case ErrTokenUnverifiable: + return e.Errors&ValidationErrorUnverifiable != 0 + case ErrTokenSignatureInvalid: + return e.Errors&ValidationErrorSignatureInvalid != 0 + case ErrTokenInvalidAudience: + return e.Errors&ValidationErrorAudience != 0 + case ErrTokenExpired: + return e.Errors&ValidationErrorExpired != 0 + case ErrTokenUsedBeforeIssued: + return e.Errors&ValidationErrorIssuedAt != 0 + case ErrTokenInvalidIssuer: + return e.Errors&ValidationErrorIssuer != 0 + case ErrTokenNotValidYet: + return e.Errors&ValidationErrorNotValidYet != 0 + case ErrTokenInvalidId: + return e.Errors&ValidationErrorId != 0 + case ErrTokenInvalidClaims: + return e.Errors&ValidationErrorClaimsInvalid != 0 + } + + return false +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go new file mode 100644 index 00000000000..011f68a2744 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// SigningMethodHMAC implements the HMAC-SHA family of signing methods. +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Sign implements token signing for the SigningMethod. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go new file mode 100644 index 00000000000..2700d64a0d0 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -0,0 +1,151 @@ +package jwt + +import ( + "encoding/json" + "errors" + "time" +) + +// MapClaims is a claims type that uses map[string]interface{} for JSON decoding. +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + var aud []string + switch v := m["aud"].(type) { + case string: + aud = append(aud, v) + case []string: + aud = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return false + } + aud = append(aud, vs) + } + } + return verifyAud(aud, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// If req is false, it will return true, if exp is unset. +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["exp"] + if !ok { + return !req + } + + switch exp := v.(type) { + case float64: + if exp == 0 { + return verifyExp(nil, cmpTime, req) + } + + return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req) + case json.Number: + v, _ := exp.Float64() + + return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["iat"] + if !ok { + return !req + } + + switch iat := v.(type) { + case float64: + if iat == 0 { + return verifyIat(nil, cmpTime, req) + } + + return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req) + case json.Number: + v, _ := iat.Float64() + + return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["nbf"] + if !ok { + return !req + } + + switch nbf := v.(type) { + case float64: + if nbf == 0 { + return verifyNbf(nil, cmpTime, req) + } + + return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req) + case json.Number: + v, _ := nbf.Float64() + + return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew.
+// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if !m.VerifyExpiresAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenExpired + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if !m.VerifyIssuedAt(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !m.VerifyNotBefore(now, false) { + // TODO(oxisto): this should be replaced with ErrTokenNotValidYet + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go new file mode 100644 index 00000000000..f19835d2078 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/none.go @@ -0,0 +1,52 @@ +package jwt + +// SigningMethodNone implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go new file mode 100644 index 00000000000..c0a6f692791 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -0,0 +1,177 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + // If populated, only these methods will be considered valid. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + ValidMethods []string + + // Use JSON Number format in JSON decoder. 
+ // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + UseJSONNumber bool + + // Skip claims validation during token parsing. + // + // Deprecated: In future releases, this field will not be exported anymore and should be set with an option to NewParser instead. + SkipClaimsValidation bool +} + +// NewParser creates a new Parser with the specified options +func NewParser(options ...ParserOption) *Parser { + p := &Parser{} + + // loop through our parsing options and apply them + for _, option := range options { + option(p) + } + + return p +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the key for validating. +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims +// interface. This provides default values which can be overridden and allows a caller to use their own type, rather +// than the default MapClaims implementation of Claims. +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// ParseUnverified parses the token but doesn't validate the signature. 
+// +// WARNING: Don't use this method unless you know what you're doing. +// +// It's only ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser_option.go b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go new file mode 100644 index 00000000000..6ea6f9527de --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser_option.go @@ -0,0 +1,29 @@ +package jwt + +// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add +// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that +// takes a *Parser type as input and manipulates its configuration accordingly. +type ParserOption func(*Parser) + +// WithValidMethods is an option to supply algorithm methods that the parser will check. Only those methods will be considered valid. +// It is heavily encouraged to use this option in order to prevent attacks such as https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/. +func WithValidMethods(methods []string) ParserOption { + return func(p *Parser) { + p.ValidMethods = methods + } +} + +// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber +func WithJSONNumber() ParserOption { + return func(p *Parser) { + p.UseJSONNumber = true + } +} + +// WithoutClaimsValidation is an option to disable claims validation. 
This option should only be used if you know exactly +// what you are doing. +func WithoutClaimsValidation() ParserOption { + return func(p *Parser) { + p.SkipClaimsValidation = true + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go new file mode 100644 index 00000000000..b910b19c0b5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSA implements the RSA family of signing methods. +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name } + +// Verify implements token verification for the SigningMethod. +// For this signing method, key must be an *rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go new file mode 100644 index 00000000000..4fd6f9e610b --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go @@ -0,0 +1,143 @@ +//go:build go1.4 +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSAPSS implements the RSA-PSS family of signing methods. +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for PS256 and company. +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Verify implements token verification for the SigningMethod. +// For this verify method, key must be an *rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go new file mode 100644 index 00000000000..1966c450bf8 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go @@ -0,0 +1,105 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") +) + +// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with a password +// +// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock +// function, which was deprecated because RFC 1423 is regarded as insecure by design. Unfortunately, there is no alternative +// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
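The PEM helpers above pair with the RS/PS signing methods in the obvious way. A sketch, assuming a hypothetical private.pem file containing a PKCS1 or PKCS8 RSA private key (swap SigningMethodRS256 for SigningMethodPS256 to get RSA-PSS with the same key material):

```go
package main

import (
	"fmt"
	"log"
	"os"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	// Hypothetical file path; any PKCS1/PKCS8 RSA private key PEM works.
	privPEM, err := os.ReadFile("private.pem")
	if err != nil {
		log.Fatal(err)
	}
	priv, err := jwt.ParseRSAPrivateKeyFromPEM(privPEM)
	if err != nil {
		log.Fatal(err)
	}

	// SigningMethodRS256 signs with the *rsa.PrivateKey.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "demo"}).SignedString(priv)
	if err != nil {
		log.Fatal(err)
	}

	// Verification uses the corresponding *rsa.PublicKey.
	token, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &priv.PublicKey, nil
	}, jwt.WithValidMethods([]string{"RS256"}))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(token.Valid) // true
}
```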
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPublicKeyFromPEM parses a PEM encoded public key (PKIX or an X.509 certificate) +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go new file mode 100644 index 00000000000..241ae9c60d0 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go @@ -0,0 +1,46 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// SigningMethod can be used to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// RegisterSigningMethod registers the "alg" name and a factory function for a signing method.
+// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// GetSigningMethod retrieves a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} + +// GetAlgorithms returns a list of registered "alg" names +func GetAlgorithms() (algs []string) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + for alg := range signingMethods { + algs = append(algs, alg) + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf new file mode 100644 index 00000000000..53745d51d7c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go new file mode 100644 index 00000000000..786b275ce03 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -0,0 +1,143 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515 +// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations +// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global +// variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe. +// To use the non-recommended decoding, set this boolean to `true` prior to using this package. +var DecodePaddingAllowed bool + +// DecodeStrict will switch the codec used for decoding JWTs into strict mode. +// In this mode, the decoder requires that trailing padding bits are zero, as described in RFC 4648 section 3.5. +// Note that this is a global variable, and updating it will change the behavior on a package level, and is also NOT go-routine safe. +// To use strict decoding, set this boolean to `true` prior to using this package. +var DecodeStrict bool + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Keyfunc will be used by the Parse methods as a callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// Token represents a JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? 
Populated when you Parse/Verify a token +} + +// New creates a new Token with the specified signing method and an empty map of claims. +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +// NewWithClaims creates a new Token with the specified signing method and claims. +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// SignedString creates and returns a complete, signed JWT. +// The token is signed using the SigningMethod specified in the token. +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// SigningString generates the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + var jsonValue []byte + + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + header := EncodeSegment(jsonValue) + + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + claim := EncodeSegment(jsonValue) + + return strings.Join([]string{header, claim}, "."), nil +} + +// Parse parses, validates, verifies the signature and returns the parsed token. +// keyFunc will receive the parsed token and should return the cryptographic key +// for verifying the signature. +// The caller is strongly encouraged to set the WithValidMethods option to +// validate the 'alg' claim in the token matches the expected algorithm. +// For more details about the importance of validating the 'alg' claim, +// see https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ +func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).Parse(tokenString, keyFunc) +} + +// ParseWithClaims is a shortcut for NewParser().ParseWithClaims(). +// +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), +// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the +// proper memory for it before passing in the overall claims, otherwise you might run into a panic. 
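The split between SigningString, the method's Sign, and SignedString described above can be made concrete. A sketch with a placeholder HMAC key; since HS256 is deterministic, the manually assembled token matches SignedString's output:

```go
package main

import (
	"fmt"
	"log"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	key := []byte("placeholder-key")
	token := jwt.New(jwt.SigningMethodHS256)

	// SignedString is SigningString plus the method's Sign, joined with '.'.
	sstr, err := token.SigningString()
	if err != nil {
		log.Fatal(err)
	}
	sig, err := token.Method.Sign(sstr, key)
	if err != nil {
		log.Fatal(err)
	}
	manual := sstr + "." + sig

	auto, err := token.SignedString(key)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(manual == auto) // true
}
```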
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) { + return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc) +} + +// EncodeSegment encodes a segment using JWT-specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func EncodeSegment(seg []byte) string { + return base64.RawURLEncoding.EncodeToString(seg) +} + +// DecodeSegment decodes a segment encoded with JWT-specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func DecodeSegment(seg string) ([]byte, error) { + encoding := base64.RawURLEncoding + + if DecodePaddingAllowed { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + encoding = base64.URLEncoding + } + + if DecodeStrict { + encoding = encoding.Strict() + } + return encoding.DecodeString(seg) +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go new file mode 100644 index 00000000000..ac8e140eb11 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -0,0 +1,145 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" + "time" +) + +// TimePrecision sets the precision of times and dates within this library. +// This has an influence on the precision of times when comparing expiry or +// other related time fields. Furthermore, it is also the precision of times +// when serializing. +// +// For backwards compatibility the default precision is set to seconds, so that +// no fractional timestamps are generated. +var TimePrecision = time.Second + +// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially +// its MarshalJSON function. +// +// If it is set to true (the default), it will always serialize the type as an +// array of strings, even if it just contains one element, defaulting to the behaviour +// of the underlying []string. If it is set to false, it will serialize to a single +// string, if it contains one element. Otherwise, it will serialize to an array of strings. +var MarshalSingleStringAsArray = true + +// NumericDate represents a JSON numeric date value, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-2. +type NumericDate struct { + time.Time +} + +// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. +// It will truncate the timestamp according to the precision specified in TimePrecision. +func NewNumericDate(t time.Time) *NumericDate { + return &NumericDate{t.Truncate(TimePrecision)} +} + +// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a +// UNIX epoch with the float fraction representing non-integer seconds. +func newNumericDateFromSeconds(f float64) *NumericDate { + round, frac := math.Modf(f) + return NewNumericDate(time.Unix(int64(round), int64(frac*1e9))) +} + +// MarshalJSON is an implementation of the json.Marshaler interface and serializes the UNIX epoch +// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
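How TimePrecision and NumericDate interact when serializing, as a sketch (the timestamp is arbitrary, and note that TimePrecision is package-global state):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	jwt "github.com/golang-jwt/jwt/v4"
)

func main() {
	t := time.Date(2024, 3, 1, 12, 0, 0, 123456789, time.UTC)

	// Default precision is one second: the fraction is truncated away.
	b, _ := json.Marshal(jwt.NewNumericDate(t))
	fmt.Println(string(b)) // 1709294400

	// Opting into millisecond precision yields fractional timestamps.
	// TimePrecision is a package-level variable, so changing it is not
	// goroutine safe.
	jwt.TimePrecision = time.Millisecond
	defer func() { jwt.TimePrecision = time.Second }()

	b, _ = json.Marshal(jwt.NewNumericDate(t))
	fmt.Println(string(b)) // 1709294400.123
}
```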
+func (date NumericDate) MarshalJSON() (b []byte, err error) { + var prec int + if TimePrecision < time.Second { + prec = int(math.Log10(float64(time.Second) / float64(TimePrecision))) + } + truncatedDate := date.Truncate(TimePrecision) + + // For very large timestamps, UnixNano would overflow an int64, but this + // function requires nanosecond level precision, so we have to use the + // following technique to get round the issue: + // 1. Take the normal unix timestamp to form the whole number part of the + // output, + // 2. Take the result of the Nanosecond function, which returns the offset + // within the second of the particular unix time instance, to form the + // decimal part of the output + // 3. Concatenate them to produce the final result + seconds := strconv.FormatInt(truncatedDate.Unix(), 10) + nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64) + + output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...) + + return output, nil +} + +// UnmarshalJSON is an implementation of the json.Unmarshaler interface and deserializes a +// NumericDate from a JSON representation, i.e. a json.Number. This number represents a UNIX epoch +// with either integer or non-integer seconds. +func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { + var ( + number json.Number + f float64 + ) + + if err = json.Unmarshal(b, &number); err != nil { + return fmt.Errorf("could not parse NumericDate: %w", err) + } + + if f, err = number.Float64(); err != nil { + return fmt.Errorf("could not convert json number value to float: %w", err) + } + + n := newNumericDateFromSeconds(f) + *date = *n + + return nil +} + +// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string. +// This type is necessary, since the "aud" claim can either be a single string or an array. +type ClaimStrings []string + +func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { + var value interface{} + + if err = json.Unmarshal(data, &value); err != nil { + return err + } + + var aud []string + + switch v := value.(type) { + case string: + aud = append(aud, v) + case []string: + aud = ClaimStrings(v) + case []interface{}: + for _, vv := range v { + vs, ok := vv.(string) + if !ok { + return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + } + aud = append(aud, vs) + } + case nil: + return nil + default: + return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } + + *s = aud + + return +} + +func (s ClaimStrings) MarshalJSON() (b []byte, err error) { + // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field, + // only contains one element, it MAY be serialized as a single string. This may or may not be + // desired based on the ecosystem of other JWT libraries used, so we make it configurable by the + // variable MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray { + return json.Marshal(s[0]) + } + + return json.Marshal([]string(s)) +} diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json new file mode 100644 index 00000000000..feb372228b4 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + "v2": "2.12.3" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md new file mode 100644 index 00000000000..0d019d97fd3 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -0,0 +1,128 @@ +# Changelog + +## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14) + + +### Bug Fixes + +* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2)) + +## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23) + + +### Bug Fixes + +* **v2/callctx:** fix SetHeader race by cloning header map ([#326](https://github.com/googleapis/gax-go/issues/326)) ([534311f](https://github.com/googleapis/gax-go/commit/534311f0f163d101f30657736c0e6f860e9c39dc)) + +## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13) + + +### Bug Fixes + +* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe)) + +## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26) + + +### Features + +* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68)) +* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1)) + +## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13) + + +### Features + +* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5)) + + +### Bug Fixes + +* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75)) + +## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30) + + +### Features + +* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21)) + +## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23) + + +### Bug Fixes + +* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270) + +## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22) + + +### Features + +* **apierror:** add method to return HTTP status code 
conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229) + + +### Documentation + +* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228) + +## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15) + + +### Features + +* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1)) + +## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06) + + +### Bug Fixes + +* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254) + +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + +## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) + + +### Features + +* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c)) + + +### Bug Fixes + +* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e)) + + +### Miscellaneous Chores + +* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719)) diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE new file mode 100644 index 
00000000000..6d16b6578a2 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go new file mode 100644 index 00000000000..d785a065cab --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -0,0 +1,361 @@ +// Copyright 2021, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package apierror implements a wrapper error for parsing error details from +// API calls. 
Both HTTP & gRPC status errors are supported. +// +// For examples of how to use [APIError] with client libraries, please reference +// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors) +// in the client library documentation. +package apierror + +import ( + "errors" + "fmt" + "strings" + + jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +// ErrDetails holds the google/rpc/error_details.proto messages. +type ErrDetails struct { + ErrorInfo *errdetails.ErrorInfo + BadRequest *errdetails.BadRequest + PreconditionFailure *errdetails.PreconditionFailure + QuotaFailure *errdetails.QuotaFailure + RetryInfo *errdetails.RetryInfo + ResourceInfo *errdetails.ResourceInfo + RequestInfo *errdetails.RequestInfo + DebugInfo *errdetails.DebugInfo + Help *errdetails.Help + LocalizedMessage *errdetails.LocalizedMessage + + // Unknown stores unidentifiable error details. + Unknown []interface{} +} + +// ErrMessageNotFound is used to signal that ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no messages matching the +// protocol buffer type of the provided message. +func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + +func (e ErrDetails) String() string { + var d strings.Builder + if e.ErrorInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n", + e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata())) + } + + if e.BadRequest != nil { + v := e.BadRequest.GetFieldViolations() + var f []string + var desc []string + for _, x := range v { + f = append(f, x.GetField()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n", + strings.Join(f, " "), strings.Join(desc, " "))) + } + + if e.PreconditionFailure != nil { + v := e.PreconditionFailure.GetViolations() + var t []string + var s []string + var desc []string + for _, x := range v { + t = append(t, x.GetType()) + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "), + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if e.QuotaFailure != nil { + v := e.QuotaFailure.GetViolations() + var s []string + var desc []string + for _, x := range v { + s = append(s, x.GetSubject()) + desc = append(desc, x.GetDescription()) + } + d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n", + strings.Join(s, " "), strings.Join(desc, " "))) + } + + if e.RequestInfo != nil { +
d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n", + e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData())) + } + + if e.ResourceInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n", + e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(), + e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription())) + + } + if e.RetryInfo != nil { + d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration())) + + } + if e.Unknown != nil { + var s []string + for _, x := range e.Unknown { + s = append(s, fmt.Sprintf("%v", x)) + } + d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " "))) + } + + if e.DebugInfo != nil { + d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(), + strings.Join(e.DebugInfo.GetStackEntries(), " "))) + } + if e.Help != nil { + var desc []string + var url []string + for _, x := range e.Help.Links { + desc = append(desc, x.GetDescription()) + url = append(url, x.GetUrl()) + } + d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n", + strings.Join(desc, " "), strings.Join(url, " "))) + } + if e.LocalizedMessage != nil { + d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n", + e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage())) + } + + return d.String() +} + +// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It +// implements error and Status interfaces. +type APIError struct { + err error + status *status.Status + httpErr *googleapi.Error + details ErrDetails +} + +// Details presents the error details of the APIError. +func (a *APIError) Details() ErrDetails { + return a.details +} + +// Unwrap extracts the original error. +func (a *APIError) Unwrap() error { + return a.err +} + +// Error returns a readable representation of the APIError. +func (a *APIError) Error() string { + var msg string + if a.httpErr != nil { + // Truncate the googleapi.Error message because it dumps the Details in + // an ugly way. + msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message) + } else if a.status != nil { + msg = a.err.Error() + } + return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details)) +} + +// GRPCStatus extracts the underlying gRPC Status error. +// This method is necessary to fulfill the interface +// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError. +func (a *APIError) GRPCStatus() *status.Status { + return a.status +} + +// Reason returns the reason in an ErrorInfo. +// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Reason() string { + return a.details.ErrorInfo.GetReason() +} + +// Domain returns the domain in an ErrorInfo. +// If ErrorInfo is nil, it returns an empty string. +func (a *APIError) Domain() string { + return a.details.ErrorInfo.GetDomain() +} + +// Metadata returns the metadata in an ErrorInfo. +// If ErrorInfo is nil, it returns nil. +func (a *APIError) Metadata() map[string]string { + return a.details.ErrorInfo.GetMetadata() + +} + +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. 
+// When err is a googleapi.Error, the status of the returned error will +// be set to an Unknown error, rather than nil, since a nil code is +// interpreted as OK in the gRPC status package. +func (a *APIError) setDetailsFromError(err error) bool { + st, isStatus := status.FromError(err) + var herr *googleapi.Error + isHTTPErr := errors.As(err, &herr) + + switch { + case isStatus: + a.status = st + a.details = parseDetails(st.Details()) + case isHTTPErr: + a.httpErr = herr + a.details = parseHTTPDetails(herr) + a.status = status.New(codes.Unknown, herr.Message) + default: + return false + } + return true +} + +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} + +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. +func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true +} + +// parseDetails accepts a slice of interface{} that should be backed by some +// sort of proto.Message that can be cast to the google/rpc/error_details.proto +// types. +// +// This is for internal use only. +func parseDetails(details []interface{}) ErrDetails { + var ed ErrDetails + for _, d := range details { + switch d := d.(type) { + case *errdetails.ErrorInfo: + ed.ErrorInfo = d + case *errdetails.BadRequest: + ed.BadRequest = d + case *errdetails.PreconditionFailure: + ed.PreconditionFailure = d + case *errdetails.QuotaFailure: + ed.QuotaFailure = d + case *errdetails.RetryInfo: + ed.RetryInfo = d + case *errdetails.ResourceInfo: + ed.ResourceInfo = d + case *errdetails.RequestInfo: + ed.RequestInfo = d + case *errdetails.DebugInfo: + ed.DebugInfo = d + case *errdetails.Help: + ed.Help = d + case *errdetails.LocalizedMessage: + ed.LocalizedMessage = d + default: + ed.Unknown = append(ed.Unknown, d) + } + } + + return ed +} + +// parseHTTPDetails will convert the given googleapi.Error into the protobuf +// representation then parse the Any values that contain the error details. +// +// This is for internal use only. +func parseHTTPDetails(gae *googleapi.Error) ErrDetails { + e := &jsonerror.Error{} + if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil { + // If the error body does not conform to the error schema, ignore it + // altogether. See https://cloud.google.com/apis/design/errors#http_mapping. + return ErrDetails{} + } + + // Coerce the Any messages into proto.Message then parse the details. + details := []interface{}{} + for _, any := range e.GetError().GetDetails() { + m, err := any.UnmarshalNew() + if err != nil { + // Ignore malformed Any values. + continue + } + details = append(details, m) + } + + return parseDetails(details) +} + +// HTTPCode returns the underlying HTTP response status code. This method returns +// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To +// check gRPC error codes use [google.golang.org/grpc/status.Code].
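For downstream callers, the entry points above are typically combined like this. A sketch; in real code err would come from a Google client-library call rather than fmt.Errorf:

```go
package main

import (
	"fmt"

	"github.com/googleapis/gax-go/v2/apierror"
)

// report demonstrates the FromError / Details / HTTPCode surface; err would
// normally be the error returned by a client-library operation.
func report(err error) {
	ae, ok := apierror.FromError(err)
	if !ok {
		fmt.Println("not an API error:", err)
		return
	}
	fmt.Println("reason:", ae.Reason())      // from ErrorInfo, may be ""
	fmt.Println("http code:", ae.HTTPCode()) // -1 for gRPC-status errors
	if bad := ae.Details().BadRequest; bad != nil {
		for _, v := range bad.GetFieldViolations() {
			fmt.Println("field violation:", v.GetField(), v.GetDescription())
		}
	}
}

func main() {
	report(fmt.Errorf("plain error")) // not an API error
}
```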
+func (a *APIError) HTTPCode() int { + if a.httpErr == nil { + return -1 + } + return a.httpErr.Code +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md new file mode 100644 index 00000000000..9ff0caea946 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md @@ -0,0 +1,30 @@ +# HTTP JSON Error Schema + +The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey +error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping. +This package is for internal parsing logic only and should not be used in any +other context. + +## Regeneration + +To regenerate the protobuf Go code you will need the following: + +* A local copy of [googleapis], the absolute path to which should be exported to +the environment variable `GOOGLEAPIS` +* The protobuf compiler [protoc] +* The Go [protobuf plugin] +* The [goimports] tool + +From this directory run the following command: +```sh +protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto +goimports -w . +``` + +Note: the `module` plugin option ensures the generated code is placed in this +directory, and not in several nested directories defined by `go_package` option. + +[googleapis]: https://github.com/googleapis/googleapis +[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation +[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated +[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports \ No newline at end of file diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 00000000000..e4b03f161d8 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. 
+ CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. +func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. 
+func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: 
error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 00000000000..21678ae65c9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. 
+ string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go new file mode 100644 index 00000000000..7dd9b83739a --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go @@ -0,0 +1,280 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.15.8 +// source: apierror/internal/proto/error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + code "google.golang.org/genproto/googleapis/rpc/code" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The error format v2 for Google JSON REST APIs. +// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +type Error struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. + Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *Error) Reset() { + *x = Error{} + if protoimpl.UnsafeEnabled { + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Error) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error) ProtoMessage() {} + +func (x *Error) ProtoReflect() protoreflect.Message { + mi := &file_apierror_internal_proto_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error.ProtoReflect.Descriptor instead. +func (*Error) Descriptor() ([]byte, []int) { + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0} +} + +func (x *Error) GetError() *Error_Status { + if x != nil { + return x.Error + } + return nil +} + +// This message has the same semantics as `google.rpc.Status`. It uses HTTP +// status code instead of gRPC status code. It has an extra field `status` +// for backward compatibility with Google API Client Libraries. 
+type Error_Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP status code that corresponds to `google.rpc.Status.code`. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // This corresponds to `google.rpc.Status.message`. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // This is the enum version for `google.rpc.Status.code`. + Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"` + // This corresponds to `google.rpc.Status.details`. + Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` +} + +func (x *Error_Status) Reset() { + *x = Error_Status{} + if protoimpl.UnsafeEnabled { + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Error_Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Error_Status) ProtoMessage() {} + +func (x *Error_Status) ProtoReflect() protoreflect.Message { + mi := &file_apierror_internal_proto_error_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead. +func (*Error_Status) Descriptor() ([]byte, []int) { + return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Error_Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Error_Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Error_Status) GetStatus() code.Code { + if x != nil { + return x.Status + } + return code.Code(0) +} + +func (x *Error_Status) GetDetails() []*anypb.Any { + if x != nil { + return x.Details + } + return nil +} + +var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor + +var file_apierror_internal_proto_error_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, + 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x0e, 0x32, 0x10, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, + 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_apierror_internal_proto_error_proto_rawDescOnce sync.Once + file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc +) + +func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte { + file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() { + file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData) + }) + return file_apierror_internal_proto_error_proto_rawDescData +} + +var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_apierror_internal_proto_error_proto_goTypes = []interface{}{ + (*Error)(nil), // 0: error.Error + (*Error_Status)(nil), // 1: error.Error.Status + (code.Code)(0), // 2: google.rpc.Code + (*anypb.Any)(nil), // 3: google.protobuf.Any +} +var file_apierror_internal_proto_error_proto_depIdxs = []int32{ + 1, // 0: error.Error.error:type_name -> error.Error.Status + 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code + 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_apierror_internal_proto_error_proto_init() } +func file_apierror_internal_proto_error_proto_init() { + if File_apierror_internal_proto_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Error); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Error_Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_apierror_internal_proto_error_proto_goTypes, + DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs, + MessageInfos: 
file_apierror_internal_proto_error_proto_msgTypes, + }.Build() + File_apierror_internal_proto_error_proto = out.File + file_apierror_internal_proto_error_proto_rawDesc = nil + file_apierror_internal_proto_error_proto_goTypes = nil + file_apierror_internal_proto_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto new file mode 100644 index 00000000000..4b9b13ce111 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto @@ -0,0 +1,46 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +import "google/protobuf/any.proto"; +import "google/rpc/code.proto"; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + +// The error format v2 for Google JSON REST APIs. +// Copied from https://cloud.google.com/apis/design/errors#http_mapping. +// +// NOTE: This schema is not used for other wire protocols. +message Error { + // This message has the same semantics as `google.rpc.Status`. It uses HTTP + // status code instead of gRPC status code. It has an extra field `status` + // for backward compatibility with Google API Client Libraries. + message Status { + // The HTTP status code that corresponds to `google.rpc.Status.code`. + int32 code = 1; + // This corresponds to `google.rpc.Status.message`. + string message = 2; + // This is the enum version for `google.rpc.Status.code`. + google.rpc.Code status = 4; + // This corresponds to `google.rpc.Status.details`. + repeated google.protobuf.Any details = 5; + } + // The actual error payload. The nested message structure is for backward + // compatibility with Google API client libraries. It also makes the error + // more readable to developers. + Status error = 1; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go new file mode 100644 index 00000000000..c52e03f6436 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -0,0 +1,265 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "errors" + "math/rand" + "time" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retried and how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. + Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnErrorFunc returns a Retryer that retries if and only if the previous attempt +// returns an error that satisfies shouldRetry. +// +// Pause times between retries are specified by bo. bo is only used for its +// parameters; each Retryer has its own copy. +func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer { + return &errorRetryer{ + shouldRetry: shouldRetry, + backoff: bo, + } +} + +type errorRetryer struct { + backoff Backoff + shouldRetry func(err error) bool +} + +func (r *errorRetryer) Retry(err error) (time.Duration, bool) { + if r.shouldRetry(err) { + return r.backoff.Pause(), true + } + + return 0, false +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// OnHTTPCodes returns a Retryer that retries if and only if +// the previous attempt returns a googleapi.Error whose status code is stored in +// cc. Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. 
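For illustration, the retry pieces above compose like the following minimal sketch (assumptions: this package imported as gax; Backoff is the type defined just below; ping stands in for a hypothetical RPC attempt):

package example

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
)

// callWithRetry retries Unavailable responses with jittered exponential backoff.
func callWithRetry(ctx context.Context, ping func(context.Context) error) error {
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        5 * time.Second,
			Multiplier: 2,
		})
	})
	// Invoke (defined later in this diff) drives the attempt loop.
	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		return ping(ctx)
	}, retry)
}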
+func OnHTTPCodes(bo Backoff, cc ...int) Retryer { + codes := make(map[int]bool, len(cc)) + for _, c := range cc { + codes[c] = true + } + + return &httpRetryer{ + backoff: bo, + codes: codes, + } +} + +type httpRetryer struct { + backoff Backoff + codes map[int]bool +} + +func (r *httpRetryer) Retry(err error) (time.Duration, bool) { + var gerr *googleapi.Error + if !errors.As(err, &gerr) { + return 0, false + } + + if r.codes[gerr.Code] { + return r.backoff.Pause(), true + } + + return 0, false +} + +// Backoff implements exponential backoff. The wait time between retries is a +// random value between 0 and the "retry period" - the time between retries. The +// retry period starts at Initial and increases by the factor of Multiplier +// every retry, but is capped at Max. +// +// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should +// be built on top of Backoff. +type Backoff struct { + // Initial is the initial value of the retry period, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry period, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry period increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry period. + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy. + d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +type pathOpt struct { + p string +} + +func (p pathOpt) Resolve(s *CallSettings) { + s.Path = p.p +} + +type timeoutOpt struct { + t time.Duration +} + +func (t timeoutOpt) Resolve(s *CallSettings) { + s.timeout = t.t +} + +// WithPath applies a Path override to the HTTP-based APICall. +// +// This is for internal use only. +func WithPath(p string) CallOption { + return &pathOpt{p: p} +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// WithTimeout is a convenience option for setting a context.WithTimeout on the +// singular context.Context used for **all** APICall attempts. Calculated from +// the start of the first APICall attempt. +// If the context.Context provided to Invoke already has a Deadline set, that +// will always be respected over the deadline calculated using this option. +func WithTimeout(t time.Duration) CallOption { + return &timeoutOpt{t: t} +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. 
+ GRPC []grpc.CallOption + + // Path is an HTTP override for an APICall. + Path string + + // Timeout defines the amount of time that Invoke has to complete. + // Unexported so it cannot be changed by the code in an APICall. + timeout time.Duration +} diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go new file mode 100644 index 00000000000..f5af5c990f9 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go @@ -0,0 +1,100 @@ +// Copyright 2023, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package callctx provides helpers for storing and retrieving values out of +// [context.Context]. These values are used by our client libraries in various +// ways across the stack. +package callctx + +import ( + "context" + "fmt" +) + +const ( + // XGoogFieldMaskHeader is the canonical header key for the [System Parameter] + // that specifies the response read mask. The value(s) for this header + // must adhere to format described in [fieldmaskpb]. + // + // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters + // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb + XGoogFieldMaskHeader = "x-goog-fieldmask" + + headerKey = contextKey("header") +) + +// contextKey is a private type used to store/retrieve context values. +type contextKey string + +// HeadersFromContext retrieves headers set from [SetHeaders]. These headers +// can then be cast to http.Header or metadata.MD to send along on requests. +func HeadersFromContext(ctx context.Context) map[string][]string { + m, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + return nil + } + return m +} + +// SetHeaders stores key value pairs in the returned context that can later +// be retrieved by [HeadersFromContext]. Values stored in this manner will +// automatically be retrieved by client libraries and sent as outgoing headers +// on all requests. keyvals should have a corresponding value for every key +// provided. 
If there is an odd number of keyvals this method will panic. +func SetHeaders(ctx context.Context, keyvals ...string) context.Context { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + h, ok := ctx.Value(headerKey).(map[string][]string) + if !ok { + h = make(map[string][]string) + } else { + h = cloneHeaders(h) + } + + for i := 0; i < len(keyvals); i = i + 2 { + h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1]) + } + return context.WithValue(ctx, headerKey, h) +} + +// cloneHeaders makes a new key-value map, copying each value slice so that +// appends on the clone cannot clobber the original. Modifying existing +// indexed values is still not thread safe. +// +// TODO: Replace this with maps.Clone when Go 1.21 is the minimum version. +func cloneHeaders(h map[string][]string) map[string][]string { + c := make(map[string][]string, len(h)) + for k, v := range h { + vc := make([]string, len(v)) + copy(vc, v) + c[k] = vc + } + return c +} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 00000000000..1b53d0a3ac1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "io" + "io/ioutil" + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff.
+} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetermineContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. + if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 00000000000..36cdfa33e35 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,41 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +import "github.com/googleapis/gax-go/v2/internal" + +// Version specifies the gax-go version being used. +const Version = internal.Version diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 00000000000..3e53729e5fc --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,173 @@ +// Copyright 2018, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "bytes" + "context" + "fmt" + "net/http" + "runtime" + "strings" + "unicode" + + "github.com/googleapis/gax-go/v2/callctx" + "google.golang.org/grpc/metadata" +) + +var ( + // GoVersion is a header-safe representation of the current runtime + // environment's Go version. This is for GAX consumers that need to + // report the Go runtime version in API calls. + GoVersion string + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. 
+const versionUnknown = "UNKNOWN" + +func init() { + GoVersion = goVersion() +} + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. + if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return versionUnknown +} + +// XGoogHeader is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} + +// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries +// only. See package [github.com/googleapis/gax-go/v2/callctx] for help +// setting/retrieving request/response headers. +// +// InsertMetadataIntoOutgoingContext returns a new context that merges the +// provided keyvals metadata pairs with any existing metadata/headers in the +// provided context. keyvals should have a corresponding value for every key +// provided. If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values. +func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context { + return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...)) +} + +// BuildHeaders is for use by the Google Cloud Libraries only. See package +// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving +// request/response headers. +// +// BuildHeaders returns a new http.Header that merges the provided +// keyvals header pairs with any existing metadata/headers in the provided +// context. keyvals should have a corresponding value for every key provided. +// If there is an odd number of keyvals this method will panic. +// Existing values for keys will not be overwritten, instead provided values +// will be appended to the list of existing values.
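As a short sketch of how the header helpers combine with package callctx (assumptions: imports aliased as gax and callctx; BuildHeaders is the function defined just below; the explicit header pair is purely illustrative):

package example

import (
	"context"
	"net/http"

	gax "github.com/googleapis/gax-go/v2"
	"github.com/googleapis/gax-go/v2/callctx"
)

func headersDemo() http.Header {
	// Stash a field mask on the context; the helpers below pick it up automatically.
	ctx := callctx.SetHeaders(context.Background(), callctx.XGoogFieldMaskHeader, "name")
	// Merge an explicit pair with whatever the context already carries.
	return gax.BuildHeaders(ctx, "x-goog-api-client", gax.XGoogHeader("gl-go", gax.GoVersion))
}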
+func BuildHeaders(ctx context.Context, keyvals ...string) http.Header { + return http.Header(insertMetadata(ctx, keyvals...)) +} + +func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD { + if len(keyvals)%2 != 0 { + panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals))) + } + out, ok := metadata.FromOutgoingContext(ctx) + if !ok { + out = metadata.MD(make(map[string][]string)) + } + headers := callctx.HeadersFromContext(ctx) + for k, v := range headers { + out[k] = append(out[k], v...) + } + for i := 0; i < len(keyvals); i = i + 2 { + out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1]) + } + return out +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go new file mode 100644 index 00000000000..90348f303df --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -0,0 +1,33 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package internal + +// Version is the current tagged release of the library. +const Version = "2.12.3" diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 00000000000..721d1af5517 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,114 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "context" + "strings" + "time" + + "github.com/googleapis/gax-go/v2/apierror" +) + +// APICall is a user defined call stub. +type APICall func(context.Context, CallSettings) error + +// Invoke calls the given APICall, performing retries as specified by opts, if +// any. +func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { + var settings CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + return invoke(ctx, call, settings, Sleep) +} + +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } +} + +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. +func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { + var retryer Retryer + + // Only use the value provided via WithTimeout if the context doesn't + // already have a deadline. This is important for backwards compatibility if + // the user already set a deadline on the context given to Invoke. + if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 { + c, cc := context.WithTimeout(ctx, settings.timeout) + defer cc() + ctx = c + } + + for { + err := call(ctx, settings) + if err == nil { + return nil + } + // Never retry permanent certificate errors. (e.g. if ca-certificates + // are not installed). We should only make very few, targeted + // exceptions: many (other) status=Unavailable should be retried, such + // as if there's a network hiccup, or the internet goes out for a + // minute. This is also why here we are doing string parsing instead of + // simply making Unavailable a non-retried code elsewhere.
+ if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") { + return err + } + if apierr, ok := apierror.FromError(err); ok { + err = apierr + } + if settings.Retry == nil { + return err + } + if retryer == nil { + if r := settings.Retry(); r != nil { + retryer = r + } else { + return err + } + } + if d, ok := retryer.Retry(err); !ok { + return err + } else if err = sp(ctx, d); err != nil { + return err + } + } +} diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go new file mode 100644 index 00000000000..cc4486eb9e5 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go @@ -0,0 +1,126 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "encoding/json" + "errors" + "io" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + arrayOpen = json.Delim('[') + arrayClose = json.Delim(']') + errBadOpening = errors.New("unexpected opening token, expected '['") +) + +// ProtoJSONStream represents a wrapper for consuming a stream of protobuf +// messages encoded using protobuf-JSON format. More information on this format +// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json. +// The stream must appear as a comma-delimited, JSON array of objects with +// opening and closing square braces. +// +// This is for internal use only. +type ProtoJSONStream struct { + first, closed bool + reader io.ReadCloser + stream *json.Decoder + typ protoreflect.MessageType +} + +// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are +// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream +// must be closed when done. +// +// This is for internal use only.
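To make the Recv contract below concrete, a minimal consumption loop might look like this sketch (assumptions: this package imported as gax; rc carries a protobuf-JSON array; typ is the element message's protoreflect.MessageType):

package example

import (
	"io"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

// drain reads every message from the stream until the closing ']' yields io.EOF.
func drain(rc io.ReadCloser, typ protoreflect.MessageType) ([]proto.Message, error) {
	stream := gax.NewProtoJSONStreamReader(rc, typ)
	defer stream.Close()

	var msgs []proto.Message
	for {
		m, err := stream.Recv()
		if err == io.EOF {
			return msgs, nil
		}
		if err != nil {
			return nil, err
		}
		msgs = append(msgs, m)
	}
}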
+func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream { + return &ProtoJSONStream{ + first: true, + reader: rc, + stream: json.NewDecoder(rc), + typ: typ, + } +} + +// Recv decodes the next protobuf message in the stream or returns io.EOF if +// the stream is done. It is not safe to call Recv on the same stream from +// different goroutines, just like it is not safe to do so with a single gRPC +// stream. Type-cast the protobuf message returned to the type provided at +// ProtoJSONStream creation. +// Calls to Recv after calling Close will produce io.EOF. +func (s *ProtoJSONStream) Recv() (proto.Message, error) { + if s.closed { + return nil, io.EOF + } + if s.first { + s.first = false + + // Consume the opening '[' so Decode gets one object at a time. + if t, err := s.stream.Token(); err != nil { + return nil, err + } else if t != arrayOpen { + return nil, errBadOpening + } + } + + // Capture the next block of data for the item (a JSON object) in the stream. + var raw json.RawMessage + if err := s.stream.Decode(&raw); err != nil { + e := err + // To avoid checking the first token of each stream, just attempt to + // Decode the next blob and if that fails, double check if it is just + // the closing token ']'. If it is the closing, return io.EOF. If it + // isn't, return the original error. + if t, _ := s.stream.Token(); t == arrayClose { + e = io.EOF + } + return nil, e + } + + // Initialize a new instance of the protobuf message to unmarshal the + // raw data into. + m := s.typ.New().Interface() + err := protojson.Unmarshal(raw, m) + + return m, err +} + +// Close closes the stream so that resources are cleaned up. +func (s *ProtoJSONStream) Close() error { + // Dereference the *json.Decoder so that the memory is gc'd. + s.stream = nil + s.closed = true + + return s.reader.Close() +} diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json new file mode 100644 index 00000000000..61ee266a159 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json @@ -0,0 +1,10 @@ +{ + "release-type": "go-yoshi", + "separate-pull-requests": true, + "include-component-in-tag": false, + "packages": { + "v2": { + "component": "v2" + } + } +} diff --git a/vendor/github.com/jzelinskie/whirlpool/.travis.yml b/vendor/github.com/jzelinskie/whirlpool/.travis.yml new file mode 100644 index 00000000000..d7001e85544 --- /dev/null +++ b/vendor/github.com/jzelinskie/whirlpool/.travis.yml @@ -0,0 +1,4 @@ +arch: + - ppc64le + - amd64 +language: go diff --git a/vendor/github.com/jzelinskie/whirlpool/LICENSE b/vendor/github.com/jzelinskie/whirlpool/LICENSE new file mode 100644 index 00000000000..b9329c258a1 --- /dev/null +++ b/vendor/github.com/jzelinskie/whirlpool/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2012, Jimmy Zelinskie +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ * Neither the name of the nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/jzelinskie/whirlpool/README.md b/vendor/github.com/jzelinskie/whirlpool/README.md new file mode 100644 index 00000000000..3445070f17a --- /dev/null +++ b/vendor/github.com/jzelinskie/whirlpool/README.md @@ -0,0 +1,43 @@ +# whirlpool.go +A [whirlpool hashing](https://en.wikipedia.org/wiki/Whirlpool_(cryptography)) library for go + +## Build status + +[![Build Status](https://secure.travis-ci.org/jzelinskie/whirlpool.png)](http://travis-ci.org/jzelinskie/whirlpool) + +## Setup + +```bash +$ go get github.com/jzelinskie/whirlpool +``` + +## Example + +```Go +package main + +import ( + "fmt" + "github.com/jzelinskie/whirlpool" +) + +func main() { + w := whirlpool.New() + text := []byte("This is an example.") + w.Write(text) + fmt.Println(w.Sum(nil)) +} +``` + +## Docs + +Check out the [gopkgdoc page](http://go.pkgdoc.org/github.com/jzelinskie/whirlpool), but there isn't much -- it works just like the other hashes in the standard library + +## Branches + +* master - stable, works like the hash libs in the corelib +* trace - same code as master, but prints midstate values to stdout + +## license + +Modified BSD License diff --git a/vendor/github.com/jzelinskie/whirlpool/const.go b/vendor/github.com/jzelinskie/whirlpool/const.go new file mode 100644 index 00000000000..4a6f1104412 --- /dev/null +++ b/vendor/github.com/jzelinskie/whirlpool/const.go @@ -0,0 +1,565 @@ +// Copyright 2012 Jimmy Zelinskie. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package whirlpool + +const ( + rounds = 10 + lengthBytes = 32 + lengthBits = 8 * lengthBytes + digestBytes = 64 + digestBits = 8 * digestBytes + wblockBytes = 64 + wblockBits = 8 * wblockBytes +) + +var _C0 = [256]uint64{ + 0x18186018c07830d8, 0x23238c2305af4626, 0xc6c63fc67ef991b8, 0xe8e887e8136fcdfb, + 0x878726874ca113cb, 0xb8b8dab8a9626d11, 0x0101040108050209, 0x4f4f214f426e9e0d, + 0x3636d836adee6c9b, 0xa6a6a2a6590451ff, 0xd2d26fd2debdb90c, 0xf5f5f3f5fb06f70e, + 0x7979f979ef80f296, 0x6f6fa16f5fcede30, 0x91917e91fcef3f6d, 0x52525552aa07a4f8, + 0x60609d6027fdc047, 0xbcbccabc89766535, 0x9b9b569baccd2b37, 0x8e8e028e048c018a, + 0xa3a3b6a371155bd2, 0x0c0c300c603c186c, 0x7b7bf17bff8af684, 0x3535d435b5e16a80, + 0x1d1d741de8693af5, 0xe0e0a7e05347ddb3, 0xd7d77bd7f6acb321, 0xc2c22fc25eed999c, + 0x2e2eb82e6d965c43, 0x4b4b314b627a9629, 0xfefedffea321e15d, 0x575741578216aed5, + 0x15155415a8412abd, 0x7777c1779fb6eee8, 0x3737dc37a5eb6e92, 0xe5e5b3e57b56d79e, + 0x9f9f469f8cd92313, 0xf0f0e7f0d317fd23, 0x4a4a354a6a7f9420, 0xdada4fda9e95a944, + 0x58587d58fa25b0a2, 0xc9c903c906ca8fcf, 0x2929a429558d527c, 0x0a0a280a5022145a, + 0xb1b1feb1e14f7f50, 0xa0a0baa0691a5dc9, 0x6b6bb16b7fdad614, 0x85852e855cab17d9, + 0xbdbdcebd8173673c, 0x5d5d695dd234ba8f, 0x1010401080502090, 0xf4f4f7f4f303f507, + 0xcbcb0bcb16c08bdd, 0x3e3ef83eedc67cd3, 0x0505140528110a2d, 0x676781671fe6ce78, + 0xe4e4b7e47353d597, 0x27279c2725bb4e02, 0x4141194132588273, 0x8b8b168b2c9d0ba7, + 0xa7a7a6a7510153f6, 0x7d7de97dcf94fab2, 0x95956e95dcfb3749, 0xd8d847d88e9fad56, + 0xfbfbcbfb8b30eb70, 0xeeee9fee2371c1cd, 0x7c7ced7cc791f8bb, 0x6666856617e3cc71, + 0xdddd53dda68ea77b, 0x17175c17b84b2eaf, 0x4747014702468e45, 0x9e9e429e84dc211a, + 0xcaca0fca1ec589d4, 0x2d2db42d75995a58, 0xbfbfc6bf9179632e, 0x07071c07381b0e3f, + 0xadad8ead012347ac, 0x5a5a755aea2fb4b0, 0x838336836cb51bef, 0x3333cc3385ff66b6, + 0x636391633ff2c65c, 0x02020802100a0412, 0xaaaa92aa39384993, 0x7171d971afa8e2de, + 0xc8c807c80ecf8dc6, 0x19196419c87d32d1, 0x494939497270923b, 0xd9d943d9869aaf5f, + 0xf2f2eff2c31df931, 0xe3e3abe34b48dba8, 0x5b5b715be22ab6b9, 0x88881a8834920dbc, + 0x9a9a529aa4c8293e, 0x262698262dbe4c0b, 0x3232c8328dfa64bf, 0xb0b0fab0e94a7d59, + 0xe9e983e91b6acff2, 0x0f0f3c0f78331e77, 0xd5d573d5e6a6b733, 0x80803a8074ba1df4, + 0xbebec2be997c6127, 0xcdcd13cd26de87eb, 0x3434d034bde46889, 0x48483d487a759032, + 0xffffdbffab24e354, 0x7a7af57af78ff48d, 0x90907a90f4ea3d64, 0x5f5f615fc23ebe9d, + 0x202080201da0403d, 0x6868bd6867d5d00f, 0x1a1a681ad07234ca, 0xaeae82ae192c41b7, + 0xb4b4eab4c95e757d, 0x54544d549a19a8ce, 0x93937693ece53b7f, 0x222288220daa442f, + 0x64648d6407e9c863, 0xf1f1e3f1db12ff2a, 0x7373d173bfa2e6cc, 0x12124812905a2482, + 0x40401d403a5d807a, 0x0808200840281048, 0xc3c32bc356e89b95, 0xecec97ec337bc5df, + 0xdbdb4bdb9690ab4d, 0xa1a1bea1611f5fc0, 0x8d8d0e8d1c830791, 0x3d3df43df5c97ac8, + 0x97976697ccf1335b, 0x0000000000000000, 0xcfcf1bcf36d483f9, 0x2b2bac2b4587566e, + 0x7676c57697b3ece1, 0x8282328264b019e6, 0xd6d67fd6fea9b128, 0x1b1b6c1bd87736c3, + 0xb5b5eeb5c15b7774, 0xafaf86af112943be, 0x6a6ab56a77dfd41d, 0x50505d50ba0da0ea, + 0x45450945124c8a57, 0xf3f3ebf3cb18fb38, 0x3030c0309df060ad, 0xefef9bef2b74c3c4, + 0x3f3ffc3fe5c37eda, 0x55554955921caac7, 0xa2a2b2a2791059db, 0xeaea8fea0365c9e9, + 0x656589650fecca6a, 0xbabad2bab9686903, 0x2f2fbc2f65935e4a, 0xc0c027c04ee79d8e, + 0xdede5fdebe81a160, 0x1c1c701ce06c38fc, 0xfdfdd3fdbb2ee746, 0x4d4d294d52649a1f, + 0x92927292e4e03976, 0x7575c9758fbceafa, 0x06061806301e0c36, 0x8a8a128a249809ae, + 0xb2b2f2b2f940794b, 0xe6e6bfe66359d185, 
0x0e0e380e70361c7e, 0x1f1f7c1ff8633ee7, + 0x6262956237f7c455, 0xd4d477d4eea3b53a, 0xa8a89aa829324d81, 0x96966296c4f43152, + 0xf9f9c3f99b3aef62, 0xc5c533c566f697a3, 0x2525942535b14a10, 0x59597959f220b2ab, + 0x84842a8454ae15d0, 0x7272d572b7a7e4c5, 0x3939e439d5dd72ec, 0x4c4c2d4c5a619816, + 0x5e5e655eca3bbc94, 0x7878fd78e785f09f, 0x3838e038ddd870e5, 0x8c8c0a8c14860598, + 0xd1d163d1c6b2bf17, 0xa5a5aea5410b57e4, 0xe2e2afe2434dd9a1, 0x616199612ff8c24e, + 0xb3b3f6b3f1457b42, 0x2121842115a54234, 0x9c9c4a9c94d62508, 0x1e1e781ef0663cee, + 0x4343114322528661, 0xc7c73bc776fc93b1, 0xfcfcd7fcb32be54f, 0x0404100420140824, + 0x51515951b208a2e3, 0x99995e99bcc72f25, 0x6d6da96d4fc4da22, 0x0d0d340d68391a65, + 0xfafacffa8335e979, 0xdfdf5bdfb684a369, 0x7e7ee57ed79bfca9, 0x242490243db44819, + 0x3b3bec3bc5d776fe, 0xabab96ab313d4b9a, 0xcece1fce3ed181f0, 0x1111441188552299, + 0x8f8f068f0c890383, 0x4e4e254e4a6b9c04, 0xb7b7e6b7d1517366, 0xebeb8beb0b60cbe0, + 0x3c3cf03cfdcc78c1, 0x81813e817cbf1ffd, 0x94946a94d4fe3540, 0xf7f7fbf7eb0cf31c, + 0xb9b9deb9a1676f18, 0x13134c13985f268b, 0x2c2cb02c7d9c5851, 0xd3d36bd3d6b8bb05, + 0xe7e7bbe76b5cd38c, 0x6e6ea56e57cbdc39, 0xc4c437c46ef395aa, 0x03030c03180f061b, + 0x565645568a13acdc, 0x44440d441a49885e, 0x7f7fe17fdf9efea0, 0xa9a99ea921374f88, + 0x2a2aa82a4d825467, 0xbbbbd6bbb16d6b0a, 0xc1c123c146e29f87, 0x53535153a202a6f1, + 0xdcdc57dcae8ba572, 0x0b0b2c0b58271653, 0x9d9d4e9d9cd32701, 0x6c6cad6c47c1d82b, + 0x3131c43195f562a4, 0x7474cd7487b9e8f3, 0xf6f6fff6e309f115, 0x464605460a438c4c, + 0xacac8aac092645a5, 0x89891e893c970fb5, 0x14145014a04428b4, 0xe1e1a3e15b42dfba, + 0x16165816b04e2ca6, 0x3a3ae83acdd274f7, 0x6969b9696fd0d206, 0x09092409482d1241, + 0x7070dd70a7ade0d7, 0xb6b6e2b6d954716f, 0xd0d067d0ceb7bd1e, 0xeded93ed3b7ec7d6, + 0xcccc17cc2edb85e2, 0x424215422a578468, 0x98985a98b4c22d2c, 0xa4a4aaa4490e55ed, + 0x2828a0285d885075, 0x5c5c6d5cda31b886, 0xf8f8c7f8933fed6b, 0x8686228644a411c2, +} + +var _C1 = [256]uint64{ + 0xd818186018c07830, 0x2623238c2305af46, 0xb8c6c63fc67ef991, 0xfbe8e887e8136fcd, + 0xcb878726874ca113, 0x11b8b8dab8a9626d, 0x0901010401080502, 0x0d4f4f214f426e9e, + 0x9b3636d836adee6c, 0xffa6a6a2a6590451, 0x0cd2d26fd2debdb9, 0x0ef5f5f3f5fb06f7, + 0x967979f979ef80f2, 0x306f6fa16f5fcede, 0x6d91917e91fcef3f, 0xf852525552aa07a4, + 0x4760609d6027fdc0, 0x35bcbccabc897665, 0x379b9b569baccd2b, 0x8a8e8e028e048c01, + 0xd2a3a3b6a371155b, 0x6c0c0c300c603c18, 0x847b7bf17bff8af6, 0x803535d435b5e16a, + 0xf51d1d741de8693a, 0xb3e0e0a7e05347dd, 0x21d7d77bd7f6acb3, 0x9cc2c22fc25eed99, + 0x432e2eb82e6d965c, 0x294b4b314b627a96, 0x5dfefedffea321e1, 0xd5575741578216ae, + 0xbd15155415a8412a, 0xe87777c1779fb6ee, 0x923737dc37a5eb6e, 0x9ee5e5b3e57b56d7, + 0x139f9f469f8cd923, 0x23f0f0e7f0d317fd, 0x204a4a354a6a7f94, 0x44dada4fda9e95a9, + 0xa258587d58fa25b0, 0xcfc9c903c906ca8f, 0x7c2929a429558d52, 0x5a0a0a280a502214, + 0x50b1b1feb1e14f7f, 0xc9a0a0baa0691a5d, 0x146b6bb16b7fdad6, 0xd985852e855cab17, + 0x3cbdbdcebd817367, 0x8f5d5d695dd234ba, 0x9010104010805020, 0x07f4f4f7f4f303f5, + 0xddcbcb0bcb16c08b, 0xd33e3ef83eedc67c, 0x2d0505140528110a, 0x78676781671fe6ce, + 0x97e4e4b7e47353d5, 0x0227279c2725bb4e, 0x7341411941325882, 0xa78b8b168b2c9d0b, + 0xf6a7a7a6a7510153, 0xb27d7de97dcf94fa, 0x4995956e95dcfb37, 0x56d8d847d88e9fad, + 0x70fbfbcbfb8b30eb, 0xcdeeee9fee2371c1, 0xbb7c7ced7cc791f8, 0x716666856617e3cc, + 0x7bdddd53dda68ea7, 0xaf17175c17b84b2e, 0x454747014702468e, 0x1a9e9e429e84dc21, + 0xd4caca0fca1ec589, 0x582d2db42d75995a, 0x2ebfbfc6bf917963, 0x3f07071c07381b0e, + 0xacadad8ead012347, 0xb05a5a755aea2fb4, 
0xef838336836cb51b, 0xb63333cc3385ff66, + 0x5c636391633ff2c6, 0x1202020802100a04, 0x93aaaa92aa393849, 0xde7171d971afa8e2, + 0xc6c8c807c80ecf8d, 0xd119196419c87d32, 0x3b49493949727092, 0x5fd9d943d9869aaf, + 0x31f2f2eff2c31df9, 0xa8e3e3abe34b48db, 0xb95b5b715be22ab6, 0xbc88881a8834920d, + 0x3e9a9a529aa4c829, 0x0b262698262dbe4c, 0xbf3232c8328dfa64, 0x59b0b0fab0e94a7d, + 0xf2e9e983e91b6acf, 0x770f0f3c0f78331e, 0x33d5d573d5e6a6b7, 0xf480803a8074ba1d, + 0x27bebec2be997c61, 0xebcdcd13cd26de87, 0x893434d034bde468, 0x3248483d487a7590, + 0x54ffffdbffab24e3, 0x8d7a7af57af78ff4, 0x6490907a90f4ea3d, 0x9d5f5f615fc23ebe, + 0x3d202080201da040, 0x0f6868bd6867d5d0, 0xca1a1a681ad07234, 0xb7aeae82ae192c41, + 0x7db4b4eab4c95e75, 0xce54544d549a19a8, 0x7f93937693ece53b, 0x2f222288220daa44, + 0x6364648d6407e9c8, 0x2af1f1e3f1db12ff, 0xcc7373d173bfa2e6, 0x8212124812905a24, + 0x7a40401d403a5d80, 0x4808082008402810, 0x95c3c32bc356e89b, 0xdfecec97ec337bc5, + 0x4ddbdb4bdb9690ab, 0xc0a1a1bea1611f5f, 0x918d8d0e8d1c8307, 0xc83d3df43df5c97a, + 0x5b97976697ccf133, 0x0000000000000000, 0xf9cfcf1bcf36d483, 0x6e2b2bac2b458756, + 0xe17676c57697b3ec, 0xe68282328264b019, 0x28d6d67fd6fea9b1, 0xc31b1b6c1bd87736, + 0x74b5b5eeb5c15b77, 0xbeafaf86af112943, 0x1d6a6ab56a77dfd4, 0xea50505d50ba0da0, + 0x5745450945124c8a, 0x38f3f3ebf3cb18fb, 0xad3030c0309df060, 0xc4efef9bef2b74c3, + 0xda3f3ffc3fe5c37e, 0xc755554955921caa, 0xdba2a2b2a2791059, 0xe9eaea8fea0365c9, + 0x6a656589650fecca, 0x03babad2bab96869, 0x4a2f2fbc2f65935e, 0x8ec0c027c04ee79d, + 0x60dede5fdebe81a1, 0xfc1c1c701ce06c38, 0x46fdfdd3fdbb2ee7, 0x1f4d4d294d52649a, + 0x7692927292e4e039, 0xfa7575c9758fbcea, 0x3606061806301e0c, 0xae8a8a128a249809, + 0x4bb2b2f2b2f94079, 0x85e6e6bfe66359d1, 0x7e0e0e380e70361c, 0xe71f1f7c1ff8633e, + 0x556262956237f7c4, 0x3ad4d477d4eea3b5, 0x81a8a89aa829324d, 0x5296966296c4f431, + 0x62f9f9c3f99b3aef, 0xa3c5c533c566f697, 0x102525942535b14a, 0xab59597959f220b2, + 0xd084842a8454ae15, 0xc57272d572b7a7e4, 0xec3939e439d5dd72, 0x164c4c2d4c5a6198, + 0x945e5e655eca3bbc, 0x9f7878fd78e785f0, 0xe53838e038ddd870, 0x988c8c0a8c148605, + 0x17d1d163d1c6b2bf, 0xe4a5a5aea5410b57, 0xa1e2e2afe2434dd9, 0x4e616199612ff8c2, + 0x42b3b3f6b3f1457b, 0x342121842115a542, 0x089c9c4a9c94d625, 0xee1e1e781ef0663c, + 0x6143431143225286, 0xb1c7c73bc776fc93, 0x4ffcfcd7fcb32be5, 0x2404041004201408, + 0xe351515951b208a2, 0x2599995e99bcc72f, 0x226d6da96d4fc4da, 0x650d0d340d68391a, + 0x79fafacffa8335e9, 0x69dfdf5bdfb684a3, 0xa97e7ee57ed79bfc, 0x19242490243db448, + 0xfe3b3bec3bc5d776, 0x9aabab96ab313d4b, 0xf0cece1fce3ed181, 0x9911114411885522, + 0x838f8f068f0c8903, 0x044e4e254e4a6b9c, 0x66b7b7e6b7d15173, 0xe0ebeb8beb0b60cb, + 0xc13c3cf03cfdcc78, 0xfd81813e817cbf1f, 0x4094946a94d4fe35, 0x1cf7f7fbf7eb0cf3, + 0x18b9b9deb9a1676f, 0x8b13134c13985f26, 0x512c2cb02c7d9c58, 0x05d3d36bd3d6b8bb, + 0x8ce7e7bbe76b5cd3, 0x396e6ea56e57cbdc, 0xaac4c437c46ef395, 0x1b03030c03180f06, + 0xdc565645568a13ac, 0x5e44440d441a4988, 0xa07f7fe17fdf9efe, 0x88a9a99ea921374f, + 0x672a2aa82a4d8254, 0x0abbbbd6bbb16d6b, 0x87c1c123c146e29f, 0xf153535153a202a6, + 0x72dcdc57dcae8ba5, 0x530b0b2c0b582716, 0x019d9d4e9d9cd327, 0x2b6c6cad6c47c1d8, + 0xa43131c43195f562, 0xf37474cd7487b9e8, 0x15f6f6fff6e309f1, 0x4c464605460a438c, + 0xa5acac8aac092645, 0xb589891e893c970f, 0xb414145014a04428, 0xbae1e1a3e15b42df, + 0xa616165816b04e2c, 0xf73a3ae83acdd274, 0x066969b9696fd0d2, 0x4109092409482d12, + 0xd77070dd70a7ade0, 0x6fb6b6e2b6d95471, 0x1ed0d067d0ceb7bd, 0xd6eded93ed3b7ec7, + 0xe2cccc17cc2edb85, 0x68424215422a5784, 0x2c98985a98b4c22d, 
0xeda4a4aaa4490e55, + 0x752828a0285d8850, 0x865c5c6d5cda31b8, 0x6bf8f8c7f8933fed, 0xc28686228644a411, +} + +var _C2 = [256]uint64{ + 0x30d818186018c078, 0x462623238c2305af, 0x91b8c6c63fc67ef9, 0xcdfbe8e887e8136f, + 0x13cb878726874ca1, 0x6d11b8b8dab8a962, 0x0209010104010805, 0x9e0d4f4f214f426e, + 0x6c9b3636d836adee, 0x51ffa6a6a2a65904, 0xb90cd2d26fd2debd, 0xf70ef5f5f3f5fb06, + 0xf2967979f979ef80, 0xde306f6fa16f5fce, 0x3f6d91917e91fcef, 0xa4f852525552aa07, + 0xc04760609d6027fd, 0x6535bcbccabc8976, 0x2b379b9b569baccd, 0x018a8e8e028e048c, + 0x5bd2a3a3b6a37115, 0x186c0c0c300c603c, 0xf6847b7bf17bff8a, 0x6a803535d435b5e1, + 0x3af51d1d741de869, 0xddb3e0e0a7e05347, 0xb321d7d77bd7f6ac, 0x999cc2c22fc25eed, + 0x5c432e2eb82e6d96, 0x96294b4b314b627a, 0xe15dfefedffea321, 0xaed5575741578216, + 0x2abd15155415a841, 0xeee87777c1779fb6, 0x6e923737dc37a5eb, 0xd79ee5e5b3e57b56, + 0x23139f9f469f8cd9, 0xfd23f0f0e7f0d317, 0x94204a4a354a6a7f, 0xa944dada4fda9e95, + 0xb0a258587d58fa25, 0x8fcfc9c903c906ca, 0x527c2929a429558d, 0x145a0a0a280a5022, + 0x7f50b1b1feb1e14f, 0x5dc9a0a0baa0691a, 0xd6146b6bb16b7fda, 0x17d985852e855cab, + 0x673cbdbdcebd8173, 0xba8f5d5d695dd234, 0x2090101040108050, 0xf507f4f4f7f4f303, + 0x8bddcbcb0bcb16c0, 0x7cd33e3ef83eedc6, 0x0a2d050514052811, 0xce78676781671fe6, + 0xd597e4e4b7e47353, 0x4e0227279c2725bb, 0x8273414119413258, 0x0ba78b8b168b2c9d, + 0x53f6a7a7a6a75101, 0xfab27d7de97dcf94, 0x374995956e95dcfb, 0xad56d8d847d88e9f, + 0xeb70fbfbcbfb8b30, 0xc1cdeeee9fee2371, 0xf8bb7c7ced7cc791, 0xcc716666856617e3, + 0xa77bdddd53dda68e, 0x2eaf17175c17b84b, 0x8e45474701470246, 0x211a9e9e429e84dc, + 0x89d4caca0fca1ec5, 0x5a582d2db42d7599, 0x632ebfbfc6bf9179, 0x0e3f07071c07381b, + 0x47acadad8ead0123, 0xb4b05a5a755aea2f, 0x1bef838336836cb5, 0x66b63333cc3385ff, + 0xc65c636391633ff2, 0x041202020802100a, 0x4993aaaa92aa3938, 0xe2de7171d971afa8, + 0x8dc6c8c807c80ecf, 0x32d119196419c87d, 0x923b494939497270, 0xaf5fd9d943d9869a, + 0xf931f2f2eff2c31d, 0xdba8e3e3abe34b48, 0xb6b95b5b715be22a, 0x0dbc88881a883492, + 0x293e9a9a529aa4c8, 0x4c0b262698262dbe, 0x64bf3232c8328dfa, 0x7d59b0b0fab0e94a, + 0xcff2e9e983e91b6a, 0x1e770f0f3c0f7833, 0xb733d5d573d5e6a6, 0x1df480803a8074ba, + 0x6127bebec2be997c, 0x87ebcdcd13cd26de, 0x68893434d034bde4, 0x903248483d487a75, + 0xe354ffffdbffab24, 0xf48d7a7af57af78f, 0x3d6490907a90f4ea, 0xbe9d5f5f615fc23e, + 0x403d202080201da0, 0xd00f6868bd6867d5, 0x34ca1a1a681ad072, 0x41b7aeae82ae192c, + 0x757db4b4eab4c95e, 0xa8ce54544d549a19, 0x3b7f93937693ece5, 0x442f222288220daa, + 0xc86364648d6407e9, 0xff2af1f1e3f1db12, 0xe6cc7373d173bfa2, 0x248212124812905a, + 0x807a40401d403a5d, 0x1048080820084028, 0x9b95c3c32bc356e8, 0xc5dfecec97ec337b, + 0xab4ddbdb4bdb9690, 0x5fc0a1a1bea1611f, 0x07918d8d0e8d1c83, 0x7ac83d3df43df5c9, + 0x335b97976697ccf1, 0x0000000000000000, 0x83f9cfcf1bcf36d4, 0x566e2b2bac2b4587, + 0xece17676c57697b3, 0x19e68282328264b0, 0xb128d6d67fd6fea9, 0x36c31b1b6c1bd877, + 0x7774b5b5eeb5c15b, 0x43beafaf86af1129, 0xd41d6a6ab56a77df, 0xa0ea50505d50ba0d, + 0x8a5745450945124c, 0xfb38f3f3ebf3cb18, 0x60ad3030c0309df0, 0xc3c4efef9bef2b74, + 0x7eda3f3ffc3fe5c3, 0xaac755554955921c, 0x59dba2a2b2a27910, 0xc9e9eaea8fea0365, + 0xca6a656589650fec, 0x6903babad2bab968, 0x5e4a2f2fbc2f6593, 0x9d8ec0c027c04ee7, + 0xa160dede5fdebe81, 0x38fc1c1c701ce06c, 0xe746fdfdd3fdbb2e, 0x9a1f4d4d294d5264, + 0x397692927292e4e0, 0xeafa7575c9758fbc, 0x0c3606061806301e, 0x09ae8a8a128a2498, + 0x794bb2b2f2b2f940, 0xd185e6e6bfe66359, 0x1c7e0e0e380e7036, 0x3ee71f1f7c1ff863, + 0xc4556262956237f7, 0xb53ad4d477d4eea3, 0x4d81a8a89aa82932, 
0x315296966296c4f4, + 0xef62f9f9c3f99b3a, 0x97a3c5c533c566f6, 0x4a102525942535b1, 0xb2ab59597959f220, + 0x15d084842a8454ae, 0xe4c57272d572b7a7, 0x72ec3939e439d5dd, 0x98164c4c2d4c5a61, + 0xbc945e5e655eca3b, 0xf09f7878fd78e785, 0x70e53838e038ddd8, 0x05988c8c0a8c1486, + 0xbf17d1d163d1c6b2, 0x57e4a5a5aea5410b, 0xd9a1e2e2afe2434d, 0xc24e616199612ff8, + 0x7b42b3b3f6b3f145, 0x42342121842115a5, 0x25089c9c4a9c94d6, 0x3cee1e1e781ef066, + 0x8661434311432252, 0x93b1c7c73bc776fc, 0xe54ffcfcd7fcb32b, 0x0824040410042014, + 0xa2e351515951b208, 0x2f2599995e99bcc7, 0xda226d6da96d4fc4, 0x1a650d0d340d6839, + 0xe979fafacffa8335, 0xa369dfdf5bdfb684, 0xfca97e7ee57ed79b, 0x4819242490243db4, + 0x76fe3b3bec3bc5d7, 0x4b9aabab96ab313d, 0x81f0cece1fce3ed1, 0x2299111144118855, + 0x03838f8f068f0c89, 0x9c044e4e254e4a6b, 0x7366b7b7e6b7d151, 0xcbe0ebeb8beb0b60, + 0x78c13c3cf03cfdcc, 0x1ffd81813e817cbf, 0x354094946a94d4fe, 0xf31cf7f7fbf7eb0c, + 0x6f18b9b9deb9a167, 0x268b13134c13985f, 0x58512c2cb02c7d9c, 0xbb05d3d36bd3d6b8, + 0xd38ce7e7bbe76b5c, 0xdc396e6ea56e57cb, 0x95aac4c437c46ef3, 0x061b03030c03180f, + 0xacdc565645568a13, 0x885e44440d441a49, 0xfea07f7fe17fdf9e, 0x4f88a9a99ea92137, + 0x54672a2aa82a4d82, 0x6b0abbbbd6bbb16d, 0x9f87c1c123c146e2, 0xa6f153535153a202, + 0xa572dcdc57dcae8b, 0x16530b0b2c0b5827, 0x27019d9d4e9d9cd3, 0xd82b6c6cad6c47c1, + 0x62a43131c43195f5, 0xe8f37474cd7487b9, 0xf115f6f6fff6e309, 0x8c4c464605460a43, + 0x45a5acac8aac0926, 0x0fb589891e893c97, 0x28b414145014a044, 0xdfbae1e1a3e15b42, + 0x2ca616165816b04e, 0x74f73a3ae83acdd2, 0xd2066969b9696fd0, 0x124109092409482d, + 0xe0d77070dd70a7ad, 0x716fb6b6e2b6d954, 0xbd1ed0d067d0ceb7, 0xc7d6eded93ed3b7e, + 0x85e2cccc17cc2edb, 0x8468424215422a57, 0x2d2c98985a98b4c2, 0x55eda4a4aaa4490e, + 0x50752828a0285d88, 0xb8865c5c6d5cda31, 0xed6bf8f8c7f8933f, 0x11c28686228644a4, +} + +var _C3 = [256]uint64{ + 0x7830d818186018c0, 0xaf462623238c2305, 0xf991b8c6c63fc67e, 0x6fcdfbe8e887e813, + 0xa113cb878726874c, 0x626d11b8b8dab8a9, 0x0502090101040108, 0x6e9e0d4f4f214f42, + 0xee6c9b3636d836ad, 0x0451ffa6a6a2a659, 0xbdb90cd2d26fd2de, 0x06f70ef5f5f3f5fb, + 0x80f2967979f979ef, 0xcede306f6fa16f5f, 0xef3f6d91917e91fc, 0x07a4f852525552aa, + 0xfdc04760609d6027, 0x766535bcbccabc89, 0xcd2b379b9b569bac, 0x8c018a8e8e028e04, + 0x155bd2a3a3b6a371, 0x3c186c0c0c300c60, 0x8af6847b7bf17bff, 0xe16a803535d435b5, + 0x693af51d1d741de8, 0x47ddb3e0e0a7e053, 0xacb321d7d77bd7f6, 0xed999cc2c22fc25e, + 0x965c432e2eb82e6d, 0x7a96294b4b314b62, 0x21e15dfefedffea3, 0x16aed55757415782, + 0x412abd15155415a8, 0xb6eee87777c1779f, 0xeb6e923737dc37a5, 0x56d79ee5e5b3e57b, + 0xd923139f9f469f8c, 0x17fd23f0f0e7f0d3, 0x7f94204a4a354a6a, 0x95a944dada4fda9e, + 0x25b0a258587d58fa, 0xca8fcfc9c903c906, 0x8d527c2929a42955, 0x22145a0a0a280a50, + 0x4f7f50b1b1feb1e1, 0x1a5dc9a0a0baa069, 0xdad6146b6bb16b7f, 0xab17d985852e855c, + 0x73673cbdbdcebd81, 0x34ba8f5d5d695dd2, 0x5020901010401080, 0x03f507f4f4f7f4f3, + 0xc08bddcbcb0bcb16, 0xc67cd33e3ef83eed, 0x110a2d0505140528, 0xe6ce78676781671f, + 0x53d597e4e4b7e473, 0xbb4e0227279c2725, 0x5882734141194132, 0x9d0ba78b8b168b2c, + 0x0153f6a7a7a6a751, 0x94fab27d7de97dcf, 0xfb374995956e95dc, 0x9fad56d8d847d88e, + 0x30eb70fbfbcbfb8b, 0x71c1cdeeee9fee23, 0x91f8bb7c7ced7cc7, 0xe3cc716666856617, + 0x8ea77bdddd53dda6, 0x4b2eaf17175c17b8, 0x468e454747014702, 0xdc211a9e9e429e84, + 0xc589d4caca0fca1e, 0x995a582d2db42d75, 0x79632ebfbfc6bf91, 0x1b0e3f07071c0738, + 0x2347acadad8ead01, 0x2fb4b05a5a755aea, 0xb51bef838336836c, 0xff66b63333cc3385, + 0xf2c65c636391633f, 0x0a04120202080210, 0x384993aaaa92aa39, 
0xa8e2de7171d971af, + 0xcf8dc6c8c807c80e, 0x7d32d119196419c8, 0x70923b4949394972, 0x9aaf5fd9d943d986, + 0x1df931f2f2eff2c3, 0x48dba8e3e3abe34b, 0x2ab6b95b5b715be2, 0x920dbc88881a8834, + 0xc8293e9a9a529aa4, 0xbe4c0b262698262d, 0xfa64bf3232c8328d, 0x4a7d59b0b0fab0e9, + 0x6acff2e9e983e91b, 0x331e770f0f3c0f78, 0xa6b733d5d573d5e6, 0xba1df480803a8074, + 0x7c6127bebec2be99, 0xde87ebcdcd13cd26, 0xe468893434d034bd, 0x75903248483d487a, + 0x24e354ffffdbffab, 0x8ff48d7a7af57af7, 0xea3d6490907a90f4, 0x3ebe9d5f5f615fc2, + 0xa0403d202080201d, 0xd5d00f6868bd6867, 0x7234ca1a1a681ad0, 0x2c41b7aeae82ae19, + 0x5e757db4b4eab4c9, 0x19a8ce54544d549a, 0xe53b7f93937693ec, 0xaa442f222288220d, + 0xe9c86364648d6407, 0x12ff2af1f1e3f1db, 0xa2e6cc7373d173bf, 0x5a24821212481290, + 0x5d807a40401d403a, 0x2810480808200840, 0xe89b95c3c32bc356, 0x7bc5dfecec97ec33, + 0x90ab4ddbdb4bdb96, 0x1f5fc0a1a1bea161, 0x8307918d8d0e8d1c, 0xc97ac83d3df43df5, + 0xf1335b97976697cc, 0x0000000000000000, 0xd483f9cfcf1bcf36, 0x87566e2b2bac2b45, + 0xb3ece17676c57697, 0xb019e68282328264, 0xa9b128d6d67fd6fe, 0x7736c31b1b6c1bd8, + 0x5b7774b5b5eeb5c1, 0x2943beafaf86af11, 0xdfd41d6a6ab56a77, 0x0da0ea50505d50ba, + 0x4c8a574545094512, 0x18fb38f3f3ebf3cb, 0xf060ad3030c0309d, 0x74c3c4efef9bef2b, + 0xc37eda3f3ffc3fe5, 0x1caac75555495592, 0x1059dba2a2b2a279, 0x65c9e9eaea8fea03, + 0xecca6a656589650f, 0x686903babad2bab9, 0x935e4a2f2fbc2f65, 0xe79d8ec0c027c04e, + 0x81a160dede5fdebe, 0x6c38fc1c1c701ce0, 0x2ee746fdfdd3fdbb, 0x649a1f4d4d294d52, + 0xe0397692927292e4, 0xbceafa7575c9758f, 0x1e0c360606180630, 0x9809ae8a8a128a24, + 0x40794bb2b2f2b2f9, 0x59d185e6e6bfe663, 0x361c7e0e0e380e70, 0x633ee71f1f7c1ff8, + 0xf7c4556262956237, 0xa3b53ad4d477d4ee, 0x324d81a8a89aa829, 0xf4315296966296c4, + 0x3aef62f9f9c3f99b, 0xf697a3c5c533c566, 0xb14a102525942535, 0x20b2ab59597959f2, + 0xae15d084842a8454, 0xa7e4c57272d572b7, 0xdd72ec3939e439d5, 0x6198164c4c2d4c5a, + 0x3bbc945e5e655eca, 0x85f09f7878fd78e7, 0xd870e53838e038dd, 0x8605988c8c0a8c14, + 0xb2bf17d1d163d1c6, 0x0b57e4a5a5aea541, 0x4dd9a1e2e2afe243, 0xf8c24e616199612f, + 0x457b42b3b3f6b3f1, 0xa542342121842115, 0xd625089c9c4a9c94, 0x663cee1e1e781ef0, + 0x5286614343114322, 0xfc93b1c7c73bc776, 0x2be54ffcfcd7fcb3, 0x1408240404100420, + 0x08a2e351515951b2, 0xc72f2599995e99bc, 0xc4da226d6da96d4f, 0x391a650d0d340d68, + 0x35e979fafacffa83, 0x84a369dfdf5bdfb6, 0x9bfca97e7ee57ed7, 0xb44819242490243d, + 0xd776fe3b3bec3bc5, 0x3d4b9aabab96ab31, 0xd181f0cece1fce3e, 0x5522991111441188, + 0x8903838f8f068f0c, 0x6b9c044e4e254e4a, 0x517366b7b7e6b7d1, 0x60cbe0ebeb8beb0b, + 0xcc78c13c3cf03cfd, 0xbf1ffd81813e817c, 0xfe354094946a94d4, 0x0cf31cf7f7fbf7eb, + 0x676f18b9b9deb9a1, 0x5f268b13134c1398, 0x9c58512c2cb02c7d, 0xb8bb05d3d36bd3d6, + 0x5cd38ce7e7bbe76b, 0xcbdc396e6ea56e57, 0xf395aac4c437c46e, 0x0f061b03030c0318, + 0x13acdc565645568a, 0x49885e44440d441a, 0x9efea07f7fe17fdf, 0x374f88a9a99ea921, + 0x8254672a2aa82a4d, 0x6d6b0abbbbd6bbb1, 0xe29f87c1c123c146, 0x02a6f153535153a2, + 0x8ba572dcdc57dcae, 0x2716530b0b2c0b58, 0xd327019d9d4e9d9c, 0xc1d82b6c6cad6c47, + 0xf562a43131c43195, 0xb9e8f37474cd7487, 0x09f115f6f6fff6e3, 0x438c4c464605460a, + 0x2645a5acac8aac09, 0x970fb589891e893c, 0x4428b414145014a0, 0x42dfbae1e1a3e15b, + 0x4e2ca616165816b0, 0xd274f73a3ae83acd, 0xd0d2066969b9696f, 0x2d12410909240948, + 0xade0d77070dd70a7, 0x54716fb6b6e2b6d9, 0xb7bd1ed0d067d0ce, 0x7ec7d6eded93ed3b, + 0xdb85e2cccc17cc2e, 0x578468424215422a, 0xc22d2c98985a98b4, 0x0e55eda4a4aaa449, + 0x8850752828a0285d, 0x31b8865c5c6d5cda, 0x3fed6bf8f8c7f893, 0xa411c28686228644, +} + 
+var _C4 = [256]uint64{ + 0xc07830d818186018, 0x05af462623238c23, 0x7ef991b8c6c63fc6, 0x136fcdfbe8e887e8, + 0x4ca113cb87872687, 0xa9626d11b8b8dab8, 0x0805020901010401, 0x426e9e0d4f4f214f, + 0xadee6c9b3636d836, 0x590451ffa6a6a2a6, 0xdebdb90cd2d26fd2, 0xfb06f70ef5f5f3f5, + 0xef80f2967979f979, 0x5fcede306f6fa16f, 0xfcef3f6d91917e91, 0xaa07a4f852525552, + 0x27fdc04760609d60, 0x89766535bcbccabc, 0xaccd2b379b9b569b, 0x048c018a8e8e028e, + 0x71155bd2a3a3b6a3, 0x603c186c0c0c300c, 0xff8af6847b7bf17b, 0xb5e16a803535d435, + 0xe8693af51d1d741d, 0x5347ddb3e0e0a7e0, 0xf6acb321d7d77bd7, 0x5eed999cc2c22fc2, + 0x6d965c432e2eb82e, 0x627a96294b4b314b, 0xa321e15dfefedffe, 0x8216aed557574157, + 0xa8412abd15155415, 0x9fb6eee87777c177, 0xa5eb6e923737dc37, 0x7b56d79ee5e5b3e5, + 0x8cd923139f9f469f, 0xd317fd23f0f0e7f0, 0x6a7f94204a4a354a, 0x9e95a944dada4fda, + 0xfa25b0a258587d58, 0x06ca8fcfc9c903c9, 0x558d527c2929a429, 0x5022145a0a0a280a, + 0xe14f7f50b1b1feb1, 0x691a5dc9a0a0baa0, 0x7fdad6146b6bb16b, 0x5cab17d985852e85, + 0x8173673cbdbdcebd, 0xd234ba8f5d5d695d, 0x8050209010104010, 0xf303f507f4f4f7f4, + 0x16c08bddcbcb0bcb, 0xedc67cd33e3ef83e, 0x28110a2d05051405, 0x1fe6ce7867678167, + 0x7353d597e4e4b7e4, 0x25bb4e0227279c27, 0x3258827341411941, 0x2c9d0ba78b8b168b, + 0x510153f6a7a7a6a7, 0xcf94fab27d7de97d, 0xdcfb374995956e95, 0x8e9fad56d8d847d8, + 0x8b30eb70fbfbcbfb, 0x2371c1cdeeee9fee, 0xc791f8bb7c7ced7c, 0x17e3cc7166668566, + 0xa68ea77bdddd53dd, 0xb84b2eaf17175c17, 0x02468e4547470147, 0x84dc211a9e9e429e, + 0x1ec589d4caca0fca, 0x75995a582d2db42d, 0x9179632ebfbfc6bf, 0x381b0e3f07071c07, + 0x012347acadad8ead, 0xea2fb4b05a5a755a, 0x6cb51bef83833683, 0x85ff66b63333cc33, + 0x3ff2c65c63639163, 0x100a041202020802, 0x39384993aaaa92aa, 0xafa8e2de7171d971, + 0x0ecf8dc6c8c807c8, 0xc87d32d119196419, 0x7270923b49493949, 0x869aaf5fd9d943d9, + 0xc31df931f2f2eff2, 0x4b48dba8e3e3abe3, 0xe22ab6b95b5b715b, 0x34920dbc88881a88, + 0xa4c8293e9a9a529a, 0x2dbe4c0b26269826, 0x8dfa64bf3232c832, 0xe94a7d59b0b0fab0, + 0x1b6acff2e9e983e9, 0x78331e770f0f3c0f, 0xe6a6b733d5d573d5, 0x74ba1df480803a80, + 0x997c6127bebec2be, 0x26de87ebcdcd13cd, 0xbde468893434d034, 0x7a75903248483d48, + 0xab24e354ffffdbff, 0xf78ff48d7a7af57a, 0xf4ea3d6490907a90, 0xc23ebe9d5f5f615f, + 0x1da0403d20208020, 0x67d5d00f6868bd68, 0xd07234ca1a1a681a, 0x192c41b7aeae82ae, + 0xc95e757db4b4eab4, 0x9a19a8ce54544d54, 0xece53b7f93937693, 0x0daa442f22228822, + 0x07e9c86364648d64, 0xdb12ff2af1f1e3f1, 0xbfa2e6cc7373d173, 0x905a248212124812, + 0x3a5d807a40401d40, 0x4028104808082008, 0x56e89b95c3c32bc3, 0x337bc5dfecec97ec, + 0x9690ab4ddbdb4bdb, 0x611f5fc0a1a1bea1, 0x1c8307918d8d0e8d, 0xf5c97ac83d3df43d, + 0xccf1335b97976697, 0x0000000000000000, 0x36d483f9cfcf1bcf, 0x4587566e2b2bac2b, + 0x97b3ece17676c576, 0x64b019e682823282, 0xfea9b128d6d67fd6, 0xd87736c31b1b6c1b, + 0xc15b7774b5b5eeb5, 0x112943beafaf86af, 0x77dfd41d6a6ab56a, 0xba0da0ea50505d50, + 0x124c8a5745450945, 0xcb18fb38f3f3ebf3, 0x9df060ad3030c030, 0x2b74c3c4efef9bef, + 0xe5c37eda3f3ffc3f, 0x921caac755554955, 0x791059dba2a2b2a2, 0x0365c9e9eaea8fea, + 0x0fecca6a65658965, 0xb9686903babad2ba, 0x65935e4a2f2fbc2f, 0x4ee79d8ec0c027c0, + 0xbe81a160dede5fde, 0xe06c38fc1c1c701c, 0xbb2ee746fdfdd3fd, 0x52649a1f4d4d294d, + 0xe4e0397692927292, 0x8fbceafa7575c975, 0x301e0c3606061806, 0x249809ae8a8a128a, + 0xf940794bb2b2f2b2, 0x6359d185e6e6bfe6, 0x70361c7e0e0e380e, 0xf8633ee71f1f7c1f, + 0x37f7c45562629562, 0xeea3b53ad4d477d4, 0x29324d81a8a89aa8, 0xc4f4315296966296, + 0x9b3aef62f9f9c3f9, 0x66f697a3c5c533c5, 0x35b14a1025259425, 0xf220b2ab59597959, + 
0x54ae15d084842a84, 0xb7a7e4c57272d572, 0xd5dd72ec3939e439, 0x5a6198164c4c2d4c, + 0xca3bbc945e5e655e, 0xe785f09f7878fd78, 0xddd870e53838e038, 0x148605988c8c0a8c, + 0xc6b2bf17d1d163d1, 0x410b57e4a5a5aea5, 0x434dd9a1e2e2afe2, 0x2ff8c24e61619961, + 0xf1457b42b3b3f6b3, 0x15a5423421218421, 0x94d625089c9c4a9c, 0xf0663cee1e1e781e, + 0x2252866143431143, 0x76fc93b1c7c73bc7, 0xb32be54ffcfcd7fc, 0x2014082404041004, + 0xb208a2e351515951, 0xbcc72f2599995e99, 0x4fc4da226d6da96d, 0x68391a650d0d340d, + 0x8335e979fafacffa, 0xb684a369dfdf5bdf, 0xd79bfca97e7ee57e, 0x3db4481924249024, + 0xc5d776fe3b3bec3b, 0x313d4b9aabab96ab, 0x3ed181f0cece1fce, 0x8855229911114411, + 0x0c8903838f8f068f, 0x4a6b9c044e4e254e, 0xd1517366b7b7e6b7, 0x0b60cbe0ebeb8beb, + 0xfdcc78c13c3cf03c, 0x7cbf1ffd81813e81, 0xd4fe354094946a94, 0xeb0cf31cf7f7fbf7, + 0xa1676f18b9b9deb9, 0x985f268b13134c13, 0x7d9c58512c2cb02c, 0xd6b8bb05d3d36bd3, + 0x6b5cd38ce7e7bbe7, 0x57cbdc396e6ea56e, 0x6ef395aac4c437c4, 0x180f061b03030c03, + 0x8a13acdc56564556, 0x1a49885e44440d44, 0xdf9efea07f7fe17f, 0x21374f88a9a99ea9, + 0x4d8254672a2aa82a, 0xb16d6b0abbbbd6bb, 0x46e29f87c1c123c1, 0xa202a6f153535153, + 0xae8ba572dcdc57dc, 0x582716530b0b2c0b, 0x9cd327019d9d4e9d, 0x47c1d82b6c6cad6c, + 0x95f562a43131c431, 0x87b9e8f37474cd74, 0xe309f115f6f6fff6, 0x0a438c4c46460546, + 0x092645a5acac8aac, 0x3c970fb589891e89, 0xa04428b414145014, 0x5b42dfbae1e1a3e1, + 0xb04e2ca616165816, 0xcdd274f73a3ae83a, 0x6fd0d2066969b969, 0x482d124109092409, + 0xa7ade0d77070dd70, 0xd954716fb6b6e2b6, 0xceb7bd1ed0d067d0, 0x3b7ec7d6eded93ed, + 0x2edb85e2cccc17cc, 0x2a57846842421542, 0xb4c22d2c98985a98, 0x490e55eda4a4aaa4, + 0x5d8850752828a028, 0xda31b8865c5c6d5c, 0x933fed6bf8f8c7f8, 0x44a411c286862286, +} + +var _C5 = [256]uint64{ + 0x18c07830d8181860, 0x2305af462623238c, 0xc67ef991b8c6c63f, 0xe8136fcdfbe8e887, + 0x874ca113cb878726, 0xb8a9626d11b8b8da, 0x0108050209010104, 0x4f426e9e0d4f4f21, + 0x36adee6c9b3636d8, 0xa6590451ffa6a6a2, 0xd2debdb90cd2d26f, 0xf5fb06f70ef5f5f3, + 0x79ef80f2967979f9, 0x6f5fcede306f6fa1, 0x91fcef3f6d91917e, 0x52aa07a4f8525255, + 0x6027fdc04760609d, 0xbc89766535bcbcca, 0x9baccd2b379b9b56, 0x8e048c018a8e8e02, + 0xa371155bd2a3a3b6, 0x0c603c186c0c0c30, 0x7bff8af6847b7bf1, 0x35b5e16a803535d4, + 0x1de8693af51d1d74, 0xe05347ddb3e0e0a7, 0xd7f6acb321d7d77b, 0xc25eed999cc2c22f, + 0x2e6d965c432e2eb8, 0x4b627a96294b4b31, 0xfea321e15dfefedf, 0x578216aed5575741, + 0x15a8412abd151554, 0x779fb6eee87777c1, 0x37a5eb6e923737dc, 0xe57b56d79ee5e5b3, + 0x9f8cd923139f9f46, 0xf0d317fd23f0f0e7, 0x4a6a7f94204a4a35, 0xda9e95a944dada4f, + 0x58fa25b0a258587d, 0xc906ca8fcfc9c903, 0x29558d527c2929a4, 0x0a5022145a0a0a28, + 0xb1e14f7f50b1b1fe, 0xa0691a5dc9a0a0ba, 0x6b7fdad6146b6bb1, 0x855cab17d985852e, + 0xbd8173673cbdbdce, 0x5dd234ba8f5d5d69, 0x1080502090101040, 0xf4f303f507f4f4f7, + 0xcb16c08bddcbcb0b, 0x3eedc67cd33e3ef8, 0x0528110a2d050514, 0x671fe6ce78676781, + 0xe47353d597e4e4b7, 0x2725bb4e0227279c, 0x4132588273414119, 0x8b2c9d0ba78b8b16, + 0xa7510153f6a7a7a6, 0x7dcf94fab27d7de9, 0x95dcfb374995956e, 0xd88e9fad56d8d847, + 0xfb8b30eb70fbfbcb, 0xee2371c1cdeeee9f, 0x7cc791f8bb7c7ced, 0x6617e3cc71666685, + 0xdda68ea77bdddd53, 0x17b84b2eaf17175c, 0x4702468e45474701, 0x9e84dc211a9e9e42, + 0xca1ec589d4caca0f, 0x2d75995a582d2db4, 0xbf9179632ebfbfc6, 0x07381b0e3f07071c, + 0xad012347acadad8e, 0x5aea2fb4b05a5a75, 0x836cb51bef838336, 0x3385ff66b63333cc, + 0x633ff2c65c636391, 0x02100a0412020208, 0xaa39384993aaaa92, 0x71afa8e2de7171d9, + 0xc80ecf8dc6c8c807, 0x19c87d32d1191964, 0x497270923b494939, 0xd9869aaf5fd9d943, + 
0xf2c31df931f2f2ef, 0xe34b48dba8e3e3ab, 0x5be22ab6b95b5b71, 0x8834920dbc88881a, + 0x9aa4c8293e9a9a52, 0x262dbe4c0b262698, 0x328dfa64bf3232c8, 0xb0e94a7d59b0b0fa, + 0xe91b6acff2e9e983, 0x0f78331e770f0f3c, 0xd5e6a6b733d5d573, 0x8074ba1df480803a, + 0xbe997c6127bebec2, 0xcd26de87ebcdcd13, 0x34bde468893434d0, 0x487a75903248483d, + 0xffab24e354ffffdb, 0x7af78ff48d7a7af5, 0x90f4ea3d6490907a, 0x5fc23ebe9d5f5f61, + 0x201da0403d202080, 0x6867d5d00f6868bd, 0x1ad07234ca1a1a68, 0xae192c41b7aeae82, + 0xb4c95e757db4b4ea, 0x549a19a8ce54544d, 0x93ece53b7f939376, 0x220daa442f222288, + 0x6407e9c86364648d, 0xf1db12ff2af1f1e3, 0x73bfa2e6cc7373d1, 0x12905a2482121248, + 0x403a5d807a40401d, 0x0840281048080820, 0xc356e89b95c3c32b, 0xec337bc5dfecec97, + 0xdb9690ab4ddbdb4b, 0xa1611f5fc0a1a1be, 0x8d1c8307918d8d0e, 0x3df5c97ac83d3df4, + 0x97ccf1335b979766, 0x0000000000000000, 0xcf36d483f9cfcf1b, 0x2b4587566e2b2bac, + 0x7697b3ece17676c5, 0x8264b019e6828232, 0xd6fea9b128d6d67f, 0x1bd87736c31b1b6c, + 0xb5c15b7774b5b5ee, 0xaf112943beafaf86, 0x6a77dfd41d6a6ab5, 0x50ba0da0ea50505d, + 0x45124c8a57454509, 0xf3cb18fb38f3f3eb, 0x309df060ad3030c0, 0xef2b74c3c4efef9b, + 0x3fe5c37eda3f3ffc, 0x55921caac7555549, 0xa2791059dba2a2b2, 0xea0365c9e9eaea8f, + 0x650fecca6a656589, 0xbab9686903babad2, 0x2f65935e4a2f2fbc, 0xc04ee79d8ec0c027, + 0xdebe81a160dede5f, 0x1ce06c38fc1c1c70, 0xfdbb2ee746fdfdd3, 0x4d52649a1f4d4d29, + 0x92e4e03976929272, 0x758fbceafa7575c9, 0x06301e0c36060618, 0x8a249809ae8a8a12, + 0xb2f940794bb2b2f2, 0xe66359d185e6e6bf, 0x0e70361c7e0e0e38, 0x1ff8633ee71f1f7c, + 0x6237f7c455626295, 0xd4eea3b53ad4d477, 0xa829324d81a8a89a, 0x96c4f43152969662, + 0xf99b3aef62f9f9c3, 0xc566f697a3c5c533, 0x2535b14a10252594, 0x59f220b2ab595979, + 0x8454ae15d084842a, 0x72b7a7e4c57272d5, 0x39d5dd72ec3939e4, 0x4c5a6198164c4c2d, + 0x5eca3bbc945e5e65, 0x78e785f09f7878fd, 0x38ddd870e53838e0, 0x8c148605988c8c0a, + 0xd1c6b2bf17d1d163, 0xa5410b57e4a5a5ae, 0xe2434dd9a1e2e2af, 0x612ff8c24e616199, + 0xb3f1457b42b3b3f6, 0x2115a54234212184, 0x9c94d625089c9c4a, 0x1ef0663cee1e1e78, + 0x4322528661434311, 0xc776fc93b1c7c73b, 0xfcb32be54ffcfcd7, 0x0420140824040410, + 0x51b208a2e3515159, 0x99bcc72f2599995e, 0x6d4fc4da226d6da9, 0x0d68391a650d0d34, + 0xfa8335e979fafacf, 0xdfb684a369dfdf5b, 0x7ed79bfca97e7ee5, 0x243db44819242490, + 0x3bc5d776fe3b3bec, 0xab313d4b9aabab96, 0xce3ed181f0cece1f, 0x1188552299111144, + 0x8f0c8903838f8f06, 0x4e4a6b9c044e4e25, 0xb7d1517366b7b7e6, 0xeb0b60cbe0ebeb8b, + 0x3cfdcc78c13c3cf0, 0x817cbf1ffd81813e, 0x94d4fe354094946a, 0xf7eb0cf31cf7f7fb, + 0xb9a1676f18b9b9de, 0x13985f268b13134c, 0x2c7d9c58512c2cb0, 0xd3d6b8bb05d3d36b, + 0xe76b5cd38ce7e7bb, 0x6e57cbdc396e6ea5, 0xc46ef395aac4c437, 0x03180f061b03030c, + 0x568a13acdc565645, 0x441a49885e44440d, 0x7fdf9efea07f7fe1, 0xa921374f88a9a99e, + 0x2a4d8254672a2aa8, 0xbbb16d6b0abbbbd6, 0xc146e29f87c1c123, 0x53a202a6f1535351, + 0xdcae8ba572dcdc57, 0x0b582716530b0b2c, 0x9d9cd327019d9d4e, 0x6c47c1d82b6c6cad, + 0x3195f562a43131c4, 0x7487b9e8f37474cd, 0xf6e309f115f6f6ff, 0x460a438c4c464605, + 0xac092645a5acac8a, 0x893c970fb589891e, 0x14a04428b4141450, 0xe15b42dfbae1e1a3, + 0x16b04e2ca6161658, 0x3acdd274f73a3ae8, 0x696fd0d2066969b9, 0x09482d1241090924, + 0x70a7ade0d77070dd, 0xb6d954716fb6b6e2, 0xd0ceb7bd1ed0d067, 0xed3b7ec7d6eded93, + 0xcc2edb85e2cccc17, 0x422a578468424215, 0x98b4c22d2c98985a, 0xa4490e55eda4a4aa, + 0x285d8850752828a0, 0x5cda31b8865c5c6d, 0xf8933fed6bf8f8c7, 0x8644a411c2868622, +} + +var _C6 = [256]uint64{ + 0x6018c07830d81818, 0x8c2305af46262323, 0x3fc67ef991b8c6c6, 0x87e8136fcdfbe8e8, + 
0x26874ca113cb8787, 0xdab8a9626d11b8b8, 0x0401080502090101, 0x214f426e9e0d4f4f, + 0xd836adee6c9b3636, 0xa2a6590451ffa6a6, 0x6fd2debdb90cd2d2, 0xf3f5fb06f70ef5f5, + 0xf979ef80f2967979, 0xa16f5fcede306f6f, 0x7e91fcef3f6d9191, 0x5552aa07a4f85252, + 0x9d6027fdc0476060, 0xcabc89766535bcbc, 0x569baccd2b379b9b, 0x028e048c018a8e8e, + 0xb6a371155bd2a3a3, 0x300c603c186c0c0c, 0xf17bff8af6847b7b, 0xd435b5e16a803535, + 0x741de8693af51d1d, 0xa7e05347ddb3e0e0, 0x7bd7f6acb321d7d7, 0x2fc25eed999cc2c2, + 0xb82e6d965c432e2e, 0x314b627a96294b4b, 0xdffea321e15dfefe, 0x41578216aed55757, + 0x5415a8412abd1515, 0xc1779fb6eee87777, 0xdc37a5eb6e923737, 0xb3e57b56d79ee5e5, + 0x469f8cd923139f9f, 0xe7f0d317fd23f0f0, 0x354a6a7f94204a4a, 0x4fda9e95a944dada, + 0x7d58fa25b0a25858, 0x03c906ca8fcfc9c9, 0xa429558d527c2929, 0x280a5022145a0a0a, + 0xfeb1e14f7f50b1b1, 0xbaa0691a5dc9a0a0, 0xb16b7fdad6146b6b, 0x2e855cab17d98585, + 0xcebd8173673cbdbd, 0x695dd234ba8f5d5d, 0x4010805020901010, 0xf7f4f303f507f4f4, + 0x0bcb16c08bddcbcb, 0xf83eedc67cd33e3e, 0x140528110a2d0505, 0x81671fe6ce786767, + 0xb7e47353d597e4e4, 0x9c2725bb4e022727, 0x1941325882734141, 0x168b2c9d0ba78b8b, + 0xa6a7510153f6a7a7, 0xe97dcf94fab27d7d, 0x6e95dcfb37499595, 0x47d88e9fad56d8d8, + 0xcbfb8b30eb70fbfb, 0x9fee2371c1cdeeee, 0xed7cc791f8bb7c7c, 0x856617e3cc716666, + 0x53dda68ea77bdddd, 0x5c17b84b2eaf1717, 0x014702468e454747, 0x429e84dc211a9e9e, + 0x0fca1ec589d4caca, 0xb42d75995a582d2d, 0xc6bf9179632ebfbf, 0x1c07381b0e3f0707, + 0x8ead012347acadad, 0x755aea2fb4b05a5a, 0x36836cb51bef8383, 0xcc3385ff66b63333, + 0x91633ff2c65c6363, 0x0802100a04120202, 0x92aa39384993aaaa, 0xd971afa8e2de7171, + 0x07c80ecf8dc6c8c8, 0x6419c87d32d11919, 0x39497270923b4949, 0x43d9869aaf5fd9d9, + 0xeff2c31df931f2f2, 0xabe34b48dba8e3e3, 0x715be22ab6b95b5b, 0x1a8834920dbc8888, + 0x529aa4c8293e9a9a, 0x98262dbe4c0b2626, 0xc8328dfa64bf3232, 0xfab0e94a7d59b0b0, + 0x83e91b6acff2e9e9, 0x3c0f78331e770f0f, 0x73d5e6a6b733d5d5, 0x3a8074ba1df48080, + 0xc2be997c6127bebe, 0x13cd26de87ebcdcd, 0xd034bde468893434, 0x3d487a7590324848, + 0xdbffab24e354ffff, 0xf57af78ff48d7a7a, 0x7a90f4ea3d649090, 0x615fc23ebe9d5f5f, + 0x80201da0403d2020, 0xbd6867d5d00f6868, 0x681ad07234ca1a1a, 0x82ae192c41b7aeae, + 0xeab4c95e757db4b4, 0x4d549a19a8ce5454, 0x7693ece53b7f9393, 0x88220daa442f2222, + 0x8d6407e9c8636464, 0xe3f1db12ff2af1f1, 0xd173bfa2e6cc7373, 0x4812905a24821212, + 0x1d403a5d807a4040, 0x2008402810480808, 0x2bc356e89b95c3c3, 0x97ec337bc5dfecec, + 0x4bdb9690ab4ddbdb, 0xbea1611f5fc0a1a1, 0x0e8d1c8307918d8d, 0xf43df5c97ac83d3d, + 0x6697ccf1335b9797, 0x0000000000000000, 0x1bcf36d483f9cfcf, 0xac2b4587566e2b2b, + 0xc57697b3ece17676, 0x328264b019e68282, 0x7fd6fea9b128d6d6, 0x6c1bd87736c31b1b, + 0xeeb5c15b7774b5b5, 0x86af112943beafaf, 0xb56a77dfd41d6a6a, 0x5d50ba0da0ea5050, + 0x0945124c8a574545, 0xebf3cb18fb38f3f3, 0xc0309df060ad3030, 0x9bef2b74c3c4efef, + 0xfc3fe5c37eda3f3f, 0x4955921caac75555, 0xb2a2791059dba2a2, 0x8fea0365c9e9eaea, + 0x89650fecca6a6565, 0xd2bab9686903baba, 0xbc2f65935e4a2f2f, 0x27c04ee79d8ec0c0, + 0x5fdebe81a160dede, 0x701ce06c38fc1c1c, 0xd3fdbb2ee746fdfd, 0x294d52649a1f4d4d, + 0x7292e4e039769292, 0xc9758fbceafa7575, 0x1806301e0c360606, 0x128a249809ae8a8a, + 0xf2b2f940794bb2b2, 0xbfe66359d185e6e6, 0x380e70361c7e0e0e, 0x7c1ff8633ee71f1f, + 0x956237f7c4556262, 0x77d4eea3b53ad4d4, 0x9aa829324d81a8a8, 0x6296c4f431529696, + 0xc3f99b3aef62f9f9, 0x33c566f697a3c5c5, 0x942535b14a102525, 0x7959f220b2ab5959, + 0x2a8454ae15d08484, 0xd572b7a7e4c57272, 0xe439d5dd72ec3939, 0x2d4c5a6198164c4c, + 0x655eca3bbc945e5e, 
0xfd78e785f09f7878, 0xe038ddd870e53838, 0x0a8c148605988c8c, + 0x63d1c6b2bf17d1d1, 0xaea5410b57e4a5a5, 0xafe2434dd9a1e2e2, 0x99612ff8c24e6161, + 0xf6b3f1457b42b3b3, 0x842115a542342121, 0x4a9c94d625089c9c, 0x781ef0663cee1e1e, + 0x1143225286614343, 0x3bc776fc93b1c7c7, 0xd7fcb32be54ffcfc, 0x1004201408240404, + 0x5951b208a2e35151, 0x5e99bcc72f259999, 0xa96d4fc4da226d6d, 0x340d68391a650d0d, + 0xcffa8335e979fafa, 0x5bdfb684a369dfdf, 0xe57ed79bfca97e7e, 0x90243db448192424, + 0xec3bc5d776fe3b3b, 0x96ab313d4b9aabab, 0x1fce3ed181f0cece, 0x4411885522991111, + 0x068f0c8903838f8f, 0x254e4a6b9c044e4e, 0xe6b7d1517366b7b7, 0x8beb0b60cbe0ebeb, + 0xf03cfdcc78c13c3c, 0x3e817cbf1ffd8181, 0x6a94d4fe35409494, 0xfbf7eb0cf31cf7f7, + 0xdeb9a1676f18b9b9, 0x4c13985f268b1313, 0xb02c7d9c58512c2c, 0x6bd3d6b8bb05d3d3, + 0xbbe76b5cd38ce7e7, 0xa56e57cbdc396e6e, 0x37c46ef395aac4c4, 0x0c03180f061b0303, + 0x45568a13acdc5656, 0x0d441a49885e4444, 0xe17fdf9efea07f7f, 0x9ea921374f88a9a9, + 0xa82a4d8254672a2a, 0xd6bbb16d6b0abbbb, 0x23c146e29f87c1c1, 0x5153a202a6f15353, + 0x57dcae8ba572dcdc, 0x2c0b582716530b0b, 0x4e9d9cd327019d9d, 0xad6c47c1d82b6c6c, + 0xc43195f562a43131, 0xcd7487b9e8f37474, 0xfff6e309f115f6f6, 0x05460a438c4c4646, + 0x8aac092645a5acac, 0x1e893c970fb58989, 0x5014a04428b41414, 0xa3e15b42dfbae1e1, + 0x5816b04e2ca61616, 0xe83acdd274f73a3a, 0xb9696fd0d2066969, 0x2409482d12410909, + 0xdd70a7ade0d77070, 0xe2b6d954716fb6b6, 0x67d0ceb7bd1ed0d0, 0x93ed3b7ec7d6eded, + 0x17cc2edb85e2cccc, 0x15422a5784684242, 0x5a98b4c22d2c9898, 0xaaa4490e55eda4a4, + 0xa0285d8850752828, 0x6d5cda31b8865c5c, 0xc7f8933fed6bf8f8, 0x228644a411c28686, +} + +var _C7 = [256]uint64{ + 0x186018c07830d818, 0x238c2305af462623, 0xc63fc67ef991b8c6, 0xe887e8136fcdfbe8, + 0x8726874ca113cb87, 0xb8dab8a9626d11b8, 0x0104010805020901, 0x4f214f426e9e0d4f, + 0x36d836adee6c9b36, 0xa6a2a6590451ffa6, 0xd26fd2debdb90cd2, 0xf5f3f5fb06f70ef5, + 0x79f979ef80f29679, 0x6fa16f5fcede306f, 0x917e91fcef3f6d91, 0x525552aa07a4f852, + 0x609d6027fdc04760, 0xbccabc89766535bc, 0x9b569baccd2b379b, 0x8e028e048c018a8e, + 0xa3b6a371155bd2a3, 0x0c300c603c186c0c, 0x7bf17bff8af6847b, 0x35d435b5e16a8035, + 0x1d741de8693af51d, 0xe0a7e05347ddb3e0, 0xd77bd7f6acb321d7, 0xc22fc25eed999cc2, + 0x2eb82e6d965c432e, 0x4b314b627a96294b, 0xfedffea321e15dfe, 0x5741578216aed557, + 0x155415a8412abd15, 0x77c1779fb6eee877, 0x37dc37a5eb6e9237, 0xe5b3e57b56d79ee5, + 0x9f469f8cd923139f, 0xf0e7f0d317fd23f0, 0x4a354a6a7f94204a, 0xda4fda9e95a944da, + 0x587d58fa25b0a258, 0xc903c906ca8fcfc9, 0x29a429558d527c29, 0x0a280a5022145a0a, + 0xb1feb1e14f7f50b1, 0xa0baa0691a5dc9a0, 0x6bb16b7fdad6146b, 0x852e855cab17d985, + 0xbdcebd8173673cbd, 0x5d695dd234ba8f5d, 0x1040108050209010, 0xf4f7f4f303f507f4, + 0xcb0bcb16c08bddcb, 0x3ef83eedc67cd33e, 0x05140528110a2d05, 0x6781671fe6ce7867, + 0xe4b7e47353d597e4, 0x279c2725bb4e0227, 0x4119413258827341, 0x8b168b2c9d0ba78b, + 0xa7a6a7510153f6a7, 0x7de97dcf94fab27d, 0x956e95dcfb374995, 0xd847d88e9fad56d8, + 0xfbcbfb8b30eb70fb, 0xee9fee2371c1cdee, 0x7ced7cc791f8bb7c, 0x66856617e3cc7166, + 0xdd53dda68ea77bdd, 0x175c17b84b2eaf17, 0x47014702468e4547, 0x9e429e84dc211a9e, + 0xca0fca1ec589d4ca, 0x2db42d75995a582d, 0xbfc6bf9179632ebf, 0x071c07381b0e3f07, + 0xad8ead012347acad, 0x5a755aea2fb4b05a, 0x8336836cb51bef83, 0x33cc3385ff66b633, + 0x6391633ff2c65c63, 0x020802100a041202, 0xaa92aa39384993aa, 0x71d971afa8e2de71, + 0xc807c80ecf8dc6c8, 0x196419c87d32d119, 0x4939497270923b49, 0xd943d9869aaf5fd9, + 0xf2eff2c31df931f2, 0xe3abe34b48dba8e3, 0x5b715be22ab6b95b, 0x881a8834920dbc88, + 0x9a529aa4c8293e9a, 
0x2698262dbe4c0b26, 0x32c8328dfa64bf32, 0xb0fab0e94a7d59b0, + 0xe983e91b6acff2e9, 0x0f3c0f78331e770f, 0xd573d5e6a6b733d5, 0x803a8074ba1df480, + 0xbec2be997c6127be, 0xcd13cd26de87ebcd, 0x34d034bde4688934, 0x483d487a75903248, + 0xffdbffab24e354ff, 0x7af57af78ff48d7a, 0x907a90f4ea3d6490, 0x5f615fc23ebe9d5f, + 0x2080201da0403d20, 0x68bd6867d5d00f68, 0x1a681ad07234ca1a, 0xae82ae192c41b7ae, + 0xb4eab4c95e757db4, 0x544d549a19a8ce54, 0x937693ece53b7f93, 0x2288220daa442f22, + 0x648d6407e9c86364, 0xf1e3f1db12ff2af1, 0x73d173bfa2e6cc73, 0x124812905a248212, + 0x401d403a5d807a40, 0x0820084028104808, 0xc32bc356e89b95c3, 0xec97ec337bc5dfec, + 0xdb4bdb9690ab4ddb, 0xa1bea1611f5fc0a1, 0x8d0e8d1c8307918d, 0x3df43df5c97ac83d, + 0x976697ccf1335b97, 0x0000000000000000, 0xcf1bcf36d483f9cf, 0x2bac2b4587566e2b, + 0x76c57697b3ece176, 0x82328264b019e682, 0xd67fd6fea9b128d6, 0x1b6c1bd87736c31b, + 0xb5eeb5c15b7774b5, 0xaf86af112943beaf, 0x6ab56a77dfd41d6a, 0x505d50ba0da0ea50, + 0x450945124c8a5745, 0xf3ebf3cb18fb38f3, 0x30c0309df060ad30, 0xef9bef2b74c3c4ef, + 0x3ffc3fe5c37eda3f, 0x554955921caac755, 0xa2b2a2791059dba2, 0xea8fea0365c9e9ea, + 0x6589650fecca6a65, 0xbad2bab9686903ba, 0x2fbc2f65935e4a2f, 0xc027c04ee79d8ec0, + 0xde5fdebe81a160de, 0x1c701ce06c38fc1c, 0xfdd3fdbb2ee746fd, 0x4d294d52649a1f4d, + 0x927292e4e0397692, 0x75c9758fbceafa75, 0x061806301e0c3606, 0x8a128a249809ae8a, + 0xb2f2b2f940794bb2, 0xe6bfe66359d185e6, 0x0e380e70361c7e0e, 0x1f7c1ff8633ee71f, + 0x62956237f7c45562, 0xd477d4eea3b53ad4, 0xa89aa829324d81a8, 0x966296c4f4315296, + 0xf9c3f99b3aef62f9, 0xc533c566f697a3c5, 0x25942535b14a1025, 0x597959f220b2ab59, + 0x842a8454ae15d084, 0x72d572b7a7e4c572, 0x39e439d5dd72ec39, 0x4c2d4c5a6198164c, + 0x5e655eca3bbc945e, 0x78fd78e785f09f78, 0x38e038ddd870e538, 0x8c0a8c148605988c, + 0xd163d1c6b2bf17d1, 0xa5aea5410b57e4a5, 0xe2afe2434dd9a1e2, 0x6199612ff8c24e61, + 0xb3f6b3f1457b42b3, 0x21842115a5423421, 0x9c4a9c94d625089c, 0x1e781ef0663cee1e, + 0x4311432252866143, 0xc73bc776fc93b1c7, 0xfcd7fcb32be54ffc, 0x0410042014082404, + 0x515951b208a2e351, 0x995e99bcc72f2599, 0x6da96d4fc4da226d, 0x0d340d68391a650d, + 0xfacffa8335e979fa, 0xdf5bdfb684a369df, 0x7ee57ed79bfca97e, 0x2490243db4481924, + 0x3bec3bc5d776fe3b, 0xab96ab313d4b9aab, 0xce1fce3ed181f0ce, 0x1144118855229911, + 0x8f068f0c8903838f, 0x4e254e4a6b9c044e, 0xb7e6b7d1517366b7, 0xeb8beb0b60cbe0eb, + 0x3cf03cfdcc78c13c, 0x813e817cbf1ffd81, 0x946a94d4fe354094, 0xf7fbf7eb0cf31cf7, + 0xb9deb9a1676f18b9, 0x134c13985f268b13, 0x2cb02c7d9c58512c, 0xd36bd3d6b8bb05d3, + 0xe7bbe76b5cd38ce7, 0x6ea56e57cbdc396e, 0xc437c46ef395aac4, 0x030c03180f061b03, + 0x5645568a13acdc56, 0x440d441a49885e44, 0x7fe17fdf9efea07f, 0xa99ea921374f88a9, + 0x2aa82a4d8254672a, 0xbbd6bbb16d6b0abb, 0xc123c146e29f87c1, 0x535153a202a6f153, + 0xdc57dcae8ba572dc, 0x0b2c0b582716530b, 0x9d4e9d9cd327019d, 0x6cad6c47c1d82b6c, + 0x31c43195f562a431, 0x74cd7487b9e8f374, 0xf6fff6e309f115f6, 0x4605460a438c4c46, + 0xac8aac092645a5ac, 0x891e893c970fb589, 0x145014a04428b414, 0xe1a3e15b42dfbae1, + 0x165816b04e2ca616, 0x3ae83acdd274f73a, 0x69b9696fd0d20669, 0x092409482d124109, + 0x70dd70a7ade0d770, 0xb6e2b6d954716fb6, 0xd067d0ceb7bd1ed0, 0xed93ed3b7ec7d6ed, + 0xcc17cc2edb85e2cc, 0x4215422a57846842, 0x985a98b4c22d2c98, 0xa4aaa4490e55eda4, + 0x28a0285d88507528, 0x5c6d5cda31b8865c, 0xf8c7f8933fed6bf8, 0x86228644a411c286, +} + +var rc = [rounds + 1]uint64{ + 0x0000000000000000, + 0x1823c6e887b8014f, + 0x36a6d2f5796f9152, + 0x60bc9b8ea30c7b35, + 0x1de0d7c22e4bfe57, + 0x157737e59ff04ada, + 0x58c9290ab1a06b85, + 0xbd5d10f4cb3e0567, + 
0xe427418ba77d95d8, + 0xfbee7c66dd17479e, + 0xca2dbf07ad5a8333, +} diff --git a/vendor/github.com/jzelinskie/whirlpool/whirlpool.go b/vendor/github.com/jzelinskie/whirlpool/whirlpool.go new file mode 100644 index 00000000000..0c16befee7f --- /dev/null +++ b/vendor/github.com/jzelinskie/whirlpool/whirlpool.go @@ -0,0 +1,240 @@ +// Copyright 2012 Jimmy Zelinskie. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package whirlpool implements the ISO/IEC 10118-3:2004 whirlpool +// cryptographic hash. Whirlpool is defined in +// http://www.larc.usp.br/~pbarreto/WhirlpoolPage.html +package whirlpool + +import ( + "encoding/binary" + "hash" +) + +// whirlpool represents the partial evaluation of a checksum. +type whirlpool struct { + bitLength [lengthBytes]byte // Number of hashed bits. + buffer [wblockBytes]byte // Buffer of data to be hashed. + bufferBits int // Current number of bits on the buffer. + bufferPos int // Current byte location on buffer. + hash [digestBytes / 8]uint64 // Hash state. +} + +// New returns a new hash.Hash computing the whirlpool checksum. +func New() hash.Hash { + return new(whirlpool) +} + +func (w *whirlpool) Reset() { + // Cleanup the buffer. + w.buffer = [wblockBytes]byte{} + w.bufferBits = 0 + w.bufferPos = 0 + + // Cleanup the digest. + w.hash = [digestBytes / 8]uint64{} + + // Clean up the number of hashed bits. + w.bitLength = [lengthBytes]byte{} +} + +func (w *whirlpool) Size() int { + return digestBytes +} + +func (w *whirlpool) BlockSize() int { + return wblockBytes +} + +func (w *whirlpool) transform() { + var ( + K [8]uint64 // Round key. + block [8]uint64 // μ(buffer). + state [8]uint64 // Cipher state. + L [8]uint64 + ) + + // Map the buffer to a block. + for i := 0; i < 8; i++ { + b := 8 * i + block[i] = binary.BigEndian.Uint64(w.buffer[b:]) + } + + // Compute & apply K^0 to the cipher state. + for i := 0; i < 8; i++ { + K[i] = w.hash[i] + state[i] = block[i] ^ K[i] + } + + // Iterate over all the rounds. + for r := 1; r <= rounds; r++ { + // Compute K^rounds from K^(rounds-1). + for i := 0; i < 8; i++ { + L[i] = _C0[byte(K[i%8]>>56)] ^ + _C1[byte(K[(i+7)%8]>>48)] ^ + _C2[byte(K[(i+6)%8]>>40)] ^ + _C3[byte(K[(i+5)%8]>>32)] ^ + _C4[byte(K[(i+4)%8]>>24)] ^ + _C5[byte(K[(i+3)%8]>>16)] ^ + _C6[byte(K[(i+2)%8]>>8)] ^ + _C7[byte(K[(i+1)%8])] + } + L[0] ^= rc[r] + + for i := 0; i < 8; i++ { + K[i] = L[i] + } + + // Apply r-th round transformation. + for i := 0; i < 8; i++ { + L[i] = _C0[byte(state[i%8]>>56)] ^ + _C1[byte(state[(i+7)%8]>>48)] ^ + _C2[byte(state[(i+6)%8]>>40)] ^ + _C3[byte(state[(i+5)%8]>>32)] ^ + _C4[byte(state[(i+4)%8]>>24)] ^ + _C5[byte(state[(i+3)%8]>>16)] ^ + _C6[byte(state[(i+2)%8]>>8)] ^ + _C7[byte(state[(i+1)%8])] ^ + K[i%8] + } + + for i := 0; i < 8; i++ { + state[i] = L[i] + } + } + + // Apply the Miyaguchi-Preneel compression function. + for i := 0; i < 8; i++ { + w.hash[i] ^= state[i] ^ block[i] + } +} + +func (w *whirlpool) Write(source []byte) (int, error) { + var ( + sourcePos int // Index of the leftmost source. + nn int = len(source) // Num of bytes to process. + sourceBits uint64 = uint64(nn * 8) // Num of bits to process. + sourceGap uint = uint((8 - (int(sourceBits & 7))) & 7) // Space on source[sourcePos]. + bufferRem uint = uint(w.bufferBits & 7) // Occupied bits on buffer[bufferPos]. + b uint32 // Current byte. + ) + + // Tally the length of the data added. 
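+	// bitLength is a 256-bit big-endian counter of the total bits hashed;
+	// add sourceBits to it with an explicit byte-by-byte carry, least
+	// significant byte (index 31) first.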
+	for i, carry, value := 31, uint32(0), uint64(sourceBits); i >= 0 && (carry != 0 || value != 0); i-- {
+		carry += uint32(w.bitLength[i]) + (uint32(value & 0xff))
+		w.bitLength[i] = byte(carry)
+		carry >>= 8
+		value >>= 8
+	}
+
+	// Process data in chunks of 8 bits.
+	for sourceBits > 8 {
+		// Take a byte from the source.
+		b = uint32(((source[sourcePos] << sourceGap) & 0xff) |
+			((source[sourcePos+1] & 0xff) >> (8 - sourceGap)))
+
+		// Process this byte.
+		w.buffer[w.bufferPos] |= uint8(b >> bufferRem)
+		w.bufferPos++
+		w.bufferBits += int(8 - bufferRem)
+
+		if w.bufferBits == digestBits {
+			// Process this block.
+			w.transform()
+			// Reset the buffer.
+			w.bufferBits = 0
+			w.bufferPos = 0
+		}
+		w.buffer[w.bufferPos] = byte(b << (8 - bufferRem))
+		w.bufferBits += int(bufferRem)
+
+		// Proceed to remaining data.
+		sourceBits -= 8
+		sourcePos++
+	}
+
+	// 0 <= sourceBits <= 8; all leftover data is in source[sourcePos].
+	if sourceBits > 0 {
+		b = uint32((source[sourcePos] << sourceGap) & 0xff) // The bits are left-justified.
+
+		// Process the remaining bits.
+		w.buffer[w.bufferPos] |= byte(b) >> bufferRem
+	} else {
+		b = 0
+	}
+
+	if uint64(bufferRem)+sourceBits < 8 {
+		// The remaining data fits in buffer[bufferPos].
+		w.bufferBits += int(sourceBits)
+	} else {
+		// buffer[bufferPos] is full.
+		w.bufferPos++
+		w.bufferBits += 8 - int(bufferRem) // bufferBits = 8*bufferPos
+		sourceBits -= uint64(8 - bufferRem)
+
+		// Now, 0 <= sourceBits <= 8; all leftover data is in source[sourcePos].
+		if w.bufferBits == digestBits {
+			// Process this data block.
+			w.transform()
+			// Reset buffer.
+			w.bufferBits = 0
+			w.bufferPos = 0
+		}
+		w.buffer[w.bufferPos] = byte(b << (8 - bufferRem))
+		w.bufferBits += int(sourceBits)
+	}
+	return nn, nil
+}
+
+func (w *whirlpool) Sum(in []byte) []byte {
+	// Copy the whirlpool so that the caller can keep summing.
+	n := *w
+
+	// Append a 1-bit.
+	n.buffer[n.bufferPos] |= 0x80 >> (uint(n.bufferBits) & 7)
+	n.bufferPos++
+
+	// The remaining bits should be 0. Pad with 0s to be complete.
+	if n.bufferPos > wblockBytes-lengthBytes {
+		if n.bufferPos < wblockBytes {
+			for i := 0; i < wblockBytes-n.bufferPos; i++ {
+				n.buffer[n.bufferPos+i] = 0
+			}
+		}
+		// Process this data block.
+		n.transform()
+		// Reset the buffer.
+		n.bufferPos = 0
+	}
+
+	if n.bufferPos < wblockBytes-lengthBytes {
+		for i := 0; i < (wblockBytes-lengthBytes)-n.bufferPos; i++ {
+			n.buffer[n.bufferPos+i] = 0
+		}
+	}
+	n.bufferPos = wblockBytes - lengthBytes
+
+	// Append the bit length of the hashed data.
+	for i := 0; i < lengthBytes; i++ {
+		n.buffer[n.bufferPos+i] = n.bitLength[i]
+	}
+
+	// Process this data block.
+	n.transform()
+
+	// Return the final digest as []byte.
+	var digest [digestBytes]byte
+	for i := 0; i < digestBytes/8; i++ {
+		digest[i*8] = byte(n.hash[i] >> 56)
+		digest[i*8+1] = byte(n.hash[i] >> 48)
+		digest[i*8+2] = byte(n.hash[i] >> 40)
+		digest[i*8+3] = byte(n.hash[i] >> 32)
+		digest[i*8+4] = byte(n.hash[i] >> 24)
+		digest[i*8+5] = byte(n.hash[i] >> 16)
+		digest[i*8+6] = byte(n.hash[i] >> 8)
+		digest[i*8+7] = byte(n.hash[i])
+	}
+
+	return append(in, digest[:digestBytes]...)
+}
diff --git a/vendor/github.com/mattn/go-ieproxy/.gitignore b/vendor/github.com/mattn/go-ieproxy/.gitignore
new file mode 100644
index 00000000000..bc8a670e021
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/.gitignore
@@ -0,0 +1 @@
+.idea/*
\ No newline at end of file
diff --git a/vendor/github.com/mattn/go-ieproxy/LICENSE b/vendor/github.com/mattn/go-ieproxy/LICENSE
new file mode 100644
index 00000000000..7b7c0f855af
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/LICENSE
@@ -0,0 +1,23 @@
+MIT License
+
+Copyright (c) 2014 mattn
+Copyright (c) 2017 oliverpool
+Copyright (c) 2019 Adele Reed
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-ieproxy/README.md b/vendor/github.com/mattn/go-ieproxy/README.md
new file mode 100644
index 00000000000..3e3b4759cf7
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/README.md
@@ -0,0 +1,51 @@
+# ieproxy
+
+Go package to detect the proxy settings on Windows and macOS.
+
+On Windows, the package first attempts to read the settings via the [`WinHttpGetIEProxyConfigForCurrentUser` DLL call](https://docs.microsoft.com/en-us/windows/desktop/api/winhttp/nf-winhttp-winhttpgetieproxyconfigforcurrentuser), and falls back to the registry (`CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Internet Settings`) if that call fails.
+
+On macOS, the settings are read via the [`CFNetworkCopySystemProxySettings` function of CFNetwork](https://developer.apple.com/documentation/cfnetwork/1426754-cfnetworkcopysystemproxysettings?language=objc).
+
+For more information, take a look at the [documentation](https://godoc.org/github.com/mattn/go-ieproxy).
+
+## Methods
+
+You can either obtain a `net/http` compatible proxy function using `ieproxy.GetProxyFunc()`, set environment variables using `ieproxy.OverrideEnvWithStaticProxy()` (though no automatic configuration is available this way), or obtain the proxy settings via `ieproxy.GetConf()`.
+
+| Method                                 | Supported configuration options               |
+|----------------------------------------|-----------------------------------------------|
+| `ieproxy.GetProxyFunc()`               | Static, specified script, and fully automatic |
+| `ieproxy.OverrideEnvWithStaticProxy()` | Static                                        |
+| `ieproxy.GetConf()`                    | Depends on how you use it                     |
+
+## Examples
+
+### Using GetProxyFunc():
+
+```go
+func init() {
+	http.DefaultTransport.(*http.Transport).Proxy = ieproxy.GetProxyFunc()
+}
+```
+
+GetProxyFunc acts as a middleman between `net/http` and `mattn/go-ieproxy`, selecting the correct proxy configuration based on the details supplied in the config.
+
+### Using OverrideEnvWithStaticProxy():
+
+```go
+func init() {
+	ieproxy.OverrideEnvWithStaticProxy()
+	http.DefaultTransport.(*http.Transport).Proxy = http.ProxyFromEnvironment
+}
+```
+
+OverrideEnvWithStaticProxy overrides the relevant environment variables (`HTTP_PROXY`, `HTTPS_PROXY`, `NO_PROXY`) with the **static, manually configured** proxy details typically found in the registry.
+
+### Using GetConf():
+
+```go
+func main() {
+	conf := ieproxy.GetConf()
+	// Handle proxies however you want.
+}
+```
diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy.go b/vendor/github.com/mattn/go-ieproxy/ieproxy.go
new file mode 100644
index 00000000000..0b5460bb828
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/ieproxy.go
@@ -0,0 +1,56 @@
+// Package ieproxy is a utility to retrieve the proxy parameters (especially of Internet Explorer on Windows).
+//
+// On Windows, it gathers the parameters from the registry (regedit); on other platforms it uses environment variables.
+package ieproxy
+
+import "os"
+
+// ProxyConf gathers the configuration for proxy
+type ProxyConf struct {
+	Static    StaticProxyConf // static configuration
+	Automatic ProxyScriptConf // script configuration
+}
+
+// StaticProxyConf contains the configuration for static proxy
+type StaticProxyConf struct {
+	// Is the proxy active?
+	Active bool
+	// Proxy address for each scheme (http, https)
+	// "" (empty string) is the fallback proxy
+	Protocols map[string]string
+	// Addresses not to be browsed via the proxy (comma-separated, linux-like)
+	NoProxy string
+}
+
+// ProxyScriptConf contains the configuration for automatic proxy
+type ProxyScriptConf struct {
+	// Is the proxy active?
+	Active bool
+	// PreConfiguredURL of the .pac file.
+	// If this is empty and Active is true, auto-configuration should be assumed.
+	PreConfiguredURL string
+}
+
+// GetConf retrieves the proxy configuration from the system's native settings
+func GetConf() ProxyConf {
+	return getConf()
+}
+
+// ReloadConf reloads the proxy configuration
+func ReloadConf() ProxyConf {
+	return reloadConf()
+}
+
+// OverrideEnvWithStaticProxy writes new values to the
+// `http_proxy`, `https_proxy` and `no_proxy` environment variables.
+// The values are taken from the Windows Regedit (should be called in an `init()` function - see example)
+func OverrideEnvWithStaticProxy() {
+	overrideEnvWithStaticProxy(GetConf(), os.Setenv)
+}
+
+// FindProxyForURL computes the proxy for a given URL according to the pac file
+func (psc *ProxyScriptConf) FindProxyForURL(URL string) string {
+	return psc.findProxyForURL(URL)
+}
+
+type envSetter func(string, string) error
diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go
new file mode 100644
index 00000000000..5d53555708b
--- /dev/null
+++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_darwin.go
@@ -0,0 +1,123 @@
+package ieproxy
+
+/*
+#cgo LDFLAGS: -framework CoreFoundation
+#cgo LDFLAGS: -framework CFNetwork
+#include <CoreFoundation/CoreFoundation.h>
+#include <CFNetwork/CFNetwork.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"unsafe"
+)
+
+var once sync.Once
+var darwinProxyConf ProxyConf
+
+// getConf retrieves the proxy configuration from the macOS system proxy settings
+func getConf() ProxyConf {
+	once.Do(writeConf)
+	return darwinProxyConf
+}
+
+// reloadConf forces a reload of the proxy configuration.
+func reloadConf() ProxyConf {
+	writeConf()
+	return getConf()
+}
+
+func cfStringGetGoString(cfStr C.CFStringRef) string {
+	retCString := (*C.char)(C.calloc(C.ulong(uint(128)), 1))
+	defer C.free(unsafe.Pointer(retCString))
+
+	C.CFStringGetCString(cfStr, retCString, C.long(128), C.kCFStringEncodingUTF8)
+	return C.GoString(retCString)
+}
+
+func cfNumberGetGoInt(cfNum C.CFNumberRef) int {
+	ret := 0
+	C.CFNumberGetValue(cfNum, C.kCFNumberIntType, unsafe.Pointer(&ret))
+	return ret
+}
+
+func cfArrayGetGoStrings(cfArray C.CFArrayRef) []string {
+	var ret []string
+	for i := 0; i < int(C.CFArrayGetCount(cfArray)); i++ {
+		cfStr := C.CFStringRef(C.CFArrayGetValueAtIndex(cfArray, C.long(i)))
+		if unsafe.Pointer(cfStr) != C.NULL {
+			ret = append(ret, cfStringGetGoString(cfStr))
+		}
+	}
+	return ret
+}
+
+func writeConf() {
+	cfDictProxy := C.CFDictionaryRef(C.CFNetworkCopySystemProxySettings())
+	defer C.CFRelease(C.CFTypeRef(cfDictProxy))
+	darwinProxyConf = ProxyConf{}
+
+	cfNumHttpEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPEnable)))
+	if unsafe.Pointer(cfNumHttpEnable) != C.NULL && cfNumberGetGoInt(cfNumHttpEnable) > 0 {
+		darwinProxyConf.Static.Active = true
+		if darwinProxyConf.Static.Protocols == nil {
+			darwinProxyConf.Static.Protocols = make(map[string]string)
+		}
+		httpHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPProxy)))
+		httpPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPPort)))
+
+		httpProxy := fmt.Sprintf("%s:%d", cfStringGetGoString(httpHost), cfNumberGetGoInt(httpPort))
+		darwinProxyConf.Static.Protocols["http"] = httpProxy
+	}
+
+	cfNumHttpsEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSEnable)))
+	if unsafe.Pointer(cfNumHttpsEnable) != C.NULL && cfNumberGetGoInt(cfNumHttpsEnable) > 0 {
+		darwinProxyConf.Static.Active = true
+		if darwinProxyConf.Static.Protocols == nil {
+			darwinProxyConf.Static.Protocols = make(map[string]string)
+		}
+		httpsHost := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSProxy)))
+		httpsPort := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesHTTPSPort)))
+
+		httpProxy := fmt.Sprintf("%s:%d", cfStringGetGoString(httpsHost),
cfNumberGetGoInt(httpsPort)) + darwinProxyConf.Static.Protocols["https"] = httpProxy + } + + if darwinProxyConf.Static.Active { + cfArrayExceptionList := C.CFArrayRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesExceptionsList))) + if unsafe.Pointer(cfArrayExceptionList) != C.NULL { + exceptionList := cfArrayGetGoStrings(cfArrayExceptionList) + darwinProxyConf.Static.NoProxy = strings.Join(exceptionList, ",") + } + } + + cfNumPacEnable := C.CFNumberRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigEnable))) + if unsafe.Pointer(cfNumPacEnable) != C.NULL && cfNumberGetGoInt(cfNumPacEnable) > 0 { + cfStringPac := C.CFStringRef(C.CFDictionaryGetValue(cfDictProxy, unsafe.Pointer(C.kCFNetworkProxiesProxyAutoConfigURLString))) + if unsafe.Pointer(cfStringPac) != C.NULL { + pac := cfStringGetGoString(cfStringPac) + darwinProxyConf.Automatic.PreConfiguredURL = pac + darwinProxyConf.Automatic.Active = true + } + } +} + +// OverrideEnvWithStaticProxy writes new values to the +// http_proxy, https_proxy and no_proxy environment variables. +// The values are taken from the MacOS System Preferences. +func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) { + if conf.Static.Active { + for _, scheme := range []string{"http", "https"} { + url := conf.Static.Protocols[scheme] + if url != "" { + setenv(scheme+"_proxy", url) + } + } + if conf.Static.NoProxy != "" { + setenv("no_proxy", conf.Static.NoProxy) + } + } +} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go new file mode 100644 index 00000000000..c352546e23c --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_unix.go @@ -0,0 +1,15 @@ +//go:build !windows && (!darwin || !cgo) +// +build !windows,!darwin !cgo + +package ieproxy + +func getConf() ProxyConf { + return ProxyConf{} +} + +func reloadConf() ProxyConf { + return getConf() +} + +func overrideEnvWithStaticProxy(pc ProxyConf, setenv envSetter) { +} diff --git a/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go new file mode 100644 index 00000000000..7fd375017f6 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/ieproxy_windows.go @@ -0,0 +1,219 @@ +package ieproxy + +import ( + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/windows/registry" +) + +type regeditValues struct { + ProxyServer string + ProxyOverride string + ProxyEnable uint64 + AutoConfigURL string +} + +var once sync.Once +var windowsProxyConf ProxyConf + +// GetConf retrieves the proxy configuration from the Windows Regedit +func getConf() ProxyConf { + once.Do(writeConf) + return windowsProxyConf +} + +// reloadConf forces a reload of the proxy configuration from the Windows registry +func reloadConf() ProxyConf { + writeConf() + return getConf() +} + +func writeConf() { + proxy := "" + proxyByPass := "" + autoConfigUrl := "" + autoDetect := false + + // Try from IE first. + if ieCfg, err := getUserConfigFromWindowsSyscall(); err == nil { + defer globalFreeWrapper(ieCfg.lpszProxy) + defer globalFreeWrapper(ieCfg.lpszProxyBypass) + defer globalFreeWrapper(ieCfg.lpszAutoConfigUrl) + + proxy = StringFromUTF16Ptr(ieCfg.lpszProxy) + proxyByPass = StringFromUTF16Ptr(ieCfg.lpszProxyBypass) + autoConfigUrl = StringFromUTF16Ptr(ieCfg.lpszAutoConfigUrl) + autoDetect = ieCfg.fAutoDetect + } + + if proxy == "" && !autoDetect { + // Try WinHTTP default proxy. 
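+		// WinHttpGetDefaultProxyConfiguration returns the machine-wide WinHTTP
+		// proxy (typically configured with `netsh winhttp set proxy`), which
+		// applies when no per-user IE configuration was found above.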
+ if defaultCfg, err := getDefaultProxyConfiguration(); err == nil { + defer globalFreeWrapper(defaultCfg.lpszProxy) + defer globalFreeWrapper(defaultCfg.lpszProxyBypass) + + // Always set both of these (they are a pair, it doesn't make sense to set one here and keep the value of the other from above) + proxy = StringFromUTF16Ptr(defaultCfg.lpszProxy) + proxyByPass = StringFromUTF16Ptr(defaultCfg.lpszProxyBypass) + } + } + + if proxy == "" && !autoDetect { + // Fall back to IE registry or manual detection if nothing is found there.. + regedit, _ := readRegedit() // If the syscall fails, backup to manual detection. + windowsProxyConf = parseRegedit(regedit) + return + } + + // Setting the proxy settings. + windowsProxyConf = ProxyConf{ + Static: StaticProxyConf{ + Active: len(proxy) > 0, + }, + Automatic: ProxyScriptConf{ + Active: len(autoConfigUrl) > 0 || autoDetect, + }, + } + + if windowsProxyConf.Static.Active { + protocol := make(map[string]string) + for _, s := range strings.Split(proxy, ";") { + s = strings.TrimSpace(s) + if s == "" { + continue + } + pair := strings.SplitN(s, "=", 2) + if len(pair) > 1 { + protocol[pair[0]] = pair[1] + } else { + protocol[""] = pair[0] + } + } + + windowsProxyConf.Static.Protocols = protocol + if len(proxyByPass) > 0 { + windowsProxyConf.Static.NoProxy = strings.Replace(proxyByPass, ";", ",", -1) + } + } + + if windowsProxyConf.Automatic.Active { + windowsProxyConf.Automatic.PreConfiguredURL = autoConfigUrl + } +} + +func getUserConfigFromWindowsSyscall() (*tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG, error) { + if err := winHttpGetIEProxyConfigForCurrentUser.Find(); err != nil { + return nil, err + } + p := new(tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG) + r, _, err := winHttpGetIEProxyConfigForCurrentUser.Call(uintptr(unsafe.Pointer(p))) + if rTrue(r) { + return p, nil + } + return nil, err +} + +func getDefaultProxyConfiguration() (*tWINHTTP_PROXY_INFO, error) { + pInfo := new(tWINHTTP_PROXY_INFO) + if err := winHttpGetDefaultProxyConfiguration.Find(); err != nil { + return nil, err + } + r, _, err := winHttpGetDefaultProxyConfiguration.Call(uintptr(unsafe.Pointer(pInfo))) + if rTrue(r) { + return pInfo, nil + } + return nil, err +} + +// OverrideEnvWithStaticProxy writes new values to the +// http_proxy, https_proxy and no_proxy environment variables. 
+// The values are taken from the Windows Regedit (should be called in init() function) +func overrideEnvWithStaticProxy(conf ProxyConf, setenv envSetter) { + if conf.Static.Active { + for _, scheme := range []string{"http", "https"} { + url := mapFallback(scheme, "", conf.Static.Protocols) + setenv(scheme+"_proxy", url) + } + if conf.Static.NoProxy != "" { + setenv("no_proxy", conf.Static.NoProxy) + } + } +} + +func parseRegedit(regedit regeditValues) ProxyConf { + protocol := make(map[string]string) + for _, s := range strings.Split(regedit.ProxyServer, ";") { + if s == "" { + continue + } + pair := strings.SplitN(s, "=", 2) + if len(pair) > 1 { + protocol[pair[0]] = pair[1] + } else { + protocol[""] = pair[0] + } + } + + return ProxyConf{ + Static: StaticProxyConf{ + Active: regedit.ProxyEnable > 0, + Protocols: protocol, + NoProxy: strings.Replace(regedit.ProxyOverride, ";", ",", -1), // to match linux style + }, + Automatic: ProxyScriptConf{ + Active: regedit.AutoConfigURL != "", + PreConfiguredURL: regedit.AutoConfigURL, + }, + } +} + +func readRegedit() (values regeditValues, err error) { + var proxySettingsPerUser uint64 = 1 // 1 is the default value to consider current user + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `Software\Policies\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) + if err == nil { + //We had used the below variable tempPrxUsrSettings, because the Golang method GetIntegerValue + //sets the value to zero even it fails. + tempPrxUsrSettings, _, err := k.GetIntegerValue("ProxySettingsPerUser") + if err == nil { + //consider the value of tempPrxUsrSettings if it is a success + proxySettingsPerUser = tempPrxUsrSettings + } + k.Close() + } + + var hkey registry.Key + if proxySettingsPerUser == 0 { + hkey = registry.LOCAL_MACHINE + } else { + hkey = registry.CURRENT_USER + } + + k, err = registry.OpenKey(hkey, `Software\Microsoft\Windows\CurrentVersion\Internet Settings`, registry.QUERY_VALUE) + if err != nil { + return + } + defer k.Close() + + values.ProxyServer, _, err = k.GetStringValue("ProxyServer") + if err != nil && err != registry.ErrNotExist { + return + } + values.ProxyOverride, _, err = k.GetStringValue("ProxyOverride") + if err != nil && err != registry.ErrNotExist { + return + } + + values.ProxyEnable, _, err = k.GetIntegerValue("ProxyEnable") + if err != nil && err != registry.ErrNotExist { + return + } + + values.AutoConfigURL, _, err = k.GetStringValue("AutoConfigURL") + if err != nil && err != registry.ErrNotExist { + return + } + err = nil + return +} diff --git a/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go new file mode 100644 index 00000000000..30ebbd22a07 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/kernel32_data_windows.go @@ -0,0 +1,19 @@ +package ieproxy + +import ( + "golang.org/x/sys/windows" + "unsafe" +) + +var kernel32 = windows.NewLazySystemDLL("kernel32.dll") +var globalFree = kernel32.NewProc("GlobalFree") + +func globalFreeWrapper(ptr *uint16) { + if ptr != nil { + _, _, _ = globalFree.Call(uintptr(unsafe.Pointer(ptr))) + } +} + +func rTrue(r uintptr) bool { + return r == 1 +} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_darwin.go b/vendor/github.com/mattn/go-ieproxy/pac_darwin.go new file mode 100644 index 00000000000..a8bf90e94d7 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/pac_darwin.go @@ -0,0 +1,141 @@ +package ieproxy + +/* +#cgo LDFLAGS: -framework CoreFoundation +#cgo LDFLAGS: -framework 
CFNetwork
+#include <CFNetwork/CFProxySupport.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#define STR_LEN 128
+
+void proxyAutoConfCallback(void* client, CFArrayRef proxies, CFErrorRef error) {
+	CFTypeRef* result_ptr = (CFTypeRef*)client;
+	if (error != NULL) {
+		*result_ptr = CFRetain(error);
+	} else {
+		*result_ptr = CFRetain(proxies);
+	}
+	CFRunLoopStop(CFRunLoopGetCurrent());
+}
+
+int intCFNumber(CFNumberRef num) {
+	int ret;
+	CFNumberGetValue(num, kCFNumberIntType, &ret);
+	return ret;
+}
+
+char* _getProxyUrlFromPac(char* pac, char* reqCs) {
+	char* retCString = (char*)calloc(STR_LEN, sizeof(char));
+
+	CFStringRef reqStr = CFStringCreateWithCString(NULL, reqCs, kCFStringEncodingUTF8);
+	CFStringRef pacStr = CFStringCreateWithCString(NULL, pac, kCFStringEncodingUTF8);
+	CFURLRef pacUrl = CFURLCreateWithString(NULL, pacStr, NULL);
+	CFURLRef reqUrl = CFURLCreateWithString(NULL, reqStr, NULL);
+
+	CFTypeRef result = NULL;
+	CFStreamClientContext context = { 0, &result, NULL, NULL, NULL };
+	CFRunLoopSourceRef runloop_src = CFNetworkExecuteProxyAutoConfigurationURL(pacUrl, reqUrl, proxyAutoConfCallback, &context);
+
+	if (runloop_src) {
+		const CFStringRef private_runloop_mode = CFSTR("go-ieproxy");
+		CFRunLoopAddSource(CFRunLoopGetCurrent(), runloop_src, private_runloop_mode);
+		CFRunLoopRunInMode(private_runloop_mode, DBL_MAX, false);
+		CFRunLoopRemoveSource(CFRunLoopGetCurrent(), runloop_src, kCFRunLoopCommonModes);
+
+		if (CFGetTypeID(result) == CFArrayGetTypeID()) {
+			CFArrayRef resultArray = (CFTypeRef)result;
+			if (CFArrayGetCount(resultArray) > 0) {
+				CFDictionaryRef pxy = (CFDictionaryRef)CFArrayGetValueAtIndex(resultArray, 0);
+				CFStringRef pxyType = CFDictionaryGetValue(pxy, kCFProxyTypeKey);
+
+				if (CFEqual(pxyType, kCFProxyTypeNone)) {
+					// noop
+				}
+
+				if (CFEqual(pxyType, kCFProxyTypeHTTP)) {
+					CFStringRef host = (CFStringRef)CFDictionaryGetValue(pxy, kCFProxyHostNameKey);
+					CFNumberRef port = (CFNumberRef)CFDictionaryGetValue(pxy, kCFProxyPortNumberKey);
+
+					char host_str[STR_LEN - 16];
+					CFStringGetCString(host, host_str, STR_LEN - 16, kCFStringEncodingUTF8);
+
+					int port_int = 80;
+					if (port) {
+						CFNumberGetValue(port, kCFNumberIntType, &port_int);
+					}
+
+					sprintf(retCString, "%s:%d", host_str, port_int);
+				}
+			}
+		} else {
+			// error
+		}
+	}
+
+	CFRelease(result);
+	CFRelease(reqStr);
+	CFRelease(reqUrl);
+	CFRelease(pacStr);
+	CFRelease(pacUrl);
+	return retCString;
+}
+
+char* _getPacUrl() {
+	char* retCString = (char*)calloc(STR_LEN, sizeof(char));
+	CFDictionaryRef proxyDict = CFNetworkCopySystemProxySettings();
+	CFNumberRef pacEnable = (CFNumberRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigEnable);
+
+	if (pacEnable && intCFNumber(pacEnable)) {
+		CFStringRef pacUrlStr = (CFStringRef)CFDictionaryGetValue(proxyDict, kCFNetworkProxiesProxyAutoConfigURLString);
+		if (pacUrlStr) {
+			CFStringGetCString(pacUrlStr, retCString, STR_LEN, kCFStringEncodingUTF8);
+		}
+	}
+
+	CFRelease(proxyDict);
+	return retCString;
+}
+
+*/
+import "C"
+import (
+	"net/url"
+	"unsafe"
+)
+
+func (psc *ProxyScriptConf) findProxyForURL(URL string) string {
+	if !psc.Active {
+		return ""
+	}
+	proxy := getProxyForURL(psc.PreConfiguredURL, URL)
+	return proxy
+}
+
+func getProxyForURL(pacFileURL, targetURL string) string {
+	if pacFileURL == "" {
+		pacFileURL = getPacUrl()
+	}
+	if pacFileURL == "" {
+		return ""
+	}
+	if u, err := url.Parse(pacFileURL); err != nil || u.Scheme == "" {
+		return ""
+	}
+
+	csUrl := C.CString(targetURL)
+	csPac := C.CString(pacFileURL)
+	csRet := C._getProxyUrlFromPac(csPac, csUrl)
+
+	defer
C.free(unsafe.Pointer(csUrl)) + defer C.free(unsafe.Pointer(csPac)) + defer C.free(unsafe.Pointer(csRet)) + + return C.GoString(csRet) +} + +func getPacUrl() string { + csRet := C._getPacUrl() + + defer C.free(unsafe.Pointer(csRet)) + return C.GoString(csRet) +} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_unix.go b/vendor/github.com/mattn/go-ieproxy/pac_unix.go new file mode 100644 index 00000000000..d4613cff695 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/pac_unix.go @@ -0,0 +1,8 @@ +//go:build !windows && (!darwin || !cgo) +// +build !windows,!darwin !cgo + +package ieproxy + +func (psc *ProxyScriptConf) findProxyForURL(URL string) string { + return "" +} diff --git a/vendor/github.com/mattn/go-ieproxy/pac_windows.go b/vendor/github.com/mattn/go-ieproxy/pac_windows.go new file mode 100644 index 00000000000..6a2ee677855 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/pac_windows.go @@ -0,0 +1,72 @@ +package ieproxy + +import ( + "strings" + "syscall" + "unsafe" +) + +func (psc *ProxyScriptConf) findProxyForURL(URL string) string { + if !psc.Active { + return "" + } + proxy, _ := getProxyForURL(psc.PreConfiguredURL, URL) + i := strings.Index(proxy, ";") + if i >= 0 { + return proxy[:i] + } + return proxy +} + +func getProxyForURL(pacfileURL, URL string) (string, error) { + pacfileURLPtr, err := syscall.UTF16PtrFromString(pacfileURL) + if err != nil { + return "", err + } + URLPtr, err := syscall.UTF16PtrFromString(URL) + if err != nil { + return "", err + } + + handle, _, err := winHttpOpen.Call(0, 0, 0, 0, 0) + if handle == 0 { + return "", err + } + defer winHttpCloseHandle.Call(handle) + + dwFlags := fWINHTTP_AUTOPROXY_CONFIG_URL + dwAutoDetectFlags := autoDetectFlag(0) + pfURLptr := pacfileURLPtr + + if pacfileURL == "" { + dwFlags = fWINHTTP_AUTOPROXY_AUTO_DETECT + dwAutoDetectFlags = fWINHTTP_AUTO_DETECT_TYPE_DNS_A | fWINHTTP_AUTO_DETECT_TYPE_DHCP + pfURLptr = nil + } + + options := tWINHTTP_AUTOPROXY_OPTIONS{ + dwFlags: dwFlags, // adding cache might cause issues: https://github.com/mattn/go-ieproxy/issues/6 + dwAutoDetectFlags: dwAutoDetectFlags, + lpszAutoConfigUrl: pfURLptr, + lpvReserved: nil, + dwReserved: 0, + fAutoLogonIfChallenged: true, // may not be optimal https://msdn.microsoft.com/en-us/library/windows/desktop/aa383153(v=vs.85).aspx + } // lpszProxyBypass isn't used as this only executes in cases where there (may) be a pac file (autodetect can fail), where lpszProxyBypass couldn't be returned. + // in the case that autodetect fails and no pre-specified pacfile is present, no proxy is returned. 
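+	// WinHttpGetProxyForUrl fetches and evaluates the PAC script (or performs
+	// WPAD auto-detection, depending on the options above) and writes the
+	// proxies chosen for this URL into a WINHTTP_PROXY_INFO structure.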
+ + info := new(tWINHTTP_PROXY_INFO) + + ret, _, err := winHttpGetProxyForURL.Call( + handle, + uintptr(unsafe.Pointer(URLPtr)), + uintptr(unsafe.Pointer(&options)), + uintptr(unsafe.Pointer(info)), + ) + if ret > 0 { + err = nil + } + + defer globalFreeWrapper(info.lpszProxyBypass) + defer globalFreeWrapper(info.lpszProxy) + return StringFromUTF16Ptr(info.lpszProxy), err +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go new file mode 100644 index 00000000000..b2ff9147b92 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman.go @@ -0,0 +1,11 @@ +package ieproxy + +import ( + "net/http" + "net/url" +) + +// GetProxyFunc is a forwarder for the OS-Exclusive proxyMiddleman_os.go files +func GetProxyFunc() func(*http.Request) (*url.URL, error) { + return proxyMiddleman() +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go new file mode 100644 index 00000000000..a89948dca65 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_darwin.go @@ -0,0 +1,43 @@ +package ieproxy + +import ( + "net/http" + "net/url" + + "golang.org/x/net/http/httpproxy" +) + +func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { + // Get the proxy configuration + conf := GetConf() + envCfg := httpproxy.FromEnvironment() + + if envCfg.HTTPProxy != "" || envCfg.HTTPSProxy != "" { + // If the user manually specifies environment variables, prefer those over the MacOS config. + return http.ProxyFromEnvironment + } + + return func(req *http.Request) (i *url.URL, e error) { + if conf.Automatic.Active { + host := conf.Automatic.FindProxyForURL(req.URL.String()) + if host != "" { + return &url.URL{Host: host}, nil + } + } + if conf.Static.Active { + return staticProxy(conf, req) + } + // Should return no proxy; fallthrough. + return http.ProxyFromEnvironment(req) + } +} + +func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { + // If static proxy obtaining is specified + proxy := httpproxy.Config{ + HTTPSProxy: conf.Static.Protocols["https"], + HTTPProxy: conf.Static.Protocols["http"], + NoProxy: conf.Static.NoProxy, + } + return proxy.ProxyFunc()(req.URL) +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go new file mode 100644 index 00000000000..fe227a12ee2 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_unix.go @@ -0,0 +1,14 @@ +//go:build !windows && !darwin +// +build !windows,!darwin + +package ieproxy + +import ( + "net/http" + "net/url" +) + +func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { + // Fallthrough to ProxyFromEnvironment on all other OSes. 
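+	// (http.ProxyFromEnvironment honours the HTTP_PROXY, HTTPS_PROXY and
+	// NO_PROXY environment variables, or their lowercase equivalents.)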
+ return http.ProxyFromEnvironment +} diff --git a/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go new file mode 100644 index 00000000000..7d314dbf9ca --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/proxy_middleman_windows.go @@ -0,0 +1,52 @@ +package ieproxy + +import ( + "net/http" + "net/url" + + "golang.org/x/net/http/httpproxy" +) + +func proxyMiddleman() func(req *http.Request) (i *url.URL, e error) { + // Get the proxy configuration + conf := GetConf() + envcfg := httpproxy.FromEnvironment() + + if envcfg.HTTPProxy != "" || envcfg.HTTPSProxy != "" { + // If the user manually specifies environment variables, prefer those over the Windows config. + return http.ProxyFromEnvironment + } + + return func(req *http.Request) (i *url.URL, e error) { + if conf.Automatic.Active { + host := conf.Automatic.FindProxyForURL(req.URL.String()) + if host != "" { + return &url.URL{Host: host}, nil + } + } + if conf.Static.Active { + return staticProxy(conf, req) + } + // Should return no proxy; fallthrough. + return http.ProxyFromEnvironment(req) + } +} + +func staticProxy(conf ProxyConf, req *http.Request) (i *url.URL, e error) { + // If static proxy obtaining is specified + prox := httpproxy.Config{ + HTTPSProxy: mapFallback("https", "", conf.Static.Protocols), + HTTPProxy: mapFallback("http", "", conf.Static.Protocols), + NoProxy: conf.Static.NoProxy, + } + return prox.ProxyFunc()(req.URL) +} + +// Return oKey or fbKey if oKey doesn't exist in the map. +func mapFallback(oKey, fbKey string, m map[string]string) string { + if v, ok := m[oKey]; ok { + return v + } else { + return m[fbKey] + } +} diff --git a/vendor/github.com/mattn/go-ieproxy/utils.go b/vendor/github.com/mattn/go-ieproxy/utils.go new file mode 100644 index 00000000000..353b231120a --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/utils.go @@ -0,0 +1,23 @@ +package ieproxy + +import ( + "unicode/utf16" + "unsafe" +) + +// StringFromUTF16Ptr converts a *uint16 C string to a Go String +func StringFromUTF16Ptr(s *uint16) string { + if s == nil { + return "" + } + + p := (*[1<<30 - 1]uint16)(unsafe.Pointer(s)) + + // find the string length + sz := 0 + for p[sz] != 0 { + sz++ + } + + return string(utf16.Decode(p[:sz:sz])) +} diff --git a/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go new file mode 100644 index 00000000000..4d3b1677805 --- /dev/null +++ b/vendor/github.com/mattn/go-ieproxy/winhttp_data_windows.go @@ -0,0 +1,51 @@ +package ieproxy + +import "golang.org/x/sys/windows" + +var winHttp = windows.NewLazySystemDLL("winhttp.dll") +var winHttpGetProxyForURL = winHttp.NewProc("WinHttpGetProxyForUrl") +var winHttpOpen = winHttp.NewProc("WinHttpOpen") +var winHttpCloseHandle = winHttp.NewProc("WinHttpCloseHandle") +var winHttpGetIEProxyConfigForCurrentUser = winHttp.NewProc("WinHttpGetIEProxyConfigForCurrentUser") +var winHttpGetDefaultProxyConfiguration = winHttp.NewProc("WinHttpGetDefaultProxyConfiguration") + +type tWINHTTP_AUTOPROXY_OPTIONS struct { + dwFlags autoProxyFlag + dwAutoDetectFlags autoDetectFlag + lpszAutoConfigUrl *uint16 + lpvReserved *uint16 + dwReserved uint32 + fAutoLogonIfChallenged bool +} +type autoProxyFlag uint32 + +const ( + fWINHTTP_AUTOPROXY_AUTO_DETECT = autoProxyFlag(0x00000001) + fWINHTTP_AUTOPROXY_CONFIG_URL = autoProxyFlag(0x00000002) + fWINHTTP_AUTOPROXY_NO_CACHE_CLIENT = autoProxyFlag(0x00080000) + fWINHTTP_AUTOPROXY_NO_CACHE_SVC = 
autoProxyFlag(0x00100000) + fWINHTTP_AUTOPROXY_NO_DIRECTACCESS = autoProxyFlag(0x00040000) + fWINHTTP_AUTOPROXY_RUN_INPROCESS = autoProxyFlag(0x00010000) + fWINHTTP_AUTOPROXY_RUN_OUTPROCESS_ONLY = autoProxyFlag(0x00020000) + fWINHTTP_AUTOPROXY_SORT_RESULTS = autoProxyFlag(0x00400000) +) + +type autoDetectFlag uint32 + +const ( + fWINHTTP_AUTO_DETECT_TYPE_DHCP = autoDetectFlag(0x00000001) + fWINHTTP_AUTO_DETECT_TYPE_DNS_A = autoDetectFlag(0x00000002) +) + +type tWINHTTP_PROXY_INFO struct { + dwAccessType uint32 + lpszProxy *uint16 + lpszProxyBypass *uint16 +} + +type tWINHTTP_CURRENT_USER_IE_PROXY_CONFIG struct { + fAutoDetect bool + lpszAutoConfigUrl *uint16 + lpszProxy *uint16 + lpszProxyBypass *uint16 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go index d569c0c9499..d0ea68f4082 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_bsd.go +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -1,6 +1,7 @@ -//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine +//go:build (darwin || freebsd || openbsd || netbsd || dragonfly || hurd) && !appengine && !tinygo // +build darwin freebsd openbsd netbsd dragonfly hurd // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go index 31503226f6c..7402e0618aa 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_others.go +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -1,5 +1,6 @@ -//go:build appengine || js || nacl || wasm -// +build appengine js nacl wasm +//go:build (appengine || js || nacl || tinygo || wasm) && !windows +// +build appengine js nacl tinygo wasm +// +build !windows package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go index 67787657fb2..0337d8cf6de 100644 --- a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -1,6 +1,7 @@ -//go:build (linux || aix || zos) && !appengine +//go:build (linux || aix || zos) && !appengine && !tinygo // +build linux aix zos // +build !appengine +// +build !tinygo package isatty diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 00000000000..f9c841a51e0 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 00000000000..d70706d5b35 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 00000000000..25378537ead --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. 
+ homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/ncw/swift/.gitignore b/vendor/github.com/ncw/swift/.gitignore new file mode 100644 index 00000000000..5cdbab79477 --- /dev/null +++ b/vendor/github.com/ncw/swift/.gitignore @@ -0,0 +1,4 @@ +*~ +*.pyc +test-env* +junk/ \ No newline at end of file diff --git a/vendor/github.com/ncw/swift/.travis.yml b/vendor/github.com/ncw/swift/.travis.yml new file mode 100644 index 00000000000..72364ac18d6 --- /dev/null +++ b/vendor/github.com/ncw/swift/.travis.yml @@ -0,0 +1,61 @@ +language: go +sudo: false + +arch: + - amd64 + - ppc64le + +go_import_path: github.com/ncw/swift + +go: + - 1.2.x + - 1.3.x + - 1.4.x + - 1.5.x + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - master + +matrix: + include: + - go: 1.14.x + env: TEST_REAL_SERVER=rackspace + - go: 1.14.x + env: TEST_REAL_SERVER=memset + - go: 1.14.x + arch: ppc64le + env: TEST_REAL_SERVER=rackspace + - go: 1.14.x + arch: ppc64le + env: TEST_REAL_SERVER=memset + allow_failures: + - go: 1.14.x + env: TEST_REAL_SERVER=rackspace + - go: 1.14.x + env: TEST_REAL_SERVER=memset + - go: 1.14.x + arch: ppc64le + env: TEST_REAL_SERVER=rackspace + - go: 1.14.x + arch: ppc64le + env: TEST_REAL_SERVER=memset +# Removed unsupported jobs for ppc64le + exclude: + - go: 1.2.x + arch: ppc64le + - go: 1.3.x + arch: ppc64le + - go: 1.4.x + arch: ppc64le +install: go test -i ./... 
+script:
+  - test -z "$(go fmt ./...)"
+  - go test
+  - ./travis_realserver.sh
diff --git a/vendor/github.com/ncw/swift/COPYING b/vendor/github.com/ncw/swift/COPYING
new file mode 100644
index 00000000000..8c27c67fd0a
--- /dev/null
+++ b/vendor/github.com/ncw/swift/COPYING
@@ -0,0 +1,20 @@
+Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md
new file mode 100644
index 00000000000..1965f73c5dd
--- /dev/null
+++ b/vendor/github.com/ncw/swift/README.md
@@ -0,0 +1,163 @@
+Swift
+=====
+
+This package provides an easy-to-use library for interfacing with
+Swift / Openstack Object Storage / Rackspace cloud files from the Go
+Language
+
+See here for package docs
+
+  http://godoc.org/github.com/ncw/swift
+
+[![Build Status](https://api.travis-ci.org/ncw/swift.svg?branch=master)](https://travis-ci.org/ncw/swift) [![GoDoc](https://godoc.org/github.com/ncw/swift?status.svg)](https://godoc.org/github.com/ncw/swift)
+
+Install
+-------
+
+Use go to install the library
+
+    go get github.com/ncw/swift
+
+Usage
+-----
+
+See here for full package docs
+
+- http://godoc.org/github.com/ncw/swift
+
+Here is a short example from the docs
+```go
+import "github.com/ncw/swift"
+
+// Create a connection
+c := swift.Connection{
+    UserName: "user",
+    ApiKey:   "key",
+    AuthUrl:  "auth_url",
+    Domain:   "domain", // Name of the domain (v3 auth only)
+    Tenant:   "tenant", // Name of the tenant (v2 auth only)
+}
+// Authenticate
+err := c.Authenticate()
+if err != nil {
+    panic(err)
+}
+// List all the containers
+containers, err := c.ContainerNames(nil)
+fmt.Println(containers)
+// etc...
+```
+
+Additions
+---------
+
+The `rs` sub project contains a wrapper for the Rackspace-specific CDN Management interface.
+
+Testing
+-------
+
+To run the tests you can either use an embedded fake Swift server
+or use a real Openstack Swift server or a Rackspace Cloud files account.
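+
+For example, a test helper might read that configuration straight into a
+`swift.Connection`. This is a minimal sketch that uses only fields and
+methods shown in the usage example above; the environment variables are
+described below
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/ncw/swift"
+)
+
+func main() {
+	// Build a connection from the test environment variables.
+	c := swift.Connection{
+		UserName: os.Getenv("SWIFT_API_USER"),
+		ApiKey:   os.Getenv("SWIFT_API_KEY"),
+		AuthUrl:  os.Getenv("SWIFT_AUTH_URL"),
+		Tenant:   os.Getenv("SWIFT_TENANT"),     // v2 auth only
+		Domain:   os.Getenv("SWIFT_API_DOMAIN"), // v3 auth only
+	}
+	if err := c.Authenticate(); err != nil {
+		panic(err)
+	}
+	// List container names to confirm the credentials work.
+	containers, err := c.ContainerNames(nil)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(containers)
+}
+```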
+ +When using a real Swift server, you need to set these environment variables +before running the tests + + export SWIFT_API_USER='user' + export SWIFT_API_KEY='key' + export SWIFT_AUTH_URL='https://url.of.auth.server/v1.0' + +And optionally these if using v2 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + +And optionally these if using v3 authentication + + export SWIFT_TENANT='TenantName' + export SWIFT_TENANT_ID='TenantId' + export SWIFT_API_DOMAIN_ID='domain id' + export SWIFT_API_DOMAIN='domain name' + +And optionally these if using v3 trust + + export SWIFT_TRUST_ID='TrustId' + +And optionally this if you want to skip server certificate validation + + export SWIFT_AUTH_INSECURE=1 + +And optionally this to configure the connect channel timeout, in seconds + + export SWIFT_CONNECTION_CHANNEL_TIMEOUT=60 + +And optionally this to configure the data channel timeout, in seconds + + export SWIFT_DATA_CHANNEL_TIMEOUT=60 + +Then run the tests with `go test` + +License +------- + +This is free software under the terms of MIT license (check COPYING file +included in this package). + +Contact and support +------------------- + +The project website is at: + +- https://github.com/ncw/swift + +There you can file bug reports, ask for help or contribute patches. + +Authors +------- + +- Nick Craig-Wood + +Contributors +------------ + +- Brian "bojo" Jones +- Janika Liiv +- Yamamoto, Hirotaka +- Stephen +- platformpurple +- Paul Querna +- Livio Soares +- thesyncim +- lsowen +- Sylvain Baubeau +- Chris Kastorff +- Dai HaoJun +- Hua Wang +- Fabian Ruff +- Arturo Reuschenbach Puncernau +- Petr Kotek +- Stefan Majewsky +- Cezar Sa Espinola +- Sam Gunaratne +- Richard Scothern +- Michel Couillard +- Christopher Waldon +- dennis +- hag +- Alexander Neumann +- eclipseo <30413512+eclipseo@users.noreply.github.com> +- Yuri Per +- Falk Reimann +- Arthur Paim Arnold +- Bruno Michel +- Charles Hsu +- Omar Ali +- Andreas Andersen +- kayrus +- CodeLingo Bot +- Jérémy Clerc +- 4xicom <37339705+4xicom@users.noreply.github.com> +- Bo +- Thiago da Silva +- Brandon WELSCH +- Damien Tournoud +- Pedro Kiefer diff --git a/vendor/github.com/ncw/swift/auth.go b/vendor/github.com/ncw/swift/auth.go new file mode 100644 index 00000000000..25654f429cb --- /dev/null +++ b/vendor/github.com/ncw/swift/auth.go @@ -0,0 +1,335 @@ +package swift + +import ( + "bytes" + "encoding/json" + "net/http" + "net/url" + "strings" + "time" +) + +// Auth defines the operations needed to authenticate with swift +// +// This encapsulates the different authentication schemes in use +type Authenticator interface { + // Request creates an http.Request for the auth - return nil if not needed + Request(*Connection) (*http.Request, error) + // Response parses the http.Response + Response(resp *http.Response) error + // The public storage URL - set Internal to true to read + // internal/service net URL + StorageUrl(Internal bool) string + // The access token + Token() string + // The CDN url if available + CdnUrl() string +} + +// Expireser is an optional interface to read the expiration time of the token +type Expireser interface { + Expires() time.Time +} + +type CustomEndpointAuthenticator interface { + StorageUrlForEndpoint(endpointType EndpointType) string +} + +type EndpointType string + +const ( + // Use public URL as storage URL + EndpointTypePublic = EndpointType("public") + + // Use internal URL as storage URL + EndpointTypeInternal = EndpointType("internal") + + // Use admin URL as storage URL + 
EndpointTypeAdmin = EndpointType("admin") +) + +// newAuth - create a new Authenticator from the AuthUrl +// +// A hint for AuthVersion can be provided +func newAuth(c *Connection) (Authenticator, error) { + AuthVersion := c.AuthVersion + if AuthVersion == 0 { + if strings.Contains(c.AuthUrl, "v3") { + AuthVersion = 3 + } else if strings.Contains(c.AuthUrl, "v2") { + AuthVersion = 2 + } else if strings.Contains(c.AuthUrl, "v1") { + AuthVersion = 1 + } else { + return nil, newErrorf(500, "Can't find AuthVersion in AuthUrl - set explicitly") + } + } + switch AuthVersion { + case 1: + return &v1Auth{}, nil + case 2: + return &v2Auth{ + // Guess as to whether using API key or + // password it will try both eventually so + // this is just an optimization. + useApiKey: len(c.ApiKey) >= 32, + }, nil + case 3: + return &v3Auth{}, nil + } + return nil, newErrorf(500, "Auth Version %d not supported", AuthVersion) +} + +// ------------------------------------------------------------ + +// v1 auth +type v1Auth struct { + Headers http.Header // V1 auth: the authentication headers so extensions can access them +} + +// v1 Authentication - make request +func (auth *v1Auth) Request(c *Connection) (*http.Request, error) { + req, err := http.NewRequest("GET", c.AuthUrl, nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.UserAgent) + req.Header.Set("X-Auth-Key", c.ApiKey) + req.Header.Set("X-Auth-User", c.UserName) + return req, nil +} + +// v1 Authentication - read response +func (auth *v1Auth) Response(resp *http.Response) error { + auth.Headers = resp.Header + return nil +} + +// v1 Authentication - read storage url +func (auth *v1Auth) StorageUrl(Internal bool) string { + storageUrl := auth.Headers.Get("X-Storage-Url") + if Internal { + newUrl, err := url.Parse(storageUrl) + if err != nil { + return storageUrl + } + newUrl.Host = "snet-" + newUrl.Host + storageUrl = newUrl.String() + } + return storageUrl +} + +// v1 Authentication - read auth token +func (auth *v1Auth) Token() string { + return auth.Headers.Get("X-Auth-Token") +} + +// v1 Authentication - read cdn url +func (auth *v1Auth) CdnUrl() string { + return auth.Headers.Get("X-CDN-Management-Url") +} + +// ------------------------------------------------------------ + +// v2 Authentication +type v2Auth struct { + Auth *v2AuthResponse + Region string + useApiKey bool // if set will use API key not Password + useApiKeyOk bool // if set won't change useApiKey any more + notFirst bool // set after first run +} + +// v2 Authentication - make request +func (auth *v2Auth) Request(c *Connection) (*http.Request, error) { + auth.Region = c.Region + // Toggle useApiKey if not first run and not OK yet + if auth.notFirst && !auth.useApiKeyOk { + auth.useApiKey = !auth.useApiKey + } + auth.notFirst = true + // Create a V2 auth request for the body of the connection + var v2i interface{} + if !auth.useApiKey { + // Normal swift authentication + v2 := v2AuthRequest{} + v2.Auth.PasswordCredentials.UserName = c.UserName + v2.Auth.PasswordCredentials.Password = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } else { + // Rackspace special with API Key + v2 := v2AuthRequestRackspace{} + v2.Auth.ApiKeyCredentials.UserName = c.UserName + v2.Auth.ApiKeyCredentials.ApiKey = c.ApiKey + v2.Auth.Tenant = c.Tenant + v2.Auth.TenantId = c.TenantId + v2i = v2 + } + body, err := json.Marshal(v2i) + if err != nil { + return nil, err + } + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += 
"tokens" + req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", c.UserAgent) + return req, nil +} + +// v2 Authentication - read response +func (auth *v2Auth) Response(resp *http.Response) error { + auth.Auth = new(v2AuthResponse) + err := readJson(resp, auth.Auth) + // If successfully read Auth then no need to toggle useApiKey any more + if err == nil { + auth.useApiKeyOk = true + } + return err +} + +// Finds the Endpoint Url of "type" from the v2AuthResponse using the +// Region if set or defaulting to the first one if not +// +// Returns "" if not found +func (auth *v2Auth) endpointUrl(Type string, endpointType EndpointType) string { + for _, catalog := range auth.Auth.Access.ServiceCatalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if auth.Region == "" || (auth.Region == endpoint.Region) { + switch endpointType { + case EndpointTypeInternal: + return endpoint.InternalUrl + case EndpointTypePublic: + return endpoint.PublicUrl + case EndpointTypeAdmin: + return endpoint.AdminUrl + default: + return "" + } + } + } + } + } + return "" +} + +// v2 Authentication - read storage url +// +// If Internal is true then it reads the private (internal / service +// net) URL. +func (auth *v2Auth) StorageUrl(Internal bool) string { + endpointType := EndpointTypePublic + if Internal { + endpointType = EndpointTypeInternal + } + return auth.StorageUrlForEndpoint(endpointType) +} + +// v2 Authentication - read storage url +// +// Use the indicated endpointType to choose a URL. +func (auth *v2Auth) StorageUrlForEndpoint(endpointType EndpointType) string { + return auth.endpointUrl("object-store", endpointType) +} + +// v2 Authentication - read auth token +func (auth *v2Auth) Token() string { + return auth.Auth.Access.Token.Id +} + +// v2 Authentication - read expires +func (auth *v2Auth) Expires() time.Time { + t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires) + if err != nil { + return time.Time{} // return Zero if not parsed + } + return t +} + +// v2 Authentication - read cdn url +func (auth *v2Auth) CdnUrl() string { + return auth.endpointUrl("rax:object-cdn", EndpointTypePublic) +} + +// ------------------------------------------------------------ + +// V2 Authentication request +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequest struct { + Auth struct { + PasswordCredentials struct { + UserName string `json:"username"` + Password string `json:"password"` + } `json:"passwordCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string `json:"tenantId,omitempty"` + } `json:"auth"` +} + +// V2 Authentication request - Rackspace variant +// +// http://docs.openstack.org/developer/keystone/api_curl_examples.html +// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html +// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html +type v2AuthRequestRackspace struct { + Auth struct { + ApiKeyCredentials struct { + UserName string `json:"username"` + ApiKey string `json:"apiKey"` + } `json:"RAX-KSKEY:apiKeyCredentials"` + Tenant string `json:"tenantName,omitempty"` + TenantId string 
`json:"tenantId,omitempty"`
+	} `json:"auth"`
+}
+
+// V2 Authentication reply
+//
+// http://docs.openstack.org/developer/keystone/api_curl_examples.html
+// http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/curl_auth.html
+// http://docs.openstack.org/api/openstack-identity-service/2.0/content/POST_authenticate_v2.0_tokens_.html
+type v2AuthResponse struct {
+	Access struct {
+		ServiceCatalog []struct {
+			Endpoints []struct {
+				InternalUrl string
+				PublicUrl   string
+				AdminUrl    string
+				Region      string
+				TenantId    string
+			}
+			Name string
+			Type string
+		}
+		Token struct {
+			Expires string
+			Id      string
+			Tenant  struct {
+				Id   string
+				Name string
+			}
+		}
+		User struct {
+			DefaultRegion string `json:"RAX-AUTH:defaultRegion"`
+			Id            string
+			Name          string
+			Roles         []struct {
+				Description string
+				Id          string
+				Name        string
+				TenantId    string
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/ncw/swift/auth_v3.go b/vendor/github.com/ncw/swift/auth_v3.go
new file mode 100644
index 00000000000..1e34ad81464
--- /dev/null
+++ b/vendor/github.com/ncw/swift/auth_v3.go
@@ -0,0 +1,300 @@
+package swift
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+)
+
+const (
+	v3AuthMethodToken                 = "token"
+	v3AuthMethodPassword              = "password"
+	v3AuthMethodApplicationCredential = "application_credential"
+	v3CatalogTypeObjectStore          = "object-store"
+)
+
+// V3 Authentication request
+// http://docs.openstack.org/developer/keystone/api_curl_examples.html
+// http://developer.openstack.org/api-ref-identity-v3.html
+type v3AuthRequest struct {
+	Auth struct {
+		Identity struct {
+			Methods               []string                     `json:"methods"`
+			Password              *v3AuthPassword              `json:"password,omitempty"`
+			Token                 *v3AuthToken                 `json:"token,omitempty"`
+			ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"`
+		} `json:"identity"`
+		Scope *v3Scope `json:"scope,omitempty"`
+	} `json:"auth"`
+}
+
+type v3Scope struct {
+	Project *v3Project `json:"project,omitempty"`
+	Domain  *v3Domain  `json:"domain,omitempty"`
+	Trust   *v3Trust   `json:"OS-TRUST:trust,omitempty"`
+}
+
+type v3Domain struct {
+	Id   string `json:"id,omitempty"`
+	Name string `json:"name,omitempty"`
+}
+
+type v3Project struct {
+	Name   string    `json:"name,omitempty"`
+	Id     string    `json:"id,omitempty"`
+	Domain *v3Domain `json:"domain,omitempty"`
+}
+
+type v3Trust struct {
+	Id string `json:"id"`
+}
+
+type v3User struct {
+	Domain   *v3Domain `json:"domain,omitempty"`
+	Id       string    `json:"id,omitempty"`
+	Name     string    `json:"name,omitempty"`
+	Password string    `json:"password,omitempty"`
+}
+
+type v3AuthToken struct {
+	Id string `json:"id"`
+}
+
+type v3AuthPassword struct {
+	User v3User `json:"user"`
+}
+
+type v3AuthApplicationCredential struct {
+	Id     string  `json:"id,omitempty"`
+	Name   string  `json:"name,omitempty"`
+	Secret string  `json:"secret,omitempty"`
+	User   *v3User `json:"user,omitempty"`
+}
+
+// V3 Authentication response
+type v3AuthResponse struct {
+	Token struct {
+		ExpiresAt string `json:"expires_at"`
+		IssuedAt  string `json:"issued_at"`
+		Methods   []string
+		Roles     []struct {
+			Id, Name string
+			Links    struct {
+				Self string
+			}
+		}
+
+		Project struct {
+			Domain struct {
+				Id, Name string
+			}
+			Id, Name string
+		}
+
+		Catalog []struct {
+			Id, Name, Type string
+			Endpoints      []struct {
+				Id, Region_Id, Url, Region string
+				Interface                  EndpointType
+			}
+		}
+
+		User struct {
+			Id, Name string
+			Domain   struct {
+				Id, Name string
+				Links    struct {
+					Self string
+				}
+			}
+		}
+
+		Audit_Ids []string
+	}
+}
+
+type v3Auth struct {
Region string + Auth *v3AuthResponse + Headers http.Header +} + +func (auth *v3Auth) Request(c *Connection) (*http.Request, error) { + auth.Region = c.Region + + var v3i interface{} + + v3 := v3AuthRequest{} + + if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" { + var user *v3User + + if c.ApplicationCredentialId != "" { + c.ApplicationCredentialName = "" + user = &v3User{} + } + + if user == nil && c.UserId != "" { + // UserID could be used without the domain information + user = &v3User{ + Id: c.UserId, + } + } + + if user == nil && c.UserName == "" { + // Make sure that Username or UserID are provided + return nil, fmt.Errorf("UserID or Name should be provided") + } + + if user == nil && c.DomainId != "" { + user = &v3User{ + Name: c.UserName, + Domain: &v3Domain{ + Id: c.DomainId, + }, + } + } + + if user == nil && c.Domain != "" { + user = &v3User{ + Name: c.UserName, + Domain: &v3Domain{ + Name: c.Domain, + }, + } + } + + // Make sure that DomainID or DomainName are provided among Username + if user == nil { + return nil, fmt.Errorf("DomainID or Domain should be provided") + } + + v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential} + v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{ + Id: c.ApplicationCredentialId, + Name: c.ApplicationCredentialName, + Secret: c.ApplicationCredentialSecret, + User: user, + } + } else if c.UserName == "" && c.UserId == "" { + v3.Auth.Identity.Methods = []string{v3AuthMethodToken} + v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey} + } else { + v3.Auth.Identity.Methods = []string{v3AuthMethodPassword} + v3.Auth.Identity.Password = &v3AuthPassword{ + User: v3User{ + Name: c.UserName, + Id: c.UserId, + Password: c.ApiKey, + }, + } + + var domain *v3Domain + + if c.Domain != "" { + domain = &v3Domain{Name: c.Domain} + } else if c.DomainId != "" { + domain = &v3Domain{Id: c.DomainId} + } + v3.Auth.Identity.Password.User.Domain = domain + } + + if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential { + if c.TrustId != "" { + v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}} + } else if c.TenantId != "" || c.Tenant != "" { + + v3.Auth.Scope = &v3Scope{Project: &v3Project{}} + + if c.TenantId != "" { + v3.Auth.Scope.Project.Id = c.TenantId + } else if c.Tenant != "" { + v3.Auth.Scope.Project.Name = c.Tenant + switch { + case c.TenantDomain != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.TenantDomain} + case c.TenantDomainId != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.TenantDomainId} + case c.Domain != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Name: c.Domain} + case c.DomainId != "": + v3.Auth.Scope.Project.Domain = &v3Domain{Id: c.DomainId} + default: + v3.Auth.Scope.Project.Domain = &v3Domain{Name: "Default"} + } + } + } + } + + v3i = v3 + + body, err := json.Marshal(v3i) + + if err != nil { + return nil, err + } + + url := c.AuthUrl + if !strings.HasSuffix(url, "/") { + url += "/" + } + url += "auth/tokens" + req, err := http.NewRequest("POST", url, bytes.NewBuffer(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", c.UserAgent) + return req, nil +} + +func (auth *v3Auth) Response(resp *http.Response) error { + auth.Auth = &v3AuthResponse{} + auth.Headers = resp.Header + err := readJson(resp, auth.Auth) + return err +} + +func (auth *v3Auth) endpointUrl(Type string, endpointType EndpointType) string { + for _, catalog := range 
auth.Auth.Token.Catalog { + if catalog.Type == Type { + for _, endpoint := range catalog.Endpoints { + if endpoint.Interface == endpointType && (auth.Region == "" || (auth.Region == endpoint.Region)) { + return endpoint.Url + } + } + } + } + return "" +} + +func (auth *v3Auth) StorageUrl(Internal bool) string { + endpointType := EndpointTypePublic + if Internal { + endpointType = EndpointTypeInternal + } + return auth.StorageUrlForEndpoint(endpointType) +} + +func (auth *v3Auth) StorageUrlForEndpoint(endpointType EndpointType) string { + return auth.endpointUrl("object-store", endpointType) +} + +func (auth *v3Auth) Token() string { + return auth.Headers.Get("X-Subject-Token") +} + +func (auth *v3Auth) Expires() time.Time { + t, err := time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt) + if err != nil { + return time.Time{} // return Zero if not parsed + } + return t +} + +func (auth *v3Auth) CdnUrl() string { + return "" +} diff --git a/vendor/github.com/ncw/swift/compatibility_1_0.go b/vendor/github.com/ncw/swift/compatibility_1_0.go new file mode 100644 index 00000000000..7b69a757a1c --- /dev/null +++ b/vendor/github.com/ncw/swift/compatibility_1_0.go @@ -0,0 +1,28 @@ +// Go 1.0 compatibility functions + +// +build !go1.1 + +package swift + +import ( + "log" + "net/http" + "time" +) + +// Cancel the request - doesn't work under < go 1.1 +func cancelRequest(transport http.RoundTripper, req *http.Request) { + log.Printf("Tried to cancel a request but couldn't - recompile with go 1.1") +} + +// Reset a timer - Doesn't work properly < go 1.1 +// +// This is quite hard to do properly under go < 1.1 so we do a crude +// approximation and hope that everyone upgrades to go 1.1 quickly +func resetTimer(t *time.Timer, d time.Duration) { + t.Stop() + // Very likely this doesn't actually work if we are already + // selecting on t.C. 
However we've stopped the original timer
+	// so won't break transfers but may not time them out :-(
+	*t = *time.NewTimer(d)
+}
diff --git a/vendor/github.com/ncw/swift/compatibility_1_1.go b/vendor/github.com/ncw/swift/compatibility_1_1.go
new file mode 100644
index 00000000000..a4f9c3ab242
--- /dev/null
+++ b/vendor/github.com/ncw/swift/compatibility_1_1.go
@@ -0,0 +1,24 @@
+// Go 1.1 and later compatibility functions
+//
+// +build go1.1
+
+package swift
+
+import (
+	"net/http"
+	"time"
+)
+
+// Cancel the request
+func cancelRequest(transport http.RoundTripper, req *http.Request) {
+	if tr, ok := transport.(interface {
+		CancelRequest(*http.Request)
+	}); ok {
+		tr.CancelRequest(req)
+	}
+}
+
+// Reset a timer
+func resetTimer(t *time.Timer, d time.Duration) {
+	t.Reset(d)
+}
diff --git a/vendor/github.com/ncw/swift/compatibility_1_6.go b/vendor/github.com/ncw/swift/compatibility_1_6.go
new file mode 100644
index 00000000000..b443d01d2a8
--- /dev/null
+++ b/vendor/github.com/ncw/swift/compatibility_1_6.go
@@ -0,0 +1,23 @@
+// +build go1.6
+
+package swift
+
+import (
+	"net/http"
+	"time"
+)
+
+const IS_AT_LEAST_GO_16 = true
+
+func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {
+	tr.ExpectContinueTimeout = t
+}
+
+func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {
+	if req.Body != nil {
+		req.Header.Add("Expect", "100-continue")
+	}
+	if !hasContentLength {
+		req.TransferEncoding = []string{"chunked"}
+	}
+}
diff --git a/vendor/github.com/ncw/swift/compatibility_not_1_6.go b/vendor/github.com/ncw/swift/compatibility_not_1_6.go
new file mode 100644
index 00000000000..aabb44e2b77
--- /dev/null
+++ b/vendor/github.com/ncw/swift/compatibility_not_1_6.go
@@ -0,0 +1,13 @@
+// +build !go1.6
+
+package swift
+
+import (
+	"net/http"
+	"time"
+)
+
+const IS_AT_LEAST_GO_16 = false
+
+func SetExpectContinueTimeout(tr *http.Transport, t time.Duration)          {}
+func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {}
diff --git a/vendor/github.com/ncw/swift/dlo.go b/vendor/github.com/ncw/swift/dlo.go
new file mode 100644
index 00000000000..05a1927b393
--- /dev/null
+++ b/vendor/github.com/ncw/swift/dlo.go
@@ -0,0 +1,149 @@
+package swift
+
+import (
+	"os"
+	"strings"
+)
+
+// DynamicLargeObjectCreateFile represents an open dynamic large object
+type DynamicLargeObjectCreateFile struct {
+	largeObjectCreateFile
+}
+
+// DynamicLargeObjectCreateFile creates a dynamic large object
+// returning an object which satisfies io.Writer, io.Seeker, io.Closer
+// and io.ReaderFrom. The flags are as passed to the
+// largeObjectCreate method.
+func (c *Connection) DynamicLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) {
+	lo, err := c.largeObjectCreate(opts)
+	if err != nil {
+		return nil, err
+	}
+
+	return withBuffer(opts, &DynamicLargeObjectCreateFile{
+		largeObjectCreateFile: *lo,
+	}), nil
+}
+
+// DynamicLargeObjectCreate creates or truncates an existing dynamic
+// large object returning a writeable object. This sets opts.Flags to
+// an appropriate value before calling DynamicLargeObjectCreateFile
+func (c *Connection) DynamicLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) {
+	opts.Flags = os.O_TRUNC | os.O_CREATE
+	return c.DynamicLargeObjectCreateFile(opts)
+}
+
+// DynamicLargeObjectDelete deletes a dynamic large object and all of its segments.
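+// (A DLO manifest and its segments are separate objects in Swift; deleting
+// only the manifest would orphan its segments, so both are removed.)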
+func (c *Connection) DynamicLargeObjectDelete(container string, path string) error {
+	return c.LargeObjectDelete(container, path)
+}
+
+// DynamicLargeObjectMove moves a dynamic large object from srcContainer, srcObjectName to dstContainer, dstObjectName
+func (c *Connection) DynamicLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error {
+	info, headers, err := c.Object(srcContainer, srcObjectName)
+	if err != nil {
+		return err
+	}
+
+	segmentContainer, segmentPath := parseFullPath(headers["X-Object-Manifest"])
+	if err := c.createDLOManifest(dstContainer, dstObjectName, segmentContainer+"/"+segmentPath, info.ContentType, sanitizeLargeObjectMoveHeaders(headers)); err != nil {
+		return err
+	}
+
+	if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func sanitizeLargeObjectMoveHeaders(headers Headers) Headers {
+	sanitizedHeaders := make(map[string]string, len(headers))
+	for k, v := range headers {
+		if strings.HasPrefix(k, "X-") { // Some of the fields do not affect the request, e.g. X-Timestamp, X-Trans-Id, X-Openstack-Request-Id; OpenStack will generate new ones anyway.
+			sanitizedHeaders[k] = v
+		}
+	}
+	return sanitizedHeaders
+}
+
+// createDLOManifest creates a dynamic large object manifest
+func (c *Connection) createDLOManifest(container string, objectName string, prefix string, contentType string, headers Headers) error {
+	if headers == nil {
+		headers = make(Headers)
+	}
+	headers["X-Object-Manifest"] = prefix
+	manifest, err := c.ObjectCreate(container, objectName, false, "", contentType, headers)
+	if err != nil {
+		return err
+	}
+
+	if err := manifest.Close(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Close satisfies the io.Closer interface
+func (file *DynamicLargeObjectCreateFile) Close() error {
+	return file.Flush()
+}
+
+func (file *DynamicLargeObjectCreateFile) Flush() error {
+	err := file.conn.createDLOManifest(file.container, file.objectName, file.segmentContainer+"/"+file.prefix, file.contentType, file.headers)
+	if err != nil {
+		return err
+	}
+	return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size())
+}
+
+func (c *Connection) getAllDLOSegments(segmentContainer, segmentPath string) ([]Object, error) {
+	// a simple container listing works 99.9% of the time
+	segments, err := c.ObjectsAll(segmentContainer, &ObjectsOpts{Prefix: segmentPath})
+	if err != nil {
+		return nil, err
+	}
+
+	hasObjectName := make(map[string]struct{})
+	for _, segment := range segments {
+		hasObjectName[segment.Name] = struct{}{}
+	}
+
+	// The container listing might be outdated (i.e. not contain all existing
+	// segment objects yet) because of temporary inconsistency (Swift is only
+	// eventually consistent!). Check its completeness.
+	segmentNumber := 0
+	for {
+		segmentNumber++
+		segmentName := getSegment(segmentPath, segmentNumber)
+		if _, seen := hasObjectName[segmentName]; seen {
+			continue
+		}
+
+		// This segment is missing in the container listing. Use a more reliable
+		// request to check its existence. (HEAD requests on segments are
+		// guaranteed to return the correct metadata, except for the pathological
+		// case of an outage of large parts of the Swift cluster or its network,
+		// since every segment is only written once.)
+		segment, _, err := c.Object(segmentContainer, segmentName)
+		switch err {
+		case nil:
+			// found new segment -> add it in the correct position and keep
+			// going, more might be missing
+			if segmentNumber <= len(segments) {
+				segments = append(segments[:segmentNumber], segments[segmentNumber-1:]...)
+				segments[segmentNumber-1] = segment
+			} else {
+				segments = append(segments, segment)
+			}
+			continue
+		case ObjectNotFound:
+			// This segment is missing. Since we upload segments sequentially,
+			// there won't be any more segments after it.
+			return segments, nil
+		default:
+			return nil, err // unexpected error
+		}
+	}
+}
diff --git a/vendor/github.com/ncw/swift/doc.go b/vendor/github.com/ncw/swift/doc.go
new file mode 100644
index 00000000000..44efde7bf82
--- /dev/null
+++ b/vendor/github.com/ncw/swift/doc.go
@@ -0,0 +1,19 @@
+/*
+Package swift provides an easy-to-use interface to Swift / Openstack Object Storage / Rackspace Cloud Files
+
+Standard Usage
+
+Most of the work is done through the Container*() and Object*() methods.
+
+All methods are safe to use concurrently in multiple goroutines.
+
+Object Versioning
+
+As defined by http://docs.openstack.org/api/openstack-object-storage/1.0/content/Object_Versioning-e1e3230.html#d6e983 one can create a container which allows for version control of files. The suggested method is to create a version container for holding all non-current files, and a current container for holding the latest version that the file points to. The container and objects inside it can be used in the standard manner, however, pushing a file multiple times will result in it being copied to the version container and the new file put in its place. If the current file is deleted, the previous file in the version container will replace it. This means that if a file is updated 5 times, it must be deleted 5 times to be completely removed from the system.
+
+Rackspace Sub Module
+
+This module specifically allows the enabling/disabling of Rackspace Cloud File CDN management on a container. This is specific to the Rackspace API and not Swift/Openstack, therefore it has been placed in a submodule. One can easily create a RsConnection and use it like the standard Connection to access and manipulate containers and objects.
+
+*/
+package swift
diff --git a/vendor/github.com/ncw/swift/largeobjects.go b/vendor/github.com/ncw/swift/largeobjects.go
new file mode 100644
index 00000000000..038bef85a9f
--- /dev/null
+++ b/vendor/github.com/ncw/swift/largeobjects.go
@@ -0,0 +1,448 @@
+package swift
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	gopath "path"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// NotLargeObject is returned if an operation is performed on an object which isn't large.
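+//
+// A sketch of checking for it from calling code (the container and object
+// names are hypothetical), given an authenticated Connection c:
+//
+//	_, _, err := c.LargeObjectGetSegments("videos", "movie.mp4")
+//	if err == swift.NotLargeObject {
+//		// "movie.mp4" is a plain object with no segment list
+//	}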
+var NotLargeObject = errors.New("Not a large object") + +// readAfterWriteTimeout defines the time we wait before an object appears after having been uploaded +var readAfterWriteTimeout = 15 * time.Second + +// readAfterWriteWait defines the time to sleep between two retries +var readAfterWriteWait = 200 * time.Millisecond + +// largeObjectCreateFile represents an open static or dynamic large object +type largeObjectCreateFile struct { + conn *Connection + container string + objectName string + currentLength int64 + filePos int64 + chunkSize int64 + segmentContainer string + prefix string + contentType string + checkHash bool + segments []Object + headers Headers + minChunkSize int64 +} + +func swiftSegmentPath(path string) (string, error) { + checksum := sha1.New() + random := make([]byte, 32) + if _, err := rand.Read(random); err != nil { + return "", err + } + path = hex.EncodeToString(checksum.Sum(append([]byte(path), random...))) + return strings.TrimLeft(strings.TrimRight("segments/"+path[0:3]+"/"+path[3:], "/"), "/"), nil +} + +func getSegment(segmentPath string, partNumber int) string { + return fmt.Sprintf("%s/%016d", segmentPath, partNumber) +} + +func parseFullPath(manifest string) (container string, prefix string) { + components := strings.SplitN(manifest, "/", 2) + container = components[0] + if len(components) > 1 { + prefix = components[1] + } + return container, prefix +} + +func (headers Headers) IsLargeObjectDLO() bool { + _, isDLO := headers["X-Object-Manifest"] + return isDLO +} + +func (headers Headers) IsLargeObjectSLO() bool { + _, isSLO := headers["X-Static-Large-Object"] + return isSLO +} + +func (headers Headers) IsLargeObject() bool { + return headers.IsLargeObjectSLO() || headers.IsLargeObjectDLO() +} + +func (c *Connection) getAllSegments(container string, path string, headers Headers) (string, []Object, error) { + if manifest, isDLO := headers["X-Object-Manifest"]; isDLO { + segmentContainer, segmentPath := parseFullPath(manifest) + segments, err := c.getAllDLOSegments(segmentContainer, segmentPath) + return segmentContainer, segments, err + } + if headers.IsLargeObjectSLO() { + return c.getAllSLOSegments(container, path) + } + return "", nil, NotLargeObject +} + +// LargeObjectOpts describes how a large object should be created +type LargeObjectOpts struct { + Container string // Name of container to place object + ObjectName string // Name of object + Flags int // Creation flags + CheckHash bool // If set Check the hash + Hash string // If set use this hash to check + ContentType string // Content-Type of the object + Headers Headers // Additional headers to upload the object with + ChunkSize int64 // Size of chunks of the object, defaults to 10MB if not set + MinChunkSize int64 // Minimum chunk size, automatically set for SLO's based on info + SegmentContainer string // Name of the container to place segments + SegmentPrefix string // Prefix to use for the segments + NoBuffer bool // Prevents using a bufio.Writer to write segments +} + +type LargeObjectFile interface { + io.Writer + io.Seeker + io.Closer + Size() int64 + Flush() error +} + +// largeObjectCreate creates a large object at opts.Container, opts.ObjectName. 
+//
+// opts.Flags can have the following bits set
+//	os.O_TRUNC  - remove the contents of the large object if it exists
+//	os.O_APPEND - write at the end of the large object
+func (c *Connection) largeObjectCreate(opts *LargeObjectOpts) (*largeObjectCreateFile, error) {
+	var (
+		segmentPath      string
+		segmentContainer string
+		segments         []Object
+		currentLength    int64
+		err              error
+	)
+
+	if opts.SegmentPrefix != "" {
+		segmentPath = opts.SegmentPrefix
+	} else if segmentPath, err = swiftSegmentPath(opts.ObjectName); err != nil {
+		return nil, err
+	}
+
+	if info, headers, err := c.Object(opts.Container, opts.ObjectName); err == nil {
+		if opts.Flags&os.O_TRUNC != 0 {
+			c.LargeObjectDelete(opts.Container, opts.ObjectName)
+		} else {
+			currentLength = info.Bytes
+			if headers.IsLargeObject() {
+				segmentContainer, segments, err = c.getAllSegments(opts.Container, opts.ObjectName, headers)
+				if err != nil {
+					return nil, err
+				}
+				if len(segments) > 0 {
+					segmentPath = gopath.Dir(segments[0].Name)
+				}
+			} else {
+				if err = c.ObjectMove(opts.Container, opts.ObjectName, opts.Container, getSegment(segmentPath, 1)); err != nil {
+					return nil, err
+				}
+				segments = append(segments, info)
+			}
+		}
+	} else if err != ObjectNotFound {
+		return nil, err
+	}
+
+	// segmentContainer is not empty when the manifest already existed
+	if segmentContainer == "" {
+		if opts.SegmentContainer != "" {
+			segmentContainer = opts.SegmentContainer
+		} else {
+			segmentContainer = opts.Container + "_segments"
+		}
+	}
+
+	file := &largeObjectCreateFile{
+		conn:             c,
+		checkHash:        opts.CheckHash,
+		container:        opts.Container,
+		objectName:       opts.ObjectName,
+		chunkSize:        opts.ChunkSize,
+		minChunkSize:     opts.MinChunkSize,
+		headers:          opts.Headers,
+		segmentContainer: segmentContainer,
+		prefix:           segmentPath,
+		segments:         segments,
+		currentLength:    currentLength,
+	}
+
+	if file.chunkSize == 0 {
+		file.chunkSize = 10 * 1024 * 1024
+	}
+
+	if file.minChunkSize > file.chunkSize {
+		file.chunkSize = file.minChunkSize
+	}
+
+	if opts.Flags&os.O_APPEND != 0 {
+		file.filePos = currentLength
+	}
+
+	return file, nil
+}
+
+// LargeObjectDelete deletes the large object named by container, path
+func (c *Connection) LargeObjectDelete(container string, objectName string) error {
+	_, headers, err := c.Object(container, objectName)
+	if err != nil {
+		return err
+	}
+
+	var objects [][]string
+	if headers.IsLargeObject() {
+		segmentContainer, segments, err := c.getAllSegments(container, objectName, headers)
+		if err != nil {
+			return err
+		}
+		for _, obj := range segments {
+			objects = append(objects, []string{segmentContainer, obj.Name})
+		}
+	}
+	objects = append(objects, []string{container, objectName})
+
+	info, err := c.cachedQueryInfo()
+	if err == nil && info.SupportsBulkDelete() && len(objects) > 0 {
+		filenames := make([]string, len(objects))
+		for i, obj := range objects {
+			filenames[i] = obj[0] + "/" + obj[1]
+		}
+		_, err = c.doBulkDelete(filenames, nil)
+		// Don't fail on ObjectNotFound because eventual consistency
+		// makes this situation normal.
+		if err != nil && err != Forbidden && err != ObjectNotFound {
+			return err
+		}
+	} else {
+		for _, obj := range objects {
+			if err := c.ObjectDelete(obj[0], obj[1]); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// LargeObjectGetSegments returns all the segments that compose an object.
+// If the object is a Dynamic Large Object (DLO), it just returns the objects
+// that have the prefix as indicated by the manifest.
+// If the object is a Static Large Object (SLO), it retrieves the JSON content +// of the manifest and return all the segments of it. +func (c *Connection) LargeObjectGetSegments(container string, path string) (string, []Object, error) { + _, headers, err := c.Object(container, path) + if err != nil { + return "", nil, err + } + + return c.getAllSegments(container, path, headers) +} + +// Seek sets the offset for the next write operation +func (file *largeObjectCreateFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + file.filePos = offset + case 1: + file.filePos += offset + case 2: + file.filePos = file.currentLength + offset + default: + return -1, fmt.Errorf("invalid value for whence") + } + if file.filePos < 0 { + return -1, fmt.Errorf("negative offset") + } + return file.filePos, nil +} + +func (file *largeObjectCreateFile) Size() int64 { + return file.currentLength +} + +func withLORetry(expectedSize int64, fn func() (Headers, int64, error)) (err error) { + endTimer := time.NewTimer(readAfterWriteTimeout) + defer endTimer.Stop() + waitingTime := readAfterWriteWait + for { + var headers Headers + var sz int64 + if headers, sz, err = fn(); err == nil { + if !headers.IsLargeObjectDLO() || (expectedSize == 0 && sz > 0) || expectedSize == sz { + return + } + } else { + return + } + waitTimer := time.NewTimer(waitingTime) + select { + case <-endTimer.C: + waitTimer.Stop() + err = fmt.Errorf("Timeout expired while waiting for object to have size == %d, got: %d", expectedSize, sz) + return + case <-waitTimer.C: + waitingTime *= 2 + } + } +} + +func (c *Connection) waitForSegmentsToShowUp(container, objectName string, expectedSize int64) (err error) { + err = withLORetry(expectedSize, func() (Headers, int64, error) { + var info Object + var headers Headers + info, headers, err = c.objectBase(container, objectName) + if err != nil { + return headers, 0, err + } + return headers, info.Bytes, nil + }) + return +} + +// Write satisfies the io.Writer interface +func (file *largeObjectCreateFile) Write(buf []byte) (int, error) { + var sz int64 + var relativeFilePos int + writeSegmentIdx := 0 + for i, obj := range file.segments { + if file.filePos < sz+obj.Bytes || (i == len(file.segments)-1 && file.filePos < sz+file.minChunkSize) { + relativeFilePos = int(file.filePos - sz) + break + } + writeSegmentIdx++ + sz += obj.Bytes + } + sizeToWrite := len(buf) + for offset := 0; offset < sizeToWrite; { + newSegment, n, err := file.writeSegment(buf[offset:], writeSegmentIdx, relativeFilePos) + if err != nil { + return 0, err + } + if writeSegmentIdx < len(file.segments) { + file.segments[writeSegmentIdx] = *newSegment + } else { + file.segments = append(file.segments, *newSegment) + } + offset += n + writeSegmentIdx++ + relativeFilePos = 0 + } + file.filePos += int64(sizeToWrite) + file.currentLength = 0 + for _, obj := range file.segments { + file.currentLength += obj.Bytes + } + return sizeToWrite, nil +} + +func (file *largeObjectCreateFile) writeSegment(buf []byte, writeSegmentIdx int, relativeFilePos int) (*Object, int, error) { + var ( + readers []io.Reader + existingSegment *Object + segmentSize int + ) + segmentName := getSegment(file.prefix, writeSegmentIdx+1) + sizeToRead := int(file.chunkSize) + if writeSegmentIdx < len(file.segments) { + existingSegment = &file.segments[writeSegmentIdx] + if writeSegmentIdx != len(file.segments)-1 { + sizeToRead = int(existingSegment.Bytes) + } + if relativeFilePos > 0 { + headers := make(Headers) + headers["Range"] = "bytes=0-" 
+ strconv.FormatInt(int64(relativeFilePos-1), 10) + existingSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) + if err != nil { + return nil, 0, err + } + defer existingSegmentReader.Close() + sizeToRead -= relativeFilePos + segmentSize += relativeFilePos + readers = []io.Reader{existingSegmentReader} + } + } + if sizeToRead > len(buf) { + sizeToRead = len(buf) + } + segmentSize += sizeToRead + readers = append(readers, bytes.NewReader(buf[:sizeToRead])) + if existingSegment != nil && segmentSize < int(existingSegment.Bytes) { + headers := make(Headers) + headers["Range"] = "bytes=" + strconv.FormatInt(int64(segmentSize), 10) + "-" + tailSegmentReader, _, err := file.conn.ObjectOpen(file.segmentContainer, segmentName, true, headers) + if err != nil { + return nil, 0, err + } + defer tailSegmentReader.Close() + segmentSize = int(existingSegment.Bytes) + readers = append(readers, tailSegmentReader) + } + segmentReader := io.MultiReader(readers...) + headers, err := file.conn.ObjectPut(file.segmentContainer, segmentName, segmentReader, true, "", file.contentType, nil) + if err != nil { + return nil, 0, err + } + return &Object{Name: segmentName, Bytes: int64(segmentSize), Hash: headers["Etag"]}, sizeToRead, nil +} + +func withBuffer(opts *LargeObjectOpts, lo LargeObjectFile) LargeObjectFile { + if !opts.NoBuffer { + return &bufferedLargeObjectFile{ + LargeObjectFile: lo, + bw: bufio.NewWriterSize(lo, int(opts.ChunkSize)), + } + } + return lo +} + +type bufferedLargeObjectFile struct { + LargeObjectFile + bw *bufio.Writer +} + +func (blo *bufferedLargeObjectFile) Close() error { + err := blo.bw.Flush() + if err != nil { + return err + } + return blo.LargeObjectFile.Close() +} + +func (blo *bufferedLargeObjectFile) Write(p []byte) (n int, err error) { + return blo.bw.Write(p) +} + +func (blo *bufferedLargeObjectFile) Seek(offset int64, whence int) (int64, error) { + err := blo.bw.Flush() + if err != nil { + return 0, err + } + return blo.LargeObjectFile.Seek(offset, whence) +} + +func (blo *bufferedLargeObjectFile) Size() int64 { + return blo.LargeObjectFile.Size() + int64(blo.bw.Buffered()) +} + +func (blo *bufferedLargeObjectFile) Flush() error { + err := blo.bw.Flush() + if err != nil { + return err + } + return blo.LargeObjectFile.Flush() +} diff --git a/vendor/github.com/ncw/swift/meta.go b/vendor/github.com/ncw/swift/meta.go new file mode 100644 index 00000000000..7e149e1390e --- /dev/null +++ b/vendor/github.com/ncw/swift/meta.go @@ -0,0 +1,174 @@ +// Metadata manipulation in and out of Headers + +package swift + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "time" +) + +// Metadata stores account, container or object metadata. +type Metadata map[string]string + +// Metadata gets the Metadata starting with the metaPrefix out of the Headers. +// +// The keys in the Metadata will be converted to lower case +func (h Headers) Metadata(metaPrefix string) Metadata { + m := Metadata{} + metaPrefix = http.CanonicalHeaderKey(metaPrefix) + for key, value := range h { + if strings.HasPrefix(key, metaPrefix) { + metaKey := strings.ToLower(key[len(metaPrefix):]) + m[metaKey] = value + } + } + return m +} + +// AccountMetadata converts Headers from account to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) AccountMetadata() Metadata { + return h.Metadata("X-Account-Meta-") +} + +// ContainerMetadata converts Headers from container to a Metadata. 
+// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ContainerMetadata() Metadata { + return h.Metadata("X-Container-Meta-") +} + +// ObjectMetadata converts Headers from object to a Metadata. +// +// The keys in the Metadata will be converted to lower case. +func (h Headers) ObjectMetadata() Metadata { + return h.Metadata("X-Object-Meta-") +} + +// Headers convert the Metadata starting with the metaPrefix into a +// Headers. +// +// The keys in the Metadata will be converted from lower case to http +// Canonical (see http.CanonicalHeaderKey). +func (m Metadata) Headers(metaPrefix string) Headers { + h := Headers{} + for key, value := range m { + key = http.CanonicalHeaderKey(metaPrefix + key) + h[key] = value + } + return h +} + +// AccountHeaders converts the Metadata for the account. +func (m Metadata) AccountHeaders() Headers { + return m.Headers("X-Account-Meta-") +} + +// ContainerHeaders converts the Metadata for the container. +func (m Metadata) ContainerHeaders() Headers { + return m.Headers("X-Container-Meta-") +} + +// ObjectHeaders converts the Metadata for the object. +func (m Metadata) ObjectHeaders() Headers { + return m.Headers("X-Object-Meta-") +} + +// Turns a number of ns into a floating point string in seconds +// +// Trims trailing zeros and guaranteed to be perfectly accurate +func nsToFloatString(ns int64) string { + if ns < 0 { + return "-" + nsToFloatString(-ns) + } + result := fmt.Sprintf("%010d", ns) + split := len(result) - 9 + result, decimals := result[:split], result[split:] + decimals = strings.TrimRight(decimals, "0") + if decimals != "" { + result += "." + result += decimals + } + return result +} + +// Turns a floating point string in seconds into a ns integer +// +// Guaranteed to be perfectly accurate +func floatStringToNs(s string) (int64, error) { + const zeros = "000000000" + if point := strings.IndexRune(s, '.'); point >= 0 { + tail := s[point+1:] + if fill := 9 - len(tail); fill < 0 { + tail = tail[:9] + } else { + tail += zeros[:fill] + } + s = s[:point] + tail + } else if len(s) > 0 { // Make sure empty string produces an error + s += zeros + } + return strconv.ParseInt(s, 10, 64) +} + +// FloatStringToTime converts a floating point number string to a time.Time +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number should be in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z" +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. +// +// If an error is returned then time will be returned as the zero time. +func FloatStringToTime(s string) (t time.Time, err error) { + ns, err := floatStringToNs(s) + if err != nil { + return + } + t = time.Unix(0, ns) + return +} + +// TimeToFloatString converts a time.Time object to a floating point string +// +// The string is floating point number of seconds since the epoch +// (Unix time). The number is in fixed point format (not +// exponential), eg "1354040105.123456789" which represents the time +// "2012-11-27T18:15:05.123456789Z". Trailing zeros will be dropped +// from the output. +// +// Some care is taken to preserve all the accuracy in the time.Time +// (which wouldn't happen with a naive conversion through float64) so +// a round trip conversion won't change the data. 
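+//
+// For example (sketch), round-tripping the timestamp used in the
+// documentation above:
+//
+//	s := swift.TimeToFloatString(time.Unix(0, 1354040105123456789))
+//	// s == "1354040105.123456789"
+//	t, err := swift.FloatStringToTime(s)
+//	// on success t.UnixNano() == 1354040105123456789 again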
+func TimeToFloatString(t time.Time) string {
+	return nsToFloatString(t.UnixNano())
+}
+
+// GetModTime reads a modification time (mtime) from a Metadata object
+//
+// This is a de facto standard (used in the official python-swiftclient
+// amongst others) for storing the modification time (as read using
+// os.Stat) for an object. It is stored using the key 'mtime', which
+// for example when written to an object will be 'X-Object-Meta-Mtime'.
+//
+// If an error is returned then time will be returned as the zero time.
+func (m Metadata) GetModTime() (t time.Time, err error) {
+	return FloatStringToTime(m["mtime"])
+}
+
+// SetModTime writes a modification time (mtime) to a Metadata object
+//
+// This is a de facto standard (used in the official python-swiftclient
+// amongst others) for storing the modification time (as read using
+// os.Stat) for an object. It is stored using the key 'mtime', which
+// for example when written to an object will be 'X-Object-Meta-Mtime'.
+func (m Metadata) SetModTime(t time.Time) {
+	m["mtime"] = TimeToFloatString(t)
+}
diff --git a/vendor/github.com/ncw/swift/notes.txt b/vendor/github.com/ncw/swift/notes.txt
new file mode 100644
index 00000000000..f738552cd8a
--- /dev/null
+++ b/vendor/github.com/ncw/swift/notes.txt
@@ -0,0 +1,55 @@
+Notes on Go Swift
+=================
+
+Make a builder style interface like the Google Go APIs? Advantages
+are that it is easy to add named methods to the service object to do
+specific things. Slightly less efficient. Not sure about how to
+return extra stuff though - in an object?
+
+Make a container struct so these could be methods on it?
+
+Make noResponse check for 204?
+
+Make storage public so it can be extended easily?
+
+Rename to go-swift to match user agent string?
+
+Reconnect on auth error - 401 when token expires isn't tested
+
+Make more api compatible with python cloudfiles?
+
+Retry operations on timeout / network errors?
+- also 408 error
+- GET requests only?
+
+Make Connection thread safe - whenever it is changed take a write lock whenever it is read from a read lock
+
+Add extra headers field to Connection (for via etc)
+
+Make errors use an error hierarchy so you can catch them with a type assertion
+
+  Error(...)
+  ObjectCorrupted{ Error }
+
+Make a Debug flag in connection for logging stuff
+
+Object If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since etc
+
+Object range
+
+Object create, update with X-Delete-At or X-Delete-After
+
+Large object support
+- check uploads are less than 5GB in normal mode?
+
+Access control CORS?
+
+Swift client retries and backs off for all types of errors
+
+Implement net error interface?
+
+type Error interface {
+	error
+	Timeout() bool   // Is the error a timeout?
+	Temporary() bool // Is the error temporary?
+} diff --git a/vendor/github.com/ncw/swift/slo.go b/vendor/github.com/ncw/swift/slo.go new file mode 100644 index 00000000000..6a10ddfc056 --- /dev/null +++ b/vendor/github.com/ncw/swift/slo.go @@ -0,0 +1,171 @@ +package swift + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/url" + "os" +) + +// StaticLargeObjectCreateFile represents an open static large object +type StaticLargeObjectCreateFile struct { + largeObjectCreateFile +} + +var SLONotSupported = errors.New("SLO not supported") + +type swiftSegment struct { + Path string `json:"path,omitempty"` + Etag string `json:"etag,omitempty"` + Size int64 `json:"size_bytes,omitempty"` + // When uploading a manifest, the attributes must be named `path`, `etag` and `size_bytes` + // but when querying the JSON content of a manifest with the `multipart-manifest=get` + // parameter, Swift names those attributes `name`, `hash` and `bytes`. + // We use all the different attributes names in this structure to be able to use + // the same structure for both uploading and retrieving. + Name string `json:"name,omitempty"` + Hash string `json:"hash,omitempty"` + Bytes int64 `json:"bytes,omitempty"` + ContentType string `json:"content_type,omitempty"` + LastModified string `json:"last_modified,omitempty"` +} + +// StaticLargeObjectCreateFile creates a static large object returning +// an object which satisfies io.Writer, io.Seeker, io.Closer and +// io.ReaderFrom. The flags are as passed to the largeObjectCreate +// method. +func (c *Connection) StaticLargeObjectCreateFile(opts *LargeObjectOpts) (LargeObjectFile, error) { + info, err := c.cachedQueryInfo() + if err != nil || !info.SupportsSLO() { + return nil, SLONotSupported + } + realMinChunkSize := info.SLOMinSegmentSize() + if realMinChunkSize > opts.MinChunkSize { + opts.MinChunkSize = realMinChunkSize + } + lo, err := c.largeObjectCreate(opts) + if err != nil { + return nil, err + } + return withBuffer(opts, &StaticLargeObjectCreateFile{ + largeObjectCreateFile: *lo, + }), nil +} + +// StaticLargeObjectCreate creates or truncates an existing static +// large object returning a writeable object. This sets opts.Flags to +// an appropriate value before calling StaticLargeObjectCreateFile +func (c *Connection) StaticLargeObjectCreate(opts *LargeObjectOpts) (LargeObjectFile, error) { + opts.Flags = os.O_TRUNC | os.O_CREATE + return c.StaticLargeObjectCreateFile(opts) +} + +// StaticLargeObjectDelete deletes a static large object and all of its segments. 
+func (c *Connection) StaticLargeObjectDelete(container string, path string) error { + info, err := c.cachedQueryInfo() + if err != nil || !info.SupportsSLO() { + return SLONotSupported + } + return c.LargeObjectDelete(container, path) +} + +// StaticLargeObjectMove moves a static large object from srcContainer, srcObjectName to dstContainer, dstObjectName +func (c *Connection) StaticLargeObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) error { + swiftInfo, err := c.cachedQueryInfo() + if err != nil || !swiftInfo.SupportsSLO() { + return SLONotSupported + } + info, headers, err := c.Object(srcContainer, srcObjectName) + if err != nil { + return err + } + + container, segments, err := c.getAllSegments(srcContainer, srcObjectName, headers) + if err != nil { + return err + } + + //copy only metadata during move (other headers might not be safe for copying) + headers = headers.ObjectMetadata().ObjectHeaders() + + if err := c.createSLOManifest(dstContainer, dstObjectName, info.ContentType, container, segments, headers); err != nil { + return err + } + + if err := c.ObjectDelete(srcContainer, srcObjectName); err != nil { + return err + } + + return nil +} + +// createSLOManifest creates a static large object manifest +func (c *Connection) createSLOManifest(container string, path string, contentType string, segmentContainer string, segments []Object, h Headers) error { + sloSegments := make([]swiftSegment, len(segments)) + for i, segment := range segments { + sloSegments[i].Path = fmt.Sprintf("%s/%s", segmentContainer, segment.Name) + sloSegments[i].Etag = segment.Hash + sloSegments[i].Size = segment.Bytes + } + + content, err := json.Marshal(sloSegments) + if err != nil { + return err + } + + values := url.Values{} + values.Set("multipart-manifest", "put") + if _, err := c.objectPut(container, path, bytes.NewBuffer(content), false, "", contentType, h, values); err != nil { + return err + } + + return nil +} + +func (file *StaticLargeObjectCreateFile) Close() error { + return file.Flush() +} + +func (file *StaticLargeObjectCreateFile) Flush() error { + if err := file.conn.createSLOManifest(file.container, file.objectName, file.contentType, file.segmentContainer, file.segments, file.headers); err != nil { + return err + } + return file.conn.waitForSegmentsToShowUp(file.container, file.objectName, file.Size()) +} + +func (c *Connection) getAllSLOSegments(container, path string) (string, []Object, error) { + var ( + segmentList []swiftSegment + segments []Object + segPath string + segmentContainer string + ) + + values := url.Values{} + values.Set("multipart-manifest", "get") + + file, _, err := c.objectOpen(container, path, true, nil, values) + if err != nil { + return "", nil, err + } + + content, err := ioutil.ReadAll(file) + if err != nil { + return "", nil, err + } + + json.Unmarshal(content, &segmentList) + for _, segment := range segmentList { + segmentContainer, segPath = parseFullPath(segment.Name[1:]) + segments = append(segments, Object{ + Name: segPath, + Bytes: segment.Bytes, + Hash: segment.Hash, + }) + } + + return segmentContainer, segments, nil +} diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go new file mode 100644 index 00000000000..59b68ce96bb --- /dev/null +++ b/vendor/github.com/ncw/swift/swift.go @@ -0,0 +1,2293 @@ +package swift + +import ( + "bufio" + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "io/ioutil" + "mime" + 
"net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" +) + +const ( + DefaultUserAgent = "goswift/1.0" // Default user agent + DefaultRetries = 3 // Default number of retries on token expiry + TimeFormat = "2006-01-02T15:04:05" // Python date format for json replies parsed as UTC + UploadTar = "tar" // Data format specifier for Connection.BulkUpload(). + UploadTarGzip = "tar.gz" // Data format specifier for Connection.BulkUpload(). + UploadTarBzip2 = "tar.bz2" // Data format specifier for Connection.BulkUpload(). + allContainersLimit = 10000 // Number of containers to fetch at once + allObjectsLimit = 10000 // Number objects to fetch at once + allObjectsChanLimit = 1000 // ...when fetching to a channel +) + +// ObjectType is the type of the swift object, regular, static large, +// or dynamic large. +type ObjectType int + +// Values that ObjectType can take +const ( + RegularObjectType ObjectType = iota + StaticLargeObjectType + DynamicLargeObjectType +) + +// Connection holds the details of the connection to the swift server. +// +// You need to provide UserName, ApiKey and AuthUrl when you create a +// connection then call Authenticate on it. +// +// The auth version in use will be detected from the AuthURL - you can +// override this with the AuthVersion parameter. +// +// If using v2 auth you can also set Region in the Connection +// structure. If you don't set Region you will get the default region +// which may not be what you want. +// +// For reference some common AuthUrls looks like this: +// +// Rackspace US https://auth.api.rackspacecloud.com/v1.0 +// Rackspace UK https://lon.auth.api.rackspacecloud.com/v1.0 +// Rackspace v2 https://identity.api.rackspacecloud.com/v2.0 +// Memset Memstore UK https://auth.storage.memset.com/v1.0 +// Memstore v2 https://auth.storage.memset.com/v2.0 +// +// When using Google Appengine you must provide the Connection with an +// appengine-specific Transport: +// +// import ( +// "appengine/urlfetch" +// "fmt" +// "github.com/ncw/swift" +// ) +// +// func handler(w http.ResponseWriter, r *http.Request) { +// ctx := appengine.NewContext(r) +// tr := urlfetch.Transport{Context: ctx} +// c := swift.Connection{ +// UserName: "user", +// ApiKey: "key", +// AuthUrl: "auth_url", +// Transport: tr, +// } +// _ := c.Authenticate() +// containers, _ := c.ContainerNames(nil) +// fmt.Fprintf(w, "containers: %q", containers) +// } +// +// If you don't supply a Transport, one is made which relies on +// http.ProxyFromEnvironment (http://golang.org/pkg/net/http/#ProxyFromEnvironment). +// This means that the connection will respect the HTTP proxy specified by the +// environment variables $HTTP_PROXY and $NO_PROXY. 
+type Connection struct {
+	// Parameters - fill these in before calling Authenticate
+	// They are all optional except UserName, ApiKey and AuthUrl
+	Domain                      string        // User's domain name
+	DomainId                    string        // User's domain Id
+	UserName                    string        // UserName for api
+	UserId                      string        // User Id
+	ApiKey                      string        // Key for api access
+	ApplicationCredentialId     string        // Application Credential ID
+	ApplicationCredentialName   string        // Application Credential Name
+	ApplicationCredentialSecret string        // Application Credential Secret
+	AuthUrl                     string        // Auth URL
+	Retries                     int           // Retries on error (default is 3)
+	UserAgent                   string        // Http User agent (default goswift/1.0)
+	ConnectTimeout              time.Duration // Connect channel timeout (default 10s)
+	Timeout                     time.Duration // Data channel timeout (default 60s)
+	Region                      string        // Region to use eg "LON", "ORD" - default is use first region (v2,v3 auth only)
+	AuthVersion                 int           // Set to 1, 2 or 3 or leave at 0 for autodetect
+	Internal                    bool          // Set this to true to use the internal / service network
+	Tenant                      string        // Name of the tenant (v2,v3 auth only)
+	TenantId                    string        // Id of the tenant (v2,v3 auth only)
+	EndpointType                EndpointType  // Endpoint type (v2,v3 auth only) (default is public URL unless Internal is set)
+	TenantDomain                string        // Name of the tenant's domain (v3 auth only), only needed if it differs from the user domain
+	TenantDomainId              string        // Id of the tenant's domain (v3 auth only), only needed if it differs from the user domain
+	TrustId                     string        // Id of the trust (v3 auth only)
+	Transport                   http.RoundTripper `json:"-" xml:"-"` // Optional specialised http.Transport (eg. for Google Appengine)
+	// These are filled in after Authenticate is called as are the defaults for above
+	StorageUrl string
+	AuthToken  string
+	Expires    time.Time // time the token expires, may be Zero if unknown
+	client     *http.Client
+	Auth       Authenticator `json:"-" xml:"-"` // the current authenticator
+	authLock   *sync.Mutex   // lock when R/W StorageUrl, AuthToken, Auth
+	// swiftInfo is filled after QueryInfo is called
+	swiftInfo SwiftInfo
+}
+
+// setFromEnv reads the value that param points to (it must be a
+// pointer); if it is the zero value then it reads the environment
+// variable name passed in, parses it according to the type and writes
+// it to the pointer.
+func setFromEnv(param interface{}, name string) (err error) {
+	val := os.Getenv(name)
+	if val == "" {
+		return
+	}
+	switch result := param.(type) {
+	case *string:
+		if *result == "" {
+			*result = val
+		}
+	case *int:
+		if *result == 0 {
+			*result, err = strconv.Atoi(val)
+		}
+	case *bool:
+		if *result == false {
+			*result, err = strconv.ParseBool(val)
+		}
+	case *time.Duration:
+		if *result == 0 {
+			*result, err = time.ParseDuration(val)
+		}
+	case *EndpointType:
+		if *result == EndpointType("") {
+			*result = EndpointType(val)
+		}
+	default:
+		return newErrorf(0, "can't set var of type %T", param)
+	}
+	return err
+}
+
+// ApplyEnvironment reads environment variables and applies them to
+// the Connection structure. It won't overwrite any parameters which
+// are already set in the Connection struct.
+//
+// To make a new Connection object entirely from the environment you
+// would do:
+//
+//	c := new(Connection)
+//	err := c.ApplyEnvironment()
+//	if err != nil { log.Fatal(err) }
+//
+// The naming of these variables follows the official OpenStack naming
+// scheme so it should be compatible with OpenStack rc files.
+//
+// For v1 authentication (obsolete)
+//	ST_AUTH - Auth URL
+//	ST_USER - UserName for api
+//	ST_KEY - Key for api access
+//
+// For v2 authentication
+//	OS_AUTH_URL - Auth URL
+//	OS_USERNAME - UserName for api
+//	OS_PASSWORD - Key for api access
+//	OS_TENANT_NAME - Name of the tenant
+//	OS_TENANT_ID - Id of the tenant
+//	OS_REGION_NAME - Region to use - default is use first region
+//
+// For v3 authentication
+//	OS_AUTH_URL - Auth URL
+//	OS_USERNAME - UserName for api
+//	OS_USER_ID - User Id
+//	OS_PASSWORD - Key for api access
+//	OS_APPLICATION_CREDENTIAL_ID - Application Credential ID
+//	OS_APPLICATION_CREDENTIAL_NAME - Application Credential Name
+//	OS_APPLICATION_CREDENTIAL_SECRET - Application Credential Secret
+//	OS_USER_DOMAIN_NAME - User's domain name
+//	OS_USER_DOMAIN_ID - User's domain Id
+//	OS_PROJECT_NAME - Name of the project
+//	OS_PROJECT_DOMAIN_NAME - Name of the tenant's domain, only needed if it differs from the user domain
+//	OS_PROJECT_DOMAIN_ID - Id of the tenant's domain, only needed if it differs from the user domain
+//	OS_TRUST_ID - Id of the trust
+//	OS_REGION_NAME - Region to use - default is use first region
+//
+// Other
+//	OS_ENDPOINT_TYPE - Endpoint type public, internal or admin
+//	ST_AUTH_VERSION - Choose auth version - 1, 2 or 3 or leave at 0 for autodetect
+//
+// For manual authentication
+//	OS_STORAGE_URL - storage URL from alternate authentication
+//	OS_AUTH_TOKEN - Auth Token from alternate authentication
+//
+// Library specific
+//	GOSWIFT_RETRIES - Retries on error (default is 3)
+//	GOSWIFT_USER_AGENT - HTTP User agent (default goswift/1.0)
+//	GOSWIFT_CONNECT_TIMEOUT - Connect channel timeout with unit, eg "10s", "100ms" (default "10s")
+//	GOSWIFT_TIMEOUT - Data channel timeout with unit, eg "10s", "100ms" (default "60s")
+//	GOSWIFT_INTERNAL - Set this to "true" to use the internal network (obsolete - use OS_ENDPOINT_TYPE)
+func (c *Connection) ApplyEnvironment() (err error) {
+	for _, item := range []struct {
+		result interface{}
+		name   string
+	}{
+		// Environment variables - keep in same order as Connection
+		{&c.Domain, "OS_USER_DOMAIN_NAME"},
+		{&c.DomainId, "OS_USER_DOMAIN_ID"},
+		{&c.UserName, "OS_USERNAME"},
+		{&c.UserId, "OS_USER_ID"},
+		{&c.ApiKey, "OS_PASSWORD"},
+		{&c.ApplicationCredentialId, "OS_APPLICATION_CREDENTIAL_ID"},
+		{&c.ApplicationCredentialName, "OS_APPLICATION_CREDENTIAL_NAME"},
+		{&c.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET"},
+		{&c.AuthUrl, "OS_AUTH_URL"},
+		{&c.Retries, "GOSWIFT_RETRIES"},
+		{&c.UserAgent, "GOSWIFT_USER_AGENT"},
+		{&c.ConnectTimeout, "GOSWIFT_CONNECT_TIMEOUT"},
+		{&c.Timeout, "GOSWIFT_TIMEOUT"},
+		{&c.Region, "OS_REGION_NAME"},
+		{&c.AuthVersion, "ST_AUTH_VERSION"},
+		{&c.Internal, "GOSWIFT_INTERNAL"},
+		{&c.Tenant, "OS_TENANT_NAME"},  // v2
+		{&c.Tenant, "OS_PROJECT_NAME"}, // v3
+		{&c.TenantId, "OS_TENANT_ID"},
+		{&c.EndpointType, "OS_ENDPOINT_TYPE"},
+		{&c.TenantDomain, "OS_PROJECT_DOMAIN_NAME"},
+		{&c.TenantDomainId, "OS_PROJECT_DOMAIN_ID"},
+		{&c.TrustId, "OS_TRUST_ID"},
+		{&c.StorageUrl, "OS_STORAGE_URL"},
+		{&c.AuthToken, "OS_AUTH_TOKEN"},
+		// v1 auth alternatives
+		{&c.ApiKey, "ST_KEY"},
+		{&c.UserName, "ST_USER"},
+		{&c.AuthUrl, "ST_AUTH"},
+	} {
+		err = setFromEnv(item.result, item.name)
+		if err != nil {
+			return newErrorf(0, "failed to read env var %q: %v", item.name, err)
+		}
+	}
+	return nil
+}
+
+// Error - all errors generated by this package are of this type. Other
+// errors may be passed on from library functions though.
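+//
+// Sentinel values such as ObjectNotFound can be tested with ==, and the
+// HTTP status recovered with a type assertion (sketch):
+//
+//	if err == swift.ObjectNotFound {
+//		// object is missing
+//	} else if se, ok := err.(*swift.Error); ok {
+//		fmt.Println(se.StatusCode, se.Text)
+//	}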
+type Error struct {
+	StatusCode int // HTTP status code if relevant or 0 if not
+	Text       string
+}
+
+// Error satisfies the error interface.
+func (e *Error) Error() string {
+	return e.Text
+}
+
+// newError makes a new error from a string.
+func newError(StatusCode int, Text string) *Error {
+	return &Error{
+		StatusCode: StatusCode,
+		Text:       Text,
+	}
+}
+
+// newErrorf makes a new error from sprintf parameters.
+func newErrorf(StatusCode int, Text string, Parameters ...interface{}) *Error {
+	return newError(StatusCode, fmt.Sprintf(Text, Parameters...))
+}
+
+// errorMap defines http error codes to error mappings.
+type errorMap map[int]error
+
+var (
+	// Specific Errors you might want to check for equality
+	NotModified         = newError(304, "Not Modified")
+	BadRequest          = newError(400, "Bad Request")
+	AuthorizationFailed = newError(401, "Authorization Failed")
+	ContainerNotFound   = newError(404, "Container Not Found")
+	ContainerNotEmpty   = newError(409, "Container Not Empty")
+	ObjectNotFound      = newError(404, "Object Not Found")
+	ObjectCorrupted     = newError(422, "Object Corrupted")
+	TimeoutError        = newError(408, "Timeout when reading or writing data")
+	Forbidden           = newError(403, "Operation forbidden")
+	TooLargeObject      = newError(413, "Too Large Object")
+	RateLimit           = newError(498, "Rate Limit")
+	TooManyRequests     = newError(429, "TooManyRequests")
+
+	// Mappings for authentication errors
+	authErrorMap = errorMap{
+		400: BadRequest,
+		401: AuthorizationFailed,
+		403: Forbidden,
+	}
+
+	// Mappings for container errors
+	ContainerErrorMap = errorMap{
+		400: BadRequest,
+		403: Forbidden,
+		404: ContainerNotFound,
+		409: ContainerNotEmpty,
+		498: RateLimit,
+	}
+
+	// Mappings for object errors
+	objectErrorMap = errorMap{
+		304: NotModified,
+		400: BadRequest,
+		403: Forbidden,
+		404: ObjectNotFound,
+		413: TooLargeObject,
+		422: ObjectCorrupted,
+		429: TooManyRequests,
+		498: RateLimit,
+	}
+)
+
+// checkClose is used to check the return from Close in a defer
+// statement.
+func checkClose(c io.Closer, err *error) {
+	cerr := c.Close()
+	if *err == nil {
+		*err = cerr
+	}
+}
+
+// drainAndClose discards all data from rd and closes it.
+// If an error occurs during Read, it is discarded.
+func drainAndClose(rd io.ReadCloser, err *error) {
+	if rd == nil {
+		return
+	}
+
+	_, _ = io.Copy(ioutil.Discard, rd)
+	cerr := rd.Close()
+	if err != nil && *err == nil {
+		*err = cerr
+	}
+}
+
+// parseHeaders checks a response for errors and translates into
+// standard errors if necessary. If an error is returned, resp.Body
+// has been drained and closed.
+func (c *Connection) parseHeaders(resp *http.Response, errorMap errorMap) error {
+	if errorMap != nil {
+		if err, ok := errorMap[resp.StatusCode]; ok {
+			drainAndClose(resp.Body, nil)
+			return err
+		}
+	}
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		drainAndClose(resp.Body, nil)
+		return newErrorf(resp.StatusCode, "HTTP Error: %d: %s", resp.StatusCode, resp.Status)
+	}
+	return nil
+}
+
+// readHeaders returns a Headers object from the http.Response.
+//
+// If it receives multiple values for a key (which should never
+// happen) it will use the first one
+func readHeaders(resp *http.Response) Headers {
+	headers := Headers{}
+	for key, values := range resp.Header {
+		headers[key] = values[0]
+	}
+	return headers
+}
+
+// Headers stores HTTP headers (can only have one of each header like Swift).
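+//
+// The metadata helpers in meta.go work on these, e.g. (sketch, assuming
+// headers came from an Object call):
+//
+//	meta := headers.ObjectMetadata() // strips "X-Object-Meta-", lowercases keys
+//	mtime, err := meta.GetModTime()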
+type Headers map[string]string + +// Does an http request using the running timer passed in +func (c *Connection) doTimeoutRequest(timer *time.Timer, req *http.Request) (*http.Response, error) { + // Do the request in the background so we can check the timeout + type result struct { + resp *http.Response + err error + } + done := make(chan result, 1) + go func() { + resp, err := c.client.Do(req) + done <- result{resp, err} + }() + // Wait for the read or the timeout + select { + case r := <-done: + return r.resp, r.err + case <-timer.C: + // Kill the connection on timeout so we don't leak sockets or goroutines + cancelRequest(c.Transport, req) + return nil, TimeoutError + } + panic("unreachable") // For Go 1.0 +} + +// Set defaults for any unset values +// +// Call with authLock held +func (c *Connection) setDefaults() { + if c.UserAgent == "" { + c.UserAgent = DefaultUserAgent + } + if c.Retries == 0 { + c.Retries = DefaultRetries + } + if c.ConnectTimeout == 0 { + c.ConnectTimeout = 10 * time.Second + } + if c.Timeout == 0 { + c.Timeout = 60 * time.Second + } + if c.Transport == nil { + t := &http.Transport{ + // TLSClientConfig: &tls.Config{RootCAs: pool}, + // DisableCompression: true, + Proxy: http.ProxyFromEnvironment, + // Half of linux's default open files limit (1024). + MaxIdleConnsPerHost: 512, + } + SetExpectContinueTimeout(t, 5*time.Second) + c.Transport = t + } + if c.client == nil { + c.client = &http.Client{ + // CheckRedirect: redirectPolicyFunc, + Transport: c.Transport, + } + } +} + +// Authenticate connects to the Swift server. +// +// If you don't call it before calling one of the connection methods +// then it will be called for you on the first access. +func (c *Connection) Authenticate() (err error) { + if c.authLock == nil { + c.authLock = &sync.Mutex{} + } + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticate() +} + +// Internal implementation of Authenticate +// +// Call with authLock held +func (c *Connection) authenticate() (err error) { + c.setDefaults() + + // Flush the keepalives connection - if we are + // re-authenticating then stuff has gone wrong + flushKeepaliveConnections(c.Transport) + + if c.Auth == nil { + c.Auth, err = newAuth(c) + if err != nil { + return + } + } + + retries := 1 +again: + var req *http.Request + req, err = c.Auth.Request(c) + if err != nil { + return + } + if req != nil { + timer := time.NewTimer(c.ConnectTimeout) + defer timer.Stop() + var resp *http.Response + resp, err = c.doTimeoutRequest(timer, req) + if err != nil { + return + } + defer func() { + drainAndClose(resp.Body, &err) + // Flush the auth connection - we don't want to keep + // it open if keepalives were enabled + flushKeepaliveConnections(c.Transport) + }() + if err = c.parseHeaders(resp, authErrorMap); err != nil { + // Try again for a limited number of times on + // AuthorizationFailed or BadRequest. 
This allows us + // to try some alternate forms of the request + if (err == AuthorizationFailed || err == BadRequest) && retries > 0 { + retries-- + goto again + } + return + } + err = c.Auth.Response(resp) + if err != nil { + return + } + } + if customAuth, isCustom := c.Auth.(CustomEndpointAuthenticator); isCustom && c.EndpointType != "" { + c.StorageUrl = customAuth.StorageUrlForEndpoint(c.EndpointType) + } else { + c.StorageUrl = c.Auth.StorageUrl(c.Internal) + } + c.AuthToken = c.Auth.Token() + if do, ok := c.Auth.(Expireser); ok { + c.Expires = do.Expires() + } else { + c.Expires = time.Time{} + } + + if !c.authenticated() { + err = newError(0, "Response didn't have storage url and auth token") + return + } + return +} + +// Get an authToken and url +// +// The Url may be updated if it needed to authenticate using the OnReAuth function +func (c *Connection) getUrlAndAuthToken(targetUrlIn string, OnReAuth func() (string, error)) (targetUrlOut, authToken string, err error) { + c.authLock.Lock() + defer c.authLock.Unlock() + targetUrlOut = targetUrlIn + if !c.authenticated() { + err = c.authenticate() + if err != nil { + return + } + if OnReAuth != nil { + targetUrlOut, err = OnReAuth() + if err != nil { + return + } + } + } + authToken = c.AuthToken + return +} + +// flushKeepaliveConnections is called to flush pending requests after an error. +func flushKeepaliveConnections(transport http.RoundTripper) { + if tr, ok := transport.(interface { + CloseIdleConnections() + }); ok { + tr.CloseIdleConnections() + } +} + +// UnAuthenticate removes the authentication from the Connection. +func (c *Connection) UnAuthenticate() { + c.authLock.Lock() + c.StorageUrl = "" + c.AuthToken = "" + c.authLock.Unlock() +} + +// Authenticated returns a boolean to show if the current connection +// is authenticated. +// +// Doesn't actually check the credentials against the server. +func (c *Connection) Authenticated() bool { + if c.authLock == nil { + c.authLock = &sync.Mutex{} + } + c.authLock.Lock() + defer c.authLock.Unlock() + return c.authenticated() +} + +// Internal version of Authenticated() +// +// Call with authLock held +func (c *Connection) authenticated() bool { + if c.StorageUrl == "" || c.AuthToken == "" { + return false + } + if c.Expires.IsZero() { + return true + } + timeUntilExpiry := c.Expires.Sub(time.Now()) + return timeUntilExpiry >= 60*time.Second +} + +// SwiftInfo contains the JSON object returned by Swift when the /info +// route is queried. 
The object contains, among others, the Swift version, +// the enabled middlewares and their configuration +type SwiftInfo map[string]interface{} + +func (i SwiftInfo) SupportsBulkDelete() bool { + _, val := i["bulk_delete"] + return val +} + +func (i SwiftInfo) SupportsSLO() bool { + _, val := i["slo"] + return val +} + +func (i SwiftInfo) SLOMinSegmentSize() int64 { + if slo, ok := i["slo"].(map[string]interface{}); ok { + val, _ := slo["min_segment_size"].(float64) + return int64(val) + } + return 1 +} + +// Discover Swift configuration by doing a request against /info +func (c *Connection) QueryInfo() (infos SwiftInfo, err error) { + infoUrl, err := url.Parse(c.StorageUrl) + if err != nil { + return nil, err + } + infoUrl.Path = path.Join(infoUrl.Path, "..", "..", "info") + resp, err := c.client.Get(infoUrl.String()) + if err == nil { + if resp.StatusCode != http.StatusOK { + drainAndClose(resp.Body, nil) + return nil, fmt.Errorf("Invalid status code for info request: %d", resp.StatusCode) + } + err = readJson(resp, &infos) + if err == nil { + c.authLock.Lock() + c.swiftInfo = infos + c.authLock.Unlock() + } + return infos, err + } + return nil, err +} + +func (c *Connection) cachedQueryInfo() (infos SwiftInfo, err error) { + c.authLock.Lock() + infos = c.swiftInfo + c.authLock.Unlock() + if infos == nil { + infos, err = c.QueryInfo() + if err != nil { + return + } + } + return infos, nil +} + +// RequestOpts contains parameters for Connection.storage. +type RequestOpts struct { + Container string + ObjectName string + Operation string + Parameters url.Values + Headers Headers + ErrorMap errorMap + NoResponse bool + Body io.Reader + Retries int + // if set this is called on re-authentication to refresh the targetUrl + OnReAuth func() (string, error) +} + +// Call runs a remote command on the targetUrl, returns a +// response, headers and possible error. +// +// operation is GET, HEAD etc +// container is the name of a container +// Any other parameters (if not None) are added to the targetUrl +// +// Returns a response or an error. If response is returned then +// the resp.Body must be read completely and +// resp.Body.Close() must be called on it, unless noResponse is set in +// which case the body will be closed in this function +// +// If "Content-Length" is set in p.Headers it will be used - this can +// be used to override the default chunked transfer encoding for +// uploads. +// +// This will Authenticate if necessary, and re-authenticate if it +// receives a 401 error which means the token has expired +// +// This method is exported so extensions can call it. 
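+//
+// A sketch of a raw HEAD request on a container via Call (the container
+// name is hypothetical), given an authenticated Connection c:
+//
+//	resp, headers, err := c.Call(c.StorageUrl, swift.RequestOpts{
+//		Container:  "backups",
+//		Operation:  "HEAD",
+//		ErrorMap:   swift.ContainerErrorMap,
+//		NoResponse: true, // body is drained and closed for us
+//	})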
+func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response, headers Headers, err error) {
+	c.authLock.Lock()
+	c.setDefaults()
+	c.authLock.Unlock()
+	retries := p.Retries
+	if retries == 0 {
+		retries = c.Retries
+	}
+	var req *http.Request
+	for {
+		var authToken string
+		if targetUrl, authToken, err = c.getUrlAndAuthToken(targetUrl, p.OnReAuth); err != nil {
+			return // authentication failure
+		}
+		var URL *url.URL
+		URL, err = url.Parse(targetUrl)
+		if err != nil {
+			return
+		}
+		if p.Container != "" {
+			URL.Path += "/" + p.Container
+			if p.ObjectName != "" {
+				URL.Path += "/" + p.ObjectName
+			}
+		}
+		if p.Parameters != nil {
+			URL.RawQuery = p.Parameters.Encode()
+		}
+		timer := time.NewTimer(c.ConnectTimeout)
+		defer timer.Stop()
+		reader := p.Body
+		if reader != nil {
+			reader = newWatchdogReader(reader, c.Timeout, timer)
+		}
+		req, err = http.NewRequest(p.Operation, URL.String(), reader)
+		if err != nil {
+			return
+		}
+		if p.Headers != nil {
+			for k, v := range p.Headers {
+				// Set ContentLength in req if the user passed it in via the headers
+				if k == "Content-Length" {
+					req.ContentLength, err = strconv.ParseInt(v, 10, 64)
+					if err != nil {
+						err = fmt.Errorf("Invalid %q header %q: %v", k, v, err)
+						return
+					}
+				} else {
+					req.Header.Add(k, v)
+				}
+			}
+		}
+		req.Header.Add("User-Agent", c.UserAgent)
+		req.Header.Add("X-Auth-Token", authToken)
+
+		_, hasCL := p.Headers["Content-Length"]
+		AddExpectAndTransferEncoding(req, hasCL)
+
+		resp, err = c.doTimeoutRequest(timer, req)
+		if err != nil {
+			if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 {
+				retries--
+				continue
+			}
+			return
+		}
+		// Check to see if token has expired
+		if resp.StatusCode == 401 && retries > 0 {
+			drainAndClose(resp.Body, nil)
+			c.UnAuthenticate()
+			retries--
+		} else {
+			break
+		}
+	}
+
+	headers = readHeaders(resp)
+	if err = c.parseHeaders(resp, p.ErrorMap); err != nil {
+		return
+	}
+	if p.NoResponse {
+		drainAndClose(resp.Body, &err)
+		if err != nil {
+			return
+		}
+	} else {
+		// Cancel the request on timeout
+		cancel := func() {
+			cancelRequest(c.Transport, req)
+		}
+		// Wrap resp.Body to make it obey an idle timeout
+		resp.Body = newTimeoutReader(resp.Body, c.Timeout, cancel)
+	}
+	return
+}
+
+// storage runs a remote command on the storage url, returns a
+// response, headers and possible error.
+//
+// operation is GET, HEAD etc
+// container is the name of a container
+// Any other parameters (if not None) are added to the storage url
+//
+// Returns a response or an error. If response is returned then
+// resp.Body.Close() must be called on it, unless noResponse is set in
+// which case the body will be closed in this function
+//
+// This will Authenticate if necessary, and re-authenticate if it
+// receives a 401 error which means the token has expired
+func (c *Connection) storage(p RequestOpts) (resp *http.Response, headers Headers, err error) {
+	p.OnReAuth = func() (string, error) {
+		return c.StorageUrl, nil
+	}
+	c.authLock.Lock()
+	url := c.StorageUrl
+	c.authLock.Unlock()
+	return c.Call(url, p)
+}
+
+// readLines reads the response into an array of strings.
+// +// Closes the response when done +func readLines(resp *http.Response) (lines []string, err error) { + defer drainAndClose(resp.Body, &err) + reader := bufio.NewReader(resp.Body) + buffer := bytes.NewBuffer(make([]byte, 0, 128)) + var part []byte + var prefix bool + for { + if part, prefix, err = reader.ReadLine(); err != nil { + break + } + buffer.Write(part) + if !prefix { + lines = append(lines, buffer.String()) + buffer.Reset() + } + } + if err == io.EOF { + err = nil + } + return +} + +// readJson reads the response into the json type passed in +// +// Closes the response when done +func readJson(resp *http.Response, result interface{}) (err error) { + defer drainAndClose(resp.Body, &err) + decoder := json.NewDecoder(resp.Body) + return decoder.Decode(result) +} + +/* ------------------------------------------------------------ */ + +// ContainersOpts is options for Containers() and ContainerNames() +type ContainersOpts struct { + Limit int // For an integer value n, limits the number of results to at most n values. + Prefix string // Given a string value x, return container names matching the specified prefix. + Marker string // Given a string value x, return container names greater in value than the specified marker. + EndMarker string // Given a string value x, return container names less in value than the specified marker. + Headers Headers // Any additional HTTP headers - can be nil +} + +// parse the ContainerOpts +func (opts *ContainersOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Prefix != "" { + v.Set("prefix", opts.Prefix) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + h = opts.Headers + } + return v, h +} + +// ContainerNames returns a slice of names of containers in this account. +func (c *Connection) ContainerNames(opts *ContainersOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + lines, err := readLines(resp) + return lines, err +} + +// Container contains information about a container +type Container struct { + Name string // Name of the container + Count int64 // Number of objects in the container + Bytes int64 // Total number of bytes used in the container +} + +// Containers returns a slice of structures with full information as +// described in Container. 
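+//
+// For example (sketch), summing the bytes used across an account:
+//
+//	containers, err := c.ContainersAll(nil)
+//	var total int64
+//	for _, container := range containers {
+//		total += container.Bytes
+//	}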
+func (c *Connection) Containers(opts *ContainersOpts) ([]Container, error) {
+	v, h := opts.parse()
+	v.Set("format", "json")
+	resp, _, err := c.storage(RequestOpts{
+		Operation:  "GET",
+		Parameters: v,
+		ErrorMap:   ContainerErrorMap,
+		Headers:    h,
+	})
+	if err != nil {
+		return nil, err
+	}
+	var containers []Container
+	err = readJson(resp, &containers)
+	return containers, err
+}
+
+// containersAllOpts makes a copy of opts if set or makes a new one and
+// overrides Limit and Marker
+func containersAllOpts(opts *ContainersOpts) *ContainersOpts {
+	var newOpts ContainersOpts
+	if opts != nil {
+		newOpts = *opts
+	}
+	if newOpts.Limit == 0 {
+		newOpts.Limit = allContainersLimit
+	}
+	newOpts.Marker = ""
+	return &newOpts
+}
+
+// ContainersAll is like Containers but it returns all the Containers
+//
+// It calls Containers multiple times using the Marker parameter
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) {
+	opts = containersAllOpts(opts)
+	containers := make([]Container, 0)
+	for {
+		newContainers, err := c.Containers(opts)
+		if err != nil {
+			return nil, err
+		}
+		containers = append(containers, newContainers...)
+		if len(newContainers) < opts.Limit {
+			break
+		}
+		opts.Marker = newContainers[len(newContainers)-1].Name
+	}
+	return containers, nil
+}
+
+// ContainerNamesAll is like ContainerNames but it returns all the Containers
+//
+// It calls ContainerNames multiple times using the Marker parameter
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ContainerNamesAll(opts *ContainersOpts) ([]string, error) {
+	opts = containersAllOpts(opts)
+	containers := make([]string, 0)
+	for {
+		newContainers, err := c.ContainerNames(opts)
+		if err != nil {
+			return nil, err
+		}
+		containers = append(containers, newContainers...)
+		if len(newContainers) < opts.Limit {
+			break
+		}
+		opts.Marker = newContainers[len(newContainers)-1]
+	}
+	return containers, nil
+}
+
+/* ------------------------------------------------------------ */
+
+// ObjectsOpts is options for Objects() and ObjectNames()
+type ObjectsOpts struct {
+	Limit     int    // For an integer value n, limits the number of results to at most n values.
+	Marker    string // Given a string value x, return object names greater in value than the specified marker.
+	EndMarker string // Given a string value x, return object names less in value than the specified marker
+	Prefix    string // For a string value x, causes the results to be limited to object names beginning with the substring x.
+ Path string // For a string value x, return the object names nested in the pseudo path + Delimiter rune // For a character c, return all the object names nested in the container + Headers Headers // Any additional HTTP headers - can be nil + KeepMarker bool // Do not reset Marker when using ObjectsAll or ObjectNamesAll +} + +// parse reads values out of ObjectsOpts +func (opts *ObjectsOpts) parse() (url.Values, Headers) { + v := url.Values{} + var h Headers + if opts != nil { + if opts.Limit > 0 { + v.Set("limit", strconv.Itoa(opts.Limit)) + } + if opts.Marker != "" { + v.Set("marker", opts.Marker) + } + if opts.EndMarker != "" { + v.Set("end_marker", opts.EndMarker) + } + if opts.Prefix != "" { + v.Set("prefix", opts.Prefix) + } + if opts.Path != "" { + v.Set("path", opts.Path) + } + if opts.Delimiter != 0 { + v.Set("delimiter", string(opts.Delimiter)) + } + h = opts.Headers + } + return v, h +} + +// ObjectNames returns a slice of names of objects in a given container. +func (c *Connection) ObjectNames(container string, opts *ObjectsOpts) ([]string, error) { + v, h := opts.parse() + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + return readLines(resp) +} + +// Object contains information about an object +type Object struct { + Name string `json:"name"` // object name + ContentType string `json:"content_type"` // eg application/directory + Bytes int64 `json:"bytes"` // size in bytes + ServerLastModified string `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server + LastModified time.Time // Last modified time converted to a time.Time + Hash string `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + SLOHash string `json:"slo_etag"` // MD5 hash of all segments' MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e" + PseudoDirectory bool // Set when using delimiter to show that this directory object does not really exist + SubDir string `json:"subdir"` // returned only when using delimiter to mark "pseudo directories" + ObjectType ObjectType // type of this object +} + +// Objects returns a slice of Object with information about each +// object in the container. +// +// If Delimiter is set in the opts then PseudoDirectory may be set, +// with ContentType 'application/directory'. These are not real +// objects but represent directories of objects which haven't had an +// object created for them. +func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, error) { + v, h := opts.parse() + v.Set("format", "json") + resp, _, err := c.storage(RequestOpts{ + Container: container, + Operation: "GET", + Parameters: v, + ErrorMap: ContainerErrorMap, + Headers: h, + }) + if err != nil { + return nil, err + } + var objects []Object + err = readJson(resp, &objects) + // Convert Pseudo directories and dates + for i := range objects { + object := &objects[i] + if object.SubDir != "" { + object.Name = object.SubDir + object.PseudoDirectory = true + object.ContentType = "application/directory" + } + if object.ServerLastModified != "" { + // 2012-11-11T14:49:47.887250 + // + // Remove fractional seconds if present. 
This
+			// then keeps it consistent with Object
+			// which can only return timestamps accurate
+			// to 1 second
+			//
+			// The TimeFormat will parse fractional
+			// seconds if desired though
+			datetime := strings.SplitN(object.ServerLastModified, ".", 2)[0]
+			object.LastModified, err = time.Parse(TimeFormat, datetime)
+			if err != nil {
+				return nil, err
+			}
+		}
+		if object.SLOHash != "" {
+			object.ObjectType = StaticLargeObjectType
+		}
+	}
+	return objects, err
+}
+
+// objectsAllOpts makes a copy of opts if set or makes a new one and
+// overrides Limit and Marker.
+// Marker is not overridden if KeepMarker is set.
+func objectsAllOpts(opts *ObjectsOpts, Limit int) *ObjectsOpts {
+	var newOpts ObjectsOpts
+	if opts != nil {
+		newOpts = *opts
+	}
+	if newOpts.Limit == 0 {
+		newOpts.Limit = Limit
+	}
+	if !newOpts.KeepMarker {
+		newOpts.Marker = ""
+	}
+	return &newOpts
+}
+
+// ObjectsWalkFn is a closure defined by the caller to iterate through all objects
+//
+// Call Objects or ObjectNames from here with the *ObjectsOpts passed in
+//
+// Do whatever is required with the results then return them
+type ObjectsWalkFn func(*ObjectsOpts) (interface{}, error)
+
+// ObjectsWalk is used to iterate through all the objects in chunks as
+// returned by Objects or ObjectNames using the Marker and Limit
+// parameters in the ObjectsOpts.
+//
+// Pass in a closure `walkFn` which calls Objects or ObjectNames with
+// the *ObjectsOpts passed to it and does something with the results.
+//
+// Errors will be returned from this function
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ObjectsWalk(container string, opts *ObjectsOpts, walkFn ObjectsWalkFn) error {
+	opts = objectsAllOpts(opts, allObjectsChanLimit)
+	for {
+		objects, err := walkFn(opts)
+		if err != nil {
+			return err
+		}
+		var n int
+		var last string
+		switch objects := objects.(type) {
+		case []string:
+			n = len(objects)
+			if n > 0 {
+				last = objects[len(objects)-1]
+			}
+		case []Object:
+			n = len(objects)
+			if n > 0 {
+				last = objects[len(objects)-1].Name
+			}
+		default:
+			panic("Unknown type returned to ObjectsWalk")
+		}
+		if n < opts.Limit {
+			break
+		}
+		opts.Marker = last
+	}
+	return nil
+}
+
+// ObjectsAll is like Objects but it returns an unlimited number of Objects in a slice
+//
+// It calls Objects multiple times using the Marker parameter
+func (c *Connection) ObjectsAll(container string, opts *ObjectsOpts) ([]Object, error) {
+	objects := make([]Object, 0)
+	err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) {
+		newObjects, err := c.Objects(container, opts)
+		if err == nil {
+			objects = append(objects, newObjects...)
+		}
+		return newObjects, err
+	})
+	return objects, err
+}
+
+// ObjectNamesAll is like ObjectNames but it returns all the Objects
+//
+// It calls ObjectNames multiple times using the Marker parameter. Marker is
+// reset unless KeepMarker is set
+//
+// It has a default Limit parameter but you may pass in your own
+func (c *Connection) ObjectNamesAll(container string, opts *ObjectsOpts) ([]string, error) {
+	objects := make([]string, 0)
+	err := c.ObjectsWalk(container, opts, func(opts *ObjectsOpts) (interface{}, error) {
+		newObjects, err := c.ObjectNames(container, opts)
+		if err == nil {
+			objects = append(objects, newObjects...)
+		}
+		return newObjects, err
+	})
+	return objects, err
+}
+
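+// exampleObjectsWalk is an illustrative sketch (not part of the original
+// API) showing how ObjectsWalk pages through a container using the
+// Marker/Limit parameters; the container name is hypothetical.
+func exampleObjectsWalk(c *Connection) ([]string, error) {
+	names := make([]string, 0)
+	err := c.ObjectsWalk("example-container", nil, func(opts *ObjectsOpts) (interface{}, error) {
+		// Fetch the next page; ObjectsWalk advances opts.Marker between calls.
+		newNames, err := c.ObjectNames("example-container", opts)
+		if err == nil {
+			names = append(names, newNames...)
+		}
+		// Return the page so ObjectsWalk can tell whether it was the last one.
+		return newNames, err
+	})
+	return names, err
+}
+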
+// Account contains information about this account.
+type Account struct {
+	BytesUsed  int64 // total number of bytes used
+	Containers int64 // total number of containers
+	Objects    int64 // total number of objects
+}
+
+// getInt64FromHeader is a helper function to decode int64 from header.
+func getInt64FromHeader(resp *http.Response, header string) (result int64, err error) {
+	value := resp.Header.Get(header)
+	result, err = strconv.ParseInt(value, 10, 64)
+	if err != nil {
+		err = newErrorf(0, "Bad Header '%s': '%s': %s", header, value, err)
+	}
+	return
+}
+
+// Account returns info about the account in an Account struct.
+func (c *Connection) Account() (info Account, headers Headers, err error) {
+	var resp *http.Response
+	resp, headers, err = c.storage(RequestOpts{
+		Operation:  "HEAD",
+		ErrorMap:   ContainerErrorMap,
+		NoResponse: true,
+	})
+	if err != nil {
+		return
+	}
+	// Parse the headers into a dict
+	//
+	//    {'Accept-Ranges': 'bytes',
+	//     'Content-Length': '0',
+	//     'Date': 'Tue, 05 Jul 2011 16:37:06 GMT',
+	//     'X-Account-Bytes-Used': '316598182',
+	//     'X-Account-Container-Count': '4',
+	//     'X-Account-Object-Count': '1433'}
+	if info.BytesUsed, err = getInt64FromHeader(resp, "X-Account-Bytes-Used"); err != nil {
+		return
+	}
+	if info.Containers, err = getInt64FromHeader(resp, "X-Account-Container-Count"); err != nil {
+		return
+	}
+	if info.Objects, err = getInt64FromHeader(resp, "X-Account-Object-Count"); err != nil {
+		return
+	}
+	return
+}
+
+// AccountUpdate adds, replaces or removes account metadata.
+//
+// Add or update keys by mentioning them in the Headers.
+//
+// Remove keys by setting them to an empty string.
+func (c *Connection) AccountUpdate(h Headers) error {
+	_, _, err := c.storage(RequestOpts{
+		Operation:  "POST",
+		ErrorMap:   ContainerErrorMap,
+		NoResponse: true,
+		Headers:    h,
+	})
+	return err
+}
+
+// ContainerCreate creates a container.
+//
+// If you don't want to add Headers just pass in nil
+//
+// No error is returned if it already exists, but the metadata, if any, will be updated.
+func (c *Connection) ContainerCreate(container string, h Headers) error {
+	_, _, err := c.storage(RequestOpts{
+		Container:  container,
+		Operation:  "PUT",
+		ErrorMap:   ContainerErrorMap,
+		NoResponse: true,
+		Headers:    h,
+	})
+	return err
+}
+
+// ContainerDelete deletes a container.
+//
+// May return ContainerDoesNotExist or ContainerNotEmpty
+func (c *Connection) ContainerDelete(container string) error {
+	_, _, err := c.storage(RequestOpts{
+		Container:  container,
+		Operation:  "DELETE",
+		ErrorMap:   ContainerErrorMap,
+		NoResponse: true,
+	})
+	return err
+}
+
+// Container returns info about a single container including any
+// metadata in the headers.
+func (c *Connection) Container(container string) (info Container, headers Headers, err error) {
+	var resp *http.Response
+	resp, headers, err = c.storage(RequestOpts{
+		Container:  container,
+		Operation:  "HEAD",
+		ErrorMap:   ContainerErrorMap,
+		NoResponse: true,
+	})
+	if err != nil {
+		return
+	}
+	// Parse the headers into the struct
+	info.Name = container
+	if info.Bytes, err = getInt64FromHeader(resp, "X-Container-Bytes-Used"); err != nil {
+		return
+	}
+	if info.Count, err = getInt64FromHeader(resp, "X-Container-Object-Count"); err != nil {
+		return
+	}
+	return
+}
+
+// ContainerUpdate adds, replaces or removes container metadata.
+//
+// Add or update keys by mentioning them in the Metadata.
+//
+// Remove keys by setting them to an empty string.
+//
+// Container metadata can only be read with Container() not with Containers().
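+//
+// An illustrative sketch (hypothetical container and key):
+//
+//	// set or update a metadata key
+//	err := c.ContainerUpdate("example", Headers{"X-Container-Meta-Colour": "blue"})
+//	// then remove it again by setting it to the empty string
+//	err = c.ContainerUpdate("example", Headers{"X-Container-Meta-Colour": ""})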
+func (c *Connection) ContainerUpdate(container string, h Headers) error {
+	_, _, err := c.storage(RequestOpts{
+		Container:  container,
+		Operation:  "POST",
+		ErrorMap:   ContainerErrorMap,
+		NoResponse: true,
+		Headers:    h,
+	})
+	return err
+}
+
+// ------------------------------------------------------------
+
+// ObjectCreateFile represents a swift object open for writing
+type ObjectCreateFile struct {
+	checkHash  bool           // whether we are checking the hash
+	pipeReader *io.PipeReader // pipe for the caller to use
+	pipeWriter *io.PipeWriter
+	hash       hash.Hash      // hash being built up as we go along
+	done       chan struct{}  // signals when the upload has finished
+	resp       *http.Response // valid when done has signalled
+	err        error          // ditto
+	headers    Headers        // ditto
+}
+
+// Write bytes to the object - see io.Writer
+func (file *ObjectCreateFile) Write(p []byte) (n int, err error) {
+	n, err = file.pipeWriter.Write(p)
+	if err == io.ErrClosedPipe {
+		if file.err != nil {
+			return 0, file.err
+		}
+		return 0, newError(500, "Write on closed file")
+	}
+	if err == nil && file.checkHash {
+		_, _ = file.hash.Write(p)
+	}
+	return
+}
+
+// CloseWithError closes the object, aborting the upload.
+func (file *ObjectCreateFile) CloseWithError(err error) error {
+	_ = file.pipeWriter.CloseWithError(err)
+	<-file.done
+	return nil
+}
+
+// Close closes the object and checks the md5sum if it was required.
+//
+// Also returns any other errors from the server (eg container not
+// found) so it is very important to check the errors on this method.
+func (file *ObjectCreateFile) Close() error {
+	// Close the body
+	err := file.pipeWriter.Close()
+	if err != nil {
+		return err
+	}
+
+	// Wait for the HTTP operation to complete
+	<-file.done
+
+	// Check errors
+	if file.err != nil {
+		return file.err
+	}
+	if file.checkHash {
+		receivedMd5 := strings.ToLower(file.headers["Etag"])
+		calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil))
+		if receivedMd5 != calculatedMd5 {
+			return ObjectCorrupted
+		}
+	}
+	return nil
+}
+
+// Headers returns the response headers from the created object if the upload
+// has been completed. The Close() method must be called on an ObjectCreateFile
+// before this method.
+func (file *ObjectCreateFile) Headers() (Headers, error) {
+	// error out if upload is not complete.
+	select {
+	case <-file.done:
+	default:
+		return nil, fmt.Errorf("Cannot get metadata, object upload failed or has not yet completed.")
+	}
+	return file.headers, nil
+}
+
+// Check it satisfies the interface
+var _ io.WriteCloser = &ObjectCreateFile{}
+
+// objectPutHeaders creates a set of headers for a PUT
+//
+// It guesses the contentType from the objectName if it isn't set
+//
+// checkHash may be changed
+func objectPutHeaders(objectName string, checkHash *bool, Hash string, contentType string, h Headers) Headers {
+	if contentType == "" {
+		contentType = mime.TypeByExtension(path.Ext(objectName))
+		if contentType == "" {
+			contentType = "application/octet-stream"
+		}
+	}
+	// Meta stuff
+	extraHeaders := map[string]string{
+		"Content-Type": contentType,
+	}
+	for key, value := range h {
+		extraHeaders[key] = value
+	}
+	if Hash != "" {
+		extraHeaders["Etag"] = Hash
+		*checkHash = false // the server will do it
+	}
+	return extraHeaders
+}
+
+// ObjectCreate creates or updates the object in the container. It
+// returns an io.WriteCloser you should write the contents to. You
+// MUST call Close() on it and you MUST check the error return from
+// Close().
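+//
+// An illustrative sketch (hypothetical names; assumes the container exists):
+//
+//	f, err := c.ObjectCreate("example", "hello.txt", true, "", "text/plain", nil)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err = f.Write([]byte("hello")); err != nil {
+//		return err
+//	}
+//	return f.Close() // may return ObjectCorrupted if the md5sums don't match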
+// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. If it is wrong then it will return +// ObjectCorrupted on Close() +// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted on Close() if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectCreate(container string, objectName string, checkHash bool, Hash string, contentType string, h Headers) (file *ObjectCreateFile, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + pipeReader, pipeWriter := io.Pipe() + file = &ObjectCreateFile{ + hash: md5.New(), + checkHash: checkHash, + pipeReader: pipeReader, + pipeWriter: pipeWriter, + done: make(chan struct{}), + } + // Run the PUT in the background piping it data + go func() { + opts := RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: pipeReader, + NoResponse: true, + ErrorMap: objectErrorMap, + } + file.resp, file.headers, file.err = c.storage(opts) + // Signal finished + pipeReader.Close() + close(file.done) + }() + return +} + +func (c *Connection) ObjectSymlinkCreate(container string, symlink string, targetAccount string, targetContainer string, targetObject string, targetEtag string) (headers Headers, err error) { + + EMPTY_MD5 := "d41d8cd98f00b204e9800998ecf8427e" + symHeaders := Headers{} + contents := bytes.NewBufferString("") + if targetAccount != "" { + symHeaders["X-Symlink-Target-Account"] = targetAccount + } + if targetEtag != "" { + symHeaders["X-Symlink-Target-Etag"] = targetEtag + } + symHeaders["X-Symlink-Target"] = fmt.Sprintf("%s/%s", targetContainer, targetObject) + _, err = c.ObjectPut(container, symlink, contents, true, EMPTY_MD5, "application/symlink", symHeaders) + return +} + +func (c *Connection) objectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers, parameters url.Values) (headers Headers, err error) { + extraHeaders := objectPutHeaders(objectName, &checkHash, Hash, contentType, h) + hash := md5.New() + var body io.Reader = contents + if checkHash { + body = io.TeeReader(contents, hash) + } + _, headers, err = c.storage(RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "PUT", + Headers: extraHeaders, + Body: body, + NoResponse: true, + ErrorMap: objectErrorMap, + Parameters: parameters, + }) + if err != nil { + return + } + if checkHash { + receivedMd5 := strings.ToLower(headers["Etag"]) + calculatedMd5 := fmt.Sprintf("%x", hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + return +} + +// ObjectPut creates or updates the path in the container from +// contents. contents should be an open io.Reader which will have all +// its contents read. +// +// This is a low level interface. +// +// If checkHash is True then it will calculate the MD5 Hash of the +// file as it is being uploaded and check it against that returned +// from the server. If it is wrong then it will return +// ObjectCorrupted. 
+// +// If you know the MD5 hash of the object ahead of time then set the +// Hash parameter and it will be sent to the server (as an Etag +// header) and the server will check the MD5 itself after the upload, +// and this will return ObjectCorrupted if it is incorrect. +// +// If you don't want any error protection (not recommended) then set +// checkHash to false and Hash to "". +// +// If contentType is set it will be used, otherwise one will be +// guessed from objectName using mime.TypeByExtension +func (c *Connection) ObjectPut(container string, objectName string, contents io.Reader, checkHash bool, Hash string, contentType string, h Headers) (headers Headers, err error) { + return c.objectPut(container, objectName, contents, checkHash, Hash, contentType, h, nil) +} + +// ObjectPutBytes creates an object from a []byte in a container. +// +// This is a simplified interface which checks the MD5. +func (c *Connection) ObjectPutBytes(container string, objectName string, contents []byte, contentType string) (err error) { + buf := bytes.NewBuffer(contents) + h := Headers{"Content-Length": strconv.Itoa(len(contents))} + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) + return +} + +// ObjectPutString creates an object from a string in a container. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectPutString(container string, objectName string, contents string, contentType string) (err error) { + buf := strings.NewReader(contents) + h := Headers{"Content-Length": strconv.Itoa(len(contents))} + _, err = c.ObjectPut(container, objectName, buf, true, "", contentType, h) + return +} + +// ObjectOpenFile represents a swift object open for reading +type ObjectOpenFile struct { + connection *Connection // stored copy of Connection used in Open + container string // stored copy of container used in Open + objectName string // stored copy of objectName used in Open + headers Headers // stored copy of headers used in Open + resp *http.Response // http connection + body io.Reader // read data from this + checkHash bool // true if checking MD5 + hash hash.Hash // currently accumulating MD5 + bytes int64 // number of bytes read on this connection + eof bool // whether we have read end of file + pos int64 // current position when reading + lengthOk bool // whether length is valid + length int64 // length of the object if read + seeked bool // whether we have seeked this file or not + overSeeked bool // set if we have seeked to the end or beyond +} + +// Read bytes from the object - see io.Reader +func (file *ObjectOpenFile) Read(p []byte) (n int, err error) { + if file.overSeeked { + return 0, io.EOF + } + n, err = file.body.Read(p) + file.bytes += int64(n) + file.pos += int64(n) + if err == io.EOF { + file.eof = true + } + return +} + +// Seek sets the offset for the next Read to offset, interpreted +// according to whence: 0 means relative to the origin of the file, 1 +// means relative to the current offset, and 2 means relative to the +// end. Seek returns the new offset and an Error, if any. +// +// Seek uses HTTP Range headers which, if the file pointer is moved, +// will involve reopening the HTTP connection. +// +// Note that you can't seek to the end of a file or beyond; HTTP Range +// requests don't support the file pointer being outside the data, +// unlike os.File +// +// Seek(0, 1) will return the current file pointer. 
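+//
+// An illustrative sketch (assumes file was returned by ObjectOpen):
+//
+//	newPos, err := file.Seek(1024, 0) // re-opens the object with "Range: bytes=1024-"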
+func (file *ObjectOpenFile) Seek(offset int64, whence int) (newPos int64, err error) { + file.overSeeked = false + switch whence { + case 0: // relative to start + newPos = offset + case 1: // relative to current + newPos = file.pos + offset + case 2: // relative to end + if !file.lengthOk { + return file.pos, newError(0, "Length of file unknown so can't seek from end") + } + newPos = file.length + offset + if offset >= 0 { + file.overSeeked = true + return + } + default: + panic("Unknown whence in ObjectOpenFile.Seek") + } + // If at correct position (quite likely), do nothing + if newPos == file.pos { + return + } + // Close the file... + file.seeked = true + err = file.Close() + if err != nil { + return + } + // ...and re-open with a Range header + if file.headers == nil { + file.headers = Headers{} + } + if newPos > 0 { + file.headers["Range"] = fmt.Sprintf("bytes=%d-", newPos) + } else { + delete(file.headers, "Range") + } + newFile, _, err := file.connection.ObjectOpen(file.container, file.objectName, false, file.headers) + if err != nil { + return + } + // Update the file + file.resp = newFile.resp + file.body = newFile.body + file.checkHash = false + file.pos = newPos + return +} + +// Length gets the objects content length either from a cached copy or +// from the server. +func (file *ObjectOpenFile) Length() (int64, error) { + if !file.lengthOk { + info, _, err := file.connection.Object(file.container, file.objectName) + file.length = info.Bytes + file.lengthOk = (err == nil) + return file.length, err + } + return file.length, nil +} + +// Close the object and checks the length and md5sum if it was +// required and all the object was read +func (file *ObjectOpenFile) Close() (err error) { + // Close the body at the end + defer checkClose(file.resp.Body, &err) + + // If not end of file or seeked then can't check anything + if !file.eof || file.seeked { + return + } + + // Check the MD5 sum if requested + if file.checkHash { + receivedMd5 := strings.ToLower(file.resp.Header.Get("Etag")) + calculatedMd5 := fmt.Sprintf("%x", file.hash.Sum(nil)) + if receivedMd5 != calculatedMd5 { + err = ObjectCorrupted + return + } + } + + // Check to see we read the correct number of bytes + if file.lengthOk && file.length != file.bytes { + err = ObjectCorrupted + return + } + return +} + +// Check it satisfies the interfaces +var _ io.ReadCloser = &ObjectOpenFile{} +var _ io.Seeker = &ObjectOpenFile{} + +func (c *Connection) objectOpenBase(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { + var resp *http.Response + opts := RequestOpts{ + Container: container, + ObjectName: objectName, + Operation: "GET", + ErrorMap: objectErrorMap, + Headers: h, + Parameters: parameters, + } + resp, headers, err = c.storage(opts) + if err != nil { + return + } + // Can't check MD5 on an object with X-Object-Manifest or X-Static-Large-Object set + if checkHash && headers.IsLargeObject() { + // log.Printf("swift: turning off md5 checking on object with manifest %v", objectName) + checkHash = false + } + file = &ObjectOpenFile{ + connection: c, + container: container, + objectName: objectName, + headers: h, + resp: resp, + checkHash: checkHash, + body: resp.Body, + } + if checkHash { + file.hash = md5.New() + file.body = io.TeeReader(resp.Body, file.hash) + } + // Read Content-Length + if resp.Header.Get("Content-Length") != "" { + file.length, err = getInt64FromHeader(resp, "Content-Length") + file.lengthOk = (err == nil) + } 
+ return +} + +func (c *Connection) objectOpen(container string, objectName string, checkHash bool, h Headers, parameters url.Values) (file *ObjectOpenFile, headers Headers, err error) { + err = withLORetry(0, func() (Headers, int64, error) { + file, headers, err = c.objectOpenBase(container, objectName, checkHash, h, parameters) + if err != nil { + return headers, 0, err + } + return headers, file.length, nil + }) + return +} + +// ObjectOpen returns an ObjectOpenFile for reading the contents of +// the object. This satisfies the io.ReadCloser and the io.Seeker +// interfaces. +// +// You must call Close() on contents when finished +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. It +// will also check the length returned. No checking will be done if +// you don't read all the contents. +// +// Note that objects with X-Object-Manifest or X-Static-Large-Object +// set won't ever have their md5sum's checked as the md5sum reported +// on the object is actually the md5sum of the md5sums of the +// parts. This isn't very helpful to detect a corrupted download as +// the size of the parts aren't known without doing more operations. +// If you want to ensure integrity of an object with a manifest then +// you will need to download everything in the manifest separately. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectOpen(container string, objectName string, checkHash bool, h Headers) (file *ObjectOpenFile, headers Headers, err error) { + return c.objectOpen(container, objectName, checkHash, h, nil) +} + +// ObjectGet gets the object into the io.Writer contents. +// +// Returns the headers of the response. +// +// If checkHash is true then it will calculate the md5sum of the file +// as it is being received and check it against that returned from the +// server. If it is wrong then it will return ObjectCorrupted. +// +// headers["Content-Type"] will give the content type if desired. +func (c *Connection) ObjectGet(container string, objectName string, contents io.Writer, checkHash bool, h Headers) (headers Headers, err error) { + file, headers, err := c.ObjectOpen(container, objectName, checkHash, h) + if err != nil { + return + } + defer checkClose(file, &err) + _, err = io.Copy(contents, file) + return +} + +// ObjectGetBytes returns an object as a []byte. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetBytes(container string, objectName string) (contents []byte, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.Bytes() + return +} + +// ObjectGetString returns an object as a string. +// +// This is a simplified interface which checks the MD5 +func (c *Connection) ObjectGetString(container string, objectName string) (contents string, err error) { + var buf bytes.Buffer + _, err = c.ObjectGet(container, objectName, &buf, true, nil) + contents = buf.String() + return +} + +// ObjectDelete deletes the object. 
+//
+// May return ObjectNotFound if the object isn't found
+func (c *Connection) ObjectDelete(container string, objectName string) error {
+	_, _, err := c.storage(RequestOpts{
+		Container:  container,
+		ObjectName: objectName,
+		Operation:  "DELETE",
+		ErrorMap:   objectErrorMap,
+	})
+	return err
+}
+
+// ObjectTempUrl returns a temporary URL for an object
+func (c *Connection) ObjectTempUrl(container string, objectName string, secretKey string, method string, expires time.Time) string {
+	mac := hmac.New(sha1.New, []byte(secretKey))
+	prefix, _ := url.Parse(c.StorageUrl)
+	body := fmt.Sprintf("%s\n%d\n%s/%s/%s", method, expires.Unix(), prefix.Path, container, objectName)
+	mac.Write([]byte(body))
+	sig := hex.EncodeToString(mac.Sum(nil))
+	return fmt.Sprintf("%s/%s/%s?temp_url_sig=%s&temp_url_expires=%d", c.StorageUrl, container, objectName, sig, expires.Unix())
+}
+
+// parseResponseStatus parses a string like "200 OK" and returns an Error.
+//
+// For status codes between 200 and 299, this returns nil.
+func parseResponseStatus(resp string, errorMap errorMap) error {
+	code := 0
+	reason := resp
+	t := strings.SplitN(resp, " ", 2)
+	if len(t) == 2 {
+		ncode, err := strconv.Atoi(t[0])
+		if err == nil {
+			code = ncode
+			reason = t[1]
+		}
+	}
+	if errorMap != nil {
+		if err, ok := errorMap[code]; ok {
+			return err
+		}
+	}
+	if 200 <= code && code <= 299 {
+		return nil
+	}
+	return newError(code, reason)
+}
+
+// BulkDeleteResult stores results of BulkDelete().
+//
+// Individual errors may (or may not) be returned by Errors.
+// Errors is a map whose keys are a full path of where the object was
+// to be deleted, and whose values are Error objects. A full path of
+// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH".
+type BulkDeleteResult struct {
+	NumberNotFound int64            // # of objects not found.
+	NumberDeleted  int64            // # of deleted objects.
+	Errors         map[string]error // Mapping between object name and an error.
+	Headers        Headers          // Response HTTP headers.
+}
+
+func (c *Connection) doBulkDelete(objects []string, h Headers) (result BulkDeleteResult, err error) {
+	var buffer bytes.Buffer
+	for _, s := range objects {
+		u := url.URL{Path: s}
+		buffer.WriteString(u.String() + "\n")
+	}
+	extraHeaders := Headers{
+		"Accept":         "application/json",
+		"Content-Type":   "text/plain",
+		"Content-Length": strconv.Itoa(buffer.Len()),
+	}
+	for key, value := range h {
+		extraHeaders[key] = value
+	}
+	resp, headers, err := c.storage(RequestOpts{
+		Operation:  "DELETE",
+		Parameters: url.Values{"bulk-delete": []string{"1"}},
+		Headers:    extraHeaders,
+		ErrorMap:   ContainerErrorMap,
+		Body:       &buffer,
+	})
+	if err != nil {
+		return
+	}
+	var jsonResult struct {
+		NotFound int64  `json:"Number Not Found"`
+		Status   string `json:"Response Status"`
+		Errors   [][]string
+		Deleted  int64 `json:"Number Deleted"`
+	}
+	err = readJson(resp, &jsonResult)
+	if err != nil {
+		return
+	}
+
+	err = parseResponseStatus(jsonResult.Status, objectErrorMap)
+	result.NumberNotFound = jsonResult.NotFound
+	result.NumberDeleted = jsonResult.Deleted
+	result.Headers = headers
+	el := make(map[string]error, len(jsonResult.Errors))
+	for _, t := range jsonResult.Errors {
+		if len(t) != 2 {
+			continue
+		}
+		el[t[0]] = parseResponseStatus(t[1], objectErrorMap)
+	}
+	result.Errors = el
+	return
+}
+
+// BulkDelete deletes multiple objectNames from container in one operation.
+//
+// Some servers may not accept bulk-delete requests since bulk-delete is
+// an optional feature of swift - these will return the Forbidden error.
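+//
+// An illustrative sketch (hypothetical names):
+//
+//	result, err := c.BulkDelete("example", []string{"a.txt", "b.txt"})
+//	if err == nil {
+//		fmt.Printf("deleted %d, not found %d\n", result.NumberDeleted, result.NumberNotFound)
+//	}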
+// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html +func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { + return c.BulkDeleteHeaders(container, objectNames, nil) +} + +// BulkDeleteHeaders deletes multiple objectNames from container in one operation. +// +// Some servers may not accept bulk-delete requests since bulk-delete is +// an optional feature of swift - these will return the Forbidden error. +// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html +func (c *Connection) BulkDeleteHeaders(container string, objectNames []string, h Headers) (result BulkDeleteResult, err error) { + if len(objectNames) == 0 { + result.Errors = make(map[string]error) + return + } + fullPaths := make([]string, len(objectNames)) + for i, name := range objectNames { + fullPaths[i] = fmt.Sprintf("/%s/%s", container, name) + } + return c.doBulkDelete(fullPaths, h) +} + +// BulkUploadResult stores results of BulkUpload(). +// +// Individual errors may (or may not) be returned by Errors. +// Errors is a map whose keys are a full path of where an object was +// to be created, and whose values are Error objects. A full path of +// object looks like "/API_VERSION/USER_ACCOUNT/CONTAINER/OBJECT_PATH". +type BulkUploadResult struct { + NumberCreated int64 // # of created objects. + Errors map[string]error // Mapping between object name and an error. + Headers Headers // Response HTTP headers. +} + +// BulkUpload uploads multiple files in one operation. +// +// uploadPath can be empty, a container name, or a pseudo-directory +// within a container. If uploadPath is empty, new containers may be +// automatically created. +// +// Files are read from dataStream. The format of the stream is specified +// by the format parameter. Available formats are: +// * UploadTar - Plain tar stream. +// * UploadTarGzip - Gzip compressed tar stream. +// * UploadTarBzip2 - Bzip2 compressed tar stream. +// +// Some servers may not accept bulk-upload requests since bulk-upload is +// an optional feature of swift - these will return the Forbidden error. +// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-extract-archive.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Extract_Archive-d1e2338.html +func (c *Connection) BulkUpload(uploadPath string, dataStream io.Reader, format string, h Headers) (result BulkUploadResult, err error) { + extraHeaders := Headers{"Accept": "application/json"} + for key, value := range h { + extraHeaders[key] = value + } + // The following code abuses Container parameter intentionally. + // The best fix might be to rename Container to UploadPath. 
+	resp, headers, err := c.storage(RequestOpts{
+		Container:  uploadPath,
+		Operation:  "PUT",
+		Parameters: url.Values{"extract-archive": []string{format}},
+		Headers:    extraHeaders,
+		ErrorMap:   ContainerErrorMap,
+		Body:       dataStream,
+	})
+	if err != nil {
+		return
+	}
+	// Detect old servers which don't support this feature
+	if headers["Content-Type"] != "application/json" {
+		err = Forbidden
+		return
+	}
+	var jsonResult struct {
+		Created int64  `json:"Number Files Created"`
+		Status  string `json:"Response Status"`
+		Errors  [][]string
+	}
+	err = readJson(resp, &jsonResult)
+	if err != nil {
+		return
+	}
+
+	err = parseResponseStatus(jsonResult.Status, objectErrorMap)
+	result.NumberCreated = jsonResult.Created
+	result.Headers = headers
+	el := make(map[string]error, len(jsonResult.Errors))
+	for _, t := range jsonResult.Errors {
+		if len(t) != 2 {
+			continue
+		}
+		el[t[0]] = parseResponseStatus(t[1], objectErrorMap)
+	}
+	result.Errors = el
+	return
+}
+
+// Object returns info about a single object including any metadata in the header.
+//
+// May return ObjectNotFound.
+//
+// Use headers.ObjectMetadata() to read the metadata in the Headers.
+func (c *Connection) Object(container string, objectName string) (info Object, headers Headers, err error) {
+	err = withLORetry(0, func() (Headers, int64, error) {
+		info, headers, err = c.objectBase(container, objectName)
+		if err != nil {
+			return headers, 0, err
+		}
+		return headers, info.Bytes, nil
+	})
+	return
+}
+
+func (c *Connection) objectBase(container string, objectName string) (info Object, headers Headers, err error) {
+	var resp *http.Response
+	resp, headers, err = c.storage(RequestOpts{
+		Container:  container,
+		ObjectName: objectName,
+		Operation:  "HEAD",
+		ErrorMap:   objectErrorMap,
+		NoResponse: true,
+	})
+	if err != nil {
+		return
+	}
+	// Parse the headers into the struct
+	// HTTP/1.1 200 OK
+	// Date: Thu, 07 Jun 2010 20:59:39 GMT
+	// Server: Apache
+	// Last-Modified: Fri, 12 Jun 2010 13:40:18 GMT
+	// ETag: 8a964ee2a5e88be344f36c22562a6486
+	// Content-Length: 512000
+	// Content-Type: text/plain; charset=UTF-8
+	// X-Object-Meta-Meat: Bacon
+	// X-Object-Meta-Fruit: Bacon
+	// X-Object-Meta-Veggie: Bacon
+	// X-Object-Meta-Dairy: Bacon
+	info.Name = objectName
+	info.ContentType = resp.Header.Get("Content-Type")
+	if resp.Header.Get("Content-Length") != "" {
+		if info.Bytes, err = getInt64FromHeader(resp, "Content-Length"); err != nil {
+			return
+		}
+	}
+	// Currently ceph doesn't return a Last-Modified header for DLO manifests without any segments
+	// See ceph http://tracker.ceph.com/issues/15812
+	if resp.Header.Get("Last-Modified") != "" {
+		info.ServerLastModified = resp.Header.Get("Last-Modified")
+		if info.LastModified, err = time.Parse(http.TimeFormat, info.ServerLastModified); err != nil {
+			return
+		}
+	}
+
+	info.Hash = resp.Header.Get("Etag")
+	if resp.Header.Get("X-Object-Manifest") != "" {
+		info.ObjectType = DynamicLargeObjectType
+	} else if resp.Header.Get("X-Static-Large-Object") != "" {
+		info.ObjectType = StaticLargeObjectType
+	}
+
+	return
+}
+
+// ObjectUpdate adds, replaces or removes object metadata.
+//
+// Add or Update keys by mentioning them in the Metadata. Use
+// Metadata.ObjectHeaders and Headers.ObjectMetadata to convert your
+// Metadata to and from normal HTTP headers.
+//
+// This removes all metadata previously added to the object and
+// replaces it with that passed in, so to delete keys, just don't
+// mention them in the headers you pass in.
+//
+// Object metadata can only be read with Object() not with Objects().
+//
+// This can also be used to set headers not already assigned such as
+// X-Delete-At or X-Delete-After for expiring objects.
+//
+// You cannot use this to change any of the object's other headers
+// such as Content-Type, ETag, etc.
+//
+// Refer to copying an object when you need to update metadata or
+// other headers such as Content-Type or CORS headers.
+//
+// May return ObjectNotFound.
+func (c *Connection) ObjectUpdate(container string, objectName string, h Headers) error {
+	_, _, err := c.storage(RequestOpts{
+		Container:  container,
+		ObjectName: objectName,
+		Operation:  "POST",
+		ErrorMap:   objectErrorMap,
+		NoResponse: true,
+		Headers:    h,
+	})
+	return err
+}
+
+// urlPathEscape escapes the URL path in the string using URL escaping rules
+//
+// This mimics url.PathEscape, which is only available from go 1.8
+func urlPathEscape(in string) string {
+	var u url.URL
+	u.Path = in
+	return u.String()
+}
+
+// ObjectCopy does a server side copy of an object to a new position
+//
+// All metadata is preserved. If metadata is set in the headers then
+// it overrides the old metadata on the copied object.
+//
+// The destination container must exist before the copy.
+//
+// You can use this to copy an object to itself - this is the only way
+// to update the content type of an object.
+func (c *Connection) ObjectCopy(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string, h Headers) (headers Headers, err error) {
+	// Meta stuff
+	extraHeaders := map[string]string{
+		"Destination": urlPathEscape(dstContainer + "/" + dstObjectName),
+	}
+	for key, value := range h {
+		extraHeaders[key] = value
+	}
+	_, headers, err = c.storage(RequestOpts{
+		Container:  srcContainer,
+		ObjectName: srcObjectName,
+		Operation:  "COPY",
+		ErrorMap:   objectErrorMap,
+		NoResponse: true,
+		Headers:    extraHeaders,
+	})
+	return
+}
+
+// ObjectMove does a server side move of an object to a new position
+//
+// This is a convenience method which calls ObjectCopy then ObjectDelete
+//
+// All metadata is preserved.
+//
+// The destination container must exist before the copy.
+func (c *Connection) ObjectMove(srcContainer string, srcObjectName string, dstContainer string, dstObjectName string) (err error) {
+	_, err = c.ObjectCopy(srcContainer, srcObjectName, dstContainer, dstObjectName, nil)
+	if err != nil {
+		return
+	}
+	return c.ObjectDelete(srcContainer, srcObjectName)
+}
+
+// ObjectUpdateContentType updates the content type of an object
+//
+// This is a convenience method which calls ObjectCopy
+//
+// All other metadata is preserved.
+func (c *Connection) ObjectUpdateContentType(container string, objectName string, contentType string) (err error) {
+	h := Headers{"Content-Type": contentType}
+	_, err = c.ObjectCopy(container, objectName, container, objectName, h)
+	return
+}
+
+// ------------------------------------------------------------
+
+// VersionContainerCreate is a helper method for creating and enabling version controlled containers.
+//
+// It builds the current object container, the non-current object version container, and enables versioning.
+//
+// If the server doesn't support versioning then it will return
+// Forbidden; however, it will have created both the containers at that point.
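+//
+// An illustrative sketch (hypothetical container names):
+//
+//	// keep old versions of objects from "current" in "archive"
+//	err := c.VersionContainerCreate("current", "archive")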
+func (c *Connection) VersionContainerCreate(current, version string) error {
+	if err := c.ContainerCreate(version, nil); err != nil {
+		return err
+	}
+	if err := c.ContainerCreate(current, nil); err != nil {
+		return err
+	}
+	if err := c.VersionEnable(current, version); err != nil {
+		return err
+	}
+	return nil
+}
+
+// VersionEnable enables versioning on the current container with version as the tracking container.
+//
+// May return Forbidden if this isn't supported by the server
+func (c *Connection) VersionEnable(current, version string) error {
+	h := Headers{"X-Versions-Location": version}
+	if err := c.ContainerUpdate(current, h); err != nil {
+		return err
+	}
+	// Check to see if the header was set properly
+	_, headers, err := c.Container(current)
+	if err != nil {
+		return err
+	}
+	// If failed to set versions header, return Forbidden as the server doesn't support this
+	if headers["X-Versions-Location"] != version {
+		return Forbidden
+	}
+	return nil
+}
+
+// VersionDisable disables versioning on the current container.
+func (c *Connection) VersionDisable(current string) error {
+	h := Headers{"X-Versions-Location": ""}
+	if err := c.ContainerUpdate(current, h); err != nil {
+		return err
+	}
+	return nil
+}
+
+// VersionObjectList returns a list of older versions of the object.
+//
+// Objects are returned in the format <length><object_name>/<timestamp>
+func (c *Connection) VersionObjectList(version, object string) ([]string, error) {
+	opts := &ObjectsOpts{
+		// <3-character zero-padded hexadecimal character length><object_name>/
+		Prefix: fmt.Sprintf("%03x", len(object)) + object + "/",
+	}
+	return c.ObjectNames(version, opts)
+}
diff --git a/vendor/github.com/ncw/swift/timeout_reader.go b/vendor/github.com/ncw/swift/timeout_reader.go
new file mode 100644
index 00000000000..88ae733281e
--- /dev/null
+++ b/vendor/github.com/ncw/swift/timeout_reader.go
@@ -0,0 +1,59 @@
+package swift
+
+import (
+	"io"
+	"time"
+)
+
+// An io.ReadCloser which obeys an idle timeout
+type timeoutReader struct {
+	reader  io.ReadCloser
+	timeout time.Duration
+	cancel  func()
+}
+
+// newTimeoutReader returns a wrapper around the reader which obeys an idle
+// timeout. The cancel function is called if the timeout happens
+func newTimeoutReader(reader io.ReadCloser, timeout time.Duration, cancel func()) *timeoutReader {
+	return &timeoutReader{
+		reader:  reader,
+		timeout: timeout,
+		cancel:  cancel,
+	}
+}
+
+// Read reads up to len(p) bytes into p
+//
+// Waits at most for timeout for the read to complete, otherwise returns a timeout
+func (t *timeoutReader) Read(p []byte) (int, error) {
+	// FIXME limit the amount of data read in one chunk so as to not exceed the timeout?
+ // Do the read in the background + type result struct { + n int + err error + } + done := make(chan result, 1) + go func() { + n, err := t.reader.Read(p) + done <- result{n, err} + }() + // Wait for the read or the timeout + timer := time.NewTimer(t.timeout) + defer timer.Stop() + select { + case r := <-done: + return r.n, r.err + case <-timer.C: + t.cancel() + return 0, TimeoutError + } + panic("unreachable") // for Go 1.0 +} + +// Close the channel +func (t *timeoutReader) Close() error { + return t.reader.Close() +} + +// Check it satisfies the interface +var _ io.ReadCloser = &timeoutReader{} diff --git a/vendor/github.com/ncw/swift/travis_realserver.sh b/vendor/github.com/ncw/swift/travis_realserver.sh new file mode 100644 index 00000000000..970e94c0d1f --- /dev/null +++ b/vendor/github.com/ncw/swift/travis_realserver.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +if [ "${TRAVIS_PULL_REQUEST}" = "true" ]; then + exit 0 +fi + +if [ "${TEST_REAL_SERVER}" = "rackspace" ] && [ ! -z "${RACKSPACE_APIKEY}" ]; then + echo "Running tests pointing to Rackspace" + export SWIFT_API_KEY=$RACKSPACE_APIKEY + export SWIFT_API_USER=$RACKSPACE_USER + export SWIFT_AUTH_URL=$RACKSPACE_AUTH + go test ./... +fi + +if [ "${TEST_REAL_SERVER}" = "memset" ] && [ ! -z "${MEMSET_APIKEY}" ]; then + echo "Running tests pointing to Memset" + export SWIFT_API_KEY=$MEMSET_APIKEY + export SWIFT_API_USER=$MEMSET_USER + export SWIFT_AUTH_URL=$MEMSET_AUTH + go test +fi diff --git a/vendor/github.com/ncw/swift/watchdog_reader.go b/vendor/github.com/ncw/swift/watchdog_reader.go new file mode 100644 index 00000000000..2714c9e1a47 --- /dev/null +++ b/vendor/github.com/ncw/swift/watchdog_reader.go @@ -0,0 +1,55 @@ +package swift + +import ( + "io" + "time" +) + +var watchdogChunkSize = 1 << 20 // 1 MiB + +// An io.Reader which resets a watchdog timer whenever data is read +type watchdogReader struct { + timeout time.Duration + reader io.Reader + timer *time.Timer + chunkSize int +} + +// Returns a new reader which will kick the watchdog timer whenever data is read +func newWatchdogReader(reader io.Reader, timeout time.Duration, timer *time.Timer) *watchdogReader { + return &watchdogReader{ + timeout: timeout, + reader: reader, + timer: timer, + chunkSize: watchdogChunkSize, + } +} + +// Read reads up to len(p) bytes into p +func (t *watchdogReader) Read(p []byte) (int, error) { + //read from underlying reader in chunks not larger than t.chunkSize + //while resetting the watchdog timer before every read; the small chunk + //size ensures that the timer does not fire when reading a large amount of + //data from a slow connection + start := 0 + end := len(p) + for start < end { + length := end - start + if length > t.chunkSize { + length = t.chunkSize + } + + resetTimer(t.timer, t.timeout) + n, err := t.reader.Read(p[start : start+length]) + start += n + if n == 0 || err != nil { + return start, err + } + } + + resetTimer(t.timer, t.timeout) + return start, nil +} + +// Check it satisfies the interface +var _ io.Reader = &watchdogReader{} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index b79ee9c5aa3..44222220a38 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,19 @@ +## 2.17.1 + +### Fixes +- If the user sets --seed=0, make sure all parallel nodes get the same seed [af0330d] + +## 2.17.0 + +### Features + +- add `--github-output` for nicer output in github actions [e8a2056] + +### Maintenance + +- 
fix typo in core_dsl.go [977bc6f] +- Fix typo in docs [e297e7b] + ## 2.16.0 ### Features diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 0e633f3098f..a3e8237e938 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -792,8 +792,8 @@ DeferCleanup can be passed: For example: BeforeEach(func() { - DeferCleanup(os.SetEnv, "FOO", os.GetEnv("FOO")) - os.SetEnv("FOO", "BAR") + DeferCleanup(os.Setenv, "FOO", os.GetEnv("FOO")) + os.Setenv("FOO", "BAR") }) will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 56b7be75879..4026859ec39 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -182,6 +182,22 @@ func (r *DefaultReporter) WillRun(report types.SpecReport) { r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) } +func (r *DefaultReporter) wrapTextBlock(sectionName string, fn func()) { + r.emitBlock("\n") + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::group::%s", sectionName)) + } else { + r.emitBlock(r.fi(1, "{{gray}}%s >>{{/}}", sectionName)) + } + fn() + if r.conf.GithubOutput { + r.emitBlock(r.fi(1, "::endgroup::")) + } else { + r.emitBlock(r.fi(1, "{{gray}}<< %s{{/}}", sectionName)) + } + +} + func (r *DefaultReporter) DidRun(report types.SpecReport) { v := r.conf.Verbosity() inParallel := report.RunningInParallel @@ -283,26 +299,23 @@ func (r *DefaultReporter) DidRun(report types.SpecReport) { //Emit Stdout/Stderr Output if showSeparateStdSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) + r.wrapTextBlock("Captured StdOut/StdErr Output", func() { + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + }) } if showSeparateVisibilityAlwaysReportsSection { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) - for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { - r.emitReportEntry(1, entry) - } - r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) + r.wrapTextBlock("Report Entries", func() { + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + }) } if showTimeline { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) - r.emitTimeline(1, report, timeline) - r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) + r.wrapTextBlock("Timeline", func() { + r.emitTimeline(1, report, timeline) + }) } // Emit Failure Message @@ -405,7 +418,11 @@ func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, f func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: 
{{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + if r.conf.GithubOutput { + r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } else { + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + } if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index c88fc85a75f..cef273ee1ff 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -89,6 +89,7 @@ type ReporterConfig struct { VeryVerbose bool FullTrace bool ShowNodeEvents bool + GithubOutput bool JSONReport string JUnitReport string @@ -264,7 +265,7 @@ var FlagSections = GinkgoFlagSections{ // SuiteConfigFlags provides flags for the Ginkgo test process, and CLI var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.RandomSeed", Name: "seed", SectionKey: "order", UsageDefaultValue: "randomly generated by Ginkgo", - Usage: "The seed used to randomize the spec suite."}, + Usage: "The seed used to randomize the spec suite.", AlwaysExport: true}, {KeyPath: "S.RandomizeAllSpecs", Name: "randomize-all", SectionKey: "order", DeprecatedName: "randomizeAllSpecs", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will randomize all specs together. By default, ginkgo only randomizes the top level Describe, Context and When containers."}, @@ -331,6 +332,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, + {KeyPath: "R.GithubOutput", Name: "github-output", SectionKey: "output", + Usage: "If set, default reporter prints easier to manage output in Github Actions."}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/flags.go b/vendor/github.com/onsi/ginkgo/v2/types/flags.go index 9186ae873d0..de69f3022de 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/flags.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/flags.go @@ -24,7 +24,8 @@ type GinkgoFlag struct { DeprecatedDocLink string DeprecatedVersion string - ExportAs string + ExportAs string + AlwaysExport bool } type GinkgoFlags []GinkgoFlag @@ -431,7 +432,7 @@ func (ssv stringSliceVar) Set(s string) error { return nil } -//given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. +// given a set of GinkgoFlags and bindings, generate flag arguments suitable to be passed to an application with that set of flags configured. 
func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) { result := []string{} for _, flag := range flags { @@ -451,19 +452,19 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) iface := value.Interface() switch value.Type() { case reflect.TypeOf(string("")): - if iface.(string) != "" { + if iface.(string) != "" || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } case reflect.TypeOf(int64(0)): - if iface.(int64) != 0 { + if iface.(int64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(float64(0)): - if iface.(float64) != 0 { + if iface.(float64) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%f", name, iface)) } case reflect.TypeOf(int(0)): - if iface.(int) != 0 { + if iface.(int) != 0 || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%d", name, iface)) } case reflect.TypeOf(bool(true)): @@ -471,7 +472,7 @@ func GenerateFlagArgs(flags GinkgoFlags, bindings interface{}) ([]string, error) result = append(result, fmt.Sprintf("--%s", name)) } case reflect.TypeOf(time.Duration(0)): - if iface.(time.Duration) != time.Duration(0) { + if iface.(time.Duration) != time.Duration(0) || flag.AlwaysExport { result = append(result, fmt.Sprintf("--%s=%s", name, iface)) } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 675f8db2fe8..851d42b456b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.16.0" +const VERSION = "2.17.1" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9a14b81517e..01ec5245cdc 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,16 @@ +## 1.32.0 + +### Maintenance +- Migrate github.com/golang/protobuf to google.golang.org/protobuf [436a197] + + This release drops the deprecated github.com/golang/protobuf and adopts google.golang.org/protobuf. Care was taken to ensure the release is backwards compatible (thanks @jbduncan !). Please open an issue if you run into one. + +- chore: test with Go 1.22 (#733) [32ef35e] +- Bump golang.org/x/net from 0.19.0 to 0.20.0 (#717) [a0d0387] +- Bump github-pages and jekyll-feed in /docs (#732) [b71e477] +- docs: fix typo and broken anchor link to gstruct [f460154] +- docs: fix HaveEach matcher signature [a2862e4] + ## 1.31.1 ### Fixes diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 5b46a165815..ffb81b1feb3 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.31.1" +const GOMEGA_VERSION = "1.32.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). 
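The AlwaysExport additions in types/flags.go above change how Ginkgo regenerates CLI arguments for parallel worker processes: previously any flag whose value equaled its type's zero value was dropped, so --seed=0 was never forwarded to the workers. A hedged sketch of the rule (simplified from GenerateFlagArgs; names are illustrative):

	// exportInt64 mirrors the int64 branch above: a zero value is still
	// omitted unless the flag is marked AlwaysExport (as --seed now is).
	func exportInt64(name string, v int64, alwaysExport bool) []string {
		if v != 0 || alwaysExport {
			return []string{fmt.Sprintf("--%s=%d", name, v)}
		}
		return nil
	}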
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index b2b89b017e6..25cfaa21643 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -75,14 +75,14 @@ func ResponseFormat(h http.Header) Format { func NewDecoder(r io.Reader, format Format) Decoder { switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: r} + return &protoDecoder{r: bufio.NewReader(r)} } return &textDecoder{r: r} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { - r io.Reader + r protodelim.Reader } // Decode implements the Decoder interface. @@ -90,7 +90,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { opts := protodelim.UnmarshalOptions{ MaxSize: -1, } - if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { + if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 6fc9555e3ff..051b38cd178 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,6 +15,7 @@ package expfmt import ( + "fmt" "strings" "github.com/prometheus/common/model" @@ -63,7 +64,7 @@ const ( type FormatType int const ( - TypeUnknown = iota + TypeUnknown FormatType = iota TypeProtoCompact TypeProtoDelim TypeProtoText @@ -73,7 +74,8 @@ const ( // NewFormat generates a new Format from the type provided. Mostly used for // tests, most Formats should be generated as part of content negotiation in -// encode.go. +// encode.go. If a type has more than one version, the latest version will be +// returned. func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: @@ -91,13 +93,21 @@ func NewFormat(t FormatType) Format { } } +// NewOpenMetricsFormat generates a new OpenMetrics format matching the +// specified version number. +func NewOpenMetricsFormat(version string) (Format, error) { + if version == OpenMetricsVersion_0_0_1 { + return fmtOpenMetrics_0_0_1, nil + } + if version == OpenMetricsVersion_1_0_0 { + return fmtOpenMetrics_1_0_0, nil + } + return fmtUnknown, fmt.Errorf("unknown open metrics version string") +} + // FormatType deduces an overall FormatType for the given format. func (f Format) FormatType() FormatType { toks := strings.Split(string(f), ";") - if len(toks) < 2 { - return TypeUnknown - } - params := make(map[string]string) for i, t := range toks { if i == 0 { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 432843da335..353c5e93f92 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -248,12 +248,12 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E var createdTsBytesWritten int // Finally the samples, one line for each. 
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + compliantName = compliantName + "_total" + } for _, metric := range in.Metric { switch metricType { case dto.MetricType_COUNTER: - if strings.HasSuffix(name, "_total") { - compliantName = compliantName + "_total" - } if metric.Counter == nil { return written, fmt.Errorf( "expected counter in metric %s %s", compliantName, metric, diff --git a/vendor/github.com/rclone/rclone/COPYING b/vendor/github.com/rclone/rclone/COPYING new file mode 100644 index 00000000000..8c27c67fd0a --- /dev/null +++ b/vendor/github.com/rclone/rclone/COPYING @@ -0,0 +1,20 @@ +Copyright (C) 2012 by Nick Craig-Wood http://www.craig-wood.com/nick/ + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/rclone/rclone/backend/azureblob/azureblob.go b/vendor/github.com/rclone/rclone/backend/azureblob/azureblob.go new file mode 100644 index 00000000000..e0478c2074d --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/azureblob/azureblob.go @@ -0,0 +1,1683 @@ +// Package azureblob provides an interface to the Microsoft Azure blob object storage system + +// +build !plan9,!solaris,!js,go1.14 + +package azureblob + +import ( + "context" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "path" + "strings" + "sync" + "time" + + "github.com/Azure/azure-pipeline-go/pipeline" + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/walk" + "github.com/rclone/rclone/lib/bucket" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/env" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/pool" +) + +const ( + minSleep = 10 * time.Millisecond + maxSleep = 10 * time.Second + decayConstant = 1 // bigger for slower decay, exponential + maxListChunkSize = 5000 // number of items to read at once + modTimeKey = "mtime" + timeFormatIn = time.RFC3339 + timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00" + storageDefaultBaseURL = "blob.core.windows.net" + defaultChunkSize = 4 * fs.MebiByte + maxChunkSize = 100 * fs.MebiByte + uploadConcurrency = 4 + defaultAccessTier = 
azblob.AccessTierNone + maxTryTimeout = time.Hour * 24 * 365 //max time of an azure web request response window (whether or not data is flowing) + // Default storage account, key and blob endpoint for emulator support, + // though it is a base64 key checked in here, it is a publicly available secret. + emulatorAccount = "devstoreaccount1" + emulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + emulatorBlobEndpoint = "http://127.0.0.1:10000/devstoreaccount1" + memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long + memoryPoolUseMmap = false +) + +var ( + errCantUpdateArchiveTierBlobs = fserrors.NoRetryError(errors.New("can't update archive tier blob without --azureblob-archive-tier-delete")) +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "azureblob", + Description: "Microsoft Azure Blob Storage", + NewFs: NewFs, + Options: []fs.Option{{ + Name: "account", + Help: "Storage Account Name (leave blank to use SAS URL or Emulator)", + }, { + Name: "service_principal_file", + Help: `Path to file containing credentials for use with a service principal. + +Leave blank normally. Needed only if you want to use a service principal instead of interactive login. + + $ az ad sp create-for-rbac --name "<name>" \ + --role "Storage Blob Data Owner" \ + --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \ + > azure-principal.json + +See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli) +for more details. +`, + }, { + Name: "key", + Help: "Storage Account Key (leave blank to use SAS URL or Emulator)", + }, { + Name: "sas_url", + Help: "SAS URL for container level access only\n(leave blank if using account/key or Emulator)", + }, { + Name: "use_msi", + Help: `Use a managed service identity to authenticate (only works in Azure) + +When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/) +to authenticate to Azure Storage instead of a SAS token or account key. + +If the VM(SS) on which this program is running has a system-assigned identity, it will +be used by default. If the resource has no system-assigned but exactly one user-assigned identity, +the user-assigned identity will be used by default. If the resource has multiple user-assigned +identities, the identity to use must be explicitly specified using exactly one of the msi_object_id, +msi_client_id, or msi_mi_res_id parameters.`, + Default: false, + }, { + Name: "msi_object_id", + Help: "Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.", + Advanced: true, + }, { + Name: "msi_client_id", + Help: "Client ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.", + Advanced: true, + }, { + Name: "msi_mi_res_id", + Help: "Azure resource ID of the user-assigned MSI to use, if any.
Leave blank if msi_client_id or msi_object_id specified.", + Advanced: true, + }, { + Name: "use_emulator", + Help: "Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint)", + Default: false, + }, { + Name: "endpoint", + Help: "Endpoint for the service\nLeave blank normally.", + Advanced: true, + }, { + Name: "upload_cutoff", + Help: "Cutoff for switching to chunked upload (<= 256MB). (Deprecated)", + Advanced: true, + }, { + Name: "chunk_size", + Help: `Upload chunk size (<= 100MB). + +Note that this is stored in memory and there may be up to +"--transfers" chunks stored at once in memory.`, + Default: defaultChunkSize, + Advanced: true, + }, { + Name: "list_chunk", + Help: `Size of blob list. + +This sets the number of blobs requested in each listing chunk. Default +is the maximum, 5000. "List blobs" requests are permitted 2 minutes +per megabyte to complete. If an operation is taking longer than 2 +minutes per megabyte on average, it will time out ( +[source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval) +). This can be used to limit the number of blob items to return, to +avoid the time out.`, + Default: maxListChunkSize, + Advanced: true, + }, { + Name: "access_tier", + Help: `Access tier of blob: hot, cool or archive. + +Archived blobs can be restored by setting access tier to hot or +cool. Leave blank if you intend to use the default access tier, which is +set at account level. + +If there is no "access tier" specified, rclone doesn't apply any tier. +rclone performs "Set Tier" operation on blobs while uploading. If objects +are not modified, specifying a new "access tier" will have no effect. +If blobs are in "archive tier" at remote, trying to perform data transfer +operations from remote will not be allowed. The user should first restore by +tiering the blob to "Hot" or "Cool".`, + Advanced: true, + }, { + Name: "archive_tier_delete", + Default: false, + Help: fmt.Sprintf(`Delete archive tier blobs before overwriting. + +Archive tier blobs cannot be updated. So without this flag, if you +attempt to update an archive tier blob, then rclone will produce the +error: + + %v + +With this flag set, before rclone attempts to overwrite an archive +tier blob, it will delete the existing blob before uploading its +replacement. This has the potential for data loss if the upload fails +(unlike updating a normal blob) and also may cost more since deleting +archive tier blobs early may be chargeable. +`, errCantUpdateArchiveTierBlobs), + Advanced: true, + }, { + Name: "disable_checksum", + Help: `Don't store MD5 checksum with object metadata. + +Normally rclone will calculate the MD5 checksum of the input before +uploading it so it can add it to metadata on the object. This is great +for data integrity checking but can cause long delays for large files +to start uploading.`, + Default: false, + Advanced: true, + }, { + Name: "memory_pool_flush_time", + Default: memoryPoolFlushTime, + Advanced: true, + Help: `How often internal memory buffer pools will be flushed. +Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`, + }, { + Name: "memory_pool_use_mmap", + Default: memoryPoolUseMmap, + Advanced: true, + Help: `Whether to use mmap buffers in internal memory pool.`, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: (encoder.EncodeInvalidUtf8 | + encoder.EncodeSlash | + encoder.EncodeCtl | + encoder.EncodeDel | + encoder.EncodeBackSlash | + encoder.EncodeRightPeriod), + }}, + }) +} + +// Options defines the configuration for this backend +type Options struct { + Account string `config:"account"` + ServicePrincipalFile string `config:"service_principal_file"` + Key string `config:"key"` + UseMSI bool `config:"use_msi"` + MSIObjectID string `config:"msi_object_id"` + MSIClientID string `config:"msi_client_id"` + MSIResourceID string `config:"msi_mi_res_id"` + Endpoint string `config:"endpoint"` + SASURL string `config:"sas_url"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + ListChunkSize uint `config:"list_chunk"` + AccessTier string `config:"access_tier"` + ArchiveTierDelete bool `config:"archive_tier_delete"` + UseEmulator bool `config:"use_emulator"` + DisableCheckSum bool `config:"disable_checksum"` + MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"` + MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs represents a remote azure server +type Fs struct { + name string // name of this remote + root string // the path we are working on if any + opt Options // parsed config options + ci *fs.ConfigInfo // global config + features *fs.Features // optional features + client *http.Client // http client we are using + svcURL *azblob.ServiceURL // reference to serviceURL + cntURLcacheMu sync.Mutex // mutex to protect cntURLcache + cntURLcache map[string]*azblob.ContainerURL // reference to containerURL per container + rootContainer string // container part of root (if any) + rootDirectory string // directory part of root (if any) + isLimited bool // if limited to one container + cache *bucket.Cache // cache for container creation status + pacer *fs.Pacer // To pace and retry the API calls + imdsPacer *fs.Pacer // Same but for IMDS + uploadToken *pacer.TokenDispenser // control concurrency + pool *pool.Pool // memory pool +} + +// Object describes an azure object +type Object struct { + fs *Fs // what this object is part of + remote string // The remote path + modTime time.Time // The modified time of the object if known + md5 string // MD5 hash if known + size int64 // Size of the object + mimeType string // Content-Type of the object + accessTier azblob.AccessTierType // Blob Access Tier + meta map[string]string // blob metadata +} + +// ------------------------------------------------------------ + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + if f.rootContainer == "" { + return "Azure root" + } + if f.rootDirectory == "" { + return fmt.Sprintf("Azure container %s", f.rootContainer) + } + return fmt.Sprintf("Azure container %s path %s", f.rootContainer, f.rootDirectory) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// parsePath parses a remote 'url' +func parsePath(path string) (root string) { + root = 
strings.Trim(path, "/") + return +} + +// split returns container and containerPath from the rootRelativePath +// relative to f.root +func (f *Fs) split(rootRelativePath string) (containerName, containerPath string) { + containerName, containerPath = bucket.Split(path.Join(f.root, rootRelativePath)) + return f.opt.Enc.FromStandardName(containerName), f.opt.Enc.FromStandardPath(containerPath) +} + +// split returns container and containerPath from the object +func (o *Object) split() (container, containerPath string) { + return o.fs.split(o.remote) +} + +// validateAccessTier checks if azureblob supports user supplied tier +func validateAccessTier(tier string) bool { + switch tier { + case string(azblob.AccessTierHot), + string(azblob.AccessTierCool), + string(azblob.AccessTierArchive): + // valid cases + return true + default: + return false + } +} + +// retryErrorCodes is a slice of error codes that we will retry +var retryErrorCodes = []int{ + 401, // Unauthorized (e.g. "Token has expired") + 408, // Request Timeout + 429, // Rate exceeded. + 500, // Get occasional 500 Internal Server Error + 503, // Service Unavailable + 504, // Gateway Time-out +} + +// shouldRetry returns a boolean as to whether this resp and err +// deserve to be retried. It returns the err as a convenience +func (f *Fs) shouldRetry(err error) (bool, error) { + // FIXME interpret special errors - more to do here + if storageErr, ok := err.(azblob.StorageError); ok { + switch storageErr.ServiceCode() { + case "InvalidBlobOrBlock": + // These errors happen sometimes in multipart uploads + // because of block concurrency issues + return true, err + } + statusCode := storageErr.Response().StatusCode + for _, e := range retryErrorCodes { + if statusCode == e { + return true, err + } + } + } else if httpErr, ok := err.(httpError); ok { + return fserrors.ShouldRetryHTTP(httpErr.Response, retryErrorCodes), err + } + return fserrors.ShouldRetry(err), err +} + +func checkUploadChunkSize(cs fs.SizeSuffix) error { + const minChunkSize = fs.Byte + if cs < minChunkSize { + return errors.Errorf("%s is less than %s", cs, minChunkSize) + } + if cs > maxChunkSize { + return errors.Errorf("%s is greater than %s", cs, maxChunkSize) + } + return nil +} + +func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { + err = checkUploadChunkSize(cs) + if err == nil { + old, f.opt.ChunkSize = f.opt.ChunkSize, cs + } + return +} + +// httpClientFactory creates a Factory object that sends HTTP requests +// to an rclone's http.Client. +// +// copied from azblob.newDefaultHTTPClientFactory +func httpClientFactory(client *http.Client) pipeline.Factory { + return pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { + return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { + r, err := client.Do(request.WithContext(ctx)) + if err != nil { + err = pipeline.NewError(err, "HTTP request failed") + } + return pipeline.NewHTTPResponse(r), err + } + }) +} + +type servicePrincipalCredentials struct { + AppID string `json:"appId"` + Password string `json:"password"` + Tenant string `json:"tenant"` +} + +const azureActiveDirectoryEndpoint = "https://login.microsoftonline.com/" +const azureStorageEndpoint = "https://storage.azure.com/" + +// newServicePrincipalTokenRefresher takes the client ID and secret, and returns a refresh-able access token. 
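+// The returned refresher re-runs the OAuth client-credentials flow against
+// Azure AD each time the azblob pipeline invokes it, installs the fresh
+// token on the credential, and reports how long to wait before the next
+// refresh (the token lifetime less a two-minute safety margin).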
+func newServicePrincipalTokenRefresher(ctx context.Context, credentialsData []byte) (azblob.TokenRefresher, error) { + var spCredentials servicePrincipalCredentials + if err := json.Unmarshal(credentialsData, &spCredentials); err != nil { + return nil, errors.Wrap(err, "error parsing credentials from JSON file") + } + oauthConfig, err := adal.NewOAuthConfig(azureActiveDirectoryEndpoint, spCredentials.Tenant) + if err != nil { + return nil, errors.Wrap(err, "error creating oauth config") + } + + // Create service principal token for Azure Storage. + servicePrincipalToken, err := adal.NewServicePrincipalToken( + *oauthConfig, + spCredentials.AppID, + spCredentials.Password, + azureStorageEndpoint) + if err != nil { + return nil, errors.Wrap(err, "error creating service principal token") + } + + // Wrap token inside a refresher closure. + var tokenRefresher azblob.TokenRefresher = func(credential azblob.TokenCredential) time.Duration { + if err := servicePrincipalToken.Refresh(); err != nil { + panic(err) + } + refreshedToken := servicePrincipalToken.Token() + credential.SetToken(refreshedToken.AccessToken) + exp := refreshedToken.Expires().Sub(time.Now().Add(2 * time.Minute)) + return exp + } + + return tokenRefresher, nil +} + +// newPipeline creates a Pipeline using the specified credentials and options. +// +// this code was copied from azblob.NewPipeline +func (f *Fs) newPipeline(c azblob.Credential, o azblob.PipelineOptions) pipeline.Pipeline { + // Don't log stuff to syslog/Windows Event log + pipeline.SetForceLogEnabled(false) + + // Closest to API goes first; closest to the wire goes last + factories := []pipeline.Factory{ + azblob.NewTelemetryPolicyFactory(o.Telemetry), + azblob.NewUniqueRequestIDPolicyFactory(), + azblob.NewRetryPolicyFactory(o.Retry), + c, + pipeline.MethodFactoryMarker(), // indicates at what stage in the pipeline the method factory is invoked + azblob.NewRequestLogPolicyFactory(o.RequestLog), + } + return pipeline.NewPipeline(factories, pipeline.Options{HTTPSender: httpClientFactory(f.client), Log: o.Log}) +} + +// setRoot changes the root of the Fs +func (f *Fs) setRoot(root string) { + f.root = parsePath(root) + f.rootContainer, f.rootDirectory = bucket.Split(f.root) +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + + err = checkUploadChunkSize(opt.ChunkSize) + if err != nil { + return nil, errors.Wrap(err, "azure: chunk size") + } + if opt.ListChunkSize > maxListChunkSize { + return nil, errors.Errorf("azure: blob list size can't be greater than %v - was %v", maxListChunkSize, opt.ListChunkSize) + } + if opt.Endpoint == "" { + opt.Endpoint = storageDefaultBaseURL + } + + if opt.AccessTier == "" { + opt.AccessTier = string(defaultAccessTier) + } else if !validateAccessTier(opt.AccessTier) { + return nil, errors.Errorf("Azure Blob: Supported access tiers are %s, %s and %s", + string(azblob.AccessTierHot), string(azblob.AccessTierCool), string(azblob.AccessTierArchive)) + } + + ci := fs.GetConfig(ctx) + f := &Fs{ + name: name, + opt: *opt, + ci: ci, + pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))), + imdsPacer: fs.NewPacer(ctx, pacer.NewAzureIMDS()), + uploadToken: pacer.NewTokenDispenser(ci.Transfers), + client: fshttp.NewClient(ctx), + cache: 
bucket.NewCache(), + cntURLcache: make(map[string]*azblob.ContainerURL, 1), + pool: pool.New( + time.Duration(opt.MemoryPoolFlushTime), + int(opt.ChunkSize), + ci.Transfers, + opt.MemoryPoolUseMmap, + ), + } + f.imdsPacer.SetRetries(5) // per IMDS documentation + f.setRoot(root) + f.features = (&fs.Features{ + ReadMimeType: true, + WriteMimeType: true, + BucketBased: true, + BucketBasedRootOK: true, + SetTier: true, + GetTier: true, + }).Fill(ctx, f) + + var ( + u *url.URL + serviceURL azblob.ServiceURL + ) + switch { + case opt.UseEmulator: + credential, err := azblob.NewSharedKeyCredential(emulatorAccount, emulatorAccountKey) + if err != nil { + return nil, errors.Wrapf(err, "Failed to parse credentials") + } + u, err = url.Parse(emulatorBlobEndpoint) + if err != nil { + return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint") + } + pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}) + serviceURL = azblob.NewServiceURL(*u, pipeline) + case opt.UseMSI: + var token adal.Token + var userMSI *userMSI = &userMSI{} + if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 { + // Specifying a user-assigned identity. Exactly one of the above IDs must be specified. + // Validate and ensure exactly one is set. (To do: better validation.) + if len(opt.MSIClientID) > 0 { + if len(opt.MSIObjectID) > 0 || len(opt.MSIResourceID) > 0 { + return nil, errors.New("more than one user-assigned identity ID is set") + } + userMSI.Type = msiClientID + userMSI.Value = opt.MSIClientID + } + if len(opt.MSIObjectID) > 0 { + if len(opt.MSIClientID) > 0 || len(opt.MSIResourceID) > 0 { + return nil, errors.New("more than one user-assigned identity ID is set") + } + userMSI.Type = msiObjectID + userMSI.Value = opt.MSIObjectID + } + if len(opt.MSIResourceID) > 0 { + if len(opt.MSIClientID) > 0 || len(opt.MSIObjectID) > 0 { + return nil, errors.New("more than one user-assigned identity ID is set") + } + userMSI.Type = msiResourceID + userMSI.Value = opt.MSIResourceID + } + } else { + userMSI = nil + } + err = f.imdsPacer.Call(func() (bool, error) { + // Retry as specified by the documentation: + // https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#retry-guidance + token, err = GetMSIToken(ctx, userMSI) + return f.shouldRetry(err) + }) + + if err != nil { + return nil, errors.Wrapf(err, "Failed to acquire MSI token") + } + + u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint)) + if err != nil { + return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint") + } + credential := azblob.NewTokenCredential(token.AccessToken, func(credential azblob.TokenCredential) time.Duration { + fs.Debugf(f, "Token refresher called.") + var refreshedToken adal.Token + err := f.imdsPacer.Call(func() (bool, error) { + refreshedToken, err = GetMSIToken(ctx, userMSI) + return f.shouldRetry(err) + }) + if err != nil { + // Failed to refresh. + return 0 + } + credential.SetToken(refreshedToken.AccessToken) + now := time.Now().UTC() + // Refresh one minute before expiry. + refreshAt := refreshedToken.Expires().UTC().Add(-1 * time.Minute) + fs.Debugf(f, "Acquired new token that expires at %v; refreshing in %d s", refreshedToken.Expires(), + int(refreshAt.Sub(now).Seconds())) + if now.After(refreshAt) { + // Acquired a causality violation. 
+ return 0 + } + return refreshAt.Sub(now) + }) + pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}) + serviceURL = azblob.NewServiceURL(*u, pipeline) + case opt.Account != "" && opt.Key != "": + credential, err := azblob.NewSharedKeyCredential(opt.Account, opt.Key) + if err != nil { + return nil, errors.Wrapf(err, "Failed to parse credentials") + } + + u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint)) + if err != nil { + return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint") + } + pipeline := f.newPipeline(credential, azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}) + serviceURL = azblob.NewServiceURL(*u, pipeline) + case opt.SASURL != "": + u, err = url.Parse(opt.SASURL) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse SAS URL") + } + // use anonymous credentials in case of sas url + pipeline := f.newPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}}) + // Check if we have container level SAS or account level sas + parts := azblob.NewBlobURLParts(*u) + if parts.ContainerName != "" { + if f.rootContainer != "" && parts.ContainerName != f.rootContainer { + return nil, errors.New("Container name in SAS URL and container provided in command do not match") + } + containerURL := azblob.NewContainerURL(*u, pipeline) + f.cntURLcache[parts.ContainerName] = &containerURL + f.isLimited = true + } else { + serviceURL = azblob.NewServiceURL(*u, pipeline) + } + case opt.ServicePrincipalFile != "": + // Create a standard URL. + u, err = url.Parse(fmt.Sprintf("https://%s.%s", opt.Account, opt.Endpoint)) + if err != nil { + return nil, errors.Wrap(err, "failed to make azure storage url from account and endpoint") + } + // Try loading service principal credentials from file. + loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServicePrincipalFile)) + if err != nil { + return nil, errors.Wrap(err, "error opening service principal credentials file") + } + // Create a token refresher from service principal credentials. 
+ tokenRefresher, err := newServicePrincipalTokenRefresher(ctx, loadedCreds) + if err != nil { + return nil, errors.Wrap(err, "failed to create a service principal token") + } + options := azblob.PipelineOptions{Retry: azblob.RetryOptions{TryTimeout: maxTryTimeout}} + pipe := f.newPipeline(azblob.NewTokenCredential("", tokenRefresher), options) + serviceURL = azblob.NewServiceURL(*u, pipe) + default: + return nil, errors.New("No authentication method configured") + } + f.svcURL = &serviceURL + + if f.rootContainer != "" && f.rootDirectory != "" { + // Check to see if the (container,directory) is actually an existing file + oldRoot := f.root + newRoot, leaf := path.Split(oldRoot) + f.setRoot(newRoot) + _, err := f.NewObject(ctx, leaf) + if err != nil { + if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile { + // File doesn't exist or is a directory so return old f + f.setRoot(oldRoot) + return f, nil + } + return nil, err + } + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + return f, nil +} + +// return the container URL for the container passed in +func (f *Fs) cntURL(container string) (containerURL *azblob.ContainerURL) { + f.cntURLcacheMu.Lock() + defer f.cntURLcacheMu.Unlock() + var ok bool + if containerURL, ok = f.cntURLcache[container]; !ok { + cntURL := f.svcURL.NewContainerURL(container) + containerURL = &cntURL + f.cntURLcache[container] = containerURL + } + return containerURL + +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(remote string, info *azblob.BlobItemInternal) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + if info != nil { + err := o.decodeMetaDataFromBlob(info) + if err != nil { + return nil, err + } + } else { + err := o.readMetaData() // reads info and headers, returning an error + if err != nil { + return nil, err + } + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(remote, nil) +} + +// getBlobReference creates an empty blob reference with no metadata +func (f *Fs) getBlobReference(container, containerPath string) azblob.BlobURL { + return f.cntURL(container).NewBlobURL(containerPath) +} + +// updateMetadataWithModTime adds the modTime passed in to o.meta. +func (o *Object) updateMetadataWithModTime(modTime time.Time) { + // Make sure o.meta is not nil + if o.meta == nil { + o.meta = make(map[string]string, 1) + } + + // Set modTimeKey in it + o.meta[modTimeKey] = modTime.Format(timeFormatOut) +} + +// Returns whether file is a directory marker or not +func isDirectoryMarker(size int64, metadata azblob.Metadata, remote string) bool { + // Directory markers are 0 length + if size == 0 { + // Note that metadata with hdi_isfolder = true seems to be a + // defacto standard for marking blobs as directories. 
+ endsWithSlash := strings.HasSuffix(remote, "/") + if endsWithSlash || remote == "" || metadata["hdi_isfolder"] == "true" { + return true + } + + } + return false +} + +// listFn is called from list to handle an object +type listFn func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error + +// list lists the objects into the function supplied from +// the container and root supplied +// +// dir is the starting directory, "" for root +// +// The remote has prefix removed from it and if addContainer is set then +// it adds the container to the start. +func (f *Fs) list(ctx context.Context, container, directory, prefix string, addContainer bool, recurse bool, maxResults uint, fn listFn) error { + if f.cache.IsDeleted(container) { + return fs.ErrorDirNotFound + } + if prefix != "" { + prefix += "/" + } + if directory != "" { + directory += "/" + } + delimiter := "" + if !recurse { + delimiter = "/" + } + + options := azblob.ListBlobsSegmentOptions{ + Details: azblob.BlobListingDetails{ + Copy: false, + Metadata: true, + Snapshots: false, + UncommittedBlobs: false, + Deleted: false, + }, + Prefix: directory, + MaxResults: int32(maxResults), + } + for marker := (azblob.Marker{}); marker.NotDone(); { + var response *azblob.ListBlobsHierarchySegmentResponse + err := f.pacer.Call(func() (bool, error) { + var err error + response, err = f.cntURL(container).ListBlobsHierarchySegment(ctx, marker, delimiter, options) + return f.shouldRetry(err) + }) + + if err != nil { + // Check http error code along with service code, current SDK doesn't populate service code correctly sometimes + if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) { + return fs.ErrorDirNotFound + } + return err + } + // Advance marker to next + marker = response.NextMarker + for i := range response.Segment.BlobItems { + file := &response.Segment.BlobItems[i] + // Finish if file name no longer has prefix + // if prefix != "" && !strings.HasPrefix(file.Name, prefix) { + // return nil + // } + remote := f.opt.Enc.ToStandardPath(file.Name) + if !strings.HasPrefix(remote, prefix) { + fs.Debugf(f, "Odd name received %q", remote) + continue + } + remote = remote[len(prefix):] + if isDirectoryMarker(*file.Properties.ContentLength, file.Metadata, remote) { + continue // skip directory marker + } + if addContainer { + remote = path.Join(container, remote) + } + // Send object + err = fn(remote, file, false) + if err != nil { + return err + } + } + // Send the subdirectories + for _, remote := range response.Segment.BlobPrefixes { + remote := strings.TrimRight(remote.Name, "/") + remote = f.opt.Enc.ToStandardPath(remote) + if !strings.HasPrefix(remote, prefix) { + fs.Debugf(f, "Odd directory name received %q", remote) + continue + } + remote = remote[len(prefix):] + if addContainer { + remote = path.Join(container, remote) + } + // Send object + err = fn(remote, nil, true) + if err != nil { + return err + } + } + } + return nil +} + +// Convert a list item into a DirEntry +func (f *Fs) itemToDirEntry(remote string, object *azblob.BlobItemInternal, isDirectory bool) (fs.DirEntry, error) { + if isDirectory { + d := fs.NewDir(remote, time.Time{}) + return d, nil + } + o, err := f.newObjectWithInfo(remote, object) + if err != nil { + return nil, err + } + return o, nil +} + +// Check to see if this is a limited container and the container is not found +func (f *Fs) containerOK(container string) bool { 
+ if !f.isLimited { + return true + } + f.cntURLcacheMu.Lock() + defer f.cntURLcacheMu.Unlock() + for limitedContainer := range f.cntURLcache { + if container == limitedContainer { + return true + } + } + return false +} + +// listDir lists a single directory +func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) { + if !f.containerOK(container) { + return nil, fs.ErrorDirNotFound + } + err = f.list(ctx, container, directory, prefix, addContainer, false, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error { + entry, err := f.itemToDirEntry(remote, object, isDirectory) + if err != nil { + return err + } + if entry != nil { + entries = append(entries, entry) + } + return nil + }) + if err != nil { + return nil, err + } + // container must be present if listing succeeded + f.cache.MarkOK(container) + return entries, nil +} + +// listContainers returns all the containers to out +func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err error) { + if f.isLimited { + f.cntURLcacheMu.Lock() + for container := range f.cntURLcache { + d := fs.NewDir(container, time.Time{}) + entries = append(entries, d) + } + f.cntURLcacheMu.Unlock() + return entries, nil + } + err = f.listContainersToFn(func(container *azblob.ContainerItem) error { + d := fs.NewDir(f.opt.Enc.ToStandardName(container.Name), container.Properties.LastModified) + f.cache.MarkOK(container.Name) + entries = append(entries, d) + return nil + }) + if err != nil { + return nil, err + } + return entries, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + container, directory := f.split(dir) + if container == "" { + if directory != "" { + return nil, fs.ErrorListBucketRequired + } + return f.listContainers(ctx) + } + return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "") +} + +// ListR lists the objects and directories of the Fs starting +// from dir recursively into out. +// +// dir should be "" to start from the root, and should not +// have trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +// +// It should call callback for each tranche of entries read. +// These need not be returned in any particular order. If +// callback returns an error then the listing will stop +// immediately. +// +// Don't implement this unless you have a more efficient way +// of listing recursively than doing a directory traversal.
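+//
+// Here, when dir addresses the root of the remote (no container in the
+// path), every container is listed and then recursed into; otherwise only
+// the requested container is walked.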
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { + container, directory := f.split(dir) + list := walk.NewListRHelper(callback) + listR := func(container, directory, prefix string, addContainer bool) error { + return f.list(ctx, container, directory, prefix, addContainer, true, f.opt.ListChunkSize, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error { + entry, err := f.itemToDirEntry(remote, object, isDirectory) + if err != nil { + return err + } + return list.Add(entry) + }) + } + if container == "" { + entries, err := f.listContainers(ctx) + if err != nil { + return err + } + for _, entry := range entries { + err = list.Add(entry) + if err != nil { + return err + } + container := entry.Remote() + err = listR(container, "", f.rootDirectory, true) + if err != nil { + return err + } + // container must be present if listing succeeded + f.cache.MarkOK(container) + } + } else { + if !f.containerOK(container) { + return fs.ErrorDirNotFound + } + err = listR(container, directory, f.rootDirectory, f.rootContainer == "") + if err != nil { + return err + } + // container must be present if listing succeeded + f.cache.MarkOK(container) + } + return list.Flush() +} + +// listContainerFn is called from listContainersToFn to handle a container +type listContainerFn func(*azblob.ContainerItem) error + +// listContainersToFn lists the containers to the function supplied +func (f *Fs) listContainersToFn(fn listContainerFn) error { + params := azblob.ListContainersSegmentOptions{ + MaxResults: int32(f.opt.ListChunkSize), + } + ctx := context.Background() + for marker := (azblob.Marker{}); marker.NotDone(); { + var response *azblob.ListContainersSegmentResponse + err := f.pacer.Call(func() (bool, error) { + var err error + response, err = f.svcURL.ListContainersSegment(ctx, marker, params) + return f.shouldRetry(err) + }) + if err != nil { + return err + } + + for i := range response.ContainerItems { + err = fn(&response.ContainerItems[i]) + if err != nil { + return err + } + } + marker = response.NextMarker + } + + return nil +} + +// Put the object into the container +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + // Temporary Object under construction + fs := &Object{ + fs: f, + remote: src.Remote(), + } + return fs, fs.Update(ctx, in, src, options...) +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
+} + +// Mkdir creates the container if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + container, _ := f.split(dir) + return f.makeContainer(ctx, container) +} + +// makeContainer creates the container if it doesn't exist +func (f *Fs) makeContainer(ctx context.Context, container string) error { + return f.cache.Create(container, func() error { + // If this is a SAS URL limited to a container then assume it is already created + if f.isLimited { + return nil + } + // now try to create the container + return f.pacer.Call(func() (bool, error) { + _, err := f.cntURL(container).Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + if err != nil { + if storageErr, ok := err.(azblob.StorageError); ok { + switch storageErr.ServiceCode() { + case azblob.ServiceCodeContainerAlreadyExists: + return false, nil + case azblob.ServiceCodeContainerBeingDeleted: + // From https://docs.microsoft.com/en-us/rest/api/storageservices/delete-container + // When a container is deleted, a container with the same name cannot be created + // for at least 30 seconds; the container may not be available for more than 30 + // seconds if the service is still processing the request. + time.Sleep(6 * time.Second) // default 10 retries will be 60 seconds + f.cache.MarkDeleted(container) + return true, err + } + } + } + return f.shouldRetry(err) + }) + }, nil) +} + +// isEmpty checks to see if a given (container, directory) is empty and returns an error if not +func (f *Fs) isEmpty(ctx context.Context, container, directory string) (err error) { + empty := true + err = f.list(ctx, container, directory, f.rootDirectory, f.rootContainer == "", true, 1, func(remote string, object *azblob.BlobItemInternal, isDirectory bool) error { + empty = false + return nil + }) + if err != nil { + return err + } + if !empty { + return fs.ErrorDirectoryNotEmpty + } + return nil +} + +// deleteContainer deletes the container. It can delete a full +// container so use isEmpty if you don't want that. +func (f *Fs) deleteContainer(ctx context.Context, container string) error { + return f.cache.Remove(container, func() error { + options := azblob.ContainerAccessConditions{} + return f.pacer.Call(func() (bool, error) { + _, err := f.cntURL(container).GetProperties(ctx, azblob.LeaseAccessConditions{}) + if err == nil { + _, err = f.cntURL(container).Delete(ctx, options) + } + + if err != nil { + // Check http error code along with service code, current SDK doesn't populate service code correctly sometimes + if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeContainerNotFound || storageErr.Response().StatusCode == http.StatusNotFound) { + return false, fs.ErrorDirNotFound + } + + return f.shouldRetry(err) + } + + return f.shouldRetry(err) + }) + }) +} + +// Rmdir deletes the container if the fs is at the root +// +// Returns an error if it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + container, directory := f.split(dir) + if container == "" || directory != "" { + return nil + } + err := f.isEmpty(ctx, container, directory) + if err != nil { + return err + } + return f.deleteContainer(ctx, container) +} + +// Precision of the remote +func (f *Fs) Precision() time.Duration { + return time.Nanosecond +} + +// Hashes returns the supported hash sets. +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.MD5) +} + +// Purge deletes all the files and directories including the old versions. 
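+//
+// Purge only acts at the root of a container; for anything deeper it
+// returns fs.ErrorCantPurge so the caller falls back to deleting the
+// entries individually.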
+func (f *Fs) Purge(ctx context.Context, dir string) error { + container, directory := f.split(dir) + if container == "" || directory != "" { + // Delegate to caller if not root of a container + return fs.ErrorCantPurge + } + return f.deleteContainer(ctx, container) +} + +// Copy src to this remote using server-side copy operations. +// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + dstContainer, dstPath := f.split(remote) + err := f.makeContainer(ctx, dstContainer) + if err != nil { + return nil, err + } + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + dstBlobURL := f.getBlobReference(dstContainer, dstPath) + srcBlobURL := srcObj.getBlobReference() + + source, err := url.Parse(srcBlobURL.String()) + if err != nil { + return nil, err + } + + options := azblob.BlobAccessConditions{} + var startCopy *azblob.BlobStartCopyFromURLResponse + + err = f.pacer.Call(func() (bool, error) { + startCopy, err = dstBlobURL.StartCopyFromURL(ctx, *source, nil, azblob.ModifiedAccessConditions{}, options, azblob.AccessTierType(f.opt.AccessTier), nil) + return f.shouldRetry(err) + }) + if err != nil { + return nil, err + } + + copyStatus := startCopy.CopyStatus() + for copyStatus == azblob.CopyStatusPending { + time.Sleep(1 * time.Second) + getMetadata, err := dstBlobURL.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{}) + if err != nil { + return nil, err + } + copyStatus = getMetadata.CopyStatus() + } + + return f.NewObject(ctx, remote) +} + +func (f *Fs) getMemoryPool(size int64) *pool.Pool { + if size == int64(f.opt.ChunkSize) { + return f.pool + } + + return pool.New( + time.Duration(f.opt.MemoryPoolFlushTime), + int(size), + f.ci.Transfers, + f.opt.MemoryPoolUseMmap, + ) +} + +// ------------------------------------------------------------ + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the MD5 of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + if t != hash.MD5 { + return "", hash.ErrUnsupported + } + // Convert base64 encoded md5 into lower case hex + if o.md5 == "" { + return "", nil + } + data, err := base64.StdEncoding.DecodeString(o.md5) + if err != nil { + return "", errors.Wrapf(err, "Failed to decode Content-MD5: %q", o.md5) + } + return hex.EncodeToString(data), nil +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.size +} + +func (o *Object) setMetadata(metadata azblob.Metadata) { + if len(metadata) > 0 { + o.meta = metadata + if modTime, ok := metadata[modTimeKey]; ok { + when, err := time.Parse(timeFormatIn, modTime) + if err != nil { + fs.Debugf(o, "Couldn't parse %v = %q: %v", modTimeKey, modTime, err) + } + o.modTime = when + } + } else { + o.meta = nil + } +} + +// decodeMetaDataFromPropertiesResponse sets the metadata from the data passed in +// +// Sets +// o.id +// o.modTime +// o.size +// o.md5 +// o.meta +func (o *Object) 
decodeMetaDataFromPropertiesResponse(info *azblob.BlobGetPropertiesResponse) (err error) { + metadata := info.NewMetadata() + size := info.ContentLength() + if isDirectoryMarker(size, metadata, o.remote) { + return fs.ErrorNotAFile + } + // NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain + // this as base64 encoded string. + o.md5 = base64.StdEncoding.EncodeToString(info.ContentMD5()) + o.mimeType = info.ContentType() + o.size = size + o.modTime = info.LastModified() + o.accessTier = azblob.AccessTierType(info.AccessTier()) + o.setMetadata(metadata) + + return nil +} + +func (o *Object) decodeMetaDataFromBlob(info *azblob.BlobItemInternal) (err error) { + metadata := info.Metadata + size := *info.Properties.ContentLength + if isDirectoryMarker(size, metadata, o.remote) { + return fs.ErrorNotAFile + } + // NOTE - Client library always returns MD5 as base64 decoded string, Object needs to maintain + // this as base64 encoded string. + o.md5 = base64.StdEncoding.EncodeToString(info.Properties.ContentMD5) + o.mimeType = *info.Properties.ContentType + o.size = size + o.modTime = info.Properties.LastModified + o.accessTier = info.Properties.AccessTier + o.setMetadata(metadata) + return nil +} + +// getBlobReference creates an empty blob reference with no metadata +func (o *Object) getBlobReference() azblob.BlobURL { + container, directory := o.split() + return o.fs.getBlobReference(container, directory) +} + +// clearMetaData clears enough metadata so readMetaData will re-read it +func (o *Object) clearMetaData() { + o.modTime = time.Time{} +} + +// readMetaData gets the metadata if it hasn't already been fetched +// +// Sets +// o.id +// o.modTime +// o.size +// o.md5 +func (o *Object) readMetaData() (err error) { + if !o.modTime.IsZero() { + return nil + } + blob := o.getBlobReference() + + // Read metadata (this includes metadata) + options := azblob.BlobAccessConditions{} + ctx := context.Background() + var blobProperties *azblob.BlobGetPropertiesResponse + err = o.fs.pacer.Call(func() (bool, error) { + blobProperties, err = blob.GetProperties(ctx, options, azblob.ClientProvidedKeyOptions{}) + return o.fs.shouldRetry(err) + }) + if err != nil { + // On directories - GetProperties does not work and current SDK does not populate service code correctly hence check regular http response as well + if storageErr, ok := err.(azblob.StorageError); ok && (storageErr.ServiceCode() == azblob.ServiceCodeBlobNotFound || storageErr.Response().StatusCode == http.StatusNotFound) { + return fs.ErrorObjectNotFound + } + return err + } + + return o.decodeMetaDataFromPropertiesResponse(blobProperties) +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) (result time.Time) { + // The error is logged in readMetaData + _ = o.readMetaData() + return o.modTime +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + // Make sure o.meta is not nil + if o.meta == nil { + o.meta = make(map[string]string, 1) + } + // Set modTimeKey in it + o.meta[modTimeKey] = modTime.Format(timeFormatOut) + + blob := o.getBlobReference() + err := o.fs.pacer.Call(func() (bool, error) { + _, err := blob.SetMetadata(ctx, o.meta, azblob.BlobAccessConditions{}, azblob.ClientProvidedKeyOptions{}) + return o.fs.shouldRetry(err) + }) + 
if err != nil { + return err + } + o.modTime = modTime + return nil +} + +// Storable returns if this object is storable +func (o *Object) Storable() bool { + return true +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + // Offset and Count for range download + var offset int64 + var count int64 + if o.AccessTier() == azblob.AccessTierArchive { + return nil, errors.Errorf("Blob in archive tier, you need to set tier to hot or cool first") + } + fs.FixRangeOption(options, o.size) + for _, option := range options { + switch x := option.(type) { + case *fs.RangeOption: + offset, count = x.Decode(o.size) + if count < 0 { + count = o.size - offset + } + case *fs.SeekOption: + offset = x.Offset + default: + if option.Mandatory() { + fs.Logf(o, "Unsupported mandatory option: %v", option) + } + } + } + blob := o.getBlobReference() + ac := azblob.BlobAccessConditions{} + var downloadResponse *azblob.DownloadResponse + err = o.fs.pacer.Call(func() (bool, error) { + downloadResponse, err = blob.Download(ctx, offset, count, ac, false, azblob.ClientProvidedKeyOptions{}) + return o.fs.shouldRetry(err) + }) + if err != nil { + return nil, errors.Wrap(err, "failed to open for download") + } + in = downloadResponse.Body(azblob.RetryReaderOptions{}) + return in, nil +} + +// dontEncode is the characters that do not need percent-encoding +// +// The characters that do not need percent-encoding are a subset of +// the printable ASCII characters: upper-case letters, lower-case +// letters, digits, ".", "_", "-", "/", "~", "!", "$", "'", "(", ")", +// "*", ";", "=", ":", and "@". All other byte values in a UTF-8 must +// be replaced with "%" and the two-digit hex value of the byte. +const dontEncode = (`abcdefghijklmnopqrstuvwxyz` + + `ABCDEFGHIJKLMNOPQRSTUVWXYZ` + + `0123456789` + + `._-/~!$'()*;=:@`) + +// noNeedToEncode is a bitmap of characters which don't need % encoding +var noNeedToEncode [256]bool + +func init() { + for _, c := range dontEncode { + noNeedToEncode[c] = true + } +} + +// increment the slice passed in as LSB binary +func increment(xs []byte) { + for i, digit := range xs { + newDigit := digit + 1 + xs[i] = newDigit + if newDigit >= digit { + // exit if no carry + break + } + } +} + +// poolWrapper wraps a pool.Pool as an azblob.TransferManager +type poolWrapper struct { + pool *pool.Pool + bufToken chan struct{} + runToken chan struct{} +} + +// newPoolWrapper creates an azblob.TransferManager that will use a +// pool.Pool with maximum concurrency as specified. +func (f *Fs) newPoolWrapper(concurrency int) azblob.TransferManager { + return &poolWrapper{ + pool: f.pool, + bufToken: make(chan struct{}, concurrency), + runToken: make(chan struct{}, concurrency), + } +} + +// Get implements TransferManager.Get(). +func (pw *poolWrapper) Get() []byte { + pw.bufToken <- struct{}{} + return pw.pool.Get() +} + +// Put implements TransferManager.Put(). +func (pw *poolWrapper) Put(b []byte) { + pw.pool.Put(b) + <-pw.bufToken +} + +// Run implements TransferManager.Run(). +func (pw *poolWrapper) Run(f func()) { + pw.runToken <- struct{}{} + go func() { + f() + <-pw.runToken + }() +} + +// Close implements TransferManager.Close(). 
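+//
+// It is a no-op here: the buffers belong to the Fs-level memory pool,
+// which is flushed on its own timer rather than per transfer.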
+func (pw *poolWrapper) Close() { +} + +// Update the object with the contents of the io.Reader, modTime and size +// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + if o.accessTier == azblob.AccessTierArchive { + if o.fs.opt.ArchiveTierDelete { + fs.Debugf(o, "deleting archive tier blob before updating") + err = o.Remove(ctx) + if err != nil { + return errors.Wrap(err, "failed to delete archive blob before updating") + } + } else { + return errCantUpdateArchiveTierBlobs + } + } + container, _ := o.split() + err = o.fs.makeContainer(ctx, container) + if err != nil { + return err + } + + // Update Mod time + o.updateMetadataWithModTime(src.ModTime(ctx)) + if err != nil { + return err + } + + blob := o.getBlobReference() + httpHeaders := azblob.BlobHTTPHeaders{} + httpHeaders.ContentType = fs.MimeType(ctx, src) + + // Compute the Content-MD5 of the file. As we stream all uploads it + // will be set in PutBlockList API call using the 'x-ms-blob-content-md5' header + if !o.fs.opt.DisableCheckSum { + if sourceMD5, _ := src.Hash(ctx, hash.MD5); sourceMD5 != "" { + sourceMD5bytes, err := hex.DecodeString(sourceMD5) + if err == nil { + httpHeaders.ContentMD5 = sourceMD5bytes + } else { + fs.Debugf(o, "Failed to decode %q as MD5: %v", sourceMD5, err) + } + } + } + + putBlobOptions := azblob.UploadStreamToBlockBlobOptions{ + BufferSize: int(o.fs.opt.ChunkSize), + MaxBuffers: uploadConcurrency, + Metadata: o.meta, + BlobHTTPHeaders: httpHeaders, + TransferManager: o.fs.newPoolWrapper(uploadConcurrency), + } + + // Don't retry, return a retry error instead + err = o.fs.pacer.CallNoRetry(func() (bool, error) { + // Stream contents of the reader object to the given blob URL + blockBlobURL := blob.ToBlockBlobURL() + _, err = azblob.UploadStreamToBlockBlob(ctx, in, blockBlobURL, putBlobOptions) + return o.fs.shouldRetry(err) + }) + if err != nil { + return err + } + // Refresh metadata on object + o.clearMetaData() + err = o.readMetaData() + if err != nil { + return err + } + + // If tier is not changed or not specified, do not attempt to invoke `SetBlobTier` operation + if o.fs.opt.AccessTier == string(defaultAccessTier) || o.fs.opt.AccessTier == string(o.AccessTier()) { + return nil + } + + // Now, set blob tier based on configured access tier + return o.SetTier(o.fs.opt.AccessTier) +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) error { + blob := o.getBlobReference() + snapShotOptions := azblob.DeleteSnapshotsOptionNone + ac := azblob.BlobAccessConditions{} + return o.fs.pacer.Call(func() (bool, error) { + _, err := blob.Delete(ctx, snapShotOptions, ac) + return o.fs.shouldRetry(err) + }) +} + +// MimeType of an Object if known, "" otherwise +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// AccessTier of an object, default is of type none +func (o *Object) AccessTier() azblob.AccessTierType { + return o.accessTier +} + +// SetTier performs changing object tier +func (o *Object) SetTier(tier string) error { + if !validateAccessTier(tier) { + return errors.Errorf("Tier %s not supported by Azure Blob Storage", tier) + } + + // Check if current tier already matches with desired tier + if o.GetTier() == tier { + return nil + } + desiredAccessTier := azblob.AccessTierType(tier) + blob := o.getBlobReference() + ctx := context.Background() + err := o.fs.pacer.Call(func() (bool, error) { + _, err := blob.SetTier(ctx, 
desiredAccessTier, azblob.LeaseAccessConditions{}) + return o.fs.shouldRetry(err) + }) + + if err != nil { + return errors.Wrap(err, "Failed to set Blob Tier") + } + + // Set access tier on local object also, this typically + // gets updated on get blob properties + o.accessTier = desiredAccessTier + fs.Debugf(o, "Successfully changed object tier to %s", tier) + + return nil +} + +// GetTier returns object tier in azure as string +func (o *Object) GetTier() string { + return string(o.accessTier) +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = &Fs{} + _ fs.Copier = &Fs{} + _ fs.PutStreamer = &Fs{} + _ fs.Purger = &Fs{} + _ fs.ListRer = &Fs{} + _ fs.Object = &Object{} + _ fs.MimeTyper = &Object{} + _ fs.GetTierer = &Object{} + _ fs.SetTierer = &Object{} +) diff --git a/vendor/github.com/rclone/rclone/backend/azureblob/azureblob_unsupported.go b/vendor/github.com/rclone/rclone/backend/azureblob/azureblob_unsupported.go new file mode 100644 index 00000000000..8a4a8255e34 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/azureblob/azureblob_unsupported.go @@ -0,0 +1,6 @@ +// Build for azureblob for unsupported platforms to stop go complaining +// about "no buildable Go source files " + +// +build plan9 solaris js !go1.14 + +package azureblob diff --git a/vendor/github.com/rclone/rclone/backend/azureblob/imds.go b/vendor/github.com/rclone/rclone/backend/azureblob/imds.go new file mode 100644 index 00000000000..b07981c3e42 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/azureblob/imds.go @@ -0,0 +1,137 @@ +// +build !plan9,!solaris,!js,go1.14 + +package azureblob + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/fshttp" +) + +const ( + azureResource = "https://storage.azure.com" + imdsAPIVersion = "2018-02-01" + msiEndpointDefault = "http://169.254.169.254/metadata/identity/oauth2/token" +) + +// This custom type is used to add the port the test server has bound to +// to the request context. +type testPortKey string + +type msiIdentifierType int + +const ( + msiClientID msiIdentifierType = iota + msiObjectID + msiResourceID +) + +type userMSI struct { + Type msiIdentifierType + Value string +} + +type httpError struct { + Response *http.Response +} + +func (e httpError) Error() string { + return fmt.Sprintf("HTTP error %v (%v)", e.Response.StatusCode, e.Response.Status) +} + +// GetMSIToken attempts to obtain an MSI token from the Azure Instance +// Metadata Service. +func GetMSIToken(ctx context.Context, identity *userMSI) (adal.Token, error) { + // Attempt to get an MSI token; silently continue if unsuccessful. + // This code has been lovingly stolen from azcopy's OAuthTokenManager. + result := adal.Token{} + req, err := http.NewRequestWithContext(ctx, "GET", msiEndpointDefault, nil) + if err != nil { + fs.Debugf(nil, "Failed to create request: %v", err) + return result, err + } + params := req.URL.Query() + params.Set("resource", azureResource) + params.Set("api-version", imdsAPIVersion) + + // Specify user-assigned identity if requested. 
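+	// Exactly one of client_id, object_id or mi_res_id should be set for
+	// a user-assigned identity; a nil identity selects the VM's
+	// system-assigned identity. A hedged caller-side sketch (the GUID is
+	// illustrative, not from the original source):
+	//
+	//	token, err := GetMSIToken(ctx, &userMSI{
+	//		Type:  msiClientID,
+	//		Value: "11111111-2222-3333-4444-555555555555",
+	//	})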
+ if identity != nil { + switch identity.Type { + case msiClientID: + params.Set("client_id", identity.Value) + case msiObjectID: + params.Set("object_id", identity.Value) + case msiResourceID: + params.Set("mi_res_id", identity.Value) + default: + // If this happens, the calling function and this one don't agree on + // what valid ID types exist. + return result, fmt.Errorf("unknown MSI identity type specified") + } + } + req.URL.RawQuery = params.Encode() + + // The Metadata header is required by all calls to IMDS. + req.Header.Set("Metadata", "true") + + // If this function is run in a test, query the test server instead of IMDS. + testPort, isTest := ctx.Value(testPortKey("testPort")).(int) + if isTest { + req.URL.Host = fmt.Sprintf("localhost:%d", testPort) + req.Host = req.URL.Host + } + + // Send request + httpClient := fshttp.NewClient(ctx) + resp, err := httpClient.Do(req) + if err != nil { + return result, errors.Wrap(err, "MSI is not enabled on this VM") + } + defer func() { // resp and Body should not be nil + _, err = io.Copy(ioutil.Discard, resp.Body) + if err != nil { + fs.Debugf(nil, "Unable to drain IMDS response: %v", err) + } + err = resp.Body.Close() + if err != nil { + fs.Debugf(nil, "Unable to close IMDS response: %v", err) + } + }() + // Check if the status code indicates success + // The request returns 200 currently, add 201 and 202 as well for possible extension. + switch resp.StatusCode { + case 200, 201, 202: + break + default: + body, _ := ioutil.ReadAll(resp.Body) + fs.Errorf(nil, "Couldn't obtain OAuth token from IMDS; server returned status code %d and body: %v", resp.StatusCode, string(body)) + return result, httpError{Response: resp} + } + + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return result, errors.Wrap(err, "Couldn't read IMDS response") + } + // Remove BOM, if any. azcopy does this so I'm following along. + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + + // This would be a good place to persist the token if a large number of rclone + // invocations are being made in a short amount of time. If the token is + // persisted, the azureblob code will need to check for expiry before every + // storage API call. 
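+	// For reference, a successful IMDS response body unmarshals into
+	// adal.Token from JSON shaped roughly like this (the values are
+	// illustrative, not captured from a real VM):
+	//
+	//	{"access_token": "eyJ0eXAi...", "expires_on": "1581980581",
+	//	 "resource": "https://storage.azure.com", "token_type": "Bearer"}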
+ err = json.Unmarshal(b, &result) + if err != nil { + return result, errors.Wrap(err, "Couldn't unmarshal IMDS response") + } + + return result, nil +} diff --git a/vendor/github.com/rclone/rclone/backend/crypt/cipher.go b/vendor/github.com/rclone/rclone/backend/crypt/cipher.go new file mode 100644 index 00000000000..77dc34643e4 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/crypt/cipher.go @@ -0,0 +1,1050 @@ +package crypt + +import ( + "bytes" + "context" + "crypto/aes" + gocipher "crypto/cipher" + "crypto/rand" + "encoding/base32" + "fmt" + "io" + "strconv" + "strings" + "sync" + "unicode/utf8" + + "github.com/pkg/errors" + "github.com/rclone/rclone/backend/crypt/pkcs7" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rfjakob/eme" + "golang.org/x/crypto/nacl/secretbox" + "golang.org/x/crypto/scrypt" +) + +// Constants +const ( + nameCipherBlockSize = aes.BlockSize + fileMagic = "RCLONE\x00\x00" + fileMagicSize = len(fileMagic) + fileNonceSize = 24 + fileHeaderSize = fileMagicSize + fileNonceSize + blockHeaderSize = secretbox.Overhead + blockDataSize = 64 * 1024 + blockSize = blockHeaderSize + blockDataSize + encryptedSuffix = ".bin" // when file name encryption is off we add this suffix to make sure the cloud provider doesn't process the file +) + +// Errors returned by cipher +var ( + ErrorBadDecryptUTF8 = errors.New("bad decryption - utf-8 invalid") + ErrorBadDecryptControlChar = errors.New("bad decryption - contains control chars") + ErrorNotAMultipleOfBlocksize = errors.New("not a multiple of blocksize") + ErrorTooShortAfterDecode = errors.New("too short after base32 decode") + ErrorTooLongAfterDecode = errors.New("too long after base32 decode") + ErrorEncryptedFileTooShort = errors.New("file is too short to be encrypted") + ErrorEncryptedFileBadHeader = errors.New("file has truncated block header") + ErrorEncryptedBadMagic = errors.New("not an encrypted file - bad magic string") + ErrorEncryptedBadBlock = errors.New("failed to authenticate decrypted block - bad password?") + ErrorBadBase32Encoding = errors.New("bad base32 filename encoding") + ErrorFileClosed = errors.New("file already closed") + ErrorNotAnEncryptedFile = errors.New("not an encrypted file - no \"" + encryptedSuffix + "\" suffix") + ErrorBadSeek = errors.New("Seek beyond end of file") + defaultSalt = []byte{0xA8, 0x0D, 0xF4, 0x3A, 0x8F, 0xBD, 0x03, 0x08, 0xA7, 0xCA, 0xB8, 0x3E, 0x58, 0x1F, 0x86, 0xB1} + obfuscQuoteRune = '!' 
+) + +// Global variables +var ( + fileMagicBytes = []byte(fileMagic) +) + +// ReadSeekCloser is the interface of the read handles +type ReadSeekCloser interface { + io.Reader + io.Seeker + io.Closer + fs.RangeSeeker +} + +// OpenRangeSeek opens the file handle at the offset with the limit given +type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) + +// NameEncryptionMode is the type of file name encryption in use +type NameEncryptionMode int + +// NameEncryptionMode levels +const ( + NameEncryptionOff NameEncryptionMode = iota + NameEncryptionStandard + NameEncryptionObfuscated +) + +// NewNameEncryptionMode turns a string into a NameEncryptionMode +func NewNameEncryptionMode(s string) (mode NameEncryptionMode, err error) { + s = strings.ToLower(s) + switch s { + case "off": + mode = NameEncryptionOff + case "standard": + mode = NameEncryptionStandard + case "obfuscate": + mode = NameEncryptionObfuscated + default: + err = errors.Errorf("Unknown file name encryption mode %q", s) + } + return mode, err +} + +// String turns mode into a human readable string +func (mode NameEncryptionMode) String() (out string) { + switch mode { + case NameEncryptionOff: + out = "off" + case NameEncryptionStandard: + out = "standard" + case NameEncryptionObfuscated: + out = "obfuscate" + default: + out = fmt.Sprintf("Unknown mode #%d", mode) + } + return out +} + +// Cipher defines an encoding and decoding cipher for the crypt backend +type Cipher struct { + dataKey [32]byte // Key for secretbox + nameKey [32]byte // 16,24 or 32 bytes + nameTweak [nameCipherBlockSize]byte // used to tweak the name crypto + block gocipher.Block + mode NameEncryptionMode + buffers sync.Pool // encrypt/decrypt buffers + cryptoRand io.Reader // read crypto random numbers from here + dirNameEncrypt bool +} + +// newCipher initialises the cipher. If salt is "" then it uses a built in salt val +func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) { + c := &Cipher{ + mode: mode, + cryptoRand: rand.Reader, + dirNameEncrypt: dirNameEncrypt, + } + c.buffers.New = func() interface{} { + return make([]byte, blockSize) + } + err := c.Key(password, salt) + if err != nil { + return nil, err + } + return c, nil +} + +// Key creates all the internal keys from the password passed in using +// scrypt. +// +// If salt is "" we use a fixed salt just to make attackers lives +// slighty harder than using no salt. +// +// Note that empty password makes all 0x00 keys which is used in the +// tests. 
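+//
+// A hedged sketch of the slicing performed below (the variable names
+// are illustrative; the scrypt parameters mirror the actual call):
+//
+//	key, _ := scrypt.Key([]byte(password), saltBytes, 16384, 8, 1, 32+32+16)
+//	copy(dataKey[:], key[0:32])    // secretbox data key
+//	copy(nameKey[:], key[32:64])   // EME file name key
+//	copy(nameTweak[:], key[64:80]) // EME tweak, one AES block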
+func (c *Cipher) Key(password, salt string) (err error) { + const keySize = len(c.dataKey) + len(c.nameKey) + len(c.nameTweak) + var saltBytes = defaultSalt + if salt != "" { + saltBytes = []byte(salt) + } + var key []byte + if password == "" { + key = make([]byte, keySize) + } else { + key, err = scrypt.Key([]byte(password), saltBytes, 16384, 8, 1, keySize) + if err != nil { + return err + } + } + copy(c.dataKey[:], key) + copy(c.nameKey[:], key[len(c.dataKey):]) + copy(c.nameTweak[:], key[len(c.dataKey)+len(c.nameKey):]) + // Key the name cipher + c.block, err = aes.NewCipher(c.nameKey[:]) + return err +} + +// getBlock gets a block from the pool of size blockSize +func (c *Cipher) getBlock() []byte { + return c.buffers.Get().([]byte) +} + +// putBlock returns a block to the pool of size blockSize +func (c *Cipher) putBlock(buf []byte) { + if len(buf) != blockSize { + panic("bad blocksize returned to pool") + } + c.buffers.Put(buf) +} + +// encodeFileName encodes a filename using a modified version of +// standard base32 as described in RFC4648 +// +// The standard encoding is modified in two ways +// * it becomes lower case (no-one likes upper case filenames!) +// * we strip the padding character `=` +func encodeFileName(in []byte) string { + encoded := base32.HexEncoding.EncodeToString(in) + encoded = strings.TrimRight(encoded, "=") + return strings.ToLower(encoded) +} + +// decodeFileName decodes a filename as encoded by encodeFileName +func decodeFileName(in string) ([]byte, error) { + if strings.HasSuffix(in, "=") { + return nil, ErrorBadBase32Encoding + } + // First figure out how many padding characters to add + roundUpToMultipleOf8 := (len(in) + 7) &^ 7 + equals := roundUpToMultipleOf8 - len(in) + in = strings.ToUpper(in) + "========"[:equals] + return base32.HexEncoding.DecodeString(in) +} + +// encryptSegment encrypts a path segment +// +// This uses EME with AES +// +// EME (ECB-Mix-ECB) is a wide-block encryption mode presented in the +// 2003 paper "A Parallelizable Enciphering Mode" by Halevi and +// Rogaway. +// +// This makes for deterministic encryption which is what we want - the +// same filename must encrypt to the same thing. 
+// +// This means that +// * filenames with the same name will encrypt the same +// * filenames which start the same won't have a common prefix +func (c *Cipher) encryptSegment(plaintext string) string { + if plaintext == "" { + return "" + } + paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext)) + ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt) + return encodeFileName(ciphertext) +} + +// decryptSegment decrypts a path segment +func (c *Cipher) decryptSegment(ciphertext string) (string, error) { + if ciphertext == "" { + return "", nil + } + rawCiphertext, err := decodeFileName(ciphertext) + if err != nil { + return "", err + } + if len(rawCiphertext)%nameCipherBlockSize != 0 { + return "", ErrorNotAMultipleOfBlocksize + } + if len(rawCiphertext) == 0 { + // not possible if decodeFilename() working correctly + return "", ErrorTooShortAfterDecode + } + if len(rawCiphertext) > 2048 { + return "", ErrorTooLongAfterDecode + } + paddedPlaintext := eme.Transform(c.block, c.nameTweak[:], rawCiphertext, eme.DirectionDecrypt) + plaintext, err := pkcs7.Unpad(nameCipherBlockSize, paddedPlaintext) + if err != nil { + return "", err + } + return string(plaintext), err +} + +// Simple obfuscation routines +func (c *Cipher) obfuscateSegment(plaintext string) string { + if plaintext == "" { + return "" + } + + // If the string isn't valid UTF8 then don't rotate; just + // prepend a !. + if !utf8.ValidString(plaintext) { + return "!." + plaintext + } + + // Calculate a simple rotation based on the filename and + // the nameKey + var dir int + for _, runeValue := range plaintext { + dir += int(runeValue) + } + dir = dir % 256 + + // We'll use this number to store in the result filename... + var result bytes.Buffer + _, _ = result.WriteString(strconv.Itoa(dir) + ".") + + // but we'll augment it with the nameKey for real calculation + for i := 0; i < len(c.nameKey); i++ { + dir += int(c.nameKey[i]) + } + + // Now for each character, depending on the range it is in + // we will actually rotate a different amount + for _, runeValue := range plaintext { + switch { + case runeValue == obfuscQuoteRune: + // Quote the Quote character + _, _ = result.WriteRune(obfuscQuoteRune) + _, _ = result.WriteRune(obfuscQuoteRune) + + case runeValue >= '0' && runeValue <= '9': + // Number + thisdir := (dir % 9) + 1 + newRune := '0' + (int(runeValue)-'0'+thisdir)%10 + _, _ = result.WriteRune(rune(newRune)) + + case (runeValue >= 'A' && runeValue <= 'Z') || + (runeValue >= 'a' && runeValue <= 'z'): + // ASCII letter. Try to avoid trivial A->a mappings + thisdir := dir%25 + 1 + // Calculate the offset of this character in A-Za-z + pos := int(runeValue - 'A') + if pos >= 26 { + pos -= 6 // It's lower case + } + // Rotate the character to the new location + pos = (pos + thisdir) % 52 + if pos >= 26 { + pos += 6 // and handle lower case offset again + } + _, _ = result.WriteRune(rune('A' + pos)) + + case runeValue >= 0xA0 && runeValue <= 0xFF: + // Latin 1 supplement + thisdir := (dir % 95) + 1 + newRune := 0xA0 + (int(runeValue)-0xA0+thisdir)%96 + _, _ = result.WriteRune(rune(newRune)) + + case runeValue >= 0x100: + // Some random Unicode range; we have no good rules here + thisdir := (dir % 127) + 1 + base := int(runeValue - runeValue%256) + newRune := rune(base + (int(runeValue)-base+thisdir)%256) + // If the new character isn't a valid UTF8 char + // then don't rotate it. 
Quote it instead + if !utf8.ValidRune(newRune) { + _, _ = result.WriteRune(obfuscQuoteRune) + _, _ = result.WriteRune(runeValue) + } else { + _, _ = result.WriteRune(newRune) + } + + default: + // Leave character untouched + _, _ = result.WriteRune(runeValue) + } + } + return result.String() +} + +func (c *Cipher) deobfuscateSegment(ciphertext string) (string, error) { + if ciphertext == "" { + return "", nil + } + pos := strings.Index(ciphertext, ".") + if pos == -1 { + return "", ErrorNotAnEncryptedFile + } // No . + num := ciphertext[:pos] + if num == "!" { + // No rotation; probably original was not valid unicode + return ciphertext[pos+1:], nil + } + dir, err := strconv.Atoi(num) + if err != nil { + return "", ErrorNotAnEncryptedFile // Not a number + } + + // add the nameKey to get the real rotate distance + for i := 0; i < len(c.nameKey); i++ { + dir += int(c.nameKey[i]) + } + + var result bytes.Buffer + + inQuote := false + for _, runeValue := range ciphertext[pos+1:] { + switch { + case inQuote: + _, _ = result.WriteRune(runeValue) + inQuote = false + + case runeValue == obfuscQuoteRune: + inQuote = true + + case runeValue >= '0' && runeValue <= '9': + // Number + thisdir := (dir % 9) + 1 + newRune := '0' + int(runeValue) - '0' - thisdir + if newRune < '0' { + newRune += 10 + } + _, _ = result.WriteRune(rune(newRune)) + + case (runeValue >= 'A' && runeValue <= 'Z') || + (runeValue >= 'a' && runeValue <= 'z'): + thisdir := dir%25 + 1 + pos := int(runeValue - 'A') + if pos >= 26 { + pos -= 6 + } + pos = pos - thisdir + if pos < 0 { + pos += 52 + } + if pos >= 26 { + pos += 6 + } + _, _ = result.WriteRune(rune('A' + pos)) + + case runeValue >= 0xA0 && runeValue <= 0xFF: + thisdir := (dir % 95) + 1 + newRune := 0xA0 + int(runeValue) - 0xA0 - thisdir + if newRune < 0xA0 { + newRune += 96 + } + _, _ = result.WriteRune(rune(newRune)) + + case runeValue >= 0x100: + thisdir := (dir % 127) + 1 + base := int(runeValue - runeValue%256) + newRune := rune(base + (int(runeValue) - base - thisdir)) + if int(newRune) < base { + newRune += 256 + } + _, _ = result.WriteRune(newRune) + + default: + _, _ = result.WriteRune(runeValue) + + } + } + + return result.String(), nil +} + +// encryptFileName encrypts a file path +func (c *Cipher) encryptFileName(in string) string { + segments := strings.Split(in, "/") + for i := range segments { + // Skip directory name encryption if the user chose to + // leave them intact + if !c.dirNameEncrypt && i != (len(segments)-1) { + continue + } + if c.mode == NameEncryptionStandard { + segments[i] = c.encryptSegment(segments[i]) + } else { + segments[i] = c.obfuscateSegment(segments[i]) + } + } + return strings.Join(segments, "/") +} + +// EncryptFileName encrypts a file path +func (c *Cipher) EncryptFileName(in string) string { + if c.mode == NameEncryptionOff { + return in + encryptedSuffix + } + return c.encryptFileName(in) +} + +// EncryptDirName encrypts a directory path +func (c *Cipher) EncryptDirName(in string) string { + if c.mode == NameEncryptionOff || !c.dirNameEncrypt { + return in + } + return c.encryptFileName(in) +} + +// decryptFileName decrypts a file path +func (c *Cipher) decryptFileName(in string) (string, error) { + segments := strings.Split(in, "/") + for i := range segments { + var err error + // Skip directory name decryption if the user chose to + // leave them intact + if !c.dirNameEncrypt && i != (len(segments)-1) { + continue + } + if c.mode == NameEncryptionStandard { + segments[i], err = c.decryptSegment(segments[i]) + } else { + 
segments[i], err = c.deobfuscateSegment(segments[i]) + } + + if err != nil { + return "", err + } + } + return strings.Join(segments, "/"), nil +} + +// DecryptFileName decrypts a file path +func (c *Cipher) DecryptFileName(in string) (string, error) { + if c.mode == NameEncryptionOff { + remainingLength := len(in) - len(encryptedSuffix) + if remainingLength > 0 && strings.HasSuffix(in, encryptedSuffix) { + return in[:remainingLength], nil + } + return "", ErrorNotAnEncryptedFile + } + return c.decryptFileName(in) +} + +// DecryptDirName decrypts a directory path +func (c *Cipher) DecryptDirName(in string) (string, error) { + if c.mode == NameEncryptionOff || !c.dirNameEncrypt { + return in, nil + } + return c.decryptFileName(in) +} + +// NameEncryptionMode returns the encryption mode in use for names +func (c *Cipher) NameEncryptionMode() NameEncryptionMode { + return c.mode +} + +// nonce is an NACL secretbox nonce +type nonce [fileNonceSize]byte + +// pointer returns the nonce as a *[24]byte for secretbox +func (n *nonce) pointer() *[fileNonceSize]byte { + return (*[fileNonceSize]byte)(n) +} + +// fromReader fills the nonce from an io.Reader - normally the OSes +// crypto random number generator +func (n *nonce) fromReader(in io.Reader) error { + read, err := io.ReadFull(in, (*n)[:]) + if read != fileNonceSize { + return errors.Wrap(err, "short read of nonce") + } + return nil +} + +// fromBuf fills the nonce from the buffer passed in +func (n *nonce) fromBuf(buf []byte) { + read := copy((*n)[:], buf) + if read != fileNonceSize { + panic("buffer to short to read nonce") + } +} + +// carry 1 up the nonce from position i +func (n *nonce) carry(i int) { + for ; i < len(*n); i++ { + digit := (*n)[i] + newDigit := digit + 1 + (*n)[i] = newDigit + if newDigit >= digit { + // exit if no carry + break + } + } +} + +// increment to add 1 to the nonce +func (n *nonce) increment() { + n.carry(0) +} + +// add a uint64 to the nonce +func (n *nonce) add(x uint64) { + carry := uint16(0) + for i := 0; i < 8; i++ { + digit := (*n)[i] + xDigit := byte(x) + x >>= 8 + carry += uint16(digit) + uint16(xDigit) + (*n)[i] = byte(carry) + carry >>= 8 + } + if carry != 0 { + n.carry(8) + } +} + +// encrypter encrypts an io.Reader on the fly +type encrypter struct { + mu sync.Mutex + in io.Reader + c *Cipher + nonce nonce + buf []byte + readBuf []byte + bufIndex int + bufSize int + err error +} + +// newEncrypter creates a new file handle encrypting on the fly +func (c *Cipher) newEncrypter(in io.Reader, nonce *nonce) (*encrypter, error) { + fh := &encrypter{ + in: in, + c: c, + buf: c.getBlock(), + readBuf: c.getBlock(), + bufSize: fileHeaderSize, + } + // Initialise nonce + if nonce != nil { + fh.nonce = *nonce + } else { + err := fh.nonce.fromReader(c.cryptoRand) + if err != nil { + return nil, err + } + } + // Copy magic into buffer + copy(fh.buf, fileMagicBytes) + // Copy nonce into buffer + copy(fh.buf[fileMagicSize:], fh.nonce[:]) + return fh, nil +} + +// Read as per io.Reader +func (fh *encrypter) Read(p []byte) (n int, err error) { + fh.mu.Lock() + defer fh.mu.Unlock() + + if fh.err != nil { + return 0, fh.err + } + if fh.bufIndex >= fh.bufSize { + // Read data + // FIXME should overlap the reads with a go-routine and 2 buffers? + readBuf := fh.readBuf[:blockDataSize] + n, err = io.ReadFull(fh.in, readBuf) + if n == 0 { + // err can't be nil since: + // n == len(buf) if and only if err == nil. 
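+			// (io.ReadFull reports io.EOF only when it read nothing at
+			// all, so a clean end of stream finishes the encrypter with
+			// io.EOF here.)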
+ return fh.finish(err) + } + // possibly err != nil here, but we will process the + // data and the next call to ReadFull will return 0, err + // Encrypt the block using the nonce + secretbox.Seal(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey) + fh.bufIndex = 0 + fh.bufSize = blockHeaderSize + n + fh.nonce.increment() + } + n = copy(p, fh.buf[fh.bufIndex:fh.bufSize]) + fh.bufIndex += n + return n, nil +} + +// finish sets the final error and tidies up +func (fh *encrypter) finish(err error) (int, error) { + if fh.err != nil { + return 0, fh.err + } + fh.err = err + fh.c.putBlock(fh.buf) + fh.buf = nil + fh.c.putBlock(fh.readBuf) + fh.readBuf = nil + return 0, err +} + +// Encrypt data encrypts the data stream +func (c *Cipher) encryptData(in io.Reader) (io.Reader, *encrypter, error) { + in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader + out, err := c.newEncrypter(in, nil) + if err != nil { + return nil, nil, err + } + return wrap(out), out, nil // and wrap the accounting back on +} + +// EncryptData encrypts the data stream +func (c *Cipher) EncryptData(in io.Reader) (io.Reader, error) { + out, _, err := c.encryptData(in) + return out, err +} + +// decrypter decrypts an io.ReaderCloser on the fly +type decrypter struct { + mu sync.Mutex + rc io.ReadCloser + nonce nonce + initialNonce nonce + c *Cipher + buf []byte + readBuf []byte + bufIndex int + bufSize int + err error + limit int64 // limit of bytes to read, -1 for unlimited + open OpenRangeSeek +} + +// newDecrypter creates a new file handle decrypting on the fly +func (c *Cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) { + fh := &decrypter{ + rc: rc, + c: c, + buf: c.getBlock(), + readBuf: c.getBlock(), + limit: -1, + } + // Read file header (magic + nonce) + readBuf := fh.readBuf[:fileHeaderSize] + _, err := io.ReadFull(fh.rc, readBuf) + if err == io.EOF || err == io.ErrUnexpectedEOF { + // This read from 0..fileHeaderSize-1 bytes + return nil, fh.finishAndClose(ErrorEncryptedFileTooShort) + } else if err != nil { + return nil, fh.finishAndClose(err) + } + // check the magic + if !bytes.Equal(readBuf[:fileMagicSize], fileMagicBytes) { + return nil, fh.finishAndClose(ErrorEncryptedBadMagic) + } + // retrieve the nonce + fh.nonce.fromBuf(readBuf[fileMagicSize:]) + fh.initialNonce = fh.nonce + return fh, nil +} + +// newDecrypterSeek creates a new file handle decrypting on the fly +func (c *Cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) { + var rc io.ReadCloser + doRangeSeek := false + setLimit := false + // Open initially with no seek + if offset == 0 && limit < 0 { + // If no offset or limit then open whole file + rc, err = open(ctx, 0, -1) + } else if offset == 0 { + // If no offset open the header + limit worth of the file + _, underlyingLimit, _, _ := calculateUnderlying(offset, limit) + rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit) + setLimit = true + } else { + // Otherwise just read the header to start with + rc, err = open(ctx, 0, int64(fileHeaderSize)) + doRangeSeek = true + } + if err != nil { + return nil, err + } + // Open the stream which fills in the nonce + fh, err = c.newDecrypter(rc) + if err != nil { + return nil, err + } + fh.open = open // will be called by fh.RangeSeek + if doRangeSeek { + _, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit) + if err != nil { + _ = fh.Close() + return nil, err + } + } + if setLimit { + fh.limit = limit + } + return fh, nil +} + +// read data into 
internal buffer - call with fh.mu held +func (fh *decrypter) fillBuffer() (err error) { + // FIXME should overlap the reads with a go-routine and 2 buffers? + readBuf := fh.readBuf + n, err := io.ReadFull(fh.rc, readBuf) + if n == 0 { + // err can't be nil since: + // n == len(buf) if and only if err == nil. + return err + } + // possibly err != nil here, but we will process the data and + // the next call to ReadFull will return 0, err + + // Check header + 1 byte exists + if n <= blockHeaderSize { + if err != nil { + return err // return pending error as it is likely more accurate + } + return ErrorEncryptedFileBadHeader + } + // Decrypt the block using the nonce + _, ok := secretbox.Open(fh.buf[:0], readBuf[:n], fh.nonce.pointer(), &fh.c.dataKey) + if !ok { + if err != nil { + return err // return pending error as it is likely more accurate + } + return ErrorEncryptedBadBlock + } + fh.bufIndex = 0 + fh.bufSize = n - blockHeaderSize + fh.nonce.increment() + return nil +} + +// Read as per io.Reader +func (fh *decrypter) Read(p []byte) (n int, err error) { + fh.mu.Lock() + defer fh.mu.Unlock() + + if fh.err != nil { + return 0, fh.err + } + if fh.bufIndex >= fh.bufSize { + err = fh.fillBuffer() + if err != nil { + return 0, fh.finish(err) + } + } + toCopy := fh.bufSize - fh.bufIndex + if fh.limit >= 0 && fh.limit < int64(toCopy) { + toCopy = int(fh.limit) + } + n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy]) + fh.bufIndex += n + if fh.limit >= 0 { + fh.limit -= int64(n) + if fh.limit == 0 { + return n, fh.finish(io.EOF) + } + } + return n, nil +} + +// calculateUnderlying converts an (offset, limit) in a crypted file +// into an (underlyingOffset, underlyingLimit) for the underlying +// file. +// +// It also returns number of bytes to discard after reading the first +// block and number of blocks this is from the start so the nonce can +// be incremented. +func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit, discard, blocks int64) { + // blocks we need to seek, plus bytes we need to discard + blocks, discard = offset/blockDataSize, offset%blockDataSize + + // Offset in underlying stream we need to seek + underlyingOffset = int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize) + + // work out how many blocks we need to read + underlyingLimit = int64(-1) + if limit >= 0 { + // bytes to read beyond the first block + bytesToRead := limit - (blockDataSize - discard) + + // Read the first block + blocksToRead := int64(1) + + if bytesToRead > 0 { + // Blocks that need to be read plus left over blocks + extraBlocksToRead, endBytes := bytesToRead/blockDataSize, bytesToRead%blockDataSize + if endBytes != 0 { + // If left over bytes must read another block + extraBlocksToRead++ + } + blocksToRead += extraBlocksToRead + } + + // Must read a whole number of blocks + underlyingLimit = blocksToRead * (blockHeaderSize + blockDataSize) + } + return +} + +// RangeSeek behaves like a call to Seek(offset int64, whence +// int) with the output wrapped in an io.LimitedReader +// limiting the total length to limit. +// +// RangeSeek with a limit of < 0 is equivalent to a regular Seek. 
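+//
+// A hedged worked example of the seek arithmetic (the numbers follow
+// from the constants above: blockDataSize 65536, blockHeaderSize 16,
+// fileHeaderSize 32):
+//
+//	blocks, discard := int64(70000)/65536, int64(70000)%65536 // 1, 4464
+//	underlyingOffset := 32 + blocks*(16+65536)                // 65584
+//
+// so a seek to plaintext offset 70000 reads the underlying file from
+// byte 65584 and throws away the first 4464 decrypted bytes.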
+func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) { + fh.mu.Lock() + defer fh.mu.Unlock() + + if fh.open == nil { + return 0, fh.finish(errors.New("can't seek - not initialised with newDecrypterSeek")) + } + if whence != io.SeekStart { + return 0, fh.finish(errors.New("can only seek from the start")) + } + + // Reset error or return it if not EOF + if fh.err == io.EOF { + fh.unFinish() + } else if fh.err != nil { + return 0, fh.err + } + + underlyingOffset, underlyingLimit, discard, blocks := calculateUnderlying(offset, limit) + + // Move the nonce on the correct number of blocks from the start + fh.nonce = fh.initialNonce + fh.nonce.add(uint64(blocks)) + + // Can we seek underlying stream directly? + if do, ok := fh.rc.(fs.RangeSeeker); ok { + // Seek underlying stream directly + _, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit) + if err != nil { + return 0, fh.finish(err) + } + } else { + // if not reopen with seek + _ = fh.rc.Close() // close underlying file + fh.rc = nil + + // Re-open the underlying object with the offset given + rc, err := fh.open(ctx, underlyingOffset, underlyingLimit) + if err != nil { + return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit")) + } + + // Set the file handle + fh.rc = rc + } + + // Fill the buffer + err := fh.fillBuffer() + if err != nil { + return 0, fh.finish(err) + } + + // Discard bytes from the buffer + if int(discard) > fh.bufSize { + return 0, fh.finish(ErrorBadSeek) + } + fh.bufIndex = int(discard) + + // Set the limit + fh.limit = limit + + return offset, nil +} + +// Seek implements the io.Seeker interface +func (fh *decrypter) Seek(offset int64, whence int) (int64, error) { + return fh.RangeSeek(context.TODO(), offset, whence, -1) +} + +// finish sets the final error and tidies up +func (fh *decrypter) finish(err error) error { + if fh.err != nil { + return fh.err + } + fh.err = err + fh.c.putBlock(fh.buf) + fh.buf = nil + fh.c.putBlock(fh.readBuf) + fh.readBuf = nil + return err +} + +// unFinish undoes the effects of finish +func (fh *decrypter) unFinish() { + // Clear error + fh.err = nil + + // reinstate the buffers + fh.buf = fh.c.getBlock() + fh.readBuf = fh.c.getBlock() + + // Empty the buffer + fh.bufIndex = 0 + fh.bufSize = 0 +} + +// Close +func (fh *decrypter) Close() error { + fh.mu.Lock() + defer fh.mu.Unlock() + + // Check already closed + if fh.err == ErrorFileClosed { + return fh.err + } + // Closed before reading EOF so not finish()ed yet + if fh.err == nil { + _ = fh.finish(io.EOF) + } + // Show file now closed + fh.err = ErrorFileClosed + if fh.rc == nil { + return nil + } + return fh.rc.Close() +} + +// finishAndClose does finish then Close() +// +// Used when we are returning a nil fh from new +func (fh *decrypter) finishAndClose(err error) error { + _ = fh.finish(err) + _ = fh.Close() + return err +} + +// DecryptData decrypts the data stream +func (c *Cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) { + out, err := c.newDecrypter(rc) + if err != nil { + return nil, err + } + return out, nil +} + +// DecryptDataSeek decrypts the data stream from offset +// +// The open function must return a ReadCloser opened to the offset supplied +// +// You must use this form of DecryptData if you might want to Seek the file handle +func (c *Cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) { + out, err := c.newDecrypterSeek(ctx, open, offset, limit) + if err != 
nil { + return nil, err + } + return out, nil +} + +// EncryptedSize calculates the size of the data when encrypted +func (c *Cipher) EncryptedSize(size int64) int64 { + blocks, residue := size/blockDataSize, size%blockDataSize + encryptedSize := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize) + if residue != 0 { + encryptedSize += blockHeaderSize + residue + } + return encryptedSize +} + +// DecryptedSize calculates the size of the data when decrypted +func (c *Cipher) DecryptedSize(size int64) (int64, error) { + size -= int64(fileHeaderSize) + if size < 0 { + return 0, ErrorEncryptedFileTooShort + } + blocks, residue := size/blockSize, size%blockSize + decryptedSize := blocks * blockDataSize + if residue != 0 { + residue -= blockHeaderSize + if residue <= 0 { + return 0, ErrorEncryptedFileBadHeader + } + } + decryptedSize += residue + return decryptedSize, nil +} + +// check interfaces +var ( + _ io.ReadCloser = (*decrypter)(nil) + _ io.Seeker = (*decrypter)(nil) + _ fs.RangeSeeker = (*decrypter)(nil) + _ io.Reader = (*encrypter)(nil) +) diff --git a/vendor/github.com/rclone/rclone/backend/crypt/crypt.go b/vendor/github.com/rclone/rclone/backend/crypt/crypt.go new file mode 100644 index 00000000000..f3794e745f0 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/crypt/crypt.go @@ -0,0 +1,1045 @@ +// Package crypt provides wrappers for Fs and Object which implement encryption +package crypt + +import ( + "context" + "fmt" + "io" + "path" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/cache" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fspath" + "github.com/rclone/rclone/fs/hash" +) + +// Globals +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "crypt", + Description: "Encrypt/Decrypt a remote", + NewFs: NewFs, + CommandHelp: commandHelp, + Options: []fs.Option{{ + Name: "remote", + Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, e.g. \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).", + Required: true, + }, { + Name: "filename_encryption", + Help: "How to encrypt the filenames.", + Default: "standard", + Examples: []fs.OptionExample{ + { + Value: "standard", + Help: "Encrypt the filenames see the docs for the details.", + }, { + Value: "obfuscate", + Help: "Very simple filename obfuscation.", + }, { + Value: "off", + Help: "Don't encrypt the file names. Adds a \".bin\" extension only.", + }, + }, + }, { + Name: "directory_name_encryption", + Help: `Option to either encrypt directory names or leave them intact. + +NB If filename_encryption is "off" then this option will do nothing.`, + Default: true, + Examples: []fs.OptionExample{ + { + Value: "true", + Help: "Encrypt directory names.", + }, + { + Value: "false", + Help: "Don't encrypt directory names, leave them intact.", + }, + }, + }, { + Name: "password", + Help: "Password or pass phrase for encryption.", + IsPassword: true, + Required: true, + }, { + Name: "password2", + Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.", + IsPassword: true, + }, { + Name: "server_side_across_configs", + Default: false, + Help: `Allow server-side operations (e.g. copy) to work across different crypt configs. 
+ +Normally this option is not what you want, but if you have two crypts +pointing to the same backend you can use it. + +This can be used, for example, to change file name encryption type +without re-uploading all the data. Just make two crypt backends +pointing to two different directories with the single changed +parameter and use rclone move to move the files between the crypt +remotes.`, + Advanced: true, + }, { + Name: "show_mapping", + Help: `For all files listed show how the names encrypt. + +If this flag is set then for each file that the remote is asked to +list, it will log (at level INFO) a line stating the decrypted file +name and the encrypted file name. + +This is so you can work out which encrypted names are which decrypted +names just in case you need to do something with the encrypted file +names, or for debugging purposes.`, + Default: false, + Hide: fs.OptionHideConfigurator, + Advanced: true, + }}, + }) +} + +// newCipherForConfig constructs a Cipher for the given config name +func newCipherForConfig(opt *Options) (*Cipher, error) { + mode, err := NewNameEncryptionMode(opt.FilenameEncryption) + if err != nil { + return nil, err + } + if opt.Password == "" { + return nil, errors.New("password not set in config file") + } + password, err := obscure.Reveal(opt.Password) + if err != nil { + return nil, errors.Wrap(err, "failed to decrypt password") + } + var salt string + if opt.Password2 != "" { + salt, err = obscure.Reveal(opt.Password2) + if err != nil { + return nil, errors.Wrap(err, "failed to decrypt password2") + } + } + cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption) + if err != nil { + return nil, errors.Wrap(err, "failed to make cipher") + } + return cipher, nil +} + +// NewCipher constructs a Cipher for the given config +func NewCipher(m configmap.Mapper) (*Cipher, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + return newCipherForConfig(opt) +} + +// NewFs constructs an Fs from the path, container:path +func NewFs(ctx context.Context, name, rpath string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + cipher, err := newCipherForConfig(opt) + if err != nil { + return nil, err + } + remote := opt.Remote + if strings.HasPrefix(remote, name+":") { + return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting") + } + // Make sure to remove trailing . referring to the current dir + if path.Base(rpath) == "." 
{ + rpath = strings.TrimSuffix(rpath, ".") + } + // Look for a file first + var wrappedFs fs.Fs + if rpath == "" { + wrappedFs, err = cache.Get(ctx, remote) + } else { + remotePath := fspath.JoinRootPath(remote, cipher.EncryptFileName(rpath)) + wrappedFs, err = cache.Get(ctx, remotePath) + // if that didn't produce a file, look for a directory + if err != fs.ErrorIsFile { + remotePath = fspath.JoinRootPath(remote, cipher.EncryptDirName(rpath)) + wrappedFs, err = cache.Get(ctx, remotePath) + } + } + if err != fs.ErrorIsFile && err != nil { + return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remote) + } + f := &Fs{ + Fs: wrappedFs, + name: name, + root: rpath, + opt: *opt, + cipher: cipher, + } + cache.PinUntilFinalized(f.Fs, f) + // the features here are ones we could support, and they are + // ANDed with the ones from wrappedFs + f.features = (&fs.Features{ + CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff, + DuplicateFiles: true, + ReadMimeType: false, // MimeTypes not supported with crypt + WriteMimeType: false, + BucketBased: true, + CanHaveEmptyDirectories: true, + SetTier: true, + GetTier: true, + ServerSideAcrossConfigs: opt.ServerSideAcrossConfigs, + }).Fill(ctx, f).Mask(ctx, wrappedFs).WrapsFs(f, wrappedFs) + + return f, err +} + +// Options defines the configuration for this backend +type Options struct { + Remote string `config:"remote"` + FilenameEncryption string `config:"filename_encryption"` + DirectoryNameEncryption bool `config:"directory_name_encryption"` + Password string `config:"password"` + Password2 string `config:"password2"` + ServerSideAcrossConfigs bool `config:"server_side_across_configs"` + ShowMapping bool `config:"show_mapping"` +} + +// Fs represents a wrapped fs.Fs +type Fs struct { + fs.Fs + wrapper fs.Fs + name string + root string + opt Options + features *fs.Features // optional features + cipher *Cipher +} + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// String returns a description of the FS +func (f *Fs) String() string { + return fmt.Sprintf("Encrypted drive '%s:%s'", f.name, f.root) +} + +// Encrypt an object file name to entries. +func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) { + remote := obj.Remote() + decryptedRemote, err := f.cipher.DecryptFileName(remote) + if err != nil { + fs.Debugf(remote, "Skipping undecryptable file name: %v", err) + return + } + if f.opt.ShowMapping { + fs.Logf(decryptedRemote, "Encrypts to %q", remote) + } + *entries = append(*entries, f.newObject(obj)) +} + +// Encrypt a directory file name to entries. +func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) { + remote := dir.Remote() + decryptedRemote, err := f.cipher.DecryptDirName(remote) + if err != nil { + fs.Debugf(remote, "Skipping undecryptable dir name: %v", err) + return + } + if f.opt.ShowMapping { + fs.Logf(decryptedRemote, "Encrypts to %q", remote) + } + *entries = append(*entries, f.newDir(ctx, dir)) +} + +// Encrypt some directory entries. This alters entries returning it as newEntries. 
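+// (It filters in place via entries[:0], the usual Go idiom for reusing
+// the backing array, so callers must not hold on to the input slice.)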
+func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) { + newEntries = entries[:0] // in place filter + for _, entry := range entries { + switch x := entry.(type) { + case fs.Object: + f.add(&newEntries, x) + case fs.Directory: + f.addDir(ctx, &newEntries, x) + default: + return nil, errors.Errorf("Unknown object type %T", entry) + } + } + return newEntries, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir)) + if err != nil { + return nil, err + } + return f.encryptEntries(ctx, entries) +} + +// ListR lists the objects and directories of the Fs starting +// from dir recursively into out. +// +// dir should be "" to start from the root, and should not +// have trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +// +// It should call callback for each tranche of entries read. +// These need not be returned in any particular order. If +// callback returns an error then the listing will stop +// immediately. +// +// Don't implement this unless you have a more efficient way +// of listing recursively that doing a directory traversal. +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { + return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error { + newEntries, err := f.encryptEntries(ctx, entries) + if err != nil { + return err + } + return callback(newEntries) + }) +} + +// NewObject finds the Object at remote. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote)) + if err != nil { + return nil, err + } + return f.newObject(o), nil +} + +type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) + +// put implements Put or PutStream +func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) { + // Encrypt the data into wrappedIn + wrappedIn, encrypter, err := f.cipher.encryptData(in) + if err != nil { + return nil, err + } + + // Find a hash the destination supports to compute a hash of + // the encrypted data + ht := f.Fs.Hashes().GetOne() + var hasher *hash.MultiHasher + if ht != hash.None { + hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht)) + if err != nil { + return nil, err + } + // unwrap the accounting + var wrap accounting.WrapFn + wrappedIn, wrap = accounting.UnWrap(wrappedIn) + // add the hasher + wrappedIn = io.TeeReader(wrappedIn, hasher) + // wrap the accounting back on + wrappedIn = wrap(wrappedIn) + } + + // Transfer the data + o, err := put(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce), options...) 
+ if err != nil { + return nil, err + } + + // Check the hashes of the encrypted data if we were comparing them + if ht != hash.None && hasher != nil { + srcHash := hasher.Sums()[ht] + var dstHash string + dstHash, err = o.Hash(ctx, ht) + if err != nil { + return nil, errors.Wrap(err, "failed to read destination hash") + } + if srcHash != "" && dstHash != "" && srcHash != dstHash { + // remove object + err = o.Remove(ctx) + if err != nil { + fs.Errorf(o, "Failed to remove corrupted object: %v", err) + } + return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash) + } + } + + return f.newObject(o), nil +} + +// Put in to the remote path with the modTime given of the given size +// +// May create the object even if it returns an error - if so +// will return the object and the error, otherwise will return +// nil and the error +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.put(ctx, in, src, options, f.Fs.Put) +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.put(ctx, in, src, options, f.Fs.Features().PutStream) +} + +// Hashes returns the supported hash sets. +func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.None) +} + +// Mkdir makes the directory (container, bucket) +// +// Shouldn't return an error if it already exists +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir)) +} + +// Rmdir removes the directory (container, bucket) if empty +// +// Return an error if it doesn't exist or isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir)) +} + +// Purge all files in the directory specified +// +// Implement this if you have a way of deleting all the files +// quicker than just running Remove() on the result of List() +// +// Return an error if it doesn't exist +func (f *Fs) Purge(ctx context.Context, dir string) error { + do := f.Fs.Features().Purge + if do == nil { + return fs.ErrorCantPurge + } + return do(ctx, f.cipher.EncryptDirName(dir)) +} + +// Copy src to this remote using server-side copy operations. +// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + do := f.Fs.Features().Copy + if do == nil { + return nil, fs.ErrorCantCopy + } + o, ok := src.(*Object) + if !ok { + return nil, fs.ErrorCantCopy + } + oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) + if err != nil { + return nil, err + } + return f.newObject(oResult), nil +} + +// Move src to this remote using server-side move operations. 
+// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + do := f.Fs.Features().Move + if do == nil { + return nil, fs.ErrorCantMove + } + o, ok := src.(*Object) + if !ok { + return nil, fs.ErrorCantMove + } + oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote)) + if err != nil { + return nil, err + } + return f.newObject(oResult), nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server-side move operations. +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + do := f.Fs.Features().DirMove + if do == nil { + return fs.ErrorCantDirMove + } + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote)) +} + +// PutUnchecked uploads the object +// +// This will create a duplicate if we upload a new file without +// checking to see if there is one already - use Put() for that. +func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + do := f.Fs.Features().PutUnchecked + if do == nil { + return nil, errors.New("can't PutUnchecked") + } + wrappedIn, encrypter, err := f.cipher.encryptData(in) + if err != nil { + return nil, err + } + o, err := do(ctx, wrappedIn, f.newObjectInfo(src, encrypter.nonce)) + if err != nil { + return nil, err + } + return f.newObject(o), nil +} + +// CleanUp the trash in the Fs +// +// Implement this if you have a way of emptying the trash or +// otherwise cleaning up old versions of files. +func (f *Fs) CleanUp(ctx context.Context) error { + do := f.Fs.Features().CleanUp + if do == nil { + return errors.New("can't CleanUp") + } + return do(ctx) +} + +// About gets quota information from the Fs +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + do := f.Fs.Features().About + if do == nil { + return nil, errors.New("About not supported") + } + return do(ctx) +} + +// UnWrap returns the Fs that this Fs is wrapping +func (f *Fs) UnWrap() fs.Fs { + return f.Fs +} + +// WrapFs returns the Fs that is wrapping this Fs +func (f *Fs) WrapFs() fs.Fs { + return f.wrapper +} + +// SetWrapper sets the Fs that is wrapping this Fs +func (f *Fs) SetWrapper(wrapper fs.Fs) { + f.wrapper = wrapper +} + +// EncryptFileName returns an encrypted file name +func (f *Fs) EncryptFileName(fileName string) string { + return f.cipher.EncryptFileName(fileName) +} + +// DecryptFileName returns a decrypted file name +func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) { + return f.cipher.DecryptFileName(encryptedFileName) +} + +// computeHashWithNonce takes the nonce and encrypts the contents of +// src with it, and calculates the hash given by HashType on the fly +// +// Note that we break lots of encapsulation in this function. 
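+//
+// A hedged sketch of the flow inside (these are the real calls below,
+// abridged and with error handling elided):
+//
+//	in, _ := src.Open(ctx)                      // plaintext source
+//	enc, _ := f.cipher.newEncrypter(in, &nonce) // re-encrypt deterministically
+//	_, _ = io.Copy(hasher, enc)                 // hash the ciphertext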
+func (f *Fs) computeHashWithNonce(ctx context.Context, nonce nonce, src fs.Object, hashType hash.Type) (hashStr string, err error) { + // Open the src for input + in, err := src.Open(ctx) + if err != nil { + return "", errors.Wrap(err, "failed to open src") + } + defer fs.CheckClose(in, &err) + + // Now encrypt the src with the nonce + out, err := f.cipher.newEncrypter(in, &nonce) + if err != nil { + return "", errors.Wrap(err, "failed to make encrypter") + } + + // pipe into hash + m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType)) + if err != nil { + return "", errors.Wrap(err, "failed to make hasher") + } + _, err = io.Copy(m, out) + if err != nil { + return "", errors.Wrap(err, "failed to hash data") + } + + return m.Sums()[hashType], nil +} + +// ComputeHash takes the nonce from o, and encrypts the contents of +// src with it, and calculates the hash given by HashType on the fly +// +// Note that we break lots of encapsulation in this function. +func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) { + // Read the nonce - opening the file is sufficient to read the nonce in + // use a limited read so we only read the header + in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1}) + if err != nil { + return "", errors.Wrap(err, "failed to open object to read nonce") + } + d, err := f.cipher.newDecrypter(in) + if err != nil { + _ = in.Close() + return "", errors.Wrap(err, "failed to open object to read nonce") + } + nonce := d.nonce + // fs.Debugf(o, "Read nonce % 2x", nonce) + + // Check nonce isn't all zeros + isZero := true + for i := range nonce { + if nonce[i] != 0 { + isZero = false + } + } + if isZero { + fs.Errorf(o, "empty nonce read") + } + + // Close d (and hence in) once we have read the nonce + err = d.Close() + if err != nil { + return "", errors.Wrap(err, "failed to close nonce read") + } + + return f.computeHashWithNonce(ctx, nonce, src, hashType) +} + +// MergeDirs merges the contents of all the directories passed +// in into the first one and rmdirs the other directories. +func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error { + do := f.Fs.Features().MergeDirs + if do == nil { + return errors.New("MergeDirs not supported") + } + out := make([]fs.Directory, len(dirs)) + for i, dir := range dirs { + out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote())) + } + return do(ctx, out) +} + +// DirCacheFlush resets the directory cache - used in testing +// as an optional interface +func (f *Fs) DirCacheFlush() { + do := f.Fs.Features().DirCacheFlush + if do != nil { + do() + } +} + +// PublicLink generates a public link to the remote path (usually readable by anyone) +func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (string, error) { + do := f.Fs.Features().PublicLink + if do == nil { + return "", errors.New("PublicLink not supported") + } + o, err := f.NewObject(ctx, remote) + if err != nil { + // assume it is a directory + return do(ctx, f.cipher.EncryptDirName(remote), expire, unlink) + } + return do(ctx, o.(*Object).Object.Remote(), expire, unlink) +} + +// ChangeNotify calls the passed function with a path +// that has had changes. If the implementation +// uses polling, it should adhere to the given interval. 
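+//
+// (Paths arrive from the wrapped remote in encrypted form; the wrapper
+// below decrypts them before calling notifyFunc and drops, with a log
+// message, any path that fails to decrypt.)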
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) { + do := f.Fs.Features().ChangeNotify + if do == nil { + return + } + wrappedNotifyFunc := func(path string, entryType fs.EntryType) { + // fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType) + var ( + err error + decrypted string + ) + switch entryType { + case fs.EntryDirectory: + decrypted, err = f.cipher.DecryptDirName(path) + case fs.EntryObject: + decrypted, err = f.cipher.DecryptFileName(path) + default: + fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType) + return + } + if err != nil { + fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err) + return + } + notifyFunc(decrypted, entryType) + } + do(ctx, wrappedNotifyFunc, pollIntervalChan) +} + +var commandHelp = []fs.CommandHelp{ + { + Name: "encode", + Short: "Encode the given filename(s)", + Long: `This encodes the filenames given as arguments returning a list of +strings of the encoded results. + +Usage Example: + + rclone backend encode crypt: file1 [file2...] + rclone rc backend/command command=encode fs=crypt: file1 [file2...] +`, + }, + { + Name: "decode", + Short: "Decode the given filename(s)", + Long: `This decodes the filenames given as arguments returning a list of +strings of the decoded results. It will return an error if any of the +inputs are invalid. + +Usage Example: + + rclone backend decode crypt: encryptedfile1 [encryptedfile2...] + rclone rc backend/command command=decode fs=crypt: encryptedfile1 [encryptedfile2...] +`, + }, +} + +// Command the backend to run a named command +// +// The command run is name +// args may be used to read arguments from +// opts may be used to read optional arguments from +// +// The result should be capable of being JSON encoded +// If it is a string or a []string it will be shown to the user +// otherwise it will be JSON encoded and shown to the user like that +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) { + switch name { + case "decode": + out := make([]string, 0, len(arg)) + for _, encryptedFileName := range arg { + fileName, err := f.DecryptFileName(encryptedFileName) + if err != nil { + return out, errors.Wrap(err, fmt.Sprintf("Failed to decrypt : %s", encryptedFileName)) + } + out = append(out, fileName) + } + return out, nil + case "encode": + out := make([]string, 0, len(arg)) + for _, fileName := range arg { + encryptedFileName := f.EncryptFileName(fileName) + out = append(out, encryptedFileName) + } + return out, nil + default: + return nil, fs.ErrorCommandNotFound + } +} + +// Object describes a wrapped for being read from the Fs +// +// This decrypts the remote name and decrypts the data +type Object struct { + fs.Object + f *Fs +} + +func (f *Fs) newObject(o fs.Object) *Object { + return &Object{ + Object: o, + f: f, + } +} + +// Fs returns read only access to the Fs that this object is part of +func (o *Object) Fs() fs.Info { + return o.f +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.Remote() +} + +// Remote returns the remote path +func (o *Object) Remote() string { + remote := o.Object.Remote() + decryptedName, err := o.f.cipher.DecryptFileName(remote) + if err != nil { + fs.Debugf(remote, "Undecryptable file name: %v", err) + return remote + } + return decryptedName +} + +// Size returns the size of the file +func (o *Object) Size() int64 { 
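+	// The wrapped object reports the ciphertext size, which includes the
+	// file header and per-block secretbox overhead; convert back to the
+	// plaintext size.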
+ size, err := o.f.cipher.DecryptedSize(o.Object.Size()) + if err != nil { + fs.Debugf(o, "Bad size for decrypt: %v", err) + } + return size +} + +// Hash returns the selected checksum of the file +// If no checksum is available it returns "" +func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) { + return "", hash.ErrUnsupported +} + +// UnWrap returns the wrapped Object +func (o *Object) UnWrap() fs.Object { + return o.Object +} + +// Open opens the file for read. Call Close() on the returned io.ReadCloser +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) { + var openOptions []fs.OpenOption + var offset, limit int64 = 0, -1 + for _, option := range options { + switch x := option.(type) { + case *fs.SeekOption: + offset = x.Offset + case *fs.RangeOption: + offset, limit = x.Decode(o.Size()) + default: + // pass on Options to underlying open if appropriate + openOptions = append(openOptions, option) + } + } + rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) { + if underlyingOffset == 0 && underlyingLimit < 0 { + // Open with no seek + return o.Object.Open(ctx, openOptions...) + } + // Open stream with a range of underlyingOffset, underlyingLimit + end := int64(-1) + if underlyingLimit >= 0 { + end = underlyingOffset + underlyingLimit - 1 + if end >= o.Object.Size() { + end = -1 + } + } + newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end}) + return o.Object.Open(ctx, newOpenOptions...) + }, offset, limit) + if err != nil { + return nil, err + } + return rc, nil +} + +// Update in to the object with the modTime given of the given size +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return o.Object, o.Object.Update(ctx, in, src, options...) + } + _, err := o.f.put(ctx, in, src, options, update) + return err +} + +// newDir returns a dir with the Name decrypted +func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory { + newDir := fs.NewDirCopy(ctx, dir) + remote := dir.Remote() + decryptedRemote, err := f.cipher.DecryptDirName(remote) + if err != nil { + fs.Debugf(remote, "Undecryptable dir name: %v", err) + } else { + newDir.SetRemote(decryptedRemote) + } + return newDir +} + +// UserInfo returns info about the connected user +func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) { + do := f.Fs.Features().UserInfo + if do == nil { + return nil, fs.ErrorNotImplemented + } + return do(ctx) +} + +// Disconnect the current user +func (f *Fs) Disconnect(ctx context.Context) error { + do := f.Fs.Features().Disconnect + if do == nil { + return fs.ErrorNotImplemented + } + return do(ctx) +} + +// Shutdown the backend, closing any background tasks and any +// cached connections. 
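+//
+// Unlike UserInfo and Disconnect above, a missing Shutdown feature in
+// the wrapped backend is treated as a no-op rather than an error, so a
+// caller can always do something like (sketch only):
+//
+//	if err := f.Shutdown(ctx); err != nil {
+//		fs.Errorf(f, "Shutdown failed: %v", err)
+//	}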
+func (f *Fs) Shutdown(ctx context.Context) error { + do := f.Fs.Features().Shutdown + if do == nil { + return nil + } + return do(ctx) +} + +// ObjectInfo describes a wrapped fs.ObjectInfo for being the source +// +// This encrypts the remote name and adjusts the size +type ObjectInfo struct { + fs.ObjectInfo + f *Fs + nonce nonce +} + +func (f *Fs) newObjectInfo(src fs.ObjectInfo, nonce nonce) *ObjectInfo { + return &ObjectInfo{ + ObjectInfo: src, + f: f, + nonce: nonce, + } +} + +// Fs returns read only access to the Fs that this object is part of +func (o *ObjectInfo) Fs() fs.Info { + return o.f +} + +// Remote returns the remote path +func (o *ObjectInfo) Remote() string { + return o.f.cipher.EncryptFileName(o.ObjectInfo.Remote()) +} + +// Size returns the size of the file +func (o *ObjectInfo) Size() int64 { + size := o.ObjectInfo.Size() + if size < 0 { + return size + } + return o.f.cipher.EncryptedSize(size) +} + +// Hash returns the selected checksum of the file +// If no checksum is available it returns "" +func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) { + var srcObj fs.Object + var ok bool + // Get the underlying object if there is one + if srcObj, ok = o.ObjectInfo.(fs.Object); ok { + // Prefer direct interface assertion + } else if do, ok := o.ObjectInfo.(fs.ObjectUnWrapper); ok { + // Otherwise likely is an operations.OverrideRemote + srcObj = do.UnWrap() + } else { + return "", nil + } + // if this is wrapping a local object then we work out the hash + if srcObj.Fs().Features().IsLocal { + // Read the data and encrypt it to calculate the hash + fs.Debugf(o, "Computing %v hash of encrypted source", hash) + return o.f.computeHashWithNonce(ctx, o.nonce, srcObj, hash) + } + return "", nil +} + +// ID returns the ID of the Object if known, or "" if not +func (o *Object) ID() string { + do, ok := o.Object.(fs.IDer) + if !ok { + return "" + } + return do.ID() +} + +// SetTier performs changing storage tier of the Object if +// multiple storage classes supported +func (o *Object) SetTier(tier string) error { + do, ok := o.Object.(fs.SetTierer) + if !ok { + return errors.New("crypt: underlying remote does not support SetTier") + } + return do.SetTier(tier) +} + +// GetTier returns storage tier or class of the Object +func (o *Object) GetTier() string { + do, ok := o.Object.(fs.GetTierer) + if !ok { + return "" + } + return do.GetTier() +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = (*Fs)(nil) + _ fs.Purger = (*Fs)(nil) + _ fs.Copier = (*Fs)(nil) + _ fs.Mover = (*Fs)(nil) + _ fs.DirMover = (*Fs)(nil) + _ fs.Commander = (*Fs)(nil) + _ fs.PutUncheckeder = (*Fs)(nil) + _ fs.PutStreamer = (*Fs)(nil) + _ fs.CleanUpper = (*Fs)(nil) + _ fs.UnWrapper = (*Fs)(nil) + _ fs.ListRer = (*Fs)(nil) + _ fs.Abouter = (*Fs)(nil) + _ fs.Wrapper = (*Fs)(nil) + _ fs.MergeDirser = (*Fs)(nil) + _ fs.DirCacheFlusher = (*Fs)(nil) + _ fs.ChangeNotifier = (*Fs)(nil) + _ fs.PublicLinker = (*Fs)(nil) + _ fs.UserInfoer = (*Fs)(nil) + _ fs.Disconnecter = (*Fs)(nil) + _ fs.Shutdowner = (*Fs)(nil) + _ fs.ObjectInfo = (*ObjectInfo)(nil) + _ fs.Object = (*Object)(nil) + _ fs.ObjectUnWrapper = (*Object)(nil) + _ fs.IDer = (*Object)(nil) + _ fs.SetTierer = (*Object)(nil) + _ fs.GetTierer = (*Object)(nil) +) diff --git a/vendor/github.com/rclone/rclone/backend/crypt/pkcs7/pkcs7.go b/vendor/github.com/rclone/rclone/backend/crypt/pkcs7/pkcs7.go new file mode 100644 index 00000000000..e6d9d0fd90a --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/crypt/pkcs7/pkcs7.go 
@@ -0,0 +1,63 @@ +// Package pkcs7 implements PKCS#7 padding +// +// This is a standard way of encoding variable length buffers into +// buffers which are a multiple of an underlying crypto block size. +package pkcs7 + +import "github.com/pkg/errors" + +// Errors Unpad can return +var ( + ErrorPaddingNotFound = errors.New("Bad PKCS#7 padding - not padded") + ErrorPaddingNotAMultiple = errors.New("Bad PKCS#7 padding - not a multiple of blocksize") + ErrorPaddingTooLong = errors.New("Bad PKCS#7 padding - too long") + ErrorPaddingTooShort = errors.New("Bad PKCS#7 padding - too short") + ErrorPaddingNotAllTheSame = errors.New("Bad PKCS#7 padding - not all the same") +) + +// Pad buf using PKCS#7 to a multiple of n. +// +// Appends the padding to buf - make a copy of it first if you don't +// want it modified. +func Pad(n int, buf []byte) []byte { + if n <= 1 || n >= 256 { + panic("bad multiple") + } + length := len(buf) + padding := n - (length % n) + for i := 0; i < padding; i++ { + buf = append(buf, byte(padding)) + } + if (len(buf) % n) != 0 { + panic("padding failed") + } + return buf +} + +// Unpad buf using PKCS#7 from a multiple of n returning a slice of +// buf or an error if malformed. +func Unpad(n int, buf []byte) ([]byte, error) { + if n <= 1 || n >= 256 { + panic("bad multiple") + } + length := len(buf) + if length == 0 { + return nil, ErrorPaddingNotFound + } + if (length % n) != 0 { + return nil, ErrorPaddingNotAMultiple + } + padding := int(buf[length-1]) + if padding > n { + return nil, ErrorPaddingTooLong + } + if padding == 0 { + return nil, ErrorPaddingTooShort + } + for i := 0; i < padding; i++ { + if buf[length-1-i] != byte(padding) { + return nil, ErrorPaddingNotAllTheSame + } + } + return buf[:length-padding], nil +} diff --git a/vendor/github.com/rclone/rclone/backend/googlecloudstorage/googlecloudstorage.go b/vendor/github.com/rclone/rclone/backend/googlecloudstorage/googlecloudstorage.go new file mode 100644 index 00000000000..e96e4f5d9ad --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/googlecloudstorage/googlecloudstorage.go @@ -0,0 +1,1212 @@ +// Package googlecloudstorage provides an interface to Google Cloud Storage +package googlecloudstorage + +/* +Notes + +Can't set Updated but can set Metadata on object creation + +Patch needs full_control not just read_write + +FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error +- https://code.google.com/p/google-api-go-client/issues/detail?id=64 +*/ + +import ( + "context" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "path" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/walk" + "github.com/rclone/rclone/lib/bucket" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/env" + "github.com/rclone/rclone/lib/oauthutil" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/pool" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/googleapi" + + // NOTE: This API is deprecated + storage "google.golang.org/api/storage/v1" +) + +const ( + rcloneClientID = "202264815644.apps.googleusercontent.com" + 
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
+ timeFormatIn = time.RFC3339
+ timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
+ metaMtime = "mtime" // key to store mtime under in metadata
+ listChunks = 1000 // chunk size to read directory listings
+ minSleep = 10 * time.Millisecond
+
+ memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long
+ memoryPoolUseMmap = false
+)
+
+var (
+ // Description of how to auth for this app
+ storageConfig = &oauth2.Config{
+ Scopes: []string{storage.DevstorageReadWriteScope},
+ Endpoint: google.Endpoint,
+ ClientID: rcloneClientID,
+ ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
+ RedirectURL: oauthutil.TitleBarRedirectURL,
+ }
+)
+
+// Register with Fs
+func init() {
+ fs.Register(&fs.RegInfo{
+ Name: "google cloud storage",
+ Prefix: "gcs",
+ Description: "Google Cloud Storage (this is not Google Drive)",
+ NewFs: NewFs,
+ Config: func(ctx context.Context, name string, m configmap.Mapper) {
+ saFile, _ := m.Get("service_account_file")
+ saCreds, _ := m.Get("service_account_credentials")
+ anonymous, _ := m.Get("anonymous")
+ if saFile != "" || saCreds != "" || anonymous == "true" {
+ return
+ }
+ err := oauthutil.Config(ctx, "google cloud storage", name, m, storageConfig, nil)
+ if err != nil {
+ log.Fatalf("Failed to configure token: %v", err)
+ }
+ },
+ Options: append(oauthutil.SharedOptions, []fs.Option{{
+ Name: "project_number",
+ Help: "Project number.\nOptional - needed only for list/create/delete buckets - see your developer console.",
+ }, {
+ Name: "service_account_file",
+ Help: "Service Account Credentials JSON file path\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login."
+ env.ShellExpandHelp,
+ }, {
+ Name: "service_account_credentials",
+ Help: "Service Account Credentials JSON blob\nLeave blank normally.\nNeeded only if you want to use SA instead of interactive login.",
+ Hide: fs.OptionHideBoth,
+ }, {
+ Name: "anonymous",
+ Help: "Access public buckets and objects without credentials\nSet to 'true' if you just want to download files and don't configure credentials.",
+ Default: false,
+ }, {
+ Name: "object_acl",
+ Help: "Access Control List for new objects.",
+ Examples: []fs.OptionExample{{
+ Value: "authenticatedRead",
+ Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
+ }, {
+ Value: "bucketOwnerFullControl",
+ Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
+ }, {
+ Value: "bucketOwnerRead",
+ Help: "Object owner gets OWNER access, and project team owners get READER access.",
+ }, {
+ Value: "private",
+ Help: "Object owner gets OWNER access [default if left blank].",
+ }, {
+ Value: "projectPrivate",
+ Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
+ }, {
+ Value: "publicRead",
+ Help: "Object owner gets OWNER access, and all Users get READER access.",
+ }},
+ }, {
+ Name: "bucket_acl",
+ Help: "Access Control List for new buckets.",
+ Examples: []fs.OptionExample{{
+ Value: "authenticatedRead",
+ Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
+ }, {
+ Value: "private",
+ Help: "Project team owners get OWNER access [default if left blank].",
+ }, {
+ Value: "projectPrivate",
+ Help: "Project team members get access according to their roles.",
+ }, {
+ Value: "publicRead",
+ Help: "Project team owners get OWNER access, and all Users get READER access.",
+ }, {
+ Value: "publicReadWrite",
+ Help: "Project team owners get OWNER access, and all Users get WRITER access.",
+ }},
+ }, {
+ Name: "bucket_policy_only",
+ Help: `Access checks should use bucket-level IAM policies.
+
+If you want to upload objects to a bucket with Bucket Policy Only set
+then you will need to set this.
+
+When it is set, rclone:
+
+- ignores ACLs set on buckets
+- ignores ACLs set on objects
+- creates buckets with Bucket Policy Only set
+
+Docs: https://cloud.google.com/storage/docs/bucket-policy-only
+`,
+ Default: false,
+ }, {
+ Name: "location",
+ Help: "Location for the newly created buckets.",
+ Examples: []fs.OptionExample{{
+ Value: "",
+ Help: "Empty for default location (US).",
+ }, {
+ Value: "asia",
+ Help: "Multi-regional location for Asia.",
+ }, {
+ Value: "eu",
+ Help: "Multi-regional location for Europe.",
+ }, {
+ Value: "us",
+ Help: "Multi-regional location for United States.",
+ }, {
+ Value: "asia-east1",
+ Help: "Taiwan.",
+ }, {
+ Value: "asia-east2",
+ Help: "Hong Kong.",
+ }, {
+ Value: "asia-northeast1",
+ Help: "Tokyo.",
+ }, {
+ Value: "asia-south1",
+ Help: "Mumbai.",
+ }, {
+ Value: "asia-southeast1",
+ Help: "Singapore.",
+ }, {
+ Value: "australia-southeast1",
+ Help: "Sydney.",
+ }, {
+ Value: "europe-north1",
+ Help: "Finland.",
+ }, {
+ Value: "europe-west1",
+ Help: "Belgium.",
+ }, {
+ Value: "europe-west2",
+ Help: "London.",
+ }, {
+ Value: "europe-west3",
+ Help: "Frankfurt.",
+ }, {
+ Value: "europe-west4",
+ Help: "Netherlands.",
+ }, {
+ Value: "us-central1",
+ Help: "Iowa.",
+ }, {
+ Value: "us-east1",
+ Help: "South Carolina.",
+ }, {
+ Value: "us-east4",
+ Help: "Northern Virginia.",
+ }, {
+ Value: "us-west1",
+ Help: "Oregon.",
+ }, {
+ Value: "us-west2",
+ Help: "California.",
+ }},
+ }, {
+ Name: "storage_class",
+ Help: "The storage class to use when storing objects in Google Cloud Storage.",
+ Examples: []fs.OptionExample{{
+ Value: "",
+ Help: "Default",
+ }, {
+ Value: "MULTI_REGIONAL",
+ Help: "Multi-regional storage class",
+ }, {
+ Value: "REGIONAL",
+ Help: "Regional storage class",
+ }, {
+ Value: "NEARLINE",
+ Help: "Nearline storage class",
+ }, {
+ Value: "COLDLINE",
+ Help: "Coldline storage class",
+ }, {
+ Value: "ARCHIVE",
+ Help: "Archive storage class",
+ }, {
+ Value: "DURABLE_REDUCED_AVAILABILITY",
+ Help: "Durable reduced availability storage class",
+ }},
+ }, {
+ Name: config.ConfigEncoding,
+ Help: config.ConfigEncodingHelp,
+ Advanced: true,
+ Default: (encoder.Base |
+ encoder.EncodeCrLf |
+ encoder.EncodeInvalidUtf8),
+ }, {
+ Name: "memory_pool_flush_time",
+ Default: memoryPoolFlushTime,
+ Advanced: true,
+ Help: `How often internal memory buffer pools will be flushed.
+Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations.
+This option controls how often unused buffers will be removed from the pool.`,
+ }, {
+ Name: "memory_pool_use_mmap",
+ Default: memoryPoolUseMmap,
+ Advanced: true,
+ Help: `Whether to use mmap buffers in internal memory pool.`,
+ }, {
+ Name: "chunk_size",
+ Help: `Chunk size to use for uploading.
+
+When uploading large files or files with unknown
+size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google
+photos or google docs) they will be uploaded as multi chunk uploads
+using this chunk size.
+
+Files which contain fewer than size bytes will be uploaded in a single request.
+Files which contain size bytes or more will be uploaded in separate chunks.
+If size is zero, media will be uploaded in a single request.
+`,
+ Default: googleapi.DefaultUploadChunkSize,
+ Advanced: true,
+ }, {
+ Name: "list_chunk",
+ Help: `How many items are returned in one chunk during directory listing`,
+ Advanced: true,
+ Default: listChunks,
+ }, {
+ Name: "allow_create_bucket",
+ Help: `Whether to create the bucket if it doesn't exist.
+If set to false and the bucket doesn't exist, an error will be returned.`,
+ Default: true,
+ }}...),
+ })
+}
+
+// Options defines the configuration for this backend
+type Options struct {
+ ProjectNumber string `config:"project_number"`
+ ServiceAccountFile string `config:"service_account_file"`
+ ServiceAccountCredentials string `config:"service_account_credentials"`
+ Anonymous bool `config:"anonymous"`
+ ObjectACL string `config:"object_acl"`
+ BucketACL string `config:"bucket_acl"`
+ BucketPolicyOnly bool `config:"bucket_policy_only"`
+ Location string `config:"location"`
+ StorageClass string `config:"storage_class"`
+ Enc encoder.MultiEncoder `config:"encoding"`
+ MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"`
+ MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"`
+ ChunkSize fs.SizeSuffix `config:"chunk_size"`
+ AllowCreateBucket bool `config:"allow_create_bucket"`
+}
+
+// Fs represents a remote storage server
+type Fs struct {
+ name string // name of this remote
+ root string // the path we are working on if any
+ opt Options // parsed options
+ features *fs.Features // optional features
+ svc *storage.Service // the connection to the storage server
+ client *http.Client // authorized client
+ rootBucket string // bucket part of root (if any)
+ rootDirectory string // directory part of root (if any)
+ cache *bucket.Cache // cache of bucket status
+ pacer *fs.Pacer // To pace the API calls
+ pool *pool.Pool
+}
+
+// Object describes a storage object
+//
+// Will definitely have info but maybe not meta
+type Object struct {
+ fs *Fs // what this object is part of
+ remote string // The remote path
+ url string // download path
+ md5sum string // The MD5Sum of the object
+ bytes int64 // Bytes in the object
+ modTime time.Time // Modified time of the object
+ mimeType string
+}
+
+// ------------------------------------------------------------
+
+// Name of the remote (as passed into NewFs)
+func (f *Fs) Name() string {
+ return f.name
+}
+
+// Root of the remote (as passed into NewFs)
+func (f *Fs) Root() string {
+ return f.root
+}
+
+// String converts this Fs to a string
+func (f *Fs) String() string {
+ if f.rootBucket == "" {
+ return "GCS root"
+ }
+ if f.rootDirectory == "" {
+ return fmt.Sprintf("GCS bucket %s", f.rootBucket)
+ }
+ return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
+}
+
+// Features returns the optional features of this Fs
+func (f *Fs) Features() *fs.Features {
+ return f.features
+}
+
+// shouldRetry determines whether a given err rates being retried
+func shouldRetry(err error) (again bool, errOut error) {
+ again = false
+ if err != nil {
+ if fserrors.ShouldRetry(err) {
+ again = true
+ } else {
+ switch gerr := err.(type) {
+ case *googleapi.Error:
+ if gerr.Code >= 500 && gerr.Code < 600 {
+ // All 5xx errors should be retried
+ again = true
+ } else if len(gerr.Errors) > 0 {
+ reason := gerr.Errors[0].Reason
+ if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
+ again = true
+ }
+ }
+ }
+ }
+ }
+ return again, err
+}
+
+// parsePath parses a remote 'url'
+func parsePath(path string) (root string) {
+ root = strings.Trim(path, "/")
+ return
+}
+
+// split returns bucket and bucketPath from the rootRelativePath
+// relative to f.root
+func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
+ bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+ return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
+}
+
+// split
returns bucket and bucketPath from the object +func (o *Object) split() (bucket, bucketPath string) { + return o.fs.split(o.remote) +} + +func getServiceAccountClient(ctx context.Context, credentialsData []byte) (*http.Client, error) { + conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...) + if err != nil { + return nil, errors.Wrap(err, "error processing credentials") + } + ctxWithSpecialClient := oauthutil.Context(ctx, fshttp.NewClient(ctx)) + return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil +} + +// setRoot changes the root of the Fs +func (f *Fs) setRoot(root string) { + f.root = parsePath(root) + f.rootBucket, f.rootDirectory = bucket.Split(f.root) +} + +// NewFs constructs an Fs from the path, bucket:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + ci := fs.GetConfig(ctx) + + var oAuthClient *http.Client + + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + if opt.ObjectACL == "" { + opt.ObjectACL = "private" + } + if opt.BucketACL == "" { + opt.BucketACL = "private" + } + + // try loading service account credentials from env variable, then from a file + if opt.ServiceAccountCredentials == "" && opt.ServiceAccountFile != "" { + loadedCreds, err := ioutil.ReadFile(env.ShellExpand(opt.ServiceAccountFile)) + if err != nil { + return nil, errors.Wrap(err, "error opening service account credentials file") + } + opt.ServiceAccountCredentials = string(loadedCreds) + } + if opt.Anonymous { + oAuthClient = fshttp.NewClient(ctx) + } else if opt.ServiceAccountCredentials != "" { + oAuthClient, err = getServiceAccountClient(ctx, []byte(opt.ServiceAccountCredentials)) + if err != nil { + return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account") + } + } else { + oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig) + if err != nil { + ctx := context.Background() + oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope) + if err != nil { + return nil, errors.Wrap(err, "failed to configure Google Cloud Storage") + } + } + } + + f := &Fs{ + name: name, + root: root, + opt: *opt, + pacer: fs.NewPacer(ctx, pacer.NewGoogleDrive(pacer.MinSleep(minSleep))), + cache: bucket.NewCache(), + pool: pool.New( + time.Duration(opt.MemoryPoolFlushTime), + int(opt.ChunkSize), + ci.Transfers, + opt.MemoryPoolUseMmap, + ), + } + f.setRoot(root) + f.features = (&fs.Features{ + ReadMimeType: true, + WriteMimeType: true, + BucketBased: true, + BucketBasedRootOK: true, + }).Fill(ctx, f) + + // Create a new authorized Drive client. + f.client = oAuthClient + f.svc, err = storage.New(f.client) + if err != nil { + return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client") + } + + if f.rootBucket != "" && f.rootDirectory != "" { + // Check to see if the object exists + encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory) + err = f.pacer.Call(func() (bool, error) { + _, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do() + return shouldRetry(err) + }) + if err == nil { + newRoot := path.Dir(f.root) + if newRoot == "." { + newRoot = "" + } + f.setRoot(newRoot) + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + } + return f, nil +} + +// Return an Object from a path +// +// If it can't be found it returns the error fs.ErrorObjectNotFound. 
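+//
+// A sketch of how this is typically reached via the public NewObject
+// wrapper below (the path is illustrative only):
+//
+//	o, err := f.NewObject(ctx, "dir/file.txt")
+//	if err == fs.ErrorObjectNotFound {
+//		// no such object in the bucket
+//	}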
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + if info != nil { + o.setMetaData(info) + } else { + err := o.readMetaData(ctx) // reads info and meta, returning an error + if err != nil { + return nil, err + } + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) +} + +// listFn is called from list to handle an object. +type listFn func(remote string, object *storage.Object, isDirectory bool) error + +// list the objects into the function supplied +// +// dir is the starting directory, "" for root +// +// Set recurse to read sub directories +// +// The remote has prefix removed from it and if addBucket is set +// then it adds the bucket to the start. +func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) { + if prefix != "" { + prefix += "/" + } + if directory != "" { + directory += "/" + } + list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks) + if !recurse { + list = list.Delimiter("/") + } + for { + var objects *storage.Objects + err = f.pacer.Call(func() (bool, error) { + objects, err = list.Context(ctx).Do() + return shouldRetry(err) + }) + if err != nil { + if gErr, ok := err.(*googleapi.Error); ok { + if gErr.Code == http.StatusNotFound { + err = fs.ErrorDirNotFound + } + } + return err + } + if !recurse { + var object storage.Object + for _, remote := range objects.Prefixes { + if !strings.HasSuffix(remote, "/") { + continue + } + remote = f.opt.Enc.ToStandardPath(remote) + if !strings.HasPrefix(remote, prefix) { + fs.Logf(f, "Odd name received %q", remote) + continue + } + remote = remote[len(prefix) : len(remote)-1] + if addBucket { + remote = path.Join(bucket, remote) + } + err = fn(remote, &object, true) + if err != nil { + return err + } + } + } + for _, object := range objects.Items { + remote := f.opt.Enc.ToStandardPath(object.Name) + if !strings.HasPrefix(remote, prefix) { + fs.Logf(f, "Odd name received %q", object.Name) + continue + } + remote = remote[len(prefix):] + isDirectory := remote == "" || strings.HasSuffix(remote, "/") + if addBucket { + remote = path.Join(bucket, remote) + } + // is this a directory marker? 
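+ // (a directory marker here is an entry whose name, after the
+ // prefix is stripped, is empty or ends in "/" - such objects are
+ // a common convention for simulating empty directories in a flat
+ // bucket namespace)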
+ if isDirectory {
+ continue // skip directory marker
+ }
+ err = fn(remote, object, false)
+ if err != nil {
+ return err
+ }
+ }
+ if objects.NextPageToken == "" {
+ break
+ }
+ list.PageToken(objects.NextPageToken)
+ }
+ return nil
+}
+
+// Convert a list item into a DirEntry
+func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
+ if isDirectory {
+ d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
+ return d, nil
+ }
+ o, err := f.newObjectWithInfo(ctx, remote, object)
+ if err != nil {
+ return nil, err
+ }
+ return o, nil
+}
+
+// listDir lists a single directory
+func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
+ // List the objects
+ err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
+ entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
+ if err != nil {
+ return err
+ }
+ if entry != nil {
+ entries = append(entries, entry)
+ }
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ // bucket must be present if listing succeeded
+ f.cache.MarkOK(bucket)
+ return entries, err
+}
+
+// listBuckets lists the buckets
+func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
+ if f.opt.ProjectNumber == "" {
+ return nil, errors.New("can't list buckets without project number")
+ }
+ listBuckets := f.svc.Buckets.List(f.opt.ProjectNumber).MaxResults(listChunks)
+ for {
+ var buckets *storage.Buckets
+ err = f.pacer.Call(func() (bool, error) {
+ buckets, err = listBuckets.Context(ctx).Do()
+ return shouldRetry(err)
+ })
+ if err != nil {
+ return nil, err
+ }
+ for _, bucket := range buckets.Items {
+ d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
+ entries = append(entries, d)
+ }
+ if buckets.NextPageToken == "" {
+ break
+ }
+ listBuckets.PageToken(buckets.NextPageToken)
+ }
+ return entries, nil
+}
+
+// List the objects and directories in dir into entries. The
+// entries can be returned in any order but should be for a
+// complete directory.
+//
+// dir should be "" to list the root, and should not have
+// trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+ bucket, directory := f.split(dir)
+ if bucket == "" {
+ if directory != "" {
+ return nil, fs.ErrorListBucketRequired
+ }
+ return f.listBuckets(ctx)
+ }
+ return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
+}
+
+// ListR lists the objects and directories of the Fs starting
+// from dir recursively into out.
+//
+// dir should be "" to start from the root, and should not
+// have trailing slashes.
+//
+// This should return ErrDirNotFound if the directory isn't
+// found.
+//
+// It should call callback for each tranche of entries read.
+// These need not be returned in any particular order. If
+// callback returns an error then the listing will stop
+// immediately.
+//
+// Don't implement this unless you have a more efficient way
+// of listing recursively than doing a directory traversal.
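+//
+// A minimal sketch of driving ListR from a caller (names assumed):
+//
+//	err := f.ListR(ctx, "", func(entries fs.DirEntries) error {
+//		for _, entry := range entries {
+//			fmt.Println(entry.Remote())
+//		}
+//		return nil
+//	})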
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { + bucket, directory := f.split(dir) + list := walk.NewListRHelper(callback) + listR := func(bucket, directory, prefix string, addBucket bool) error { + return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) + if err != nil { + return err + } + return list.Add(entry) + }) + } + if bucket == "" { + entries, err := f.listBuckets(ctx) + if err != nil { + return err + } + for _, entry := range entries { + err = list.Add(entry) + if err != nil { + return err + } + bucket := entry.Remote() + err = listR(bucket, "", f.rootDirectory, true) + if err != nil { + return err + } + // bucket must be present if listing succeeded + f.cache.MarkOK(bucket) + } + } else { + err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "") + if err != nil { + return err + } + // bucket must be present if listing succeeded + f.cache.MarkOK(bucket) + } + return list.Flush() +} + +// Put the object into the bucket +// +// Copy the reader in to the new object which is returned +// +// The new object may have been created if an error is returned +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + // Temporary Object under construction + o := &Object{ + fs: f, + remote: src.Remote(), + } + return o, o.Update(ctx, in, src, options...) +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) +} + +// Mkdir creates the bucket if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) { + bucket, _ := f.split(dir) + return f.makeBucket(ctx, bucket) +} + +// makeBucket creates the bucket if it doesn't exist +func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) { + return f.cache.Create(bucket, func() error { + // List something from the bucket to see if it exists. Doing it like this enables the use of a + // service account that only has the "Storage Object Admin" role. See #2193 for details. 
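+ // (a Buckets.Get call would be the simpler alternative, but it
+ // may need permissions that such a minimally-privileged service
+ // account does not have)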
+ err = f.pacer.Call(func() (bool, error) { + _, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do() + return shouldRetry(err) + }) + if err == nil { + // Bucket already exists + return nil + } else if gErr, ok := err.(*googleapi.Error); ok { + if gErr.Code != http.StatusNotFound { + return errors.Wrap(err, "failed to get bucket") + } + if !f.opt.AllowCreateBucket { + return errors.Wrapf(err, "bucket %s does not exist", bucket) + } + } else { + return errors.Wrap(err, "failed to get bucket") + } + + if f.opt.ProjectNumber == "" { + return errors.New("can't make bucket without project number") + } + + bucket := storage.Bucket{ + Name: bucket, + Location: f.opt.Location, + StorageClass: f.opt.StorageClass, + } + if f.opt.BucketPolicyOnly { + bucket.IamConfiguration = &storage.BucketIamConfiguration{ + BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{ + Enabled: true, + }, + } + } + return f.pacer.Call(func() (bool, error) { + insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket) + if !f.opt.BucketPolicyOnly { + insertBucket.PredefinedAcl(f.opt.BucketACL) + } + _, err = insertBucket.Context(ctx).Do() + return shouldRetry(err) + }) + }, nil) +} + +// Rmdir deletes the bucket if the fs is at the root +// +// Returns an error if it isn't empty: Error 409: The bucket you tried +// to delete was not empty. +func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) { + bucket, directory := f.split(dir) + if bucket == "" || directory != "" { + return nil + } + return f.cache.Remove(bucket, func() error { + return f.pacer.Call(func() (bool, error) { + err = f.svc.Buckets.Delete(bucket).Context(ctx).Do() + return shouldRetry(err) + }) + }) +} + +// Precision returns the precision +func (f *Fs) Precision() time.Duration { + return time.Nanosecond +} + +// Copy src to this remote using server-side copy operations. +// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + dstBucket, dstPath := f.split(remote) + err := f.makeBucket(ctx, dstBucket) + if err != nil { + return nil, err + } + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + srcBucket, srcPath := srcObj.split() + + // Temporary Object under construction + dstObj := &Object{ + fs: f, + remote: remote, + } + + rewriteRequest := f.svc.Objects.Rewrite(srcBucket, srcPath, dstBucket, dstPath, nil) + if !f.opt.BucketPolicyOnly { + rewriteRequest.DestinationPredefinedAcl(f.opt.ObjectACL) + } + var rewriteResponse *storage.RewriteResponse + for { + err = f.pacer.Call(func() (bool, error) { + rewriteResponse, err = rewriteRequest.Context(ctx).Do() + return shouldRetry(err) + }) + if err != nil { + return nil, err + } + if rewriteResponse.Done { + break + } + rewriteRequest.RewriteToken(rewriteResponse.RewriteToken) + fs.Debugf(dstObj, "Continuing rewrite %d bytes done", rewriteResponse.TotalBytesRewritten) + } + // Set the metadata for the new object while we have it + dstObj.setMetaData(rewriteResponse.Resource) + return dstObj, nil +} + +// Hashes returns the supported hash sets. 
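+//
+// A caller could check for MD5 support with, for example (sketch):
+//
+//	if f.Hashes().Contains(hash.MD5) {
+//		// MD5 checksums are available
+//	}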
+func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.MD5) +} + +// ------------------------------------------------------------ + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the Md5sum of an object returning a lowercase hex string +func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { + if t != hash.MD5 { + return "", hash.ErrUnsupported + } + return o.md5sum, nil +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + return o.bytes +} + +// setMetaData sets the fs data from a storage.Object +func (o *Object) setMetaData(info *storage.Object) { + o.url = info.MediaLink + o.bytes = int64(info.Size) + o.mimeType = info.ContentType + + // Read md5sum + md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash) + if err != nil { + fs.Logf(o, "Bad MD5 decode: %v", err) + } else { + o.md5sum = hex.EncodeToString(md5sumData) + } + + // read mtime out of metadata if available + mtimeString, ok := info.Metadata[metaMtime] + if ok { + modTime, err := time.Parse(timeFormatIn, mtimeString) + if err == nil { + o.modTime = modTime + return + } + fs.Debugf(o, "Failed to read mtime from metadata: %s", err) + } + + // Fallback to the Updated time + modTime, err := time.Parse(timeFormatIn, info.Updated) + if err != nil { + fs.Logf(o, "Bad time decode: %v", err) + } else { + o.modTime = modTime + } +} + +// readObjectInfo reads the definition for an object +func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) { + bucket, bucketPath := o.split() + err = o.fs.pacer.Call(func() (bool, error) { + object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do() + return shouldRetry(err) + }) + if err != nil { + if gErr, ok := err.(*googleapi.Error); ok { + if gErr.Code == http.StatusNotFound { + return nil, fs.ErrorObjectNotFound + } + } + return nil, err + } + return object, nil +} + +// readMetaData gets the metadata if it hasn't already been fetched +// +// it also sets the info +func (o *Object) readMetaData(ctx context.Context) (err error) { + if !o.modTime.IsZero() { + return nil + } + object, err := o.readObjectInfo(ctx) + if err != nil { + return err + } + o.setMetaData(object) + return nil +} + +// ModTime returns the modification time of the object +// +// It attempts to read the objects mtime and if that isn't present the +// LastModified returned in the http headers +func (o *Object) ModTime(ctx context.Context) time.Time { + err := o.readMetaData(ctx) + if err != nil { + // fs.Logf(o, "Failed to read metadata: %v", err) + return time.Now() + } + return o.modTime +} + +// Returns metadata for an object +func metadataFromModTime(modTime time.Time) map[string]string { + metadata := make(map[string]string, 1) + metadata[metaMtime] = modTime.Format(timeFormatOut) + return metadata +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) { + // read the complete existing object first + object, err := o.readObjectInfo(ctx) + if err != nil { + return err + } + // Add the mtime to the existing metadata + mtime := modTime.Format(timeFormatOut) + if object.Metadata == nil { + object.Metadata = make(map[string]string, 1) + } + object.Metadata[metaMtime] 
= mtime + // Copy the object to itself to update the metadata + // Using PATCH requires too many permissions + bucket, bucketPath := o.split() + var newObject *storage.Object + err = o.fs.pacer.Call(func() (bool, error) { + copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object) + if !o.fs.opt.BucketPolicyOnly { + copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL) + } + newObject, err = copyObject.Context(ctx).Do() + return shouldRetry(err) + }) + if err != nil { + return err + } + o.setMetaData(newObject) + return nil +} + +// Storable returns a boolean as to whether this object is storable +func (o *Object) Storable() bool { + return true +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + req, err := http.NewRequest("GET", o.url, nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext + fs.FixRangeOption(options, o.bytes) + fs.OpenOptionAddHTTPHeaders(req.Header, options) + var res *http.Response + err = o.fs.pacer.Call(func() (bool, error) { + res, err = o.fs.client.Do(req) + if err == nil { + err = googleapi.CheckResponse(res) + if err != nil { + _ = res.Body.Close() // ignore error + } + } + return shouldRetry(err) + }) + if err != nil { + return nil, err + } + _, isRanging := req.Header["Range"] + if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) { + _ = res.Body.Close() // ignore error + return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status) + } + return res.Body, nil +} + +// Update the object with the contents of the io.Reader, modTime and size +// +// The new object may have been created if an error is returned +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + bucket, bucketPath := o.split() + err := o.fs.makeBucket(ctx, bucket) + if err != nil { + return err + } + modTime := src.ModTime(ctx) + + object := storage.Object{ + Bucket: bucket, + Name: bucketPath, + ContentType: fs.MimeType(ctx, src), + Metadata: metadataFromModTime(modTime), + } + // Apply upload options + for _, option := range options { + key, value := option.Header() + lowerKey := strings.ToLower(key) + switch lowerKey { + case "": + // ignore + case "cache-control": + object.CacheControl = value + case "content-disposition": + object.ContentDisposition = value + case "content-encoding": + object.ContentEncoding = value + case "content-language": + object.ContentLanguage = value + case "content-type": + object.ContentType = value + case "x-goog-storage-class": + object.StorageClass = value + default: + const googMetaPrefix = "x-goog-meta-" + if strings.HasPrefix(lowerKey, googMetaPrefix) { + metaKey := lowerKey[len(googMetaPrefix):] + object.Metadata[metaKey] = value + } else { + fs.Errorf(o, "Don't know how to set key %q on upload", key) + } + } + } + + buf := o.fs.pool.Get() + defer o.fs.pool.Put(buf) + + var newObject *storage.Object + err = o.fs.pacer.CallNoRetry(func() (bool, error) { + mediaOpts := []googleapi.MediaOption{ + googleapi.ContentType(""), + googleapi.ChunkSize(int(o.fs.opt.ChunkSize)), + googleapi.WithBuffer(buf), + } + insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, mediaOpts...).Name(object.Name) + if !o.fs.opt.BucketPolicyOnly { + insertObject.PredefinedAcl(o.fs.opt.ObjectACL) + } + newObject, err = insertObject.Context(ctx).Do() + return shouldRetry(err) + }) + if 
err != nil { + return err + } + // Set the metadata for the new object while we have it + o.setMetaData(newObject) + return nil +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) (err error) { + bucket, bucketPath := o.split() + err = o.fs.pacer.Call(func() (bool, error) { + err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do() + return shouldRetry(err) + }) + return err +} + +// MimeType of an Object if known, "" otherwise +func (o *Object) MimeType(ctx context.Context) string { + return o.mimeType +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = &Fs{} + _ fs.Copier = &Fs{} + _ fs.PutStreamer = &Fs{} + _ fs.ListRer = &Fs{} + _ fs.Object = &Object{} + _ fs.MimeTyper = &Object{} +) diff --git a/vendor/github.com/rclone/rclone/backend/local/about_unix.go b/vendor/github.com/rclone/rclone/backend/local/about_unix.go new file mode 100644 index 00000000000..3c31d17a1cf --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/about_unix.go @@ -0,0 +1,34 @@ +// +build darwin dragonfly freebsd linux + +package local + +import ( + "context" + "os" + "syscall" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" +) + +// About gets quota information +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + var s syscall.Statfs_t + err := syscall.Statfs(f.root, &s) + if err != nil { + if os.IsNotExist(err) { + return nil, fs.ErrorDirNotFound + } + return nil, errors.Wrap(err, "failed to read disk usage") + } + bs := int64(s.Bsize) // nolint: unconvert + usage := &fs.Usage{ + Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used + Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use + Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota + } + return usage, nil +} + +// check interface +var _ fs.Abouter = &Fs{} diff --git a/vendor/github.com/rclone/rclone/backend/local/about_windows.go b/vendor/github.com/rclone/rclone/backend/local/about_windows.go new file mode 100644 index 00000000000..32793e71947 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/about_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package local + +import ( + "context" + "syscall" + "unsafe" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" +) + +var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW") + +// About gets quota information +func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { + var available, total, free int64 + _, _, e1 := getFreeDiskSpace.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))), + uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user + uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes + uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes + ) + if e1 != syscall.Errno(0) { + return nil, errors.Wrap(e1, "failed to read disk usage") + } + usage := &fs.Usage{ + Total: fs.NewUsageValue(total), // quota of bytes that can be used + Used: fs.NewUsageValue(total - free), // bytes in use + Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota + } + return usage, nil +} + +// check interface +var _ fs.Abouter = &Fs{} diff --git a/vendor/github.com/rclone/rclone/backend/local/encode_darwin.go b/vendor/github.com/rclone/rclone/backend/local/encode_darwin.go new file mode 100644 index 00000000000..e5615b3181c --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/encode_darwin.go @@ -0,0 
+1,11 @@ +//+build darwin + +package local + +import "github.com/rclone/rclone/lib/encoder" + +// This is the encoding used by the local backend for macOS +// +// macOS can't store invalid UTF-8, it converts them into %XX encoding +const defaultEnc = (encoder.Base | + encoder.EncodeInvalidUtf8) diff --git a/vendor/github.com/rclone/rclone/backend/local/encode_other.go b/vendor/github.com/rclone/rclone/backend/local/encode_other.go new file mode 100644 index 00000000000..2fcafe84e4f --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/encode_other.go @@ -0,0 +1,8 @@ +//+build !windows,!darwin + +package local + +import "github.com/rclone/rclone/lib/encoder" + +// This is the encoding used by the local backend for non windows platforms +const defaultEnc = encoder.Base diff --git a/vendor/github.com/rclone/rclone/backend/local/encode_windows.go b/vendor/github.com/rclone/rclone/backend/local/encode_windows.go new file mode 100644 index 00000000000..68e87a9148b --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/encode_windows.go @@ -0,0 +1,33 @@ +//+build windows + +package local + +import "github.com/rclone/rclone/lib/encoder" + +// This is the encoding used by the local backend for windows platforms +// +// List of replaced characters: +// < (less than) -> '<' // FULLWIDTH LESS-THAN SIGN +// > (greater than) -> '>' // FULLWIDTH GREATER-THAN SIGN +// : (colon) -> ':' // FULLWIDTH COLON +// " (double quote) -> '"' // FULLWIDTH QUOTATION MARK +// \ (backslash) -> '\' // FULLWIDTH REVERSE SOLIDUS +// | (vertical line) -> '|' // FULLWIDTH VERTICAL LINE +// ? (question mark) -> '?' // FULLWIDTH QUESTION MARK +// * (asterisk) -> '*' // FULLWIDTH ASTERISK +// +// Additionally names can't end with a period (.) or space ( ). +// List of replaced characters: +// . (period) -> '.' // FULLWIDTH FULL STOP +// (space) -> '␠' // SYMBOL FOR SPACE +// +// Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16. +// +// https://docs.microsoft.com/de-de/windows/desktop/FileIO/naming-a-file#naming-conventions +const defaultEnc = (encoder.Base | + encoder.EncodeWin | + encoder.EncodeBackSlash | + encoder.EncodeCtl | + encoder.EncodeRightSpace | + encoder.EncodeRightPeriod | + encoder.EncodeInvalidUtf8) diff --git a/vendor/github.com/rclone/rclone/backend/local/fadvise_other.go b/vendor/github.com/rclone/rclone/backend/local/fadvise_other.go new file mode 100644 index 00000000000..18ac0d750bd --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/fadvise_other.go @@ -0,0 +1,12 @@ +//+build !linux + +package local + +import ( + "io" + "os" +) + +func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser { + return f +} diff --git a/vendor/github.com/rclone/rclone/backend/local/fadvise_unix.go b/vendor/github.com/rclone/rclone/backend/local/fadvise_unix.go new file mode 100644 index 00000000000..fce82897199 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/fadvise_unix.go @@ -0,0 +1,165 @@ +//+build linux + +package local + +import ( + "io" + "os" + + "github.com/rclone/rclone/fs" + "golang.org/x/sys/unix" +) + +// fadvise provides means to automate freeing pages in kernel page cache for +// a given file descriptor as the file is sequentially processed (read or +// written). +// +// When copying a file to a remote backend all the file content is read by +// kernel and put to page cache to make future reads faster. 
+// This causes memory pressure visible in both memory usage and CPU consumption
+// and can even cause OOM errors in applications consuming large amounts of memory.
+//
+// In the case of an upload to a remote backend, there is no benefit from caching.
+//
+// fadvise would orchestrate calling POSIX_FADV_DONTNEED
+//
+// POSIX_FADV_DONTNEED attempts to free cached pages associated
+// with the specified region. This is useful, for example, while
+// streaming large files. A program may periodically request the
+// kernel to free cached data that has already been used, so that
+// more useful cached pages are not discarded instead.
+//
+// Requests to discard partial pages are ignored. It is
+// preferable to preserve needed data than discard unneeded data.
+// If the application requires that data be considered for
+// discarding, then offset and len must be page-aligned.
+//
+// The implementation may attempt to write back dirty pages in
+// the specified region, but this is not guaranteed. Any
+// unwritten dirty pages will not be freed. If the application
+// wishes to ensure that dirty pages will be released, it should
+// call fsync(2) or fdatasync(2) first.
+type fadvise struct {
+ o *Object
+ fd int
+ lastPos int64
+ curPos int64
+ windowSize int64
+
+ freePagesCh chan offsetLength
+ doneCh chan struct{}
+}
+
+type offsetLength struct {
+ offset int64
+ length int64
+}
+
+const (
+ defaultAllowPages = 32
+ defaultWorkerQueueSize = 64
+)
+
+func newFadvise(o *Object, fd int, offset int64) *fadvise {
+ f := &fadvise{
+ o: o,
+ fd: fd,
+ lastPos: offset,
+ curPos: offset,
+ windowSize: int64(os.Getpagesize()) * defaultAllowPages,
+
+ freePagesCh: make(chan offsetLength, defaultWorkerQueueSize),
+ doneCh: make(chan struct{}),
+ }
+ go f.worker()
+
+ return f
+}
+
+// sequential configures readahead strategy in Linux kernel.
+//
+// Under Linux, POSIX_FADV_NORMAL sets the readahead window to the
+// default size for the backing device; POSIX_FADV_SEQUENTIAL doubles
+// this size, and POSIX_FADV_RANDOM disables file readahead entirely.
+func (f *fadvise) sequential(limit int64) bool {
+ l := int64(0)
+ if limit > 0 {
+ l = limit
+ }
+ if err := unix.Fadvise(f.fd, f.curPos, l, unix.FADV_SEQUENTIAL); err != nil {
+ fs.Debugf(f.o, "fadvise sequential failed on file descriptor %d: %s", f.fd, err)
+ return false
+ }
+
+ return true
+}
+
+func (f *fadvise) next(n int) {
+ f.curPos += int64(n)
+ f.freePagesIfNeeded()
+}
+
+func (f *fadvise) freePagesIfNeeded() {
+ if f.curPos >= f.lastPos+f.windowSize {
+ f.freePages()
+ }
+}
+
+func (f *fadvise) freePages() {
+ f.freePagesCh <- offsetLength{f.lastPos, f.curPos - f.lastPos}
+ f.lastPos = f.curPos
+}
+
+func (f *fadvise) worker() {
+ for p := range f.freePagesCh {
+ if err := unix.Fadvise(f.fd, p.offset, p.length, unix.FADV_DONTNEED); err != nil {
+ fs.Debugf(f.o, "fadvise dontneed failed on file descriptor %d: %s", f.fd, err)
+ }
+ }
+
+ close(f.doneCh)
+}
+
+func (f *fadvise) wait() {
+ close(f.freePagesCh)
+ <-f.doneCh
+}
+
+type fadviseReadCloser struct {
+ *fadvise
+ inner io.ReadCloser
+}
+
+// newFadviseReadCloser wraps os.File so that reading from that file would
+// remove already consumed pages from kernel page cache.
+// In addition to that it instructs the kernel to double the readahead window to
+// make sequential reads faster.
+// See also fadvise.
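+//
+// A sketch of the intended call site (offset 0 and limit -1 assumed to
+// mean "read everything"):
+//
+//	file, err := os.Open(path)
+//	if err == nil {
+//		rc := newFadviseReadCloser(o, file, 0, -1)
+//		// ... read from rc ...
+//		_ = rc.Close()
+//	}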
+func newFadviseReadCloser(o *Object, f *os.File, offset, limit int64) io.ReadCloser { + r := fadviseReadCloser{ + fadvise: newFadvise(o, int(f.Fd()), offset), + inner: f, + } + + // If syscall failed it's likely that the subsequent syscalls to that + // file descriptor would also fail. In that case return the provided os.File + // pointer. + if !r.sequential(limit) { + r.wait() + return f + } + + return r +} + +func (f fadviseReadCloser) Read(p []byte) (n int, err error) { + n, err = f.inner.Read(p) + f.next(n) + return +} + +func (f fadviseReadCloser) Close() error { + f.freePages() + f.wait() + return f.inner.Close() +} diff --git a/vendor/github.com/rclone/rclone/backend/local/lchtimes.go b/vendor/github.com/rclone/rclone/backend/local/lchtimes.go new file mode 100644 index 00000000000..a845e3a408e --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/lchtimes.go @@ -0,0 +1,20 @@ +// +build windows plan9 js + +package local + +import ( + "time" +) + +const haveLChtimes = false + +// lChtimes changes the access and modification times of the named +// link, similar to the Unix utime() or utimes() functions. +// +// The underlying filesystem may truncate or round the values to a +// less precise time unit. +// If there is an error, it will be of type *PathError. +func lChtimes(name string, atime time.Time, mtime time.Time) error { + // Does nothing + return nil +} diff --git a/vendor/github.com/rclone/rclone/backend/local/lchtimes_unix.go b/vendor/github.com/rclone/rclone/backend/local/lchtimes_unix.go new file mode 100644 index 00000000000..9aec5c43134 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/lchtimes_unix.go @@ -0,0 +1,28 @@ +// +build !windows,!plan9,!js + +package local + +import ( + "os" + "time" + + "golang.org/x/sys/unix" +) + +const haveLChtimes = true + +// lChtimes changes the access and modification times of the named +// link, similar to the Unix utime() or utimes() functions. +// +// The underlying filesystem may truncate or round the values to a +// less precise time unit. +// If there is an error, it will be of type *PathError. 
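+//
+// The AT_SYMLINK_NOFOLLOW flag below is what makes the call apply to
+// the symlink itself rather than to its target, which is the
+// difference between lChtimes and plain os.Chtimes.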
+func lChtimes(name string, atime time.Time, mtime time.Time) error {
+ var utimes [2]unix.Timespec
+ utimes[0] = unix.NsecToTimespec(atime.UnixNano())
+ utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
+ if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); e != nil {
+ return &os.PathError{Op: "lchtimes", Path: name, Err: e}
+ }
+ return nil
+} diff --git a/vendor/github.com/rclone/rclone/backend/local/local.go b/vendor/github.com/rclone/rclone/backend/local/local.go new file mode 100644 index 00000000000..3daeb4f0a30 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/local.go @@ -0,0 +1,1314 @@
+// Package local provides a filesystem interface
+package local
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "github.com/pkg/errors"
+ "github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fs/accounting"
+ "github.com/rclone/rclone/fs/config"
+ "github.com/rclone/rclone/fs/config/configmap"
+ "github.com/rclone/rclone/fs/config/configstruct"
+ "github.com/rclone/rclone/fs/fserrors"
+ "github.com/rclone/rclone/fs/hash"
+ "github.com/rclone/rclone/lib/encoder"
+ "github.com/rclone/rclone/lib/file"
+ "github.com/rclone/rclone/lib/readers"
+)
+
+// Constants
+const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
+const linkSuffix = ".rclonelink" // The suffix added to a translated symbolic link
+const useReadDir = (runtime.GOOS == "windows" || runtime.GOOS == "plan9") // these OSes read FileInfos directly
+
+// Register with Fs
+func init() {
+ fsi := &fs.RegInfo{
+ Name: "local",
+ Description: "Local Disk",
+ NewFs: NewFs,
+ CommandHelp: commandHelp,
+ Options: []fs.Option{{
+ Name: "nounc",
+ Help: "Disable UNC (long path names) conversion on Windows",
+ Examples: []fs.OptionExample{{
+ Value: "true",
+ Help: "Disables long file names",
+ }},
+ }, {
+ Name: "copy_links",
+ Help: "Follow symlinks and copy the pointed to item.",
+ Default: false,
+ NoPrefix: true,
+ ShortOpt: "L",
+ Advanced: true,
+ }, {
+ Name: "links",
+ Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension",
+ Default: false,
+ NoPrefix: true,
+ ShortOpt: "l",
+ Advanced: true,
+ }, {
+ Name: "skip_links",
+ Help: `Don't warn about skipped symlinks.
+This flag disables warning messages on skipped symlinks or junction
+points, as you explicitly acknowledge that they should be skipped.`,
+ Default: false,
+ NoPrefix: true,
+ Advanced: true,
+ }, {
+ Name: "zero_size_links",
+ Help: `Assume the Stat size of links is zero (and read them instead)
+
+On some virtual filesystems (such as LucidLink), reading a link size via a Stat call always returns 0.
+However, on unix it reads as the length of the text in the link. This may cause errors like this when
+syncing:
+
+ Failed to copy: corrupted on transfer: sizes differ 0 vs 13
+
+Setting this flag causes rclone to read the link and use that as the size of the link
+instead of 0 which in most cases fixes the problem.`,
+ Default: false,
+ Advanced: true,
+ }, {
+ Name: "no_unicode_normalization",
+ Help: `Don't apply unicode normalization to paths and filenames (Deprecated)
+
+This flag is deprecated now.
Rclone no longer normalizes unicode file +names, but it compares them with unicode normalization in the sync +routine instead.`, + Default: false, + Advanced: true, + }, { + Name: "no_check_updated", + Help: `Don't check to see if the files change during upload + +Normally rclone checks the size and modification time of files as they +are being uploaded and aborts with a message which starts "can't copy +- source file is being updated" if the file changes during upload. + +However on some file systems this modification time check may fail (e.g. +[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this +check can be disabled with this flag. + +If this flag is set, rclone will use its best efforts to transfer a +file which is being updated. If the file is only having things +appended to it (e.g. a log) then rclone will transfer the log file with +the size it had the first time rclone saw it. + +If the file is being modified throughout (not just appended to) then +the transfer may fail with a hash check failure. + +In detail, once the file has had stat() called on it for the first +time we: + +- Only transfer the size that stat gave +- Only checksum the size that stat gave +- Don't update the stat info for the file + +`, + Default: false, + Advanced: true, + }, { + Name: "one_file_system", + Help: "Don't cross filesystem boundaries (unix/macOS only).", + Default: false, + NoPrefix: true, + ShortOpt: "x", + Advanced: true, + }, { + Name: "case_sensitive", + Help: `Force the filesystem to report itself as case sensitive. + +Normally the local backend declares itself as case insensitive on +Windows/macOS and case sensitive for everything else. Use this flag +to override the default choice.`, + Default: false, + Advanced: true, + }, { + Name: "case_insensitive", + Help: `Force the filesystem to report itself as case insensitive + +Normally the local backend declares itself as case insensitive on +Windows/macOS and case sensitive for everything else. Use this flag +to override the default choice.`, + Default: false, + Advanced: true, + }, { + Name: "no_sparse", + Help: `Disable sparse files for multi-thread downloads + +On Windows platforms rclone will make sparse files when doing +multi-thread downloads. This avoids long pauses on large files where +the OS zeros the file. However sparse files may be undesirable as they +cause disk fragmentation and can be slow to work with.`, + Default: false, + Advanced: true, + }, { + Name: "no_set_modtime", + Help: `Disable setting modtime + +Normally rclone updates modification time of files after they are done +uploading. This can cause permissions issues on Linux platforms when +the user rclone is running as does not own the file uploaded, such as +when copying to a CIFS mount owned by another user. 
If this option is +enabled, rclone will no longer update the modtime after copying a file.`, + Default: false, + Advanced: true, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + Default: defaultEnc, + }}, + } + fs.Register(fsi) +} + +// Options defines the configuration for this backend +type Options struct { + FollowSymlinks bool `config:"copy_links"` + TranslateSymlinks bool `config:"links"` + SkipSymlinks bool `config:"skip_links"` + ZeroSizeLinks bool `config:"zero_size_links"` + NoUTFNorm bool `config:"no_unicode_normalization"` + NoCheckUpdated bool `config:"no_check_updated"` + NoUNC bool `config:"nounc"` + OneFileSystem bool `config:"one_file_system"` + CaseSensitive bool `config:"case_sensitive"` + CaseInsensitive bool `config:"case_insensitive"` + NoSparse bool `config:"no_sparse"` + NoSetModTime bool `config:"no_set_modtime"` + Enc encoder.MultiEncoder `config:"encoding"` +} + +// Fs represents a local filesystem rooted at root +type Fs struct { + name string // the name of the remote + root string // The root directory (OS path) + opt Options // parsed config options + features *fs.Features // optional features + dev uint64 // device number of root node + precisionOk sync.Once // Whether we need to read the precision + precision time.Duration // precision of local filesystem + warnedMu sync.Mutex // used for locking access to 'warned'. + warned map[string]struct{} // whether we have warned about this string + + // do os.Lstat or os.Stat + lstat func(name string) (os.FileInfo, error) + objectMetaMu sync.RWMutex // global lock for Object metadata +} + +// Object represents a local filesystem object +type Object struct { + fs *Fs // The Fs this object is part of + remote string // The remote path (encoded path) + path string // The local path (OS path) + // When using these items the fs.objectMetaMu must be held + size int64 // file metadata - always present + mode os.FileMode + modTime time.Time + hashes map[hash.Type]string // Hashes + // these are read only and don't need the mutex held + translatedLink bool // Is this object a translated link +} + +// ------------------------------------------------------------ + +var errLinksAndCopyLinks = errors.New("can't use -l/--links with -L/--copy-links") + +// NewFs constructs an Fs from the path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + if opt.TranslateSymlinks && opt.FollowSymlinks { + return nil, errLinksAndCopyLinks + } + + if opt.NoUTFNorm { + fs.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed") + } + + f := &Fs{ + name: name, + opt: *opt, + warned: make(map[string]struct{}), + dev: devUnset, + lstat: os.Lstat, + } + f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc) + f.features = (&fs.Features{ + CaseInsensitive: f.caseInsensitive(), + CanHaveEmptyDirectories: true, + IsLocal: true, + SlowHash: true, + }).Fill(ctx, f) + if opt.FollowSymlinks { + f.lstat = os.Stat + } + + // Check to see if this points to a file + fi, err := f.lstat(f.root) + if err == nil { + f.dev = readDevice(fi, f.opt.OneFileSystem) + } + if err == nil && f.isRegular(fi.Mode()) { + // It is a file, so use the parent as the root + f.root = filepath.Dir(f.root) + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + return f, nil +} + +// Determine whether a 
file is a 'regular' file. +// Symlinks count as regular files only if the TranslateSymlinks +// option is in effect +func (f *Fs) isRegular(mode os.FileMode) bool { + if !f.opt.TranslateSymlinks { + return mode.IsRegular() + } + + // fi.Mode().IsRegular() tests that all mode bits are zero + // Since symlinks are accepted, test that all other bits are zero, + // except the symlink bit + return mode&os.ModeType&^os.ModeSymlink == 0 +} + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.opt.Enc.ToStandardPath(filepath.ToSlash(f.root)) +} + +// String converts this Fs to a string +func (f *Fs) String() string { + return fmt.Sprintf("Local file system at %s", f.Root()) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// caseInsensitive returns whether the remote is case insensitive or not +func (f *Fs) caseInsensitive() bool { + if f.opt.CaseSensitive { + return false + } + if f.opt.CaseInsensitive { + return true + } + // FIXME not entirely accurate since you can have case + // sensitive Fses on darwin and case insensitive Fses on linux. + // Should probably check but that would involve creating a + // file in the remote to be most accurate which probably isn't + // desirable. + return runtime.GOOS == "windows" || runtime.GOOS == "darwin" +} + +// translateLink checks whether the remote is a translated link +// and returns a new path, removing the suffix as needed. +// It also returns whether this is a translated link at all +// +// for regular files, localPath is returned unchanged +func translateLink(remote, localPath string) (newLocalPath string, isTranslatedLink bool) { + isTranslatedLink = strings.HasSuffix(remote, linkSuffix) + newLocalPath = strings.TrimSuffix(localPath, linkSuffix) + return newLocalPath, isTranslatedLink +} + +// newObject makes a half-completed Object +func (f *Fs) newObject(remote string) *Object { + translatedLink := false + localPath := f.localPath(remote) + + if f.opt.TranslateSymlinks { + // Possibly receive a new name for localPath + localPath, translatedLink = translateLink(remote, localPath) + } + + return &Object{ + fs: f, + remote: remote, + path: localPath, + translatedLink: translatedLink, + } +} + +// Return an Object from a path +// +// May return nil if an error occurred +func (f *Fs) newObjectWithInfo(remote string, info os.FileInfo) (fs.Object, error) { + o := f.newObject(remote) + if info != nil { + o.setMetadata(info) + } else { + err := o.lstat() + if err != nil { + if os.IsNotExist(err) { + return nil, fs.ErrorObjectNotFound + } + if os.IsPermission(err) { + return nil, fs.ErrorPermissionDenied + } + return nil, err + } + // Handle the odd case that a symlink was specified by name without the link suffix + if o.fs.opt.TranslateSymlinks && o.mode&os.ModeSymlink != 0 && !o.translatedLink { + return nil, fs.ErrorObjectNotFound + } + + } + if o.mode.IsDir() { + return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote) + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(remote, nil) +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory.
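+//
+// A caller-side sketch (editor's illustration; ctx and f are assumed to be an
+// initialised context.Context and *Fs, they are not defined in this diff hunk):
+//
+//	entries, err := f.List(ctx, "some/dir")
+//	if err == nil {
+//		for _, entry := range entries {
+//			fmt.Println(entry.Remote())
+//		}
+//	}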
+// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + fsDirPath := f.localPath(dir) + _, err = os.Stat(fsDirPath) + if err != nil { + return nil, fs.ErrorDirNotFound + } + + fd, err := os.Open(fsDirPath) + if err != nil { + isPerm := os.IsPermission(err) + err = errors.Wrapf(err, "failed to open directory %q", dir) + fs.Errorf(dir, "%v", err) + if isPerm { + _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(err)) + err = nil // ignore error but fail sync + } + return nil, err + } + defer func() { + cerr := fd.Close() + if cerr != nil && err == nil { + err = errors.Wrapf(cerr, "failed to close directory %q", dir) + } + }() + + for { + var fis []os.FileInfo + if useReadDir { + // Windows and Plan9 read the directory entries with the stat information in, which + // shouldn't fail because of unreadable entries. + fis, err = fd.Readdir(1024) + if err == io.EOF && len(fis) == 0 { + break + } + } else { + // For other OSes we read the names only (which shouldn't fail) then stat the + // individual entries ourselves so we can log errors but not fail the directory read. + var names []string + names, err = fd.Readdirnames(1024) + if err == io.EOF && len(names) == 0 { + break + } + if err == nil { + for _, name := range names { + namepath := filepath.Join(fsDirPath, name) + fi, fierr := os.Lstat(namepath) + if fierr != nil { + fierr = errors.Wrapf(fierr, "failed to read directory %q", namepath) + fs.Errorf(dir, "%v", fierr) + _ = accounting.Stats(ctx).Error(fserrors.NoRetryError(fierr)) // fail the sync + continue + } + fis = append(fis, fi) + } + } + } + if err != nil { + return nil, errors.Wrap(err, "failed to read directory entry") + } + + for _, fi := range fis { + name := fi.Name() + mode := fi.Mode() + newRemote := f.cleanRemote(dir, name) + // Follow symlinks if required + if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 { + localPath := filepath.Join(fsDirPath, name) + fi, err = os.Stat(localPath) + if os.IsNotExist(err) || isCircularSymlinkError(err) { + // Skip bad symlinks and circular symlinks + err = fserrors.NoRetryError(errors.Wrap(err, "symlink")) + fs.Errorf(newRemote, "Listing error: %v", err) + err = accounting.Stats(ctx).Error(err) + continue + } + if err != nil { + return nil, err + } + mode = fi.Mode() + } + if fi.IsDir() { + // Ignore directories which are symlinks. These are junction points under windows which + // are kind of a souped-up symlink. Unix doesn't have directories which are symlinks.
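+				// Editor's note: readDevice only returns a real device id when
+				// --one-file-system is in effect; otherwise both sides of the
+				// f.dev comparison below are devUnset and the check passes.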
+ if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi, f.opt.OneFileSystem) { + d := fs.NewDir(newRemote, fi.ModTime()) + entries = append(entries, d) + } + } else { + // Check whether this link should be translated + if f.opt.TranslateSymlinks && fi.Mode()&os.ModeSymlink != 0 { + newRemote += linkSuffix + } + fso, err := f.newObjectWithInfo(newRemote, fi) + if err != nil { + return nil, err + } + if fso.Storable() { + entries = append(entries, fso) + } + } + } + } + return entries, nil +} + +func (f *Fs) cleanRemote(dir, filename string) (remote string) { + remote = path.Join(dir, f.opt.Enc.ToStandardName(filename)) + + if !utf8.ValidString(filename) { + f.warnedMu.Lock() + if _, ok := f.warned[remote]; !ok { + fs.Logf(f, "Replacing invalid UTF-8 characters in %q", remote) + f.warned[remote] = struct{}{} + } + f.warnedMu.Unlock() + } + return +} + +func (f *Fs) localPath(name string) string { + return filepath.Join(f.root, filepath.FromSlash(f.opt.Enc.FromStandardPath(name))) +} + +// Put the Object to the local filesystem +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + // Temporary Object under construction - info filled in by Update() + o := f.newObject(src.Remote()) + err := o.Update(ctx, in, src, options...) + if err != nil { + return nil, err + } + return o, nil +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) +} + +// Mkdir creates the directory if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + // FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go + localPath := f.localPath(dir) + err := os.MkdirAll(localPath, 0777) + if err != nil { + return err + } + if dir == "" { + fi, err := f.lstat(localPath) + if err != nil { + return err + } + f.dev = readDevice(fi, f.opt.OneFileSystem) + } + return nil +} + +// Rmdir removes the directory +// +// If it isn't empty it will return an error +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + return os.Remove(f.localPath(dir)) +} + +// Precision of the file system +func (f *Fs) Precision() (precision time.Duration) { + if f.opt.NoSetModTime { + return fs.ModTimeNotSupported + } + + f.precisionOk.Do(func() { + f.precision = f.readPrecision() + }) + return f.precision +} + +// Read the precision +func (f *Fs) readPrecision() (precision time.Duration) { + // Default precision of 1s + precision = time.Second + + // Create temporary file and test it + fd, err := ioutil.TempFile("", "rclone") + if err != nil { + // If failed return 1s + // fmt.Println("Failed to create temp file", err) + return time.Second + } + path := fd.Name() + // fmt.Println("Created temp file", path) + err = fd.Close() + if err != nil { + return time.Second + } + + // Delete it on return + defer func() { + // fmt.Println("Remove temp file") + _ = os.Remove(path) // ignore error + }() + + // Find the minimum duration we can detect + for duration := time.Duration(1); duration < time.Second; duration *= 10 { + // Current time with delta + t := time.Unix(time.Now().Unix(), int64(duration)) + err := os.Chtimes(path, t, t) + if err != nil { + // fmt.Println("Failed to Chtimes", err) + break + } + + // Read the actual time back + fi, err := os.Stat(path) + if err != nil { + // fmt.Println("Failed to Stat", err) + break + } + 
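+		// Editor's note: the loop probes deltas of 1ns, 10ns, ... up to 100ms;
+		// the first delta that survives the Chtimes/Stat round trip unchanged
+		// is reported as the filesystem's modtime precision.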
+ + // If it matches - have found the precision + // fmt.Println("compare", fi.ModTime(), t) + if fi.ModTime().Equal(t) { + // fmt.Println("Precision detected as", duration) + return duration + } + } + return +} + +// Purge deletes all the files in the directory +// +// Optional interface: Only implement this if you have a way of +// deleting all the files quicker than just running Remove() on the +// result of List() +func (f *Fs) Purge(ctx context.Context, dir string) error { + dir = f.localPath(dir) + fi, err := f.lstat(dir) + if err != nil { + // already purged + if os.IsNotExist(err) { + return fs.ErrorDirNotFound + } + return err + } + if !fi.Mode().IsDir() { + return errors.Errorf("can't purge non-directory: %q", dir) + } + return os.RemoveAll(dir) +} + +// Move src to this remote using server-side move operations. +// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantMove +func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't move - not same remote type") + return nil, fs.ErrorCantMove + } + + // Temporary Object under construction + dstObj := f.newObject(remote) + dstObj.fs.objectMetaMu.RLock() + dstObjMode := dstObj.mode + dstObj.fs.objectMetaMu.RUnlock() + + // Check it is a file if it exists + err := dstObj.lstat() + if os.IsNotExist(err) { + // OK + } else if err != nil { + return nil, err + } else if !dstObj.fs.isRegular(dstObjMode) { + // It isn't a file + return nil, errors.New("can't move file onto non-file") + } + + // Create destination + err = dstObj.mkdirAll() + if err != nil { + return nil, err + } + + // Do the move + err = os.Rename(srcObj.path, dstObj.path) + if os.IsNotExist(err) { + // race condition, source was deleted in the meantime + return nil, err + } else if os.IsPermission(err) { + // not enough rights to write to dst + return nil, err + } else if err != nil { + // not quite clear, but probably trying to move a file across file system + // boundaries. Copying might still work. + fs.Debugf(src, "Can't move: %v: trying copy", err) + return nil, fs.ErrorCantMove + } + + // Update the info + err = dstObj.lstat() + if err != nil { + return nil, err + } + + return dstObj, nil +} + +// DirMove moves src, srcRemote to this remote at dstRemote +// using server-side move operations.
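+//
+// A caller-side sketch (editor's illustration; srcFs is an assumed second
+// *Fs handle, not defined in this diff hunk):
+//
+//	if err := f.DirMove(ctx, srcFs, "old", "new"); err == fs.ErrorCantDirMove {
+//		// fall back to copy and delete
+//	}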
+// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantDirMove +// +// If destination exists then return fs.ErrorDirExists +func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error { + srcFs, ok := src.(*Fs) + if !ok { + fs.Debugf(srcFs, "Can't move directory - not same remote type") + return fs.ErrorCantDirMove + } + srcPath := srcFs.localPath(srcRemote) + dstPath := f.localPath(dstRemote) + + // Check if destination exists + _, err := os.Lstat(dstPath) + if !os.IsNotExist(err) { + return fs.ErrorDirExists + } + + // Create parent of destination + dstParentPath := filepath.Dir(dstPath) + err = os.MkdirAll(dstParentPath, 0777) + if err != nil { + return err + } + + // Do the move + err = os.Rename(srcPath, dstPath) + if os.IsNotExist(err) { + // race condition, source was deleted in the meantime + return err + } else if os.IsPermission(err) { + // not enough rights to write to dst + return err + } else if err != nil { + // not quite clear, but probably trying to move directory across file system + // boundaries. Copying might still work. + fs.Debugf(src, "Can't move dir: %v: trying copy", err) + return fs.ErrorCantDirMove + } + return nil +} + +// Hashes returns the supported hash sets. +func (f *Fs) Hashes() hash.Set { + return hash.Supported() +} + +var commandHelp = []fs.CommandHelp{ + { + Name: "noop", + Short: "A null operation for testing backend commands", + Long: `This is a test command which has some options +you can try to change the output.`, + Opts: map[string]string{ + "echo": "echo the input arguments", + "error": "return an error based on option value", + }, + }, +} + +// Command the backend to run a named command +// +// The command run is name +// args may be used to read arguments from +// opts may be used to read optional arguments from +// +// The result should be capable of being JSON encoded +// If it is a string or a []string it will be shown to the user +// otherwise it will be JSON encoded and shown to the user like that +func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) { + switch name { + case "noop": + if txt, ok := opt["error"]; ok { + if txt == "" { + txt = "unspecified error" + } + return nil, errors.New(txt) + } + if _, ok := opt["echo"]; ok { + out := map[string]interface{}{} + out["name"] = name + out["arg"] = arg + out["opt"] = opt + return out, nil + } + return nil, nil + default: + return nil, fs.ErrorCommandNotFound + } +} + +// ------------------------------------------------------------ + +// Fs returns the parent Fs +func (o *Object) Fs() fs.Info { + return o.fs +} + +// Return a string version +func (o *Object) String() string { + if o == nil { + return "" + } + return o.remote +} + +// Remote returns the remote path +func (o *Object) Remote() string { + return o.remote +} + +// Hash returns the requested hash of a file as a lowercase hex string +func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) { + // Check that the underlying file hasn't changed + o.fs.objectMetaMu.RLock() + oldtime := o.modTime + oldsize := o.size + o.fs.objectMetaMu.RUnlock() + err := o.lstat() + var changed bool + if err != nil { + if os.IsNotExist(errors.Cause(err)) { + // If file not found then we assume any accumulated + // hashes are OK - this will error on Open + changed = true + } else { + return "", errors.Wrap(err, "hash: failed to stat") + } + } else { + o.fs.objectMetaMu.RLock() + changed = 
!o.modTime.Equal(oldtime) || oldsize != o.size + o.fs.objectMetaMu.RUnlock() + } + + o.fs.objectMetaMu.RLock() + hashValue, hashFound := o.hashes[r] + o.fs.objectMetaMu.RUnlock() + + if changed || !hashFound { + var in io.ReadCloser + + if !o.translatedLink { + var fd *os.File + fd, err = file.Open(o.path) + if fd != nil { + in = newFadviseReadCloser(o, fd, 0, 0) + } + } else { + in, err = o.openTranslatedLink(0, -1) + } + // If not checking for updates, only read size given + if o.fs.opt.NoCheckUpdated { + in = readers.NewLimitedReadCloser(in, o.size) + } + if err != nil { + return "", errors.Wrap(err, "hash: failed to open") + } + var hashes map[hash.Type]string + hashes, err = hash.StreamTypes(in, hash.NewHashSet(r)) + closeErr := in.Close() + if err != nil { + return "", errors.Wrap(err, "hash: failed to read") + } + if closeErr != nil { + return "", errors.Wrap(closeErr, "hash: failed to close") + } + hashValue = hashes[r] + o.fs.objectMetaMu.Lock() + if o.hashes == nil { + o.hashes = hashes + } else { + o.hashes[r] = hashValue + } + o.fs.objectMetaMu.Unlock() + } + return hashValue, nil +} + +// Size returns the size of an object in bytes +func (o *Object) Size() int64 { + o.fs.objectMetaMu.RLock() + defer o.fs.objectMetaMu.RUnlock() + return o.size +} + +// ModTime returns the modification time of the object +func (o *Object) ModTime(ctx context.Context) time.Time { + o.fs.objectMetaMu.RLock() + defer o.fs.objectMetaMu.RUnlock() + return o.modTime +} + +// SetModTime sets the modification time of the local fs object +func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error { + if o.fs.opt.NoSetModTime { + return nil + } + var err error + if o.translatedLink { + err = lChtimes(o.path, modTime, modTime) + } else { + err = os.Chtimes(o.path, modTime, modTime) + } + if err != nil { + return err + } + // Re-read metadata + return o.lstat() +} + +// Storable returns a boolean showing if this object is storable +func (o *Object) Storable() bool { + o.fs.objectMetaMu.RLock() + mode := o.mode + o.fs.objectMetaMu.RUnlock() + if mode&os.ModeSymlink != 0 && !o.fs.opt.TranslateSymlinks { + if !o.fs.opt.SkipSymlinks { + fs.Logf(o, "Can't follow symlink without -L/--copy-links") + } + return false + } else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { + fs.Logf(o, "Can't transfer non file/directory") + return false + } else if mode&os.ModeDir != 0 { + // fs.Debugf(o, "Skipping directory") + return false + } + return true +} + +// localOpenFile wraps an io.ReadCloser and updates the md5sum of the +// object that is read +type localOpenFile struct { + o *Object // object that is open + in io.ReadCloser // handle we are wrapping + hash *hash.MultiHasher // currently accumulating hashes + fd *os.File // file object reference +} + +// Read bytes from the object - see io.Reader +func (file *localOpenFile) Read(p []byte) (n int, err error) { + if !file.o.fs.opt.NoCheckUpdated { + // Check if file has the same size and modTime + fi, err := file.fd.Stat() + if err != nil { + return 0, errors.Wrap(err, "can't read status of source file while transferring") + } + file.o.fs.objectMetaMu.RLock() + oldtime := file.o.modTime + oldsize := file.o.size + file.o.fs.objectMetaMu.RUnlock() + if oldsize != fi.Size() { + return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", oldsize, fi.Size())) + } + if !oldtime.Equal(fi.ModTime()) { + return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file 
is being updated (mod time changed from %v to %v)", oldtime, fi.ModTime())) + } + } + + n, err = file.in.Read(p) + if n > 0 { + // Hash routines never return an error + _, _ = file.hash.Write(p[:n]) + } + return +} + +// Close the object and update the hashes +func (file *localOpenFile) Close() (err error) { + err = file.in.Close() + if err == nil { + if file.hash.Size() == file.o.Size() { + file.o.fs.objectMetaMu.Lock() + file.o.hashes = file.hash.Sums() + file.o.fs.objectMetaMu.Unlock() + } + } + return err +} + +// Returns a ReadCloser() object that contains the contents of a symbolic link +func (o *Object) openTranslatedLink(offset, limit int64) (lrc io.ReadCloser, err error) { + // Read the link and return its destination as the contents of the object + linkdst, err := os.Readlink(o.path) + if err != nil { + return nil, err + } + return readers.NewLimitedReadCloser(ioutil.NopCloser(strings.NewReader(linkdst[offset:])), limit), nil +} + +// Open an object for read +func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) { + var offset, limit int64 = 0, -1 + var hasher *hash.MultiHasher + for _, option := range options { + switch x := option.(type) { + case *fs.SeekOption: + offset = x.Offset + case *fs.RangeOption: + offset, limit = x.Decode(o.Size()) + case *fs.HashesOption: + if x.Hashes.Count() > 0 { + hasher, err = hash.NewMultiHasherTypes(x.Hashes) + if err != nil { + return nil, err + } + } + default: + if option.Mandatory() { + fs.Logf(o, "Unsupported mandatory option: %v", option) + } + } + } + + // If not checking updated then limit to current size. This means if + // file is being extended, readers will read o.Size() bytes rather + // than the new size, making for a consistent upload. + if limit < 0 && o.fs.opt.NoCheckUpdated { + limit = o.size + } + + // Handle a translated link + if o.translatedLink { + return o.openTranslatedLink(offset, limit) + } + + fd, err := file.Open(o.path) + if err != nil { + return + } + wrappedFd := readers.NewLimitedReadCloser(newFadviseReadCloser(o, fd, offset, limit), limit) + if offset != 0 { + // seek the object + _, err = fd.Seek(offset, io.SeekStart) + // don't attempt to make checksums + return wrappedFd, err + } + if hasher == nil { + // no need to wrap since we don't need checksums + return wrappedFd, nil + } + // Update the hashes as we go along + in = &localOpenFile{ + o: o, + in: wrappedFd, + hash: hasher, + fd: fd, + } + return in, nil +} + +// mkdirAll makes all the directories needed to store the object +func (o *Object) mkdirAll() error { + dir := filepath.Dir(o.path) + return os.MkdirAll(dir, 0777) +} + +type nopWriterCloser struct { + *bytes.Buffer +} + +func (nwc nopWriterCloser) Close() error { + // noop + return nil +} + +// Update the object from in with modTime and size +func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + var out io.WriteCloser + var hasher *hash.MultiHasher + + for _, option := range options { + switch x := option.(type) { + case *fs.HashesOption: + if x.Hashes.Count() > 0 { + hasher, err = hash.NewMultiHasherTypes(x.Hashes) + if err != nil { + return err + } + } + } + } + + err = o.mkdirAll() + if err != nil { + return err + } + + var symlinkData bytes.Buffer + // If the object is a regular file, create it.
+ // If it is a translated link, just read in the contents, and + // then create a symlink + if !o.translatedLink { + f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + if runtime.GOOS == "windows" && os.IsPermission(err) { + // If permission denied on Windows might be trying to update a + // hidden file, in which case try opening without CREATE + // See: https://stackoverflow.com/questions/13215716/ioerror-errno-13-permission-denied-when-trying-to-open-hidden-file-in-w-mod + f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + return err + } + } else { + return err + } + } + // Pre-allocate the file for performance reasons + err = file.PreAllocate(src.Size(), f) + if err != nil { + fs.Debugf(o, "Failed to pre-allocate: %v", err) + } + out = f + } else { + out = nopWriterCloser{&symlinkData} + } + + // Calculate the hash of the object we are reading as we go along + if hasher != nil { + in = io.TeeReader(in, hasher) + } + + _, err = io.Copy(out, in) + closeErr := out.Close() + if err == nil { + err = closeErr + } + + if o.translatedLink { + if err == nil { + // Remove any current symlink or file, if one exists + if _, err := os.Lstat(o.path); err == nil { + if removeErr := os.Remove(o.path); removeErr != nil { + fs.Errorf(o, "Failed to remove previous file: %v", removeErr) + return removeErr + } + } + // Use the contents for the copied object to create a symlink + err = os.Symlink(symlinkData.String(), o.path) + } + + // only continue if symlink creation succeeded + if err != nil { + return err + } + } + + if err != nil { + fs.Logf(o, "Removing partially written file on error: %v", err) + if removeErr := os.Remove(o.path); removeErr != nil { + fs.Errorf(o, "Failed to remove partially written file: %v", removeErr) + } + return err + } + + // All successful so update the hashes + if hasher != nil { + o.fs.objectMetaMu.Lock() + o.hashes = hasher.Sums() + o.fs.objectMetaMu.Unlock() + } + + // Set the mtime + err = o.SetModTime(ctx, src.ModTime(ctx)) + if err != nil { + return err + } + + // ReRead info now that we have finished + return o.lstat() +} + +var sparseWarning sync.Once + +// OpenWriterAt opens with a handle for random access writes +// +// Pass in the remote desired and the size if known. 
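+//
+// A caller-side sketch (editor's illustration; buf, off and size are assumed
+// values, not defined in this diff hunk):
+//
+//	w, err := f.OpenWriterAt(ctx, "file.bin", size)
+//	if err == nil {
+//		_, _ = w.WriteAt(buf, off) // concurrent chunk writes are the intended use
+//		_ = w.Close()
+//	}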
+// +// It truncates any existing object +func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.WriterAtCloser, error) { + // Temporary Object under construction + o := f.newObject(remote) + + err := o.mkdirAll() + if err != nil { + return nil, err + } + + if o.translatedLink { + return nil, errors.New("can't open a symlink for random writing") + } + + out, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + // Pre-allocate the file for performance reasons + err = file.PreAllocate(size, out) + if err != nil { + fs.Debugf(o, "Failed to pre-allocate: %v", err) + } + if !f.opt.NoSparse && file.SetSparseImplemented { + sparseWarning.Do(func() { + fs.Infof(nil, "Writing sparse files: use --local-no-sparse or --multi-thread-streams 0 to disable") + }) + // Set the file to be a sparse file (important on Windows) + err = file.SetSparse(out) + if err != nil { + fs.Errorf(o, "Failed to set sparse: %v", err) + } + } + + return out, nil +} + +// setMetadata sets the file info from the os.FileInfo passed in +func (o *Object) setMetadata(info os.FileInfo) { + // if not checking updated then don't update the stat + if o.fs.opt.NoCheckUpdated && !o.modTime.IsZero() { + return + } + o.fs.objectMetaMu.Lock() + o.size = info.Size() + o.modTime = info.ModTime() + o.mode = info.Mode() + o.fs.objectMetaMu.Unlock() + // On Windows links read as 0 size so set the correct size here + // Optionally, users can turn this feature on with the zero_size_links flag + if (runtime.GOOS == "windows" || o.fs.opt.ZeroSizeLinks) && o.translatedLink { + linkdst, err := os.Readlink(o.path) + if err != nil { + fs.Errorf(o, "Failed to read link size: %v", err) + } else { + o.size = int64(len(linkdst)) + } + } +} + +// Stat an Object into info +func (o *Object) lstat() error { + info, err := o.fs.lstat(o.path) + if err == nil { + o.setMetadata(info) + } + return err +} + +// Remove an object +func (o *Object) Remove(ctx context.Context) error { + return remove(o.path) +} + +func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string { + if runtime.GOOS == "windows" { + if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") { + s2, err := filepath.Abs(s) + if err == nil { + s = s2 + } + } + s = filepath.ToSlash(s) + vol := filepath.VolumeName(s) + s = vol + enc.FromStandardPath(s[len(vol):]) + s = filepath.FromSlash(s) + + if !noUNC { + // Convert to UNC + s = file.UNCPath(s) + } + return s + } + if !filepath.IsAbs(s) { + s2, err := filepath.Abs(s) + if err == nil { + s = s2 + } + } + s = enc.FromStandardPath(s) + return s +} + +// Check the interfaces are satisfied +var ( + _ fs.Fs = &Fs{} + _ fs.Purger = &Fs{} + _ fs.PutStreamer = &Fs{} + _ fs.Mover = &Fs{} + _ fs.DirMover = &Fs{} + _ fs.Commander = &Fs{} + _ fs.OpenWriterAter = &Fs{} + _ fs.Object = &Object{} +) diff --git a/vendor/github.com/rclone/rclone/backend/local/read_device_other.go b/vendor/github.com/rclone/rclone/backend/local/read_device_other.go new file mode 100644 index 00000000000..c3fc4f40869 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/read_device_other.go @@ -0,0 +1,13 @@ +// Device reading functions + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package local + +import "os" + +// readDevice turns a valid os.FileInfo into a device number, +// returning devUnset if it fails. 
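+//
+// Editor's note: on the build tags above os.FileInfo carries no portable
+// device id, so this stub always reports devUnset and the one_file_system
+// check in List degenerates to comparing devUnset with devUnset.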
+func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 { + return devUnset +} diff --git a/vendor/github.com/rclone/rclone/backend/local/read_device_unix.go b/vendor/github.com/rclone/rclone/backend/local/read_device_unix.go new file mode 100644 index 00000000000..7a1b91b24ef --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/read_device_unix.go @@ -0,0 +1,26 @@ +// Device reading functions + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package local + +import ( + "os" + "syscall" + + "github.com/rclone/rclone/fs" +) + +// readDevice turns a valid os.FileInfo into a device number, +// returning devUnset if it fails. +func readDevice(fi os.FileInfo, oneFileSystem bool) uint64 { + if !oneFileSystem { + return devUnset + } + statT, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + fs.Debugf(fi.Name(), "Type assertion fi.Sys().(*syscall.Stat_t) failed from: %#v", fi.Sys()) + return devUnset + } + return uint64(statT.Dev) // nolint: unconvert +} diff --git a/vendor/github.com/rclone/rclone/backend/local/remove_other.go b/vendor/github.com/rclone/rclone/backend/local/remove_other.go new file mode 100644 index 00000000000..760e2cf3be5 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/remove_other.go @@ -0,0 +1,10 @@ +//+build !windows + +package local + +import "os" + +// Removes name; sharing-violation retries are only needed on Windows +func remove(name string) error { + return os.Remove(name) +} diff --git a/vendor/github.com/rclone/rclone/backend/local/remove_windows.go b/vendor/github.com/rclone/rclone/backend/local/remove_windows.go new file mode 100644 index 00000000000..65acb542804 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/remove_windows.go @@ -0,0 +1,38 @@ +//+build windows + +package local + +import ( + "os" + "syscall" + "time" + + "github.com/rclone/rclone/fs" +) + +const ( + ERROR_SHARING_VIOLATION syscall.Errno = 32 +) + +// Removes name, retrying on a sharing violation +func remove(name string) (err error) { + const maxTries = 10 + var sleepTime = 1 * time.Millisecond + for i := 0; i < maxTries; i++ { + err = os.Remove(name) + if err == nil { + break + } + pathErr, ok := err.(*os.PathError) + if !ok { + break + } + if pathErr.Err != ERROR_SHARING_VIOLATION { + break + } + fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime) + time.Sleep(sleepTime) + sleepTime <<= 1 + } + return err +} diff --git a/vendor/github.com/rclone/rclone/backend/local/symlink.go b/vendor/github.com/rclone/rclone/backend/local/symlink.go new file mode 100644 index 00000000000..cf49c728204 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/symlink.go @@ -0,0 +1,22 @@ +// +build !windows,!plan9,!js + +package local + +import ( + "os" + "syscall" +) + +// isCircularSymlinkError checks if the current error code is because of a circular symlink +func isCircularSymlinkError(err error) bool { + if err != nil { + if newerr, ok := err.(*os.PathError); ok { + if errcode, ok := newerr.Err.(syscall.Errno); ok { + if errcode == syscall.ELOOP { + return true + } + } + } + } + return false +} diff --git a/vendor/github.com/rclone/rclone/backend/local/symlink_other.go b/vendor/github.com/rclone/rclone/backend/local/symlink_other.go new file mode 100644 index 00000000000..36e01276805 --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/local/symlink_other.go @@ -0,0 +1,17 @@ +// +build windows plan9 js + +package local + +import ( + "strings" +) + +// isCircularSymlinkError checks if the current
error code is because of a circular symlink +func isCircularSymlinkError(err error) bool { + if err != nil { + if strings.Contains(err.Error(), "The name of the file cannot be resolved by the system") { + return true + } + } + return false +} diff --git a/vendor/github.com/rclone/rclone/backend/s3/s3.go b/vendor/github.com/rclone/rclone/backend/s3/s3.go new file mode 100644 index 00000000000..d51c6ee7e6f --- /dev/null +++ b/vendor/github.com/rclone/rclone/backend/s3/s3.go @@ -0,0 +1,3423 @@ +// Package s3 provides an interface to Amazon S3 object storage +package s3 + +import ( + "bytes" + "context" + "crypto/md5" + "crypto/tls" + "encoding/base64" + "encoding/hex" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "path" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/ncw/swift" + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/operations" + "github.com/rclone/rclone/fs/walk" + "github.com/rclone/rclone/lib/atexit" + "github.com/rclone/rclone/lib/bucket" + "github.com/rclone/rclone/lib/encoder" + "github.com/rclone/rclone/lib/pacer" + "github.com/rclone/rclone/lib/pool" + "github.com/rclone/rclone/lib/readers" + "github.com/rclone/rclone/lib/rest" + "github.com/rclone/rclone/lib/structs" + "golang.org/x/sync/errgroup" +) + +// Register with Fs +func init() { + fs.Register(&fs.RegInfo{ + Name: "s3", + Description: "Amazon S3 Compliant Storage Providers including AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, and Tencent COS", + NewFs: NewFs, + CommandHelp: commandHelp, + Options: []fs.Option{{ + Name: fs.ConfigProvider, + Help: "Choose your S3 provider.", + Examples: []fs.OptionExample{{ + Value: "AWS", + Help: "Amazon Web Services (AWS) S3", + }, { + Value: "Alibaba", + Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun", + }, { + Value: "Ceph", + Help: "Ceph Object Storage", + }, { + Value: "DigitalOcean", + Help: "Digital Ocean Spaces", + }, { + Value: "Dreamhost", + Help: "Dreamhost DreamObjects", + }, { + Value: "IBMCOS", + Help: "IBM COS S3", + }, { + Value: "Minio", + Help: "Minio Object Storage", + }, { + Value: "Netease", + Help: "Netease Object Storage (NOS)", + }, { + Value: "Scaleway", + Help: "Scaleway Object Storage", + }, { + Value: "StackPath", + Help: "StackPath Object Storage", + }, { + Value: "TencentCOS", + Help: "Tencent Cloud Object Storage (COS)", + }, { + Value: "Wasabi", + Help: "Wasabi Object Storage", + }, { + Value: "Other", + Help: "Any other S3 compatible provider", + }}, + }, { + Name: "env_auth", + Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key are blank.", + Default: 
false, + Examples: []fs.OptionExample{{ + Value: "false", + Help: "Enter AWS credentials in the next step", + }, { + Value: "true", + Help: "Get AWS credentials from the environment (env vars or IAM)", + }}, + }, { + Name: "access_key_id", + Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.", + }, { + Name: "secret_access_key", + Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.", + }, { + // References: + // 1. https://docs.aws.amazon.com/general/latest/gr/rande.html + // 2. https://docs.aws.amazon.com/general/latest/gr/s3.html + Name: "region", + Help: "Region to connect to.", + Provider: "AWS", + Examples: []fs.OptionExample{{ + Value: "us-east-1", + Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia, or Pacific Northwest.\nLeave location constraint empty.", + }, { + Value: "us-east-2", + Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.", + }, { + Value: "us-west-1", + Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.", + }, { + Value: "us-west-2", + Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.", + }, { + Value: "ca-central-1", + Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.", + }, { + Value: "eu-west-1", + Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.", + }, { + Value: "eu-west-2", + Help: "EU (London) Region\nNeeds location constraint eu-west-2.", + }, { + Value: "eu-west-3", + Help: "EU (Paris) Region\nNeeds location constraint eu-west-3.", + }, { + Value: "eu-north-1", + Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.", + }, { + Value: "eu-south-1", + Help: "EU (Milan) Region\nNeeds location constraint eu-south-1.", + }, { + Value: "eu-central-1", + Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.", + }, { + Value: "ap-southeast-1", + Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.", + }, { + Value: "ap-southeast-2", + Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.", + }, { + Value: "ap-northeast-1", + Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.", + }, { + Value: "ap-northeast-2", + Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.", + }, { + Value: "ap-northeast-3", + Help: "Asia Pacific (Osaka-Local)\nNeeds location constraint ap-northeast-3.", + }, { + Value: "ap-south-1", + Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.", + }, { + Value: "ap-east-1", + Help: "Asia Pacific (Hong Kong) Region\nNeeds location constraint ap-east-1.", + }, { + Value: "sa-east-1", + Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.", + }, { + Value: "me-south-1", + Help: "Middle East (Bahrain) Region\nNeeds location constraint me-south-1.", + }, { + Value: "af-south-1", + Help: "Africa (Cape Town) Region\nNeeds location constraint af-south-1.", + }, { + Value: "cn-north-1", + Help: "China (Beijing) Region\nNeeds location constraint cn-north-1.", + }, { + Value: "cn-northwest-1", + Help: "China (Ningxia) Region\nNeeds location constraint cn-northwest-1.", + }, { + Value: "us-gov-east-1", + Help: "AWS GovCloud (US-East) Region\nNeeds location constraint us-gov-east-1.", + }, { + Value: "us-gov-west-1", + Help: "AWS GovCloud (US) Region\nNeeds location constraint us-gov-west-1.", + }}, + }, { + Name: "region", + Help: "Region to connect to.", + 
Provider: "Scaleway", + Examples: []fs.OptionExample{{ + Value: "nl-ams", + Help: "Amsterdam, The Netherlands", + }, { + Value: "fr-par", + Help: "Paris, France", + }}, + }, { + Name: "region", + Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.", + Provider: "!AWS,Alibaba,Scaleway,TencentCOS", + Examples: []fs.OptionExample{{ + Value: "", + Help: "Use this if unsure. Will use v4 signatures and an empty region.", + }, { + Value: "other-v2-signature", + Help: "Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.", + }}, + }, { + Name: "endpoint", + Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.", + Provider: "AWS", + }, { + Name: "endpoint", + Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.", + Provider: "IBMCOS", + Examples: []fs.OptionExample{{ + Value: "s3.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region Endpoint", + }, { + Value: "s3.dal.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region Dallas Endpoint", + }, { + Value: "s3.wdc.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region Washington DC Endpoint", + }, { + Value: "s3.sjc.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region San Jose Endpoint", + }, { + Value: "s3.private.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region Private Endpoint", + }, { + Value: "s3.private.dal.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region Dallas Private Endpoint", + }, { + Value: "s3.private.wdc.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region Washington DC Private Endpoint", + }, { + Value: "s3.private.sjc.us.cloud-object-storage.appdomain.cloud", + Help: "US Cross Region San Jose Private Endpoint", + }, { + Value: "s3.us-east.cloud-object-storage.appdomain.cloud", + Help: "US Region East Endpoint", + }, { + Value: "s3.private.us-east.cloud-object-storage.appdomain.cloud", + Help: "US Region East Private Endpoint", + }, { + Value: "s3.us-south.cloud-object-storage.appdomain.cloud", + Help: "US Region South Endpoint", + }, { + Value: "s3.private.us-south.cloud-object-storage.appdomain.cloud", + Help: "US Region South Private Endpoint", + }, { + Value: "s3.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Endpoint", + }, { + Value: "s3.fra.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Frankfurt Endpoint", + }, { + Value: "s3.mil.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Milan Endpoint", + }, { + Value: "s3.ams.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Amsterdam Endpoint", + }, { + Value: "s3.private.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Private Endpoint", + }, { + Value: "s3.private.fra.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Frankfurt Private Endpoint", + }, { + Value: "s3.private.mil.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Milan Private Endpoint", + }, { + Value: "s3.private.ams.eu.cloud-object-storage.appdomain.cloud", + Help: "EU Cross Region Amsterdam Private Endpoint", + }, { + Value: "s3.eu-gb.cloud-object-storage.appdomain.cloud", + Help: "Great Britain Endpoint", + }, { + Value: "s3.private.eu-gb.cloud-object-storage.appdomain.cloud", + Help: "Great Britain Private Endpoint", + }, { + Value: "s3.eu-de.cloud-object-storage.appdomain.cloud", + Help: "EU Region DE Endpoint", + }, { + Value: 
"s3.private.eu-de.cloud-object-storage.appdomain.cloud", + Help: "EU Region DE Private Endpoint", + }, { + Value: "s3.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional Endpoint", + }, { + Value: "s3.tok.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional Tokyo Endpoint", + }, { + Value: "s3.hkg.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional HongKong Endpoint", + }, { + Value: "s3.seo.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional Seoul Endpoint", + }, { + Value: "s3.private.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional Private Endpoint", + }, { + Value: "s3.private.tok.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional Tokyo Private Endpoint", + }, { + Value: "s3.private.hkg.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional HongKong Private Endpoint", + }, { + Value: "s3.private.seo.ap.cloud-object-storage.appdomain.cloud", + Help: "APAC Cross Regional Seoul Private Endpoint", + }, { + Value: "s3.jp-tok.cloud-object-storage.appdomain.cloud", + Help: "APAC Region Japan Endpoint", + }, { + Value: "s3.private.jp-tok.cloud-object-storage.appdomain.cloud", + Help: "APAC Region Japan Private Endpoint", + }, { + Value: "s3.au-syd.cloud-object-storage.appdomain.cloud", + Help: "APAC Region Australia Endpoint", + }, { + Value: "s3.private.au-syd.cloud-object-storage.appdomain.cloud", + Help: "APAC Region Australia Private Endpoint", + }, { + Value: "s3.ams03.cloud-object-storage.appdomain.cloud", + Help: "Amsterdam Single Site Endpoint", + }, { + Value: "s3.private.ams03.cloud-object-storage.appdomain.cloud", + Help: "Amsterdam Single Site Private Endpoint", + }, { + Value: "s3.che01.cloud-object-storage.appdomain.cloud", + Help: "Chennai Single Site Endpoint", + }, { + Value: "s3.private.che01.cloud-object-storage.appdomain.cloud", + Help: "Chennai Single Site Private Endpoint", + }, { + Value: "s3.mel01.cloud-object-storage.appdomain.cloud", + Help: "Melbourne Single Site Endpoint", + }, { + Value: "s3.private.mel01.cloud-object-storage.appdomain.cloud", + Help: "Melbourne Single Site Private Endpoint", + }, { + Value: "s3.osl01.cloud-object-storage.appdomain.cloud", + Help: "Oslo Single Site Endpoint", + }, { + Value: "s3.private.osl01.cloud-object-storage.appdomain.cloud", + Help: "Oslo Single Site Private Endpoint", + }, { + Value: "s3.tor01.cloud-object-storage.appdomain.cloud", + Help: "Toronto Single Site Endpoint", + }, { + Value: "s3.private.tor01.cloud-object-storage.appdomain.cloud", + Help: "Toronto Single Site Private Endpoint", + }, { + Value: "s3.seo01.cloud-object-storage.appdomain.cloud", + Help: "Seoul Single Site Endpoint", + }, { + Value: "s3.private.seo01.cloud-object-storage.appdomain.cloud", + Help: "Seoul Single Site Private Endpoint", + }, { + Value: "s3.mon01.cloud-object-storage.appdomain.cloud", + Help: "Montreal Single Site Endpoint", + }, { + Value: "s3.private.mon01.cloud-object-storage.appdomain.cloud", + Help: "Montreal Single Site Private Endpoint", + }, { + Value: "s3.mex01.cloud-object-storage.appdomain.cloud", + Help: "Mexico Single Site Endpoint", + }, { + Value: "s3.private.mex01.cloud-object-storage.appdomain.cloud", + Help: "Mexico Single Site Private Endpoint", + }, { + Value: "s3.sjc04.cloud-object-storage.appdomain.cloud", + Help: "San Jose Single Site Endpoint", + }, { + Value: "s3.private.sjc04.cloud-object-storage.appdomain.cloud", + Help: "San Jose Single Site Private Endpoint", + }, { + Value: 
"s3.mil01.cloud-object-storage.appdomain.cloud", + Help: "Milan Single Site Endpoint", + }, { + Value: "s3.private.mil01.cloud-object-storage.appdomain.cloud", + Help: "Milan Single Site Private Endpoint", + }, { + Value: "s3.hkg02.cloud-object-storage.appdomain.cloud", + Help: "Hong Kong Single Site Endpoint", + }, { + Value: "s3.private.hkg02.cloud-object-storage.appdomain.cloud", + Help: "Hong Kong Single Site Private Endpoint", + }, { + Value: "s3.par01.cloud-object-storage.appdomain.cloud", + Help: "Paris Single Site Endpoint", + }, { + Value: "s3.private.par01.cloud-object-storage.appdomain.cloud", + Help: "Paris Single Site Private Endpoint", + }, { + Value: "s3.sng01.cloud-object-storage.appdomain.cloud", + Help: "Singapore Single Site Endpoint", + }, { + Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud", + Help: "Singapore Single Site Private Endpoint", + }}, + }, { + // oss endpoints: https://help.aliyun.com/document_detail/31837.html + Name: "endpoint", + Help: "Endpoint for OSS API.", + Provider: "Alibaba", + Examples: []fs.OptionExample{{ + Value: "oss-cn-hangzhou.aliyuncs.com", + Help: "East China 1 (Hangzhou)", + }, { + Value: "oss-cn-shanghai.aliyuncs.com", + Help: "East China 2 (Shanghai)", + }, { + Value: "oss-cn-qingdao.aliyuncs.com", + Help: "North China 1 (Qingdao)", + }, { + Value: "oss-cn-beijing.aliyuncs.com", + Help: "North China 2 (Beijing)", + }, { + Value: "oss-cn-zhangjiakou.aliyuncs.com", + Help: "North China 3 (Zhangjiakou)", + }, { + Value: "oss-cn-huhehaote.aliyuncs.com", + Help: "North China 5 (Huhehaote)", + }, { + Value: "oss-cn-shenzhen.aliyuncs.com", + Help: "South China 1 (Shenzhen)", + }, { + Value: "oss-cn-hongkong.aliyuncs.com", + Help: "Hong Kong (Hong Kong)", + }, { + Value: "oss-us-west-1.aliyuncs.com", + Help: "US West 1 (Silicon Valley)", + }, { + Value: "oss-us-east-1.aliyuncs.com", + Help: "US East 1 (Virginia)", + }, { + Value: "oss-ap-southeast-1.aliyuncs.com", + Help: "Southeast Asia Southeast 1 (Singapore)", + }, { + Value: "oss-ap-southeast-2.aliyuncs.com", + Help: "Asia Pacific Southeast 2 (Sydney)", + }, { + Value: "oss-ap-southeast-3.aliyuncs.com", + Help: "Southeast Asia Southeast 3 (Kuala Lumpur)", + }, { + Value: "oss-ap-southeast-5.aliyuncs.com", + Help: "Asia Pacific Southeast 5 (Jakarta)", + }, { + Value: "oss-ap-northeast-1.aliyuncs.com", + Help: "Asia Pacific Northeast 1 (Japan)", + }, { + Value: "oss-ap-south-1.aliyuncs.com", + Help: "Asia Pacific South 1 (Mumbai)", + }, { + Value: "oss-eu-central-1.aliyuncs.com", + Help: "Central Europe 1 (Frankfurt)", + }, { + Value: "oss-eu-west-1.aliyuncs.com", + Help: "West Europe (London)", + }, { + Value: "oss-me-east-1.aliyuncs.com", + Help: "Middle East 1 (Dubai)", + }}, + }, { + Name: "endpoint", + Help: "Endpoint for Scaleway Object Storage.", + Provider: "Scaleway", + Examples: []fs.OptionExample{{ + Value: "s3.nl-ams.scw.cloud", + Help: "Amsterdam Endpoint", + }, { + Value: "s3.fr-par.scw.cloud", + Help: "Paris Endpoint", + }}, + }, { + Name: "endpoint", + Help: "Endpoint for StackPath Object Storage.", + Provider: "StackPath", + Examples: []fs.OptionExample{{ + Value: "s3.us-east-2.stackpathstorage.com", + Help: "US East Endpoint", + }, { + Value: "s3.us-west-1.stackpathstorage.com", + Help: "US West Endpoint", + }, { + Value: "s3.eu-central-1.stackpathstorage.com", + Help: "EU Endpoint", + }}, + }, { + // cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224 + Name: "endpoint", + Help: "Endpoint for Tencent COS API.", + Provider: "TencentCOS", + 
Examples: []fs.OptionExample{{ + Value: "cos.ap-beijing.myqcloud.com", + Help: "Beijing Region.", + }, { + Value: "cos.ap-nanjing.myqcloud.com", + Help: "Nanjing Region.", + }, { + Value: "cos.ap-shanghai.myqcloud.com", + Help: "Shanghai Region.", + }, { + Value: "cos.ap-guangzhou.myqcloud.com", + Help: "Guangzhou Region.", + }, { + Value: "cos.ap-nanjing.myqcloud.com", + Help: "Nanjing Region.", + }, { + Value: "cos.ap-chengdu.myqcloud.com", + Help: "Chengdu Region.", + }, { + Value: "cos.ap-chongqing.myqcloud.com", + Help: "Chongqing Region.", + }, { + Value: "cos.ap-hongkong.myqcloud.com", + Help: "Hong Kong (China) Region.", + }, { + Value: "cos.ap-singapore.myqcloud.com", + Help: "Singapore Region.", + }, { + Value: "cos.ap-mumbai.myqcloud.com", + Help: "Mumbai Region.", + }, { + Value: "cos.ap-seoul.myqcloud.com", + Help: "Seoul Region.", + }, { + Value: "cos.ap-bangkok.myqcloud.com", + Help: "Bangkok Region.", + }, { + Value: "cos.ap-tokyo.myqcloud.com", + Help: "Tokyo Region.", + }, { + Value: "cos.na-siliconvalley.myqcloud.com", + Help: "Silicon Valley Region.", + }, { + Value: "cos.na-ashburn.myqcloud.com", + Help: "Virginia Region.", + }, { + Value: "cos.na-toronto.myqcloud.com", + Help: "Toronto Region.", + }, { + Value: "cos.eu-frankfurt.myqcloud.com", + Help: "Frankfurt Region.", + }, { + Value: "cos.eu-moscow.myqcloud.com", + Help: "Moscow Region.", + }, { + Value: "cos.accelerate.myqcloud.com", + Help: "Use Tencent COS Accelerate Endpoint.", + }}, + }, { + Name: "endpoint", + Help: "Endpoint for S3 API.\nRequired when using an S3 clone.", + Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath", + Examples: []fs.OptionExample{{ + Value: "objects-us-east-1.dream.io", + Help: "Dream Objects endpoint", + Provider: "Dreamhost", + }, { + Value: "nyc3.digitaloceanspaces.com", + Help: "Digital Ocean Spaces New York 3", + Provider: "DigitalOcean", + }, { + Value: "ams3.digitaloceanspaces.com", + Help: "Digital Ocean Spaces Amsterdam 3", + Provider: "DigitalOcean", + }, { + Value: "sgp1.digitaloceanspaces.com", + Help: "Digital Ocean Spaces Singapore 1", + Provider: "DigitalOcean", + }, { + Value: "s3.wasabisys.com", + Help: "Wasabi US East endpoint", + Provider: "Wasabi", + }, { + Value: "s3.us-west-1.wasabisys.com", + Help: "Wasabi US West endpoint", + Provider: "Wasabi", + }, { + Value: "s3.eu-central-1.wasabisys.com", + Help: "Wasabi EU Central endpoint", + Provider: "Wasabi", + }}, + }, { + Name: "location_constraint", + Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.", + Provider: "AWS", + Examples: []fs.OptionExample{{ + Value: "", + Help: "Empty for US Region, Northern Virginia, or Pacific Northwest.", + }, { + Value: "us-east-2", + Help: "US East (Ohio) Region.", + }, { + Value: "us-west-1", + Help: "US West (Northern California) Region.", + }, { + Value: "us-west-2", + Help: "US West (Oregon) Region.", + }, { + Value: "ca-central-1", + Help: "Canada (Central) Region.", + }, { + Value: "eu-west-1", + Help: "EU (Ireland) Region.", + }, { + Value: "eu-west-2", + Help: "EU (London) Region.", + }, { + Value: "eu-west-3", + Help: "EU (Paris) Region.", + }, { + Value: "eu-north-1", + Help: "EU (Stockholm) Region.", + }, { + Value: "eu-south-1", + Help: "EU (Milan) Region.", + }, { + Value: "EU", + Help: "EU Region.", + }, { + Value: "ap-southeast-1", + Help: "Asia Pacific (Singapore) Region.", + }, { + Value: "ap-southeast-2", + Help: "Asia Pacific (Sydney) Region.", + }, { + Value: "ap-northeast-1", + Help: "Asia Pacific 
(Tokyo) Region.", + }, { + Value: "ap-northeast-2", + Help: "Asia Pacific (Seoul) Region.", + }, { + Value: "ap-northeast-3", + Help: "Asia Pacific (Osaka-Local) Region.", + }, { + Value: "ap-south-1", + Help: "Asia Pacific (Mumbai) Region.", + }, { + Value: "ap-east-1", + Help: "Asia Pacific (Hong Kong) Region.", + }, { + Value: "sa-east-1", + Help: "South America (Sao Paulo) Region.", + }, { + Value: "me-south-1", + Help: "Middle East (Bahrain) Region.", + }, { + Value: "af-south-1", + Help: "Africa (Cape Town) Region.", + }, { + Value: "cn-north-1", + Help: "China (Beijing) Region.", + }, { + Value: "cn-northwest-1", + Help: "China (Ningxia) Region.", + }, { + Value: "us-gov-east-1", + Help: "AWS GovCloud (US-East) Region.", + }, { + Value: "us-gov-west-1", + Help: "AWS GovCloud (US) Region.", + }}, + }, { + Name: "location_constraint", + Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list; hit enter.", + Provider: "IBMCOS", + Examples: []fs.OptionExample{{ + Value: "us-standard", + Help: "US Cross Region Standard", + }, { + Value: "us-vault", + Help: "US Cross Region Vault", + }, { + Value: "us-cold", + Help: "US Cross Region Cold", + }, { + Value: "us-flex", + Help: "US Cross Region Flex", + }, { + Value: "us-east-standard", + Help: "US East Region Standard", + }, { + Value: "us-east-vault", + Help: "US East Region Vault", + }, { + Value: "us-east-cold", + Help: "US East Region Cold", + }, { + Value: "us-east-flex", + Help: "US East Region Flex", + }, { + Value: "us-south-standard", + Help: "US South Region Standard", + }, { + Value: "us-south-vault", + Help: "US South Region Vault", + }, { + Value: "us-south-cold", + Help: "US South Region Cold", + }, { + Value: "us-south-flex", + Help: "US South Region Flex", + }, { + Value: "eu-standard", + Help: "EU Cross Region Standard", + }, { + Value: "eu-vault", + Help: "EU Cross Region Vault", + }, { + Value: "eu-cold", + Help: "EU Cross Region Cold", + }, { + Value: "eu-flex", + Help: "EU Cross Region Flex", + }, { + Value: "eu-gb-standard", + Help: "Great Britain Standard", + }, { + Value: "eu-gb-vault", + Help: "Great Britain Vault", + }, { + Value: "eu-gb-cold", + Help: "Great Britain Cold", + }, { + Value: "eu-gb-flex", + Help: "Great Britain Flex", + }, { + Value: "ap-standard", + Help: "APAC Standard", + }, { + Value: "ap-vault", + Help: "APAC Vault", + }, { + Value: "ap-cold", + Help: "APAC Cold", + }, { + Value: "ap-flex", + Help: "APAC Flex", + }, { + Value: "mel01-standard", + Help: "Melbourne Standard", + }, { + Value: "mel01-vault", + Help: "Melbourne Vault", + }, { + Value: "mel01-cold", + Help: "Melbourne Cold", + }, { + Value: "mel01-flex", + Help: "Melbourne Flex", + }, { + Value: "tor01-standard", + Help: "Toronto Standard", + }, { + Value: "tor01-vault", + Help: "Toronto Vault", + }, { + Value: "tor01-cold", + Help: "Toronto Cold", + }, { + Value: "tor01-flex", + Help: "Toronto Flex", + }}, + }, { + Name: "location_constraint", + Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.", + Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS", + }, { + Name: "acl", + Help: `Canned ACL used when creating buckets and storing or copying objects. + +This ACL is used for creating objects and, if bucket_acl isn't set, for creating buckets too.
+ +For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + +Note that this ACL is applied when server-side copying objects as S3 +doesn't copy the ACL from the source but rather writes a fresh one.`, + Examples: []fs.OptionExample{{ + Value: "default", + Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", + Provider: "TencentCOS", + }, { + Value: "private", + Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", + Provider: "!IBMCOS,TencentCOS", + }, { + Value: "public-read", + Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.", + Provider: "!IBMCOS", + }, { + Value: "public-read-write", + Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.", + Provider: "!IBMCOS", + }, { + Value: "authenticated-read", + Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.", + Provider: "!IBMCOS", + }, { + Value: "bucket-owner-read", + Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.", + Provider: "!IBMCOS", + }, { + Value: "bucket-owner-full-control", + Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.", + Provider: "!IBMCOS", + }, { + Value: "private", + Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS", + Provider: "IBMCOS", + }, { + Value: "public-read", + Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS", + Provider: "IBMCOS", + }, { + Value: "public-read-write", + Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS", + Provider: "IBMCOS", + }, { + Value: "authenticated-read", + Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS", + Provider: "IBMCOS", + }}, + }, { + Name: "bucket_acl", + Help: `Canned ACL used when creating buckets. + +For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + +Note that this ACL is applied only when creating buckets. If it +isn't set then "acl" is used instead.`, + Advanced: true, + Examples: []fs.OptionExample{{ + Value: "private", + Help: "Owner gets FULL_CONTROL. No one else has access rights (default).", + }, { + Value: "public-read", + Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.", + }, { + Value: "public-read-write", + Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.", + }, { + Value: "authenticated-read", + Help: "Owner gets FULL_CONTROL.
The AuthenticatedUsers group gets READ access.", + }}, + }, { + Name: "requester_pays", + Help: "Enables requester pays option when interacting with S3 bucket.", + Provider: "AWS", + Default: false, + Advanced: true, + }, { + Name: "server_side_encryption", + Help: "The server-side encryption algorithm used when storing this object in S3.", + Provider: "AWS,Ceph,Minio", + Examples: []fs.OptionExample{{ + Value: "", + Help: "None", + }, { + Value: "AES256", + Help: "AES256", + }, { + Value: "aws:kms", + Help: "aws:kms", + }}, + }, { + Name: "sse_customer_algorithm", + Help: "If using SSE-C, the server-side encryption algorithm used when storing this object in S3.", + Provider: "AWS,Ceph,Minio", + Advanced: true, + Examples: []fs.OptionExample{{ + Value: "", + Help: "None", + }, { + Value: "AES256", + Help: "AES256", + }}, + }, { + Name: "sse_kms_key_id", + Help: "If using KMS ID you must provide the ARN of the Key.", + Provider: "AWS,Ceph,Minio", + Examples: []fs.OptionExample{{ + Value: "", + Help: "None", + }, { + Value: "arn:aws:kms:us-east-1:*", + Help: "arn:aws:kms:*", + }}, + }, { + Name: "sse_customer_key", + Help: "If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data.", + Provider: "AWS,Ceph,Minio", + Advanced: true, + Examples: []fs.OptionExample{{ + Value: "", + Help: "None", + }}, + }, { + Name: "sse_customer_key_md5", + Help: `If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + +If you leave it blank, this is calculated automatically from the sse_customer_key provided. +`, + Provider: "AWS,Ceph,Minio", + Advanced: true, + Examples: []fs.OptionExample{{ + Value: "", + Help: "None", + }}, + }, { + Name: "storage_class", + Help: "The storage class to use when storing new objects in S3.", + Provider: "AWS", + Examples: []fs.OptionExample{{ + Value: "", + Help: "Default", + }, { + Value: "STANDARD", + Help: "Standard storage class", + }, { + Value: "REDUCED_REDUNDANCY", + Help: "Reduced redundancy storage class", + }, { + Value: "STANDARD_IA", + Help: "Standard Infrequent Access storage class", + }, { + Value: "ONEZONE_IA", + Help: "One Zone Infrequent Access storage class", + }, { + Value: "GLACIER", + Help: "Glacier storage class", + }, { + Value: "DEEP_ARCHIVE", + Help: "Glacier Deep Archive storage class", + }, { + Value: "INTELLIGENT_TIERING", + Help: "Intelligent-Tiering storage class", + }}, + }, { + // Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm + Name: "storage_class", + Help: "The storage class to use when storing new objects in OSS.", + Provider: "Alibaba", + Examples: []fs.OptionExample{{ + Value: "", + Help: "Default", + }, { + Value: "STANDARD", + Help: "Standard storage class", + }, { + Value: "GLACIER", + Help: "Archive storage mode.", + }, { + Value: "STANDARD_IA", + Help: "Infrequent access storage mode.", + }}, + }, { + // Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925 + Name: "storage_class", + Help: "The storage class to use when storing new objects in Tencent COS.", + Provider: "TencentCOS", + Examples: []fs.OptionExample{{ + Value: "", + Help: "Default", + }, { + Value: "STANDARD", + Help: "Standard storage class", + }, { + Value: "ARCHIVE", + Help: "Archive storage mode.", + }, { + Value: "STANDARD_IA", + Help: "Infrequent access storage mode.", + }}, + }, { + // Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes + Name: "storage_class", + Help: "The storage class to use when storing
new objects in S3.", + Provider: "Scaleway", + Examples: []fs.OptionExample{{ + Value: "", + Help: "Default", + }, { + Value: "STANDARD", + Help: "The Standard class for any upload; suitable for on-demand content like streaming or CDN.", + }, { + Value: "GLACIER", + Help: "Archived storage; prices are lower, but it needs to be restored first to be accessed.", + }}, + }, { + Name: "upload_cutoff", + Help: `Cutoff for switching to chunked upload + +Any files larger than this will be uploaded in chunks of chunk_size. +The minimum is 0 and the maximum is 5GB.`, + Default: defaultUploadCutoff, + Advanced: true, + }, { + Name: "chunk_size", + Help: `Chunk size to use for uploading. + +When uploading files larger than upload_cutoff or files with unknown +size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google +photos or google docs) they will be uploaded as multipart uploads +using this chunk size. + +Note that "--s3-upload-concurrency" chunks of this size are buffered +in memory per transfer. + +If you are transferring large files over high-speed links and you have +enough memory, then increasing this will speed up the transfers. + +Rclone will automatically increase the chunk size when uploading a +large file of known size to stay below the 10,000 chunks limit. + +Files of unknown size are uploaded with the configured +chunk_size. Since the default chunk size is 5MB and there can be at +most 10,000 chunks, this means that by default the maximum size of +a file you can stream upload is 48GB. If you wish to stream upload +larger files then you will need to increase chunk_size.`, + Default: minChunkSize, + Advanced: true, + }, { + Name: "max_upload_parts", + Help: `Maximum number of parts in a multipart upload. + +This option defines the maximum number of multipart chunks to use +when doing a multipart upload. + +This can be useful if a service does not support the AWS S3 +specification of 10,000 chunks. + +Rclone will automatically increase the chunk size when uploading a +large file of a known size to stay below this number of chunks limit. +`, + Default: maxUploadParts, + Advanced: true, + }, { + Name: "copy_cutoff", + Help: `Cutoff for switching to multipart copy + +Any files larger than this that need to be server-side copied will be +copied in chunks of this size. + +The minimum is 0 and the maximum is 5GB.`, + Default: fs.SizeSuffix(maxSizeForCopy), + Advanced: true, + }, { + Name: "disable_checksum", + Help: `Don't store MD5 checksum with object metadata + +Normally rclone will calculate the MD5 checksum of the input before +uploading it so it can add it to metadata on the object. This is great +for data integrity checking but can cause long delays for large files +to start uploading.`, + Default: false, + Advanced: true, + }, { + Name: "shared_credentials_file", + Help: `Path to the shared credentials file + +If env_auth = true then rclone can use a shared credentials file. + +If this variable is empty rclone will look for the +"AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty +it will default to the current user's home directory. + + Linux/OSX: "$HOME/.aws/credentials" + Windows: "%USERPROFILE%\.aws\credentials" +`, + Advanced: true, + }, { + Name: "profile", + Help: `Profile to use in the shared credentials file + +If env_auth = true then rclone can use a shared credentials file. This +variable controls which profile is used in that file. 
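To make the chunk_size arithmetic in the help text above concrete, here is a minimal standalone sketch (not part of this patch) of the relationship between chunk size, the 10,000-part ceiling, and the largest file that can be streamed:

```go
package main

import "fmt"

const maxParts = 10000 // S3's limit on the number of parts in a multipart upload

// minChunkForStream returns the smallest chunk size in bytes that keeps a
// stream of streamSize bytes within maxParts parts (ceiling division).
func minChunkForStream(streamSize int64) int64 {
	return (streamSize + maxParts - 1) / maxParts
}

func main() {
	const mib int64 = 1024 * 1024
	// The default 5 MiB chunk caps streamed uploads at 10,000 * 5 MiB,
	// i.e. the ~48GB quoted in the help text above.
	fmt.Println("default cap in MiB:", 5*maxParts)
	// A 1 TiB stream of unknown size needs chunks of at least ~105 MiB.
	need := minChunkForStream(1 << 40)
	fmt.Printf("1 TiB stream needs chunks of at least %d bytes (~%d MiB)\n", need, (need+mib-1)/mib)
}
```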
+ +If empty it will default to the environment variable "AWS_PROFILE" or +"default" if that environment variable is also not set. +`, + Advanced: true, + }, { + Name: "session_token", + Help: "An AWS session token", + Advanced: true, + }, { + Name: "upload_concurrency", + Help: `Concurrency for multipart uploads. + +This is the number of chunks of the same file that are uploaded +concurrently. + +If you are uploading small numbers of large files over high-speed links +and these uploads do not fully utilize your bandwidth, then increasing +this may help to speed up the transfers.`, + Default: 4, + Advanced: true, + }, { + Name: "force_path_style", + Help: `If true use path style access, if false use virtual hosted style. + +If this is true (the default) then rclone will use path style access, +if false then rclone will use virtual path style. See [the AWS S3 +docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) +for more info. + +Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to +false - rclone will do this automatically based on the provider +setting.`, + Default: true, + Advanced: true, + }, { + Name: "v2_auth", + Help: `If true use v2 authentication. + +If this is false (the default) then rclone will use v4 authentication. +If it is set then rclone will use v2 authentication. + +Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH.`, + Default: false, + Advanced: true, + }, { + Name: "use_accelerate_endpoint", + Provider: "AWS", + Help: `If true use the AWS S3 accelerated endpoint. + +See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html)`, + Default: false, + Advanced: true, + }, { + Name: "leave_parts_on_error", + Provider: "AWS", + Help: `If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + +It should be set to true for resuming uploads across different sessions. + +WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up. +`, + Default: false, + Advanced: true, + }, { + Name: "list_chunk", + Help: `Size of listing chunk (response list for each ListObject S3 request). + +This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. +Most services truncate the response list to 1000 objects even if more than that is requested. +In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). +In Ceph, this can be increased with the "rgw list buckets max chunk" option. +`, + Default: 1000, + Advanced: true, + }, { + Name: "no_check_bucket", + Help: `If set, don't attempt to check the bucket exists or create it + +This can be useful when trying to minimise the number of transactions +rclone does if you know the bucket exists already. + +It can also be needed if the user you are using does not have bucket +creation permissions. Before v1.52.0 this would have passed silently +due to a bug. +`, + Default: false, + Advanced: true, + }, { + Name: "no_head", + Help: `If set, don't HEAD uploaded objects to check integrity + +This can be useful when trying to minimise the number of transactions +rclone does. + +Setting it means that if rclone receives a 200 OK message after +uploading an object with PUT then it will assume that it got uploaded +properly.
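For the force_path_style option above, the two addressing styles differ only in where the bucket name appears in the URL; a standalone sketch with made-up bucket and key names:

```go
package main

import "fmt"

// bucketURL shows where the bucket name goes under each addressing style.
func bucketURL(endpoint, bucket, key string, pathStyle bool) string {
	if pathStyle {
		// Path style: the bucket is the first path segment.
		return fmt.Sprintf("https://%s/%s/%s", endpoint, bucket, key)
	}
	// Virtual hosted style: the bucket becomes a subdomain.
	return fmt.Sprintf("https://%s.%s/%s", bucket, endpoint, key)
}

func main() {
	fmt.Println(bucketURL("s3.amazonaws.com", "mybucket", "path/file.txt", true))
	// https://s3.amazonaws.com/mybucket/path/file.txt
	fmt.Println(bucketURL("s3.amazonaws.com", "mybucket", "path/file.txt", false))
	// https://mybucket.s3.amazonaws.com/path/file.txt
}
```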
+ +In particular it will assume: + +- the metadata, including modtime, storage class and content type was as uploaded +- the size was as uploaded + +It reads the following items from the response for a single part PUT: + +- the MD5SUM +- The uploaded date + +For multipart uploads these items aren't read. + +If a source object of unknown length is uploaded then rclone **will** do a +HEAD request. + +Setting this flag increases the chance of undetected upload failures, +in particular an incorrect size, so it isn't recommended for normal +operation. In practice the chance of an undetected upload failure is +very small even with this flag. +`, + Default: false, + Advanced: true, + }, { + Name: config.ConfigEncoding, + Help: config.ConfigEncodingHelp, + Advanced: true, + // Any UTF-8 character is valid in a key, however it can't handle + // invalid UTF-8 and / have a special meaning. + // + // The SDK can't seem to handle uploading files called '.' + // + // FIXME would be nice to add + // - initial / encoding + // - doubled / encoding + // - trailing / encoding + // so that AWS keys are always valid file names + Default: encoder.EncodeInvalidUtf8 | + encoder.EncodeSlash | + encoder.EncodeDot, + }, { + Name: "memory_pool_flush_time", + Default: memoryPoolFlushTime, + Advanced: true, + Help: `How often internal memory buffer pools will be flushed. +Uploads which require additional buffers (e.g. multipart) will use the memory pool for allocations. +This option controls how often unused buffers will be removed from the pool.`, + }, { + Name: "memory_pool_use_mmap", + Default: memoryPoolUseMmap, + Advanced: true, + Help: `Whether to use mmap buffers in internal memory pool.`, + }, { + Name: "disable_http2", + Default: false, + Advanced: true, + Help: `Disable usage of http2 for S3 backends + +There is currently an unsolved issue with the s3 (specifically minio) backend +and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be +disabled here. When the issue is solved this flag will be removed. + +See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + +`, + }, + }}) +} + +// Constants +const ( + metaMtime = "Mtime" // the meta key to store mtime in - e.g. X-Amz-Meta-Mtime + metaMD5Hash = "Md5chksum" // the meta key to store md5hash in + // The maximum size of object we can COPY - this should be 5GiB but is < 5GB for b2 compatibility + // See https://forum.rclone.org/t/copying-files-within-a-b2-bucket/16680/76 + maxSizeForCopy = 4768 * 1024 * 1024 + maxUploadParts = 10000 // maximum allowed number of parts in a multi-part upload + minChunkSize = fs.SizeSuffix(1024 * 1024 * 5) + defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024) + maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024) + minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
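The metaMtime constant just defined names the user-metadata key (surfacing as X-Amz-Meta-Mtime). Elsewhere rclone stores the value as fractional Unix seconds, the same convention as the swift backend; a sketch of that round trip, assuming this format:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// formatMtime renders a modification time the way it is stored under the
// metaMtime key (assumed format: Unix seconds with a decimal fraction).
func formatMtime(t time.Time) string {
	return strconv.FormatFloat(float64(t.UnixNano())/1e9, 'f', -1, 64)
}

// parseMtime reverses formatMtime. Precision is limited by float64 to
// roughly a microsecond for present-day timestamps.
func parseMtime(s string) (time.Time, error) {
	f, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return time.Time{}, err
	}
	secs := int64(f)
	return time.Unix(secs, int64((f-float64(secs))*1e9)), nil
}

func main() {
	s := formatMtime(time.Date(2021, 4, 1, 12, 0, 0, 500000000, time.UTC))
	fmt.Println(s) // 1617278400.5
	t, _ := parseMtime(s)
	fmt.Println(t.UTC())
}
```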
+ + memoryPoolFlushTime = fs.Duration(time.Minute) // flush the cached buffers after this long + memoryPoolUseMmap = false + maxExpireDuration = fs.Duration(7 * 24 * time.Hour) // max expiry is 1 week +) + +// Options defines the configuration for this backend +type Options struct { + Provider string `config:"provider"` + EnvAuth bool `config:"env_auth"` + AccessKeyID string `config:"access_key_id"` + SecretAccessKey string `config:"secret_access_key"` + Region string `config:"region"` + Endpoint string `config:"endpoint"` + LocationConstraint string `config:"location_constraint"` + ACL string `config:"acl"` + BucketACL string `config:"bucket_acl"` + RequesterPays bool `config:"requester_pays"` + ServerSideEncryption string `config:"server_side_encryption"` + SSEKMSKeyID string `config:"sse_kms_key_id"` + SSECustomerAlgorithm string `config:"sse_customer_algorithm"` + SSECustomerKey string `config:"sse_customer_key"` + SSECustomerKeyMD5 string `config:"sse_customer_key_md5"` + StorageClass string `config:"storage_class"` + UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` + CopyCutoff fs.SizeSuffix `config:"copy_cutoff"` + ChunkSize fs.SizeSuffix `config:"chunk_size"` + MaxUploadParts int64 `config:"max_upload_parts"` + DisableChecksum bool `config:"disable_checksum"` + SharedCredentialsFile string `config:"shared_credentials_file"` + Profile string `config:"profile"` + SessionToken string `config:"session_token"` + UploadConcurrency int `config:"upload_concurrency"` + ForcePathStyle bool `config:"force_path_style"` + V2Auth bool `config:"v2_auth"` + UseAccelerateEndpoint bool `config:"use_accelerate_endpoint"` + LeavePartsOnError bool `config:"leave_parts_on_error"` + ListChunk int64 `config:"list_chunk"` + NoCheckBucket bool `config:"no_check_bucket"` + NoHead bool `config:"no_head"` + Enc encoder.MultiEncoder `config:"encoding"` + MemoryPoolFlushTime fs.Duration `config:"memory_pool_flush_time"` + MemoryPoolUseMmap bool `config:"memory_pool_use_mmap"` + DisableHTTP2 bool `config:"disable_http2"` +} + +// Fs represents a remote s3 server +type Fs struct { + name string // the name of the remote + root string // root of the bucket - ignore all objects above this + opt Options // parsed options + ci *fs.ConfigInfo // global config + ctx context.Context // global context for reading config + features *fs.Features // optional features + c *s3.S3 // the connection to the s3 server + ses *session.Session // the s3 session + rootBucket string // bucket part of root (if any) + rootDirectory string // directory part of root (if any) + cache *bucket.Cache // cache for bucket creation status + pacer *fs.Pacer // To pace the API calls + srv *http.Client // a plain http client + pool *pool.Pool // memory pool + etagIsNotMD5 bool // if set ETags are not MD5s +} + +// Object describes a s3 object +type Object struct { + // Will definitely have everything but meta which may be nil + // + // List will read everything but meta & mimeType - to fill + // that in you need to call readMetaData + fs *Fs // what this object is part of + remote string // The remote path + md5 string // md5sum of the object + bytes int64 // size of the object + lastModified time.Time // Last modified + meta map[string]*string // The object metadata if known - may be nil + mimeType string // MimeType of object - may be "" + storageClass string // e.g. 
GLACIER +} + +// ------------------------------------------------------------ + +// Name of the remote (as passed into NewFs) +func (f *Fs) Name() string { + return f.name +} + +// Root of the remote (as passed into NewFs) +func (f *Fs) Root() string { + return f.root +} + +// String converts this Fs to a string +func (f *Fs) String() string { + if f.rootBucket == "" { + return "S3 root" + } + if f.rootDirectory == "" { + return fmt.Sprintf("S3 bucket %s", f.rootBucket) + } + return fmt.Sprintf("S3 bucket %s path %s", f.rootBucket, f.rootDirectory) +} + +// Features returns the optional features of this Fs +func (f *Fs) Features() *fs.Features { + return f.features +} + +// retryErrorCodes is a slice of error codes that we will retry +// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var retryErrorCodes = []int{ + 429, // Too Many Requests + 500, // Internal Server Error - "We encountered an internal error. Please try again." + 503, // Service Unavailable/Slow Down - "Reduce your request rate" +} + +// S3 is pretty resilient, and the built-in retry handling is probably sufficient +// as it should notice closed connections and timeouts, which are the most likely +// sorts of failure modes +func (f *Fs) shouldRetry(err error) (bool, error) { + // If this is an awserr object, try and extract more useful information to determine if we should retry + if awsError, ok := err.(awserr.Error); ok { + // Simple case, check the original embedded error in case it's generically retryable + if fserrors.ShouldRetry(awsError.OrigErr()) { + return true, err + } + // Failing that, if it's a RequestFailure it's probably got an http status code we can check + if reqErr, ok := err.(awserr.RequestFailure); ok { + // 301 if wrong region for bucket - can only update if running from a bucket + if f.rootBucket != "" { + if reqErr.StatusCode() == http.StatusMovedPermanently { + urfbErr := f.updateRegionForBucket(f.rootBucket) + if urfbErr != nil { + fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr) + return false, err + } + return true, err + } + } + for _, e := range retryErrorCodes { + if reqErr.StatusCode() == e { + return true, err + } + } + } + } + // Ok, not an awserr, check for generic failure conditions + return fserrors.ShouldRetry(err), err +} + +// parsePath parses a remote 'url' +func parsePath(path string) (root string) { + root = strings.Trim(path, "/") + return +} + +// split returns bucket and bucketPath from the rootRelativePath +// relative to f.root +func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) { + bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath)) + return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath) +} + +// split returns bucket and bucketPath from the object +func (o *Object) split() (bucket, bucketPath string) { + return o.fs.split(o.remote) +} + +// getClient makes an http client according to the options +func getClient(ctx context.Context, opt *Options) *http.Client { + // TODO: Do we need cookies too?
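getClient below turns HTTP/2 off by installing an empty TLSNextProto map, which is the escape hatch the net/http documentation describes for opting out of automatic HTTP/2 support; a standalone illustration (example.com is a placeholder):

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
)

func main() {
	// Setting TLSNextProto to a non-nil, empty map tells net/http not to
	// upgrade HTTPS connections to HTTP/2 (see the net/http package docs).
	t := &http.Transport{
		TLSNextProto: map[string]func(string, *tls.Conn) http.RoundTripper{},
	}
	client := &http.Client{Transport: t}
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // HTTP/1.1 rather than HTTP/2.0
}
```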
+ t := fshttp.NewTransportCustom(ctx, func(t *http.Transport) { + if opt.DisableHTTP2 { + t.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{} + } + }) + return &http.Client{ + Transport: t, + } +} + +// s3Connection makes a connection to s3 +func s3Connection(ctx context.Context, opt *Options) (*s3.S3, *session.Session, error) { + // Make the auth + v := credentials.Value{ + AccessKeyID: opt.AccessKeyID, + SecretAccessKey: opt.SecretAccessKey, + SessionToken: opt.SessionToken, + } + + lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service + + def := defaults.Get() + def.Config.HTTPClient = lowTimeoutClient + + // start a new AWS session + awsSession, err := session.NewSession() + if err != nil { + return nil, nil, errors.Wrap(err, "NewSession") + } + + // first provider to supply a credential set "wins" + providers := []credentials.Provider{ + // use static credentials if they're present (checked by provider) + &credentials.StaticProvider{Value: v}, + + // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY + // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY + &credentials.EnvProvider{}, + + // A SharedCredentialsProvider retrieves credentials + // from the current user's home directory. It checks + // AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too. + &credentials.SharedCredentialsProvider{ + Filename: opt.SharedCredentialsFile, // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. + Profile: opt.Profile, // If empty will look for "AWS_PROFILE" env var or "default" if not set. + }, + + // Pick up IAM role if we're in an ECS task + defaults.RemoteCredProvider(*def.Config, def.Handlers), + + // Pick up IAM role in case we're on EC2 + &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.New(awsSession, &aws.Config{ + HTTPClient: lowTimeoutClient, + }), + ExpiryWindow: 3 * time.Minute, + }, + + // Pick up IAM role if we are in EKS + &stscreds.WebIdentityRoleProvider{ + ExpiryWindow: 3 * time.Minute, + }, + } + cred := credentials.NewChainCredentials(providers) + + switch { + case opt.EnvAuth: + // No need for empty checks if "env_auth" is true + case v.AccessKeyID == "" && v.SecretAccessKey == "": + // if no access key/secret and iam is explicitly disabled then fall back to anon interaction + cred = credentials.AnonymousCredentials + case v.AccessKeyID == "": + return nil, nil, errors.New("access_key_id not found") + case v.SecretAccessKey == "": + return nil, nil, errors.New("secret_access_key not found") + } + + if opt.Region == "" { + opt.Region = "us-east-1" + } + if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint { + opt.ForcePathStyle = false + } + if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 { + opt.MaxUploadParts = 1000 + } + awsConfig := aws.NewConfig(). + WithMaxRetries(0). // Rely on rclone's retry logic + WithCredentials(cred). + WithHTTPClient(getClient(ctx, opt)). + WithS3ForcePathStyle(opt.ForcePathStyle). + WithS3UseAccelerate(opt.UseAccelerateEndpoint).
+ WithS3UsEast1RegionalEndpoint(endpoints.RegionalS3UsEast1Endpoint) + + if opt.Region != "" { + awsConfig.WithRegion(opt.Region) + } + if opt.Endpoint != "" { + awsConfig.WithEndpoint(opt.Endpoint) + } + + // awsConfig.WithLogLevel(aws.LogDebugWithSigning) + awsSessionOpts := session.Options{ + Config: *awsConfig, + } + if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" { + // Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env) + awsSessionOpts.SharedConfigState = session.SharedConfigEnable + // The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source + // (from the shared config file) if the passed-in Options.Config.Credentials is nil. + awsSessionOpts.Config.Credentials = nil + } + ses, err := session.NewSessionWithOptions(awsSessionOpts) + if err != nil { + return nil, nil, err + } + c := s3.New(ses) + if opt.V2Auth || opt.Region == "other-v2-signature" { + fs.Debugf(nil, "Using v2 auth") + signer := func(req *request.Request) { + // Ignore AnonymousCredentials object + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest) + } + c.Handlers.Sign.Clear() + c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + c.Handlers.Sign.PushBack(signer) + } + return c, ses, nil +} + +func checkUploadChunkSize(cs fs.SizeSuffix) error { + if cs < minChunkSize { + return errors.Errorf("%s is less than %s", cs, minChunkSize) + } + return nil +} + +func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { + err = checkUploadChunkSize(cs) + if err == nil { + old, f.opt.ChunkSize = f.opt.ChunkSize, cs + } + return +} + +func checkUploadCutoff(cs fs.SizeSuffix) error { + if cs > maxUploadCutoff { + return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff) + } + return nil +} + +func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) { + err = checkUploadCutoff(cs) + if err == nil { + old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs + } + return +} + +// setRoot changes the root of the Fs +func (f *Fs) setRoot(root string) { + f.root = parsePath(root) + f.rootBucket, f.rootDirectory = bucket.Split(f.root) +} + +// NewFs constructs an Fs from the path, bucket:path +func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // Parse config into Options struct + opt := new(Options) + err := configstruct.Set(m, opt) + if err != nil { + return nil, err + } + err = checkUploadChunkSize(opt.ChunkSize) + if err != nil { + return nil, errors.Wrap(err, "s3: chunk size") + } + err = checkUploadCutoff(opt.UploadCutoff) + if err != nil { + return nil, errors.Wrap(err, "s3: upload cutoff") + } + if opt.ACL == "" { + opt.ACL = "private" + } + if opt.BucketACL == "" { + opt.BucketACL = opt.ACL + } + if opt.SSECustomerKey != "" && opt.SSECustomerKeyMD5 == "" { + // calculate CustomerKeyMD5 if not supplied + md5sumBinary := md5.Sum([]byte(opt.SSECustomerKey)) + opt.SSECustomerKeyMD5 = base64.StdEncoding.EncodeToString(md5sumBinary[:]) + } + c, ses, err := s3Connection(ctx, opt) + if err != nil { + return nil, err + } + + ci := fs.GetConfig(ctx) + f := &Fs{ + name: name, + opt: *opt, + ci: ci, + ctx: ctx, + c: c, + ses: ses, + pacer: fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep))), + cache: bucket.NewCache(), + srv: getClient(ctx, opt), + pool: pool.New( + time.Duration(opt.MemoryPoolFlushTime), + int(opt.ChunkSize), + 
opt.UploadConcurrency*ci.Transfers, + opt.MemoryPoolUseMmap, + ), + } + if opt.ServerSideEncryption == "aws:kms" || opt.SSECustomerAlgorithm != "" { + // From: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html + // + // Objects encrypted by SSE-S3 or plaintext have ETags that are an MD5 + // digest of their data. + // + // Objects encrypted by SSE-C or SSE-KMS have ETags that are not an + // MD5 digest of their object data. + f.etagIsNotMD5 = true + } + f.setRoot(root) + f.features = (&fs.Features{ + ReadMimeType: true, + WriteMimeType: true, + BucketBased: true, + BucketBasedRootOK: true, + SetTier: true, + GetTier: true, + SlowModTime: true, + }).Fill(ctx, f) + if f.rootBucket != "" && f.rootDirectory != "" { + // Check to see if the (bucket,directory) is actually an existing file + oldRoot := f.root + newRoot, leaf := path.Split(oldRoot) + f.setRoot(newRoot) + _, err := f.NewObject(ctx, leaf) + if err != nil { + if err == fs.ErrorObjectNotFound || err == fs.ErrorNotAFile { + // File doesn't exist or is a directory so return old f + f.setRoot(oldRoot) + return f, nil + } + return nil, err + } + // return an error with an fs which points to the parent + return f, fs.ErrorIsFile + } + // f.listMultipartUploads() + return f, nil +} + +// Return an Object from a path +// +//If it can't be found it returns the error ErrorObjectNotFound. +func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *s3.Object) (fs.Object, error) { + o := &Object{ + fs: f, + remote: remote, + } + if info != nil { + // Set info but not meta + if info.LastModified == nil { + fs.Logf(o, "Failed to read last modified") + o.lastModified = time.Now() + } else { + o.lastModified = *info.LastModified + } + o.setMD5FromEtag(aws.StringValue(info.ETag)) + o.bytes = aws.Int64Value(info.Size) + o.storageClass = aws.StringValue(info.StorageClass) + } else { + err := o.readMetaData(ctx) // reads info and meta, returning an error + if err != nil { + return nil, err + } + } + return o, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error fs.ErrorObjectNotFound. +func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return f.newObjectWithInfo(ctx, remote, nil) +} + +// Gets the bucket location +func (f *Fs) getBucketLocation(bucket string) (string, error) { + req := s3.GetBucketLocationInput{ + Bucket: &bucket, + } + var resp *s3.GetBucketLocationOutput + var err error + err = f.pacer.Call(func() (bool, error) { + resp, err = f.c.GetBucketLocation(&req) + return f.shouldRetry(err) + }) + if err != nil { + return "", err + } + return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil +} + +// Updates the region for the bucket by reading the region from the +// bucket then updating the session. 
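getBucketLocation above funnels the result through s3.NormalizeBucketLocation because the API reports us-east-1 as an empty constraint and eu-west-1 as the legacy "EU"; a small illustration using that AWS SDK helper:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Raw LocationConstraint values as returned by GetBucketLocation, and
	// the region names NormalizeBucketLocation turns them into.
	for _, loc := range []string{"", "EU", "eu-west-2"} {
		fmt.Printf("%q -> %q\n", loc, s3.NormalizeBucketLocation(loc))
	}
	// "" -> "us-east-1"
	// "EU" -> "eu-west-1"
	// "eu-west-2" -> "eu-west-2"
}
```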
+func (f *Fs) updateRegionForBucket(bucket string) error { + region, err := f.getBucketLocation(bucket) + if err != nil { + return errors.Wrap(err, "reading bucket location failed") + } + if aws.StringValue(f.c.Config.Endpoint) != "" { + return errors.Errorf("can't set region to %q as endpoint is set", region) + } + if aws.StringValue(f.c.Config.Region) == region { + return errors.Errorf("region is already %q - not updating", region) + } + + // Make a new session with the new region + oldRegion := f.opt.Region + f.opt.Region = region + c, ses, err := s3Connection(f.ctx, &f.opt) + if err != nil { + return errors.Wrap(err, "creating new session failed") + } + f.c = c + f.ses = ses + + fs.Logf(f, "Switched region to %q from %q", region, oldRegion) + return nil +} + +// listFn is called from list to handle an object. +type listFn func(remote string, object *s3.Object, isDirectory bool) error + +// list lists the objects into the function supplied from +// the bucket and directory supplied. The remote has prefix +// removed from it and if addBucket is set then it adds the +// bucket to the start. +// +// Set recurse to read sub directories +func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) error { + if prefix != "" { + prefix += "/" + } + if directory != "" { + directory += "/" + } + delimiter := "" + if !recurse { + delimiter = "/" + } + var marker *string + // URL encode the listings so we can use control characters in object names + // See: https://github.com/aws/aws-sdk-go/issues/1914 + // + // However this doesn't work perfectly under Ceph (and hence DigitalOcean/Dreamhost) because + // it doesn't encode CommonPrefixes. + // See: https://tracker.ceph.com/issues/41870 + // + // This also does not work under IBM COS: see https://github.com/rclone/rclone/issues/3345, + // though maybe it does on some versions. + // + // This does work with minio but was only added relatively recently + // https://github.com/minio/minio/pull/7265 + // + // So we enable it only on providers we know support it properly; all others can retry when an + // XML syntax error is detected.
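With EncodingType=url set, keys in the response come back percent-encoded so characters XML cannot carry survive the trip; the listing loop below undoes this with url.QueryUnescape, whose behaviour this standalone sketch shows:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A hypothetical key containing a newline, as it would appear in an
	// EncodingType=url listing response.
	decoded, err := url.QueryUnescape("dir/file%0Aname.txt")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", decoded) // "dir/file\nname.txt"

	// Note QueryUnescape also maps "+" to a space, so a key containing a
	// literal "+" must arrive encoded as "%2B".
	withPlus, _ := url.QueryUnescape("a%2Bb+c")
	fmt.Printf("%q\n", withPlus) // "a+b c"
}
```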
+ var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS") + for { + // FIXME need to implement ALL loop + req := s3.ListObjectsInput{ + Bucket: &bucket, + Delimiter: &delimiter, + Prefix: &directory, + MaxKeys: &f.opt.ListChunk, + Marker: marker, + } + if urlEncodeListings { + req.EncodingType = aws.String(s3.EncodingTypeUrl) + } + if f.opt.RequesterPays { + req.RequestPayer = aws.String(s3.RequestPayerRequester) + } + var resp *s3.ListObjectsOutput + var err error + err = f.pacer.CallContext(ctx, func() (bool, error) { + resp, err = f.c.ListObjectsWithContext(ctx, &req) + if err != nil && !urlEncodeListings { + if awsErr, ok := err.(awserr.RequestFailure); ok { + if origErr := awsErr.OrigErr(); origErr != nil { + if _, ok := origErr.(*xml.SyntaxError); ok { + // Retry the listing with URL encoding as there were characters that XML can't encode + urlEncodeListings = true + req.EncodingType = aws.String(s3.EncodingTypeUrl) + fs.Debugf(f, "Retrying listing because of characters which can't be XML encoded") + return true, err + } + } + } + } + return f.shouldRetry(err) + }) + if err != nil { + if awsErr, ok := err.(awserr.RequestFailure); ok { + if awsErr.StatusCode() == http.StatusNotFound { + err = fs.ErrorDirNotFound + } + } + if f.rootBucket == "" { + // if listing from the root ignore wrong region requests returning + // empty directory + if reqErr, ok := err.(awserr.RequestFailure); ok { + // 301 if wrong region for bucket + if reqErr.StatusCode() == http.StatusMovedPermanently { + fs.Errorf(f, "Can't change region for bucket %q with no bucket specified", bucket) + return nil + } + } + } + return err + } + if !recurse { + for _, commonPrefix := range resp.CommonPrefixes { + if commonPrefix.Prefix == nil { + fs.Logf(f, "Nil common prefix received") + continue + } + remote := *commonPrefix.Prefix + if urlEncodeListings { + remote, err = url.QueryUnescape(remote) + if err != nil { + fs.Logf(f, "failed to URL decode %q in listing common prefix: %v", *commonPrefix.Prefix, err) + continue + } + } + remote = f.opt.Enc.ToStandardPath(remote) + if !strings.HasPrefix(remote, prefix) { + fs.Logf(f, "Odd name received %q", remote) + continue + } + remote = remote[len(prefix):] + if addBucket { + remote = path.Join(bucket, remote) + } + if strings.HasSuffix(remote, "/") { + remote = remote[:len(remote)-1] + } + err = fn(remote, &s3.Object{Key: &remote}, true) + if err != nil { + return err + } + } + } + for _, object := range resp.Contents { + remote := aws.StringValue(object.Key) + if urlEncodeListings { + remote, err = url.QueryUnescape(remote) + if err != nil { + fs.Logf(f, "failed to URL decode %q in listing: %v", aws.StringValue(object.Key), err) + continue + } + } + remote = f.opt.Enc.ToStandardPath(remote) + if !strings.HasPrefix(remote, prefix) { + fs.Logf(f, "Odd name received %q", remote) + continue + } + remote = remote[len(prefix):] + isDirectory := remote == "" || strings.HasSuffix(remote, "/") + if addBucket { + remote = path.Join(bucket, remote) + } + // is this a directory marker? 
+ if isDirectory && object.Size != nil && *object.Size == 0 { + continue // skip directory marker + } + err = fn(remote, object, false) + if err != nil { + return err + } + } + if !aws.BoolValue(resp.IsTruncated) { + break + } + // Use NextMarker if set, otherwise use last Key + if resp.NextMarker == nil || *resp.NextMarker == "" { + if len(resp.Contents) == 0 { + return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents") + } + marker = resp.Contents[len(resp.Contents)-1].Key + } else { + marker = resp.NextMarker + } + if urlEncodeListings { + *marker, err = url.QueryUnescape(*marker) + if err != nil { + return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker) + } + } + } + return nil +} + +// Convert a list item into a DirEntry +func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) { + if isDirectory { + size := int64(0) + if object.Size != nil { + size = *object.Size + } + d := fs.NewDir(remote, time.Time{}).SetSize(size) + return d, nil + } + o, err := f.newObjectWithInfo(ctx, remote, object) + if err != nil { + return nil, err + } + return o, nil +} + +// listDir lists files and directories to out +func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) { + // List the objects and directories + err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *s3.Object, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) + if err != nil { + return err + } + if entry != nil { + entries = append(entries, entry) + } + return nil + }) + if err != nil { + return nil, err + } + // bucket must be present if listing succeeded + f.cache.MarkOK(bucket) + return entries, nil +} + +// listBuckets lists the buckets to out +func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) { + req := s3.ListBucketsInput{} + var resp *s3.ListBucketsOutput + err = f.pacer.CallContext(ctx, func() (bool, error) { + resp, err = f.c.ListBucketsWithContext(ctx, &req) + return f.shouldRetry(err) + }) + if err != nil { + return nil, err + } + for _, bucket := range resp.Buckets { + bucketName := f.opt.Enc.ToStandardName(aws.StringValue(bucket.Name)) + f.cache.MarkOK(bucketName) + d := fs.NewDir(bucketName, aws.TimeValue(bucket.CreationDate)) + entries = append(entries, d) + } + return entries, nil +} + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + bucket, directory := f.split(dir) + if bucket == "" { + if directory != "" { + return nil, fs.ErrorListBucketRequired + } + return f.listBuckets(ctx) + } + return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "") +} + +// ListR lists the objects and directories of the Fs starting +// from dir recursively into out. +// +// dir should be "" to start from the root, and should not +// have trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +// +// It should call callback for each tranche of entries read. +// These need not be returned in any particular order. 
If +// callback returns an error then the listing will stop +// immediately. +// +// Don't implement this unless you have a more efficient way +// of listing recursively than doing a directory traversal. +func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) { + bucket, directory := f.split(dir) + list := walk.NewListRHelper(callback) + listR := func(bucket, directory, prefix string, addBucket bool) error { + return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *s3.Object, isDirectory bool) error { + entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory) + if err != nil { + return err + } + return list.Add(entry) + }) + } + if bucket == "" { + entries, err := f.listBuckets(ctx) + if err != nil { + return err + } + for _, entry := range entries { + err = list.Add(entry) + if err != nil { + return err + } + bucket := entry.Remote() + err = listR(bucket, "", f.rootDirectory, true) + if err != nil { + return err + } + // bucket must be present if listing succeeded + f.cache.MarkOK(bucket) + } + } else { + err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "") + if err != nil { + return err + } + // bucket must be present if listing succeeded + f.cache.MarkOK(bucket) + } + return list.Flush() +} + +// Put the Object into the bucket +func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + // Temporary Object under construction + fs := &Object{ + fs: f, + remote: src.Remote(), + } + return fs, fs.Update(ctx, in, src, options...) +} + +// PutStream uploads to the remote path with the modTime given of indeterminate size +func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + return f.Put(ctx, in, src, options...) 
+} + +// Check if the bucket exists +// +// NB this can return incorrect results if called immediately after bucket deletion +func (f *Fs) bucketExists(ctx context.Context, bucket string) (bool, error) { + req := s3.HeadBucketInput{ + Bucket: &bucket, + } + err := f.pacer.CallContext(ctx, func() (bool, error) { + _, err := f.c.HeadBucketWithContext(ctx, &req) + return f.shouldRetry(err) + }) + if err == nil { + return true, nil + } + if err, ok := err.(awserr.RequestFailure); ok { + if err.StatusCode() == http.StatusNotFound { + return false, nil + } + } + return false, err +} + +// Mkdir creates the bucket if it doesn't exist +func (f *Fs) Mkdir(ctx context.Context, dir string) error { + bucket, _ := f.split(dir) + return f.makeBucket(ctx, bucket) +} + +// makeBucket creates the bucket if it doesn't exist +func (f *Fs) makeBucket(ctx context.Context, bucket string) error { + if f.opt.NoCheckBucket { + return nil + } + return f.cache.Create(bucket, func() error { + req := s3.CreateBucketInput{ + Bucket: &bucket, + ACL: &f.opt.BucketACL, + } + if f.opt.LocationConstraint != "" { + req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ + LocationConstraint: &f.opt.LocationConstraint, + } + } + err := f.pacer.CallContext(ctx, func() (bool, error) { + _, err := f.c.CreateBucketWithContext(ctx, &req) + return f.shouldRetry(err) + }) + if err == nil { + fs.Infof(f, "Bucket %q created with ACL %q", bucket, f.opt.BucketACL) + } + if awsErr, ok := err.(awserr.Error); ok { + if code := awsErr.Code(); code == "BucketAlreadyOwnedByYou" || code == "BucketAlreadyExists" { + err = nil + } + } + return err + }, func() (bool, error) { + return f.bucketExists(ctx, bucket) + }) +} + +// Rmdir deletes the bucket if the fs is at the root +// +// Returns an error if it isn't empty +func (f *Fs) Rmdir(ctx context.Context, dir string) error { + bucket, directory := f.split(dir) + if bucket == "" || directory != "" { + return nil + } + return f.cache.Remove(bucket, func() error { + req := s3.DeleteBucketInput{ + Bucket: &bucket, + } + err := f.pacer.CallContext(ctx, func() (bool, error) { + _, err := f.c.DeleteBucketWithContext(ctx, &req) + return f.shouldRetry(err) + }) + if err == nil { + fs.Infof(f, "Bucket %q deleted", bucket) + } + return err + }) +} + +// Precision of the remote +func (f *Fs) Precision() time.Duration { + return time.Nanosecond +} + +// pathEscape escapes s as for a URL path. 
It uses rest.URLPathEscape +// but also escapes '+' for S3 and Digital Ocean spaces compatibility +func pathEscape(s string) string { + return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1) +} + +// copy does a server-side copy +// +// It adds the boiler plate to the req passed in and calls the s3 +// method +func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error { + req.Bucket = &dstBucket + req.ACL = &f.opt.ACL + req.Key = &dstPath + source := pathEscape(path.Join(srcBucket, srcPath)) + req.CopySource = &source + if f.opt.RequesterPays { + req.RequestPayer = aws.String(s3.RequestPayerRequester) + } + if f.opt.ServerSideEncryption != "" { + req.ServerSideEncryption = &f.opt.ServerSideEncryption + } + if f.opt.SSECustomerAlgorithm != "" { + req.SSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm + req.CopySourceSSECustomerAlgorithm = &f.opt.SSECustomerAlgorithm + } + if f.opt.SSECustomerKey != "" { + req.SSECustomerKey = &f.opt.SSECustomerKey + req.CopySourceSSECustomerKey = &f.opt.SSECustomerKey + } + if f.opt.SSECustomerKeyMD5 != "" { + req.SSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5 + req.CopySourceSSECustomerKeyMD5 = &f.opt.SSECustomerKeyMD5 + } + if f.opt.SSEKMSKeyID != "" { + req.SSEKMSKeyId = &f.opt.SSEKMSKeyID + } + if req.StorageClass == nil && f.opt.StorageClass != "" { + req.StorageClass = &f.opt.StorageClass + } + + if src.bytes >= int64(f.opt.CopyCutoff) { + return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, src) + } + return f.pacer.CallContext(ctx, func() (bool, error) { + _, err := f.c.CopyObjectWithContext(ctx, req) + return f.shouldRetry(err) + }) +} + +func calculateRange(partSize, partIndex, numParts, totalSize int64) string { + start := partIndex * partSize + var ends string + if partIndex == numParts-1 { + if totalSize >= 1 { + ends = strconv.FormatInt(totalSize-1, 10) + } + } else { + ends = strconv.FormatInt(start+partSize-1, 10) + } + return fmt.Sprintf("bytes=%v-%v", start, ends) +} + +func (f *Fs) copyMultipart(ctx context.Context, copyReq *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) (err error) { + info, err := src.headObject(ctx) + if err != nil { + return err + } + + req := &s3.CreateMultipartUploadInput{} + + // Fill in the request from the head info + structs.SetFrom(req, info) + + // If copy metadata was set then set the Metadata to that read + // from the head request + if aws.StringValue(copyReq.MetadataDirective) == s3.MetadataDirectiveCopy { + copyReq.Metadata = info.Metadata + } + + // Overwrite any from the copyReq + structs.SetFrom(req, copyReq) + + req.Bucket = &dstBucket + req.Key = &dstPath + + var cout *s3.CreateMultipartUploadOutput + if err := f.pacer.CallContext(ctx, func() (bool, error) { + var err error + cout, err = f.c.CreateMultipartUploadWithContext(ctx, req) + return f.shouldRetry(err) + }); err != nil { + return err + } + uid := cout.UploadId + + defer atexit.OnError(&err, func() { + // Try to abort the upload, but ignore the error. 
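A few concrete values for calculateRange above; the logic is reproduced verbatim so the sketch runs standalone:

```go
package main

import (
	"fmt"
	"strconv"
)

// Same logic as calculateRange above, copied here for a runnable check.
func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	start := partIndex * partSize
	var ends string
	if partIndex == numParts-1 {
		if totalSize >= 1 {
			ends = strconv.FormatInt(totalSize-1, 10)
		}
	} else {
		ends = strconv.FormatInt(start+partSize-1, 10)
	}
	return fmt.Sprintf("bytes=%v-%v", start, ends)
}

func main() {
	// A 9-byte object copied with 4-byte parts needs 3 parts.
	const partSize, totalSize int64 = 4, 9
	numParts := (totalSize-1)/partSize + 1
	for i := int64(0); i < numParts; i++ {
		fmt.Println(calculateRange(partSize, i, numParts, totalSize))
	}
	// bytes=0-3
	// bytes=4-7
	// bytes=8-8
}
```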
+ fs.Debugf(src, "Cancelling multipart copy") + _ = f.pacer.CallContext(ctx, func() (bool, error) { + _, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{ + Bucket: &dstBucket, + Key: &dstPath, + UploadId: uid, + RequestPayer: req.RequestPayer, + }) + return f.shouldRetry(err) + }) + })() + + srcSize := src.bytes + partSize := int64(f.opt.CopyCutoff) + numParts := (srcSize-1)/partSize + 1 + + fs.Debugf(src, "Starting multipart copy with %d parts", numParts) + + var parts []*s3.CompletedPart + for partNum := int64(1); partNum <= numParts; partNum++ { + if err := f.pacer.CallContext(ctx, func() (bool, error) { + partNum := partNum + uploadPartReq := &s3.UploadPartCopyInput{} + structs.SetFrom(uploadPartReq, copyReq) + uploadPartReq.Bucket = &dstBucket + uploadPartReq.Key = &dstPath + uploadPartReq.PartNumber = &partNum + uploadPartReq.UploadId = uid + uploadPartReq.CopySourceRange = aws.String(calculateRange(partSize, partNum-1, numParts, srcSize)) + uout, err := f.c.UploadPartCopyWithContext(ctx, uploadPartReq) + if err != nil { + return f.shouldRetry(err) + } + parts = append(parts, &s3.CompletedPart{ + PartNumber: &partNum, + ETag: uout.CopyPartResult.ETag, + }) + return false, nil + }); err != nil { + return err + } + } + + return f.pacer.CallContext(ctx, func() (bool, error) { + _, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{ + Bucket: &dstBucket, + Key: &dstPath, + MultipartUpload: &s3.CompletedMultipartUpload{ + Parts: parts, + }, + RequestPayer: req.RequestPayer, + UploadId: uid, + }) + return f.shouldRetry(err) + }) +} + +// Copy src to this remote using server-side copy operations. +// +// This is stored with the remote path given +// +// It returns the destination Object and a possible error +// +// Will only be called if src.Fs().Name() == f.Name() +// +// If it isn't possible then return fs.ErrorCantCopy +func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) { + dstBucket, dstPath := f.split(remote) + err := f.makeBucket(ctx, dstBucket) + if err != nil { + return nil, err + } + srcObj, ok := src.(*Object) + if !ok { + fs.Debugf(src, "Can't copy - not same remote type") + return nil, fs.ErrorCantCopy + } + srcBucket, srcPath := srcObj.split() + req := s3.CopyObjectInput{ + MetadataDirective: aws.String(s3.MetadataDirectiveCopy), + } + err = f.copy(ctx, &req, dstBucket, dstPath, srcBucket, srcPath, srcObj) + if err != nil { + return nil, err + } + return f.NewObject(ctx, remote) +} + +// Hashes returns the supported hash sets. 
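Copy (and setMD5FromEtag earlier) lean on the fact that an ETag is only usable as an MD5 for single-part objects without SSE-C/KMS; a hedged sketch of the usual shape check (multipart ETags carry a "-N" part-count suffix):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// md5Re matches a plain MD5: exactly 32 hex digits.
var md5Re = regexp.MustCompile(`^[0-9a-f]{32}$`)

// looksLikeMD5 reports whether an S3 ETag can double as an MD5 hash.
// Multipart uploads produce ETags like
// "9b2cf535f27731c974343645a3985328-5", which are not MD5s of the data.
func looksLikeMD5(etag string) bool {
	return md5Re.MatchString(strings.Trim(strings.ToLower(etag), `"`))
}

func main() {
	fmt.Println(looksLikeMD5(`"d41d8cd98f00b204e9800998ecf8427e"`))   // true
	fmt.Println(looksLikeMD5(`"9b2cf535f27731c974343645a3985328-5"`)) // false: multipart
}
```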
+func (f *Fs) Hashes() hash.Set { + return hash.Set(hash.MD5) +} + +func (f *Fs) getMemoryPool(size int64) *pool.Pool { + if size == int64(f.opt.ChunkSize) { + return f.pool + } + + return pool.New( + time.Duration(f.opt.MemoryPoolFlushTime), + int(size), + f.opt.UploadConcurrency*f.ci.Transfers, + f.opt.MemoryPoolUseMmap, + ) +} + +// PublicLink generates a public link to the remote path (usually readable by anyone) +func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration, unlink bool) (link string, err error) { + if strings.HasSuffix(remote, "/") { + return "", fs.ErrorCantShareDirectories + } + if _, err := f.NewObject(ctx, remote); err != nil { + return "", err + } + if expire > maxExpireDuration { + fs.Logf(f, "Public Link: Reducing expiry to %v as %v is greater than the max time allowed", maxExpireDuration, expire) + expire = maxExpireDuration + } + bucket, bucketPath := f.split(remote) + httpReq, _ := f.c.GetObjectRequest(&s3.GetObjectInput{ + Bucket: &bucket, + Key: &bucketPath, + }) + + return httpReq.Presign(time.Duration(expire)) +} + +var commandHelp = []fs.CommandHelp{{ + Name: "restore", + Short: "Restore objects from GLACIER to normal storage", + Long: `This command can be used to restore one or more objects from GLACIER +to normal storage. + +Usage Examples: + + rclone backend restore s3:bucket/path/to/object [-o priority=PRIORITY] [-o lifetime=DAYS] + rclone backend restore s3:bucket/path/to/directory [-o priority=PRIORITY] [-o lifetime=DAYS] + rclone backend restore s3:bucket [-o priority=PRIORITY] [-o lifetime=DAYS] + +This command also obeys the filters. Test first with the -i/--interactive or --dry-run flags. + + rclone -i backend restore --include "*.txt" s3:bucket/path -o priority=Standard + +All the objects shown will be marked for restore, then + + rclone backend restore --include "*.txt" s3:bucket/path -o priority=Standard + +It returns a list of status dictionaries with Remote and Status +keys. The Status will be OK if it was successful or an error message +if not. + + [ + { + "Status": "OK", + "Remote": "test.txt" + }, + { + "Status": "OK", + "Remote": "test/file4.txt" + } + ] + +`, + Opts: map[string]string{ + "priority": "Priority of restore: Standard|Expedited|Bulk", + "lifetime": "Lifetime of the active copy in days", + "description": "The optional description for the job.", + }, +}, { + Name: "list-multipart-uploads", + Short: "List the unfinished multipart uploads", + Long: `This command lists the unfinished multipart uploads in JSON format. + + rclone backend list-multipart s3:bucket/path/to/object + +It returns a dictionary of buckets with values as lists of unfinished +multipart uploads. + +You can call it with no bucket in which case it lists all buckets, with +a bucket or with a bucket and path. + + { + "rclone": [ + { + "Initiated": "2020-06-26T14:20:36Z", + "Initiator": { + "DisplayName": "XXX", + "ID": "arn:aws:iam::XXX:user/XXX" + }, + "Key": "KEY", + "Owner": { + "DisplayName": null, + "ID": "XXX" + }, + "StorageClass": "STANDARD", + "UploadId": "XXX" + } + ], + "rclone-1000files": [], + "rclone-dst": [] + } + +`, +}, { + Name: "cleanup", + Short: "Remove unfinished multipart uploads.", + Long: `This command removes unfinished multipart uploads of age greater than +max-age which defaults to 24 hours. + +Note that you can use -i/--dry-run with this command to see what it +would do.
+
+    rclone backend cleanup s3:bucket/path/to/object
+    rclone backend cleanup -o max-age=7w s3:bucket/path/to/object
+
+Durations are parsed as per the rest of rclone, 2h, 7d, 7w etc.
+`,
+	Opts: map[string]string{
+		"max-age": "Max age of upload to delete",
+	},
+}}
+
+// Command the backend to run a named command
+//
+// The command run is name.
+// Arguments may be read from args and optional parameters from opt.
+//
+// The result should be capable of being JSON encoded.
+// If it is a string or a []string it will be shown to the user
+// otherwise it will be JSON encoded and shown to the user like that
+func (f *Fs) Command(ctx context.Context, name string, arg []string, opt map[string]string) (out interface{}, err error) {
+	switch name {
+	case "restore":
+		req := s3.RestoreObjectInput{
+			//Bucket: &f.rootBucket,
+			//Key: &encodedDirectory,
+			RestoreRequest: &s3.RestoreRequest{},
+		}
+		if lifetime := opt["lifetime"]; lifetime != "" {
+			ilifetime, err := strconv.ParseInt(lifetime, 10, 64)
+			if err != nil {
+				return nil, errors.Wrap(err, "bad lifetime")
+			}
+			req.RestoreRequest.Days = &ilifetime
+		}
+		if priority := opt["priority"]; priority != "" {
+			req.RestoreRequest.GlacierJobParameters = &s3.GlacierJobParameters{
+				Tier: &priority,
+			}
+		}
+		if description := opt["description"]; description != "" {
+			req.RestoreRequest.Description = &description
+		}
+		type status struct {
+			Status string
+			Remote string
+		}
+		var (
+			outMu sync.Mutex
+			out   = []status{}
+		)
+		err = operations.ListFn(ctx, f, func(obj fs.Object) {
+			// Remember this is run --checkers times concurrently
+			o, ok := obj.(*Object)
+			st := status{Status: "OK", Remote: obj.Remote()}
+			defer func() {
+				outMu.Lock()
+				out = append(out, st)
+				outMu.Unlock()
+			}()
+			if operations.SkipDestructive(ctx, obj, "restore") {
+				return
+			}
+			if !ok {
+				st.Status = "Not an S3 object"
+				return
+			}
+			bucket, bucketPath := o.split()
+			reqCopy := req
+			reqCopy.Bucket = &bucket
+			reqCopy.Key = &bucketPath
+			err = f.pacer.Call(func() (bool, error) {
+				_, err = f.c.RestoreObject(&reqCopy)
+				return f.shouldRetry(err)
+			})
+			if err != nil {
+				st.Status = err.Error()
+			}
+		})
+		if err != nil {
+			return out, err
+		}
+		return out, nil
+	case "list-multipart-uploads":
+		return f.listMultipartUploadsAll(ctx)
+	case "cleanup":
+		maxAge := 24 * time.Hour
+		if opt["max-age"] != "" {
+			maxAge, err = fs.ParseDuration(opt["max-age"])
+			if err != nil {
+				return nil, errors.Wrap(err, "bad max-age")
+			}
+		}
+		return nil, f.cleanUp(ctx, maxAge)
+	default:
+		return nil, fs.ErrorCommandNotFound
+	}
+}
+
+// listMultipartUploads lists all outstanding multipart uploads for (bucket, key)
+//
+// Note that rather lazily we treat key as a prefix so it matches
+// directories and objects. This could surprise the user if they ask
+// for "dir" and it returns "dirKey".
+func (f *Fs) listMultipartUploads(ctx context.Context, bucket, key string) (uploads []*s3.MultipartUpload, err error) {
+	var (
+		keyMarker      *string
+		uploadIDMarker *string
+	)
+	uploads = []*s3.MultipartUpload{}
+	for {
+		req := s3.ListMultipartUploadsInput{
+			Bucket:         &bucket,
+			MaxUploads:     &f.opt.ListChunk,
+			KeyMarker:      keyMarker,
+			UploadIdMarker: uploadIDMarker,
+			Prefix:         &key,
+		}
+		var resp *s3.ListMultipartUploadsOutput
+		err = f.pacer.Call(func() (bool, error) {
+			resp, err = f.c.ListMultipartUploads(&req)
+			return f.shouldRetry(err)
+		})
+		if err != nil {
+			return nil, errors.Wrapf(err, "list multipart uploads bucket %q key %q", bucket, key)
+		}
+		uploads = append(uploads, resp.Uploads...)
+		if !aws.BoolValue(resp.IsTruncated) {
+			break
+		}
+		keyMarker = resp.NextKeyMarker
+		uploadIDMarker = resp.NextUploadIdMarker
+	}
+	return uploads, nil
+}
+
+func (f *Fs) listMultipartUploadsAll(ctx context.Context) (uploadsMap map[string][]*s3.MultipartUpload, err error) {
+	uploadsMap = make(map[string][]*s3.MultipartUpload)
+	bucket, directory := f.split("")
+	if bucket != "" {
+		uploads, err := f.listMultipartUploads(ctx, bucket, directory)
+		if err != nil {
+			return uploadsMap, err
+		}
+		uploadsMap[bucket] = uploads
+		return uploadsMap, nil
+	}
+	entries, err := f.listBuckets(ctx)
+	if err != nil {
+		return uploadsMap, err
+	}
+	for _, entry := range entries {
+		bucket := entry.Remote()
+		uploads, listErr := f.listMultipartUploads(ctx, bucket, "")
+		if listErr != nil {
+			err = listErr
+			fs.Errorf(f, "%v", err)
+		}
+		uploadsMap[bucket] = uploads
+	}
+	return uploadsMap, err
+}
+
+// cleanUpBucket removes all pending multipart uploads for a given bucket over the age of maxAge
+func (f *Fs) cleanUpBucket(ctx context.Context, bucket string, maxAge time.Duration, uploads []*s3.MultipartUpload) (err error) {
+	fs.Infof(f, "cleaning bucket %q of pending multipart uploads older than %v", bucket, maxAge)
+	for _, upload := range uploads {
+		if upload.Initiated != nil && upload.Key != nil && upload.UploadId != nil {
+			age := time.Since(*upload.Initiated)
+			what := fmt.Sprintf("pending multipart upload for bucket %q key %q dated %v (%v ago)", bucket, *upload.Key, upload.Initiated, age)
+			if age > maxAge {
+				fs.Infof(f, "removing %s", what)
+				if operations.SkipDestructive(ctx, what, "remove pending upload") {
+					continue
+				}
+				req := s3.AbortMultipartUploadInput{
+					Bucket:   &bucket,
+					UploadId: upload.UploadId,
+					Key:      upload.Key,
+				}
+				_, abortErr := f.c.AbortMultipartUpload(&req)
+				if abortErr != nil {
+					err = errors.Wrapf(abortErr, "failed to remove %s", what)
+					fs.Errorf(f, "%v", err)
+				}
+			} else {
+				fs.Debugf(f, "ignoring %s", what)
+			}
+		}
+	}
+	return err
+}
+
+// cleanUp removes all pending multipart uploads older than maxAge
+func (f *Fs) cleanUp(ctx context.Context, maxAge time.Duration) (err error) {
+	uploadsMap, err := f.listMultipartUploadsAll(ctx)
+	if err != nil {
+		return err
+	}
+	for bucket, uploads := range uploadsMap {
+		cleanErr := f.cleanUpBucket(ctx, bucket, maxAge, uploads)
+		if cleanErr != nil {
+			fs.Errorf(f, "Failed to cleanup bucket %q: %v", bucket, cleanErr)
+			err = cleanErr
+		}
+	}
+	return err
+}
+
+// CleanUp removes all pending multipart uploads older than 24 hours
+func (f *Fs) CleanUp(ctx context.Context) (err error) {
+	return f.cleanUp(ctx, 24*time.Hour)
+}
+
+// ------------------------------------------------------------
+
+// Fs returns the parent Fs
+func (o *Object) Fs() fs.Info {
+	return o.fs
+}
+
+// Return a string version
+func (o *Object) String() string {
+	if o == nil {
+		return "<nil>"
+	}
+	return o.remote
+}
+
+// Remote returns the remote path
+func (o *Object) Remote() string {
+	return o.remote
+}
+
+var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
+
+// Set the MD5 from the etag
+func (o *Object) setMD5FromEtag(etag string) {
+	if o.fs.etagIsNotMD5 {
+		o.md5 = ""
+		return
+	}
+	if etag == "" {
+		o.md5 = ""
+		return
+	}
+	hash := strings.Trim(strings.ToLower(etag), `"`)
+	// Check the etag is a valid md5sum
+	if !matchMd5.MatchString(hash) {
+		o.md5 = ""
+		return
+	}
+	o.md5 = hash
+}
+
+// Hash returns the Md5sum of an object returning a lowercase hex string
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+	if t != hash.MD5 {
+		return "", hash.ErrUnsupported
+	}
+	// If we haven't got an MD5, then check the metadata
+	if o.md5 == "" {
+		err := o.readMetaData(ctx)
+		if err != nil {
+			return "", err
+		}
+	}
+	return o.md5, nil
+}
+
+// Size returns the size of an object in bytes
+func (o *Object) Size() int64 {
+	return o.bytes
+}
+
+func (o *Object) headObject(ctx context.Context) (resp *s3.HeadObjectOutput, err error) {
+	bucket, bucketPath := o.split()
+	req := s3.HeadObjectInput{
+		Bucket: &bucket,
+		Key:    &bucketPath,
+	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
+	if o.fs.opt.SSECustomerAlgorithm != "" {
+		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
+	}
+	if o.fs.opt.SSECustomerKey != "" {
+		req.SSECustomerKey = &o.fs.opt.SSECustomerKey
+	}
+	if o.fs.opt.SSECustomerKeyMD5 != "" {
+		req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
+	}
+	err = o.fs.pacer.CallContext(ctx, func() (bool, error) {
+		var err error
+		resp, err = o.fs.c.HeadObjectWithContext(ctx, &req)
+		return o.fs.shouldRetry(err)
+	})
+	if err != nil {
+		if awsErr, ok := err.(awserr.RequestFailure); ok {
+			if awsErr.StatusCode() == http.StatusNotFound {
+				return nil, fs.ErrorObjectNotFound
+			}
+		}
+		return nil, err
+	}
+	o.fs.cache.MarkOK(bucket)
+	return resp, nil
+}
+
+// readMetaData gets the metadata if it hasn't already been fetched
+//
+// it also sets the info
+func (o *Object) readMetaData(ctx context.Context) (err error) {
+	if o.meta != nil {
+		return nil
+	}
+	resp, err := o.headObject(ctx)
+	if err != nil {
+		return err
+	}
+	var size int64
+	// Ignore missing Content-Length assuming it is 0
+	// Some versions of ceph do this due to their apache proxies
+	if resp.ContentLength != nil {
+		size = *resp.ContentLength
+	}
+	o.setMD5FromEtag(aws.StringValue(resp.ETag))
+	o.bytes = size
+	o.meta = resp.Metadata
+	if o.meta == nil {
+		o.meta = map[string]*string{}
+	}
+	// Read MD5 from metadata if present
+	if md5sumBase64, ok := o.meta[metaMD5Hash]; ok {
+		md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sumBase64)
+		if err != nil {
+			fs.Debugf(o, "Failed to read md5sum from metadata %q: %v", *md5sumBase64, err)
+		} else if len(md5sumBytes) != 16 {
+			fs.Debugf(o, "Failed to read md5sum from metadata %q: wrong length", *md5sumBase64)
+		} else {
+			o.md5 = hex.EncodeToString(md5sumBytes)
+		}
+	}
+	o.storageClass = aws.StringValue(resp.StorageClass)
+	if resp.LastModified == nil {
+		fs.Logf(o, "Failed to read last modified from HEAD - using current time")
+		o.lastModified = time.Now()
+	} else {
+		o.lastModified = *resp.LastModified
+	}
+	o.mimeType = aws.StringValue(resp.ContentType)
+	return nil
+}
+
+// ModTime returns the modification time of the object
+//
+// It attempts to read the object's mtime and if that isn't present the
+// LastModified returned in the http headers
+func (o *Object) ModTime(ctx context.Context) time.Time {
+	if o.fs.ci.UseServerModTime {
+		return o.lastModified
+	}
+	err := o.readMetaData(ctx)
+	if err != nil {
+		fs.Logf(o, "Failed to read metadata: %v", err)
+		return time.Now()
+	}
+	// read mtime out of metadata if available
+	d, ok := o.meta[metaMtime]
+	if !ok || d == nil {
+		// fs.Debugf(o, "No metadata")
+		return o.lastModified
+	}
+	modTime, err := swift.FloatStringToTime(*d)
+	if err != nil {
+		fs.Logf(o, "Failed to read mtime from object: %v", err)
+		return o.lastModified
+	}
+	return modTime
+}
+
+// SetModTime sets the modification time of the local fs object
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+	err := o.readMetaData(ctx)
+	if err != nil {
+		return err
+	}
+	o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
+
+	// Can't update metadata here, so return this error to force a recopy
+	if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
+		return fs.ErrorCantSetModTime
+	}
+
+	// Copy the object to itself to update the metadata
+	bucket, bucketPath := o.split()
+	req := s3.CopyObjectInput{
+		ContentType:       aws.String(fs.MimeType(ctx, o)), // Guess the content type
+		Metadata:          o.meta,
+		MetadataDirective: aws.String(s3.MetadataDirectiveReplace), // replace metadata with that passed in
+	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
+	return o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
+}
+
+// Storable returns a boolean indicating if this object is storable
+func (o *Object) Storable() bool {
+	return true
+}
+
+// Open an object for read
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
+	bucket, bucketPath := o.split()
+	req := s3.GetObjectInput{
+		Bucket: &bucket,
+		Key:    &bucketPath,
+	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
+	if o.fs.opt.SSECustomerAlgorithm != "" {
+		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
+	}
+	if o.fs.opt.SSECustomerKey != "" {
+		req.SSECustomerKey = &o.fs.opt.SSECustomerKey
+	}
+	if o.fs.opt.SSECustomerKeyMD5 != "" {
+		req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
+	}
+	httpReq, resp := o.fs.c.GetObjectRequest(&req)
+	fs.FixRangeOption(options, o.bytes)
+	for _, option := range options {
+		switch option.(type) {
+		case *fs.RangeOption, *fs.SeekOption:
+			_, value := option.Header()
+			req.Range = &value
+		case *fs.HTTPOption:
+			key, value := option.Header()
+			httpReq.HTTPRequest.Header.Add(key, value)
+		default:
+			if option.Mandatory() {
+				fs.Logf(o, "Unsupported mandatory option: %v", option)
+			}
+		}
+	}
+
+	err = o.fs.pacer.CallContext(ctx, func() (bool, error) {
+		var err error
+		httpReq.HTTPRequest = httpReq.HTTPRequest.WithContext(ctx)
+		err = httpReq.Send()
+		return o.fs.shouldRetry(err)
+	})
+	if err, ok := err.(awserr.RequestFailure); ok {
+		if err.Code() == "InvalidObjectState" {
+			return nil, errors.Errorf("Object in GLACIER, restore first: bucket=%q, key=%q", bucket, bucketPath)
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return resp.Body, nil
+}
+
+var warnStreamUpload sync.Once
+
+func (o *Object) uploadMultipart(ctx context.Context, req *s3.PutObjectInput, size int64, in io.Reader) (err error) {
+	f := o.fs
+
+	// make concurrency machinery
+	concurrency := f.opt.UploadConcurrency
+	if concurrency < 1 {
+		concurrency = 1
+	}
+	tokens := pacer.NewTokenDispenser(concurrency)
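+	// (tokens.Get blocks until one of the `concurrency` tokens is free and
+	// tokens.Put returns it - in effect a simple counting semaphore that
+	// limits the number of in-flight parts.)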
+
+	uploadParts := f.opt.MaxUploadParts
+	if uploadParts < 1 {
+		uploadParts = 1
+	} else if uploadParts > maxUploadParts {
+		uploadParts = maxUploadParts
+	}
+
+	// calculate size of parts
+	partSize := int(f.opt.ChunkSize)
+
+	// size can be -1 here meaning we don't know the size of the incoming file. We use ChunkSize
+	// buffers here (default 5 MiB). With a maximum number of parts (10,000) this gives a maximum
+	// file size of 48 GiB, which seems like a not too unreasonable limit.
+	if size == -1 {
+		warnStreamUpload.Do(func() {
+			fs.Logf(f, "Streaming uploads using chunk size %v will have maximum file size of %v",
+				f.opt.ChunkSize, fs.SizeSuffix(int64(partSize)*uploadParts))
+		})
+	} else {
+		// Adjust partSize until the number of parts is small enough.
+		if size/int64(partSize) >= uploadParts {
+			// Calculate partition size rounded up to the nearest MB
+			partSize = int((((size / uploadParts) >> 20) + 1) << 20)
+		}
+	}
+
+	memPool := f.getMemoryPool(int64(partSize))
+
+	var mReq s3.CreateMultipartUploadInput
+	structs.SetFrom(&mReq, req)
+	var cout *s3.CreateMultipartUploadOutput
+	err = f.pacer.CallContext(ctx, func() (bool, error) {
+		var err error
+		cout, err = f.c.CreateMultipartUploadWithContext(ctx, &mReq)
+		return f.shouldRetry(err)
+	})
+	if err != nil {
+		return errors.Wrap(err, "multipart upload failed to initialise")
+	}
+	uid := cout.UploadId
+
+	defer atexit.OnError(&err, func() {
+		if o.fs.opt.LeavePartsOnError {
+			return
+		}
+		fs.Debugf(o, "Cancelling multipart upload")
+		errCancel := f.pacer.CallContext(ctx, func() (bool, error) {
+			_, err := f.c.AbortMultipartUploadWithContext(context.Background(), &s3.AbortMultipartUploadInput{
+				Bucket:       req.Bucket,
+				Key:          req.Key,
+				UploadId:     uid,
+				RequestPayer: req.RequestPayer,
+			})
+			return f.shouldRetry(err)
+		})
+		if errCancel != nil {
+			fs.Debugf(o, "Failed to cancel multipart upload: %v", errCancel)
+		}
+	})()
+
+	var (
+		g, gCtx  = errgroup.WithContext(ctx)
+		finished = false
+		partsMu  sync.Mutex // to protect parts
+		parts    []*s3.CompletedPart
+		off      int64
+	)
+
+	for partNum := int64(1); !finished; partNum++ {
+		// Get a block of memory from the pool and token which limits concurrency.
+		tokens.Get()
+		buf := memPool.Get()
+
+		free := func() {
+			// return the memory and token
+			memPool.Put(buf)
+			tokens.Put()
+		}
+
+		// Fail fast: if any errgroup managed function has returned an error,
+		// gCtx is cancelled and there is no point in uploading all the other parts.
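+		// A minimal sketch of the errgroup pattern in play here (illustrative
+		// only; uploadOnePart stands in for the g.Go closure below):
+		//
+		//	g, gCtx := errgroup.WithContext(ctx)
+		//	g.Go(func() error { return uploadOnePart(gCtx) })
+		//	// gCtx.Err() != nil once any g.Go function has returned an error
+		//	err := g.Wait() // first non-nil error from the group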
+		if gCtx.Err() != nil {
+			free()
+			break
+		}
+
+		// Read the chunk
+		var n int
+		n, err = readers.ReadFill(in, buf) // this can never return 0, nil
+		if err == io.EOF {
+			if n == 0 && partNum != 1 { // end if no data and if not first chunk
+				free()
+				break
+			}
+			finished = true
+		} else if err != nil {
+			free()
+			return errors.Wrap(err, "multipart upload failed to read source")
+		}
+		buf = buf[:n]
+
+		partNum := partNum
+		fs.Debugf(o, "multipart upload starting chunk %d size %v offset %v/%v", partNum, fs.SizeSuffix(n), fs.SizeSuffix(off), fs.SizeSuffix(size))
+		off += int64(n)
+		g.Go(func() (err error) {
+			defer free()
+			partLength := int64(len(buf))
+
+			// create checksum of buffer for integrity checking
+			md5sumBinary := md5.Sum(buf)
+			md5sum := base64.StdEncoding.EncodeToString(md5sumBinary[:])
+
+			err = f.pacer.CallContext(ctx, func() (bool, error) {
+				uploadPartReq := &s3.UploadPartInput{
+					Body:                 bytes.NewReader(buf),
+					Bucket:               req.Bucket,
+					Key:                  req.Key,
+					PartNumber:           &partNum,
+					UploadId:             uid,
+					ContentMD5:           &md5sum,
+					ContentLength:        &partLength,
+					RequestPayer:         req.RequestPayer,
+					SSECustomerAlgorithm: req.SSECustomerAlgorithm,
+					SSECustomerKey:       req.SSECustomerKey,
+					SSECustomerKeyMD5:    req.SSECustomerKeyMD5,
+				}
+				uout, err := f.c.UploadPartWithContext(gCtx, uploadPartReq)
+				if err != nil {
+					if partNum <= int64(concurrency) {
+						return f.shouldRetry(err)
+					}
+					// retry all chunks once we have done the first batch
+					return true, err
+				}
+				partsMu.Lock()
+				parts = append(parts, &s3.CompletedPart{
+					PartNumber: &partNum,
+					ETag:       uout.ETag,
+				})
+				partsMu.Unlock()
+
+				return false, nil
+			})
+			if err != nil {
+				return errors.Wrap(err, "multipart upload failed to upload part")
+			}
+			return nil
+		})
+	}
+	err = g.Wait()
+	if err != nil {
+		return err
+	}
+
+	// sort the completed parts by part number
+	sort.Slice(parts, func(i, j int) bool {
+		return *parts[i].PartNumber < *parts[j].PartNumber
+	})
+
+	err = f.pacer.CallContext(ctx, func() (bool, error) {
+		_, err := f.c.CompleteMultipartUploadWithContext(ctx, &s3.CompleteMultipartUploadInput{
+			Bucket: req.Bucket,
+			Key:    req.Key,
+			MultipartUpload: &s3.CompletedMultipartUpload{
+				Parts: parts,
+			},
+			RequestPayer: req.RequestPayer,
+			UploadId:     uid,
+		})
+		return f.shouldRetry(err)
+	})
+	if err != nil {
+		return errors.Wrap(err, "multipart upload failed to finalise")
+	}
+	return nil
+}
+
+// Update the Object from in with modTime and size
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	bucket, bucketPath := o.split()
+	err := o.fs.makeBucket(ctx, bucket)
+	if err != nil {
+		return err
+	}
+	modTime := src.ModTime(ctx)
+	size := src.Size()
+
+	multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
+
+	// Set the mtime in the meta data
+	metadata := map[string]*string{
+		metaMtime: aws.String(swift.TimeToFloatString(modTime)),
+	}
+
+	// read the md5sum if available
+	// - for non multipart
+	//    - so we can add a ContentMD5
+	//    - so we can add the md5sum in the metadata as metaMD5Hash if using SSE/SSE-C
+	// - for multipart, provided checksums aren't disabled
+	//    - so we can add the md5sum in the metadata as metaMD5Hash
+	var md5sum string
+	if !multipart || !o.fs.opt.DisableChecksum {
+		hash, err := src.Hash(ctx, hash.MD5)
+		if err == nil && matchMd5.MatchString(hash) {
+			hashBytes, err := hex.DecodeString(hash)
+			if err == nil {
+				md5sum = base64.StdEncoding.EncodeToString(hashBytes)
+				if (multipart || o.fs.etagIsNotMD5) && !o.fs.opt.DisableChecksum {
+					// Set the md5sum as metadata on the object if
+					// - a multipart upload
+					// - the Etag is not an MD5, e.g. when using SSE/SSE-C
+					// provided checksums aren't disabled
+					metadata[metaMD5Hash] = &md5sum
+				}
+			}
+		}
+	}
+
+	// Guess the content type
+	mimeType := fs.MimeType(ctx, src)
+	req := s3.PutObjectInput{
+		Bucket:      &bucket,
+		ACL:         &o.fs.opt.ACL,
+		Key:         &bucketPath,
+		ContentType: &mimeType,
+		Metadata:    metadata,
+	}
+	if md5sum != "" {
+		req.ContentMD5 = &md5sum
+	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
+	if o.fs.opt.ServerSideEncryption != "" {
+		req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
+	}
+	if o.fs.opt.SSECustomerAlgorithm != "" {
+		req.SSECustomerAlgorithm = &o.fs.opt.SSECustomerAlgorithm
+	}
+	if o.fs.opt.SSECustomerKey != "" {
+		req.SSECustomerKey = &o.fs.opt.SSECustomerKey
+	}
+	if o.fs.opt.SSECustomerKeyMD5 != "" {
+		req.SSECustomerKeyMD5 = &o.fs.opt.SSECustomerKeyMD5
+	}
+	if o.fs.opt.SSEKMSKeyID != "" {
+		req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
+	}
+	if o.fs.opt.StorageClass != "" {
+		req.StorageClass = &o.fs.opt.StorageClass
+	}
+	// Apply upload options
+	for _, option := range options {
+		key, value := option.Header()
+		lowerKey := strings.ToLower(key)
+		switch lowerKey {
+		case "":
+			// ignore
+		case "cache-control":
+			req.CacheControl = aws.String(value)
+		case "content-disposition":
+			req.ContentDisposition = aws.String(value)
+		case "content-encoding":
+			req.ContentEncoding = aws.String(value)
+		case "content-language":
+			req.ContentLanguage = aws.String(value)
+		case "content-type":
+			req.ContentType = aws.String(value)
+		case "x-amz-tagging":
+			req.Tagging = aws.String(value)
+		default:
+			const amzMetaPrefix = "x-amz-meta-"
+			if strings.HasPrefix(lowerKey, amzMetaPrefix) {
+				metaKey := lowerKey[len(amzMetaPrefix):]
+				req.Metadata[metaKey] = aws.String(value)
+			} else {
+				fs.Errorf(o, "Don't know how to set key %q on upload", key)
+			}
+		}
+	}
+
+	var resp *http.Response // response from PUT
+	if multipart {
+		err = o.uploadMultipart(ctx, &req, size, in)
+		if err != nil {
+			return err
+		}
+	} else {
+
+		// Create the request
+		putObj, _ := o.fs.c.PutObjectRequest(&req)
+
+		// Sign it so we can upload using a presigned request.
+		//
+		// Note the SDK doesn't currently support streaming to
+		// PutObject so we'll use this work-around.
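+		//
+		// (Roughly: PresignRequest returns a URL plus the headers the
+		// signature covers; the request body itself is not signed, which
+		// is what lets us stream it with the plain http.Request below.)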
+		url, headers, err := putObj.PresignRequest(15 * time.Minute)
+		if err != nil {
+			return errors.Wrap(err, "s3 upload: sign request")
+		}
+
+		if o.fs.opt.V2Auth && headers == nil {
+			headers = putObj.HTTPRequest.Header
+		}
+
+		// Set the request body to nil if empty so as not to make a chunked encoding request
+		if size == 0 {
+			in = nil
+		}
+
+		// create the vanilla http request
+		httpReq, err := http.NewRequest("PUT", url, in)
+		if err != nil {
+			return errors.Wrap(err, "s3 upload: new request")
+		}
+		httpReq = httpReq.WithContext(ctx) // go1.13 can use NewRequestWithContext
+
+		// set the headers we signed and the length
+		httpReq.Header = headers
+		httpReq.ContentLength = size
+
+		err = o.fs.pacer.CallNoRetry(func() (bool, error) {
+			var err error
+			resp, err = o.fs.srv.Do(httpReq)
+			if err != nil {
+				return o.fs.shouldRetry(err)
+			}
+			body, err := rest.ReadBody(resp)
+			if err != nil {
+				return o.fs.shouldRetry(err)
+			}
+			if resp.StatusCode >= 200 && resp.StatusCode < 300 {
+				return false, nil
+			}
+			err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
+			return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	// User requested we don't HEAD the object after uploading it
+	// so make up the object as best we can assuming it got
+	// uploaded properly. If size < 0 then we need to do the HEAD.
+	if o.fs.opt.NoHead && size >= 0 {
+		o.md5 = md5sum
+		o.bytes = size
+		o.lastModified = time.Now()
+		o.meta = req.Metadata
+		o.mimeType = aws.StringValue(req.ContentType)
+		o.storageClass = aws.StringValue(req.StorageClass)
+		// If we have done a single part PUT request then we can read these
+		if resp != nil {
+			if date, err := http.ParseTime(resp.Header.Get("Date")); err == nil {
+				o.lastModified = date
+			}
+			o.setMD5FromEtag(resp.Header.Get("Etag"))
+		}
+		return nil
+	}
+
+	// Read the metadata from the newly created object
+	o.meta = nil // wipe old metadata
+	err = o.readMetaData(ctx)
+	return err
+}
+
+// Remove an object
+func (o *Object) Remove(ctx context.Context) error {
+	bucket, bucketPath := o.split()
+	req := s3.DeleteObjectInput{
+		Bucket: &bucket,
+		Key:    &bucketPath,
+	}
+	if o.fs.opt.RequesterPays {
+		req.RequestPayer = aws.String(s3.RequestPayerRequester)
+	}
+	err := o.fs.pacer.CallContext(ctx, func() (bool, error) {
+		_, err := o.fs.c.DeleteObjectWithContext(ctx, &req)
+		return o.fs.shouldRetry(err)
+	})
+	return err
+}
+
+// MimeType of an Object if known, "" otherwise
+func (o *Object) MimeType(ctx context.Context) string {
+	err := o.readMetaData(ctx)
+	if err != nil {
+		fs.Logf(o, "Failed to read metadata: %v", err)
+		return ""
+	}
+	return o.mimeType
+}
+
+// SetTier changes the storage class of the object
+func (o *Object) SetTier(tier string) (err error) {
+	ctx := context.TODO()
+	tier = strings.ToUpper(tier)
+	bucket, bucketPath := o.split()
+	req := s3.CopyObjectInput{
+		MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
+		StorageClass:      aws.String(tier),
+	}
+	err = o.fs.copy(ctx, &req, bucket, bucketPath, bucket, bucketPath, o)
+	if err != nil {
+		return err
+	}
+	o.storageClass = tier
+	return err
+}
+
+// GetTier returns storage class as string
+func (o *Object) GetTier() string {
+	if o.storageClass == "" {
+		return "STANDARD"
+	}
+	return o.storageClass
+}
+
+// Check the interfaces are satisfied
+var (
+	_ fs.Fs          = &Fs{}
+	_ fs.Copier      = &Fs{}
+	_ fs.PutStreamer = &Fs{}
+	_ fs.ListRer     = &Fs{}
+	_ fs.Commander   = &Fs{}
+	_ fs.CleanUpper  = &Fs{}
+	_ fs.Object      = &Object{}
+	_ fs.MimeTyper   = &Object{}
+	_ fs.GetTierer   = &Object{}
+	_ fs.SetTierer   = &Object{}
+)
diff --git a/vendor/github.com/rclone/rclone/backend/s3/v2sign.go b/vendor/github.com/rclone/rclone/backend/s3/v2sign.go
new file mode 100644
index 00000000000..d31cbeb7143
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/backend/s3/v2sign.go
@@ -0,0 +1,116 @@
+// v2 signing
+
+package s3
+
+import (
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"net/http"
+	"sort"
+	"strings"
+	"time"
+)
+
+// URL parameters that need to be added to the signature
+var s3ParamsToSign = map[string]struct{}{
+	"acl":                          {},
+	"location":                     {},
+	"logging":                      {},
+	"notification":                 {},
+	"partNumber":                   {},
+	"policy":                       {},
+	"requestPayment":               {},
+	"torrent":                      {},
+	"uploadId":                     {},
+	"uploads":                      {},
+	"versionId":                    {},
+	"versioning":                   {},
+	"versions":                     {},
+	"response-content-type":        {},
+	"response-content-language":    {},
+	"response-expires":             {},
+	"response-cache-control":       {},
+	"response-content-disposition": {},
+	"response-content-encoding":    {},
+}
+
+// sign signs requests using v2 auth
+//
+// Cobbled together from goamz and aws-sdk-go
+func sign(AccessKey, SecretKey string, req *http.Request) {
+	// Set date
+	date := time.Now().UTC().Format(time.RFC1123)
+	req.Header.Set("Date", date)
+
+	// Sort out URI
+	uri := req.URL.EscapedPath()
+	if uri == "" {
+		uri = "/"
+	}
+
+	// Look through headers of interest
+	var md5 string
+	var contentType string
+	var headersToSign []string
+	tmpHeadersToSign := make(map[string][]string)
+	for k, v := range req.Header {
+		k = strings.ToLower(k)
+		switch k {
+		case "content-md5":
+			md5 = v[0]
+		case "content-type":
+			contentType = v[0]
+		default:
+			if strings.HasPrefix(k, "x-amz-") {
+				tmpHeadersToSign[k] = v
+			}
+		}
+	}
+	var keys []string
+	for k := range tmpHeadersToSign {
+		keys = append(keys, k)
+	}
+	// https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
+	sort.Strings(keys)
+
+	for _, key := range keys {
+		vall := strings.Join(tmpHeadersToSign[key], ",")
+		headersToSign = append(headersToSign, key+":"+vall)
+	}
+	// Make headers of interest into canonical string
+	var joinedHeadersToSign string
+	if len(headersToSign) > 0 {
+		joinedHeadersToSign = strings.Join(headersToSign, "\n") + "\n"
+	}
+
+	// Look for query parameters which need to be added to the signature
+	params := req.URL.Query()
+	var queriesToSign []string
+	for k, vs := range params {
+		if _, ok := s3ParamsToSign[k]; ok {
+			for _, v := range vs {
+				if v == "" {
+					queriesToSign = append(queriesToSign, k)
+				} else {
+					queriesToSign = append(queriesToSign, k+"="+v)
+				}
+			}
+		}
+	}
+	// Add query parameters to URI
+	if len(queriesToSign) > 0 {
+		sort.StringSlice(queriesToSign).Sort()
+		uri += "?" + strings.Join(queriesToSign, "&")
+	}
+
+	// Make signature
+	payload := req.Method + "\n" + md5 + "\n" + contentType + "\n" + date + "\n" + joinedHeadersToSign + uri
+	hash := hmac.New(sha1.New, []byte(SecretKey))
+	_, _ = hash.Write([]byte(payload))
+	signature := make([]byte, base64.StdEncoding.EncodedLen(hash.Size()))
+	base64.StdEncoding.Encode(signature, hash.Sum(nil))
+
+	// Set signature in request
+	req.Header.Set("Authorization", "AWS "+AccessKey+":"+string(signature))
+}
diff --git a/vendor/github.com/rclone/rclone/cmd/serve/httplib/httplib.go b/vendor/github.com/rclone/rclone/cmd/serve/httplib/httplib.go
new file mode 100644
index 00000000000..d8f5ff962db
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/cmd/serve/httplib/httplib.go
@@ -0,0 +1,417 @@
+// Package httplib provides common functionality for http servers
+package httplib
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"fmt"
+	"html/template"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	auth "github.com/abbot/go-http-auth"
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/cmd/serve/httplib/serve/data"
+	"github.com/rclone/rclone/fs"
+)
+
+// Globals
+var ()
+
+// Help contains text describing the http server to add to the command
+// help.
+var Help = `
+### Server options
+
+Use --addr to specify which IP address and port the server should
+listen on, e.g. --addr 1.2.3.4:8000 or --addr :8080 to listen to all
+IPs. By default it only listens on localhost. You can use port
+:0 to let the OS choose an available port.
+
+If you set --addr to listen on a public or LAN accessible IP address
+then using Authentication is advised - see the next section for info.
+
+--server-read-timeout and --server-write-timeout can be used to
+control the timeouts on the server. Note that this is the total time
+for a transfer.
+
+--max-header-bytes controls the maximum number of bytes the server will
+accept in the HTTP header.
+
+--baseurl controls the URL prefix that rclone serves from. By default
+rclone will serve from the root. If you used --baseurl "/rclone" then
+rclone would serve from a URL starting with "/rclone/". This is
+useful if you wish to proxy rclone serve. Rclone automatically
+inserts leading and trailing "/" on --baseurl, so --baseurl "rclone",
+--baseurl "/rclone" and --baseurl "/rclone/" are all treated
+identically.
+
+--template allows a user to specify a custom markup template for http
+and webdav serve functions. The server exports the following markup
+to be used within the template to serve pages:
+
+| Parameter | Description |
+| :---------- | :---------- |
+| .Name | The full path of a file/directory. |
+| .Title | Directory listing of .Name |
+| .Sort | The current sort used. This is changeable via ?sort= parameter |
+| | Sort Options: namedirfirst,name,size,time (default namedirfirst) |
+| .Order | The current ordering used. This is changeable via ?order= parameter |
+| | Order Options: asc,desc (default asc) |
+| .Query | Currently unused. |
+| .Breadcrumb | Allows for creating a relative navigation |
+|-- .Link | The link, relative to the root, of the Text. |
+|-- .Text | The Name of the directory. |
+| .Entries | Information about a specific file/directory. |
+|-- .URL | The 'url' of an entry. |
+|-- .Leaf | Currently same as 'URL' but intended to be 'just' the name. |
+|-- .IsDir | Boolean for if an entry is a directory or not. |
+|-- .Size | Size in Bytes of the entry. |
+|-- .ModTime | The UTC timestamp of an entry. |
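+
+For example, to pass a custom template to an rclone serve command (an
+illustrative invocation):
+
+    rclone serve http --template /path/to/custom.html remote:path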
+
+#### Authentication
+
+By default this will serve files without needing a login.
+
+You can either use an htpasswd file which can take lots of users, or
+set a single username and password with the --user and --pass flags.
+
+Use --htpasswd /path/to/htpasswd to provide an htpasswd file. This is
+in standard apache format and supports MD5, SHA1 and BCrypt for basic
+authentication. Bcrypt is recommended.
+
+To create an htpasswd file:
+
+    touch htpasswd
+    htpasswd -B htpasswd user
+    htpasswd -B htpasswd anotherUser
+
+The password file can be updated while rclone is running.
+
+Use --realm to set the authentication realm.
+
+#### SSL/TLS
+
+By default this will serve over http. If you want you can serve over
+https. You will need to supply the --cert and --key flags. If you
+wish to do client side certificate validation then you will need to
+supply --client-ca also.
+
+--cert should be either a PEM encoded certificate or a concatenation
+of that with the CA certificate. --key should be the PEM encoded
+private key and --client-ca should be the PEM encoded client
+certificate authority certificate.
+`
+
+// Options contains options for the http Server
+type Options struct {
+	ListenAddr         string        // Port to listen on
+	BaseURL            string        // prefix to strip from URLs
+	ServerReadTimeout  time.Duration // Timeout for server reading data
+	ServerWriteTimeout time.Duration // Timeout for server writing data
+	MaxHeaderBytes     int           // Maximum size of request header
+	SslCert            string        // SSL PEM certificate (concatenation of certificate and CA certificate)
+	SslKey             string        // SSL PEM Private key
+	ClientCA           string        // Client certificate authority to verify clients with
+	HtPasswd           string        // htpasswd file - if not provided no authentication is done
+	Realm              string        // realm for authentication
+	BasicUser          string        // single username for basic auth if not using Htpasswd
+	BasicPass          string        // password for BasicUser
+	Auth               AuthFn        `json:"-"` // custom Auth (not set by command line flags)
+	Template           string        // User specified template
+}
+
+// AuthFn if used will be used to authenticate user, pass. If an error
+// is returned then the user is not authenticated.
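+//
+// For example (an illustrative sketch):
+//
+//	opt.Auth = func(user, pass string) (interface{}, error) {
+//		if pass != "opensesame" {
+//			return nil, errors.New("invalid credentials")
+//		}
+//		return user, nil
+//	}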
+//
+// If a non nil value is returned then it is added to the context under the
+// key ContextAuthKey.
+type AuthFn func(user, pass string) (value interface{}, err error)
+
+// DefaultOpt contains the default values used for Options
+var DefaultOpt = Options{
+	ListenAddr:         "localhost:8080",
+	Realm:              "rclone",
+	ServerReadTimeout:  1 * time.Hour,
+	ServerWriteTimeout: 1 * time.Hour,
+	MaxHeaderBytes:     4096,
+}
+
+// Server contains info about the running http server
+type Server struct {
+	Opt             Options
+	handler         http.Handler // original handler
+	listener        net.Listener
+	waitChan        chan struct{} // for waiting on the listener to close
+	httpServer      *http.Server
+	basicPassHashed string
+	useSSL          bool               // if server is configured for SSL/TLS
+	usingAuth       bool               // set if authentication is configured
+	HTMLTemplate    *template.Template // HTML template for web interface
+}
+
+type contextUserType struct{}
+
+// ContextUserKey is a simple context key for storing the username of the request
+var ContextUserKey = &contextUserType{}
+
+type contextAuthType struct{}
+
+// ContextAuthKey is a simple context key for storing info returned by AuthFn
+var ContextAuthKey = &contextAuthType{}
+
+// singleUserProvider provides the encrypted password for a single user
+func (s *Server) singleUserProvider(user, realm string) string {
+	if user == s.Opt.BasicUser {
+		return s.basicPassHashed
+	}
+	return ""
+}
+
+// parseAuthorization parses the Authorization header into user, pass.
+// It returns a boolean as to whether the parse was successful.
+func parseAuthorization(r *http.Request) (user, pass string, ok bool) {
+	authHeader := r.Header.Get("Authorization")
+	if authHeader != "" {
+		s := strings.SplitN(authHeader, " ", 2)
+		if len(s) == 2 && s[0] == "Basic" {
+			b, err := base64.StdEncoding.DecodeString(s[1])
+			if err == nil {
+				parts := strings.SplitN(string(b), ":", 2)
+				user = parts[0]
+				if len(parts) > 1 {
+					pass = parts[1]
+					ok = true
+				}
+			}
+		}
+	}
+	return
+}
+
+// NewServer creates an http server. The opt can be nil in which case
+// the default options will be used.
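+//
+// A typical call site might look like this (an illustrative sketch in
+// which handler is any http.Handler):
+//
+//	opt := httplib.DefaultOpt
+//	opt.ListenAddr = ":8080"
+//	s := httplib.NewServer(handler, &opt)
+//	if err := s.Serve(); err != nil {
+//		log.Fatal(err)
+//	}
+//	s.Wait() // block until the listener closes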
+func NewServer(handler http.Handler, opt *Options) *Server {
+	s := &Server{
+		handler: handler,
+	}
+
+	// Make a copy of the options
+	if opt != nil {
+		s.Opt = *opt
+	} else {
+		s.Opt = DefaultOpt
+	}
+
+	// Use htpasswd if required on everything
+	if s.Opt.HtPasswd != "" || s.Opt.BasicUser != "" || s.Opt.Auth != nil {
+		var authenticator *auth.BasicAuth
+		if s.Opt.Auth == nil {
+			var secretProvider auth.SecretProvider
+			if s.Opt.HtPasswd != "" {
+				fs.Infof(nil, "Using %q as htpasswd storage", s.Opt.HtPasswd)
+				secretProvider = auth.HtpasswdFileProvider(s.Opt.HtPasswd)
+			} else {
+				fs.Infof(nil, "Using --user %s --pass XXXX as authenticated user", s.Opt.BasicUser)
+				s.basicPassHashed = string(auth.MD5Crypt([]byte(s.Opt.BasicPass), []byte("dlPL2MqE"), []byte("$1$")))
+				secretProvider = s.singleUserProvider
+			}
+			authenticator = auth.NewBasicAuthenticator(s.Opt.Realm, secretProvider)
+		}
+		oldHandler := handler
+		handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			// No auth wanted for OPTIONS method
+			if r.Method == "OPTIONS" {
+				oldHandler.ServeHTTP(w, r)
+				return
+			}
+			unauthorized := func() {
+				w.Header().Set("Content-Type", "text/plain")
+				w.Header().Set("WWW-Authenticate", `Basic realm="`+s.Opt.Realm+`"`)
+				http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
+			}
+			user, pass, authValid := parseAuthorization(r)
+			if !authValid {
+				unauthorized()
+				return
+			}
+			if s.Opt.Auth == nil {
+				if username := authenticator.CheckAuth(r); username == "" {
+					fs.Infof(r.URL.Path, "%s: Unauthorized request from %s", r.RemoteAddr, user)
+					unauthorized()
+					return
+				}
+			} else {
+				// Custom Auth
+				value, err := s.Opt.Auth(user, pass)
+				if err != nil {
+					fs.Infof(r.URL.Path, "%s: Auth failed from %s: %v", r.RemoteAddr, user, err)
+					unauthorized()
+					return
+				}
+				if value != nil {
+					r = r.WithContext(context.WithValue(r.Context(), ContextAuthKey, value))
+				}
+			}
+			r = r.WithContext(context.WithValue(r.Context(), ContextUserKey, user))
+			oldHandler.ServeHTTP(w, r)
+		})
+		s.usingAuth = true
+	}
+
+	s.useSSL = s.Opt.SslKey != ""
+	if (s.Opt.SslCert != "") != s.useSSL {
+		log.Fatalf("Need both --cert and --key to use SSL")
+	}
+
+	// If a Base URL is set then serve from there
+	s.Opt.BaseURL = strings.Trim(s.Opt.BaseURL, "/")
+	if s.Opt.BaseURL != "" {
+		s.Opt.BaseURL = "/" + s.Opt.BaseURL
+	}
+
+	// FIXME make a transport?
+	s.httpServer = &http.Server{
+		Addr:              s.Opt.ListenAddr,
+		Handler:           handler,
+		ReadTimeout:       s.Opt.ServerReadTimeout,
+		WriteTimeout:      s.Opt.ServerWriteTimeout,
+		MaxHeaderBytes:    s.Opt.MaxHeaderBytes,
+		ReadHeaderTimeout: 10 * time.Second, // time to send the headers
+		IdleTimeout:       60 * time.Second, // time to keep idle connections open
+		TLSConfig: &tls.Config{
+			MinVersion: tls.VersionTLS10, // disable SSL v3.0 and earlier
+		},
+	}
+
+	if s.Opt.ClientCA != "" {
+		if !s.useSSL {
+			log.Fatalf("Can't use --client-ca without --cert and --key")
+		}
+		certpool := x509.NewCertPool()
+		pem, err := ioutil.ReadFile(s.Opt.ClientCA)
+		if err != nil {
+			log.Fatalf("Failed to read client certificate authority: %v", err)
+		}
+		if !certpool.AppendCertsFromPEM(pem) {
+			log.Fatalf("Can't parse client certificate authority")
+		}
+		s.httpServer.TLSConfig.ClientCAs = certpool
+		s.httpServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert
+	}
+
+	htmlTemplate, templateErr := data.GetTemplate(s.Opt.Template)
+	if templateErr != nil {
+		log.Fatal(templateErr)
+	}
+	s.HTMLTemplate = htmlTemplate
+
+	return s
+}
+
+// Serve runs the server - returns an error only if
+// the listener was not started; does not block, so
+// use s.Wait() to block on the listener indefinitely.
+func (s *Server) Serve() error {
+	ln, err := net.Listen("tcp", s.httpServer.Addr)
+	if err != nil {
+		return errors.Wrap(err, "start server failed")
+	}
+	s.listener = ln
+	s.waitChan = make(chan struct{})
+	go func() {
+		var err error
+		if s.useSSL {
+			// hacky hack to get this to work with old Go versions, which
+			// don't have ServeTLS on http.Server; see PR #2194.
+			type tlsServer interface {
+				ServeTLS(ln net.Listener, cert, key string) error
+			}
+			srvIface := interface{}(s.httpServer)
+			if tlsSrv, ok := srvIface.(tlsServer); ok {
+				// yay -- we get easy TLS support with HTTP/2
+				err = tlsSrv.ServeTLS(s.listener, s.Opt.SslCert, s.Opt.SslKey)
+			} else {
+				// oh well -- we can still do TLS but might not have HTTP/2
+				tlsConfig := new(tls.Config)
+				tlsConfig.Certificates = make([]tls.Certificate, 1)
+				tlsConfig.Certificates[0], err = tls.LoadX509KeyPair(s.Opt.SslCert, s.Opt.SslKey)
+				if err != nil {
+					log.Printf("Error loading key pair: %v", err)
+				}
+				tlsLn := tls.NewListener(s.listener, tlsConfig)
+				err = s.httpServer.Serve(tlsLn)
+			}
+		} else {
+			err = s.httpServer.Serve(s.listener)
+		}
+		if err != nil {
+			log.Printf("Error on serving HTTP server: %v", err)
+		}
+	}()
+	return nil
+}
+
+// Wait blocks while the listener is open.
+func (s *Server) Wait() {
+	<-s.waitChan
+}
+
+// Close shuts the running server down
+func (s *Server) Close() {
+	err := s.httpServer.Close()
+	if err != nil {
+		log.Printf("Error on closing HTTP server: %v", err)
+		return
+	}
+	close(s.waitChan)
+}
+
+// URL returns the serving address of this server
+func (s *Server) URL() string {
+	proto := "http"
+	if s.useSSL {
+		proto = "https"
+	}
+	addr := s.Opt.ListenAddr
+	// prefer actual listener address if using ":port" or "addr:0"
+	useActualAddress := addr == "" || addr[0] == ':' || addr[len(addr)-1] == ':' || strings.HasSuffix(addr, ":0")
+	if s.listener != nil && useActualAddress {
+		// use actual listener address; required if using 0-port
+		// (i.e. port assigned by operating system)
+		addr = s.listener.Addr().String()
+	}
+	return fmt.Sprintf("%s://%s%s/", proto, addr, s.Opt.BaseURL)
+}
+
+// UsingAuth returns true if authentication is required
+func (s *Server) UsingAuth() bool {
+	return s.usingAuth
+}
+
+// Path returns the current path with the Prefix stripped
+//
+// If it returns false, then the path was invalid and the handler
+// should exit as the error response has already been sent
+func (s *Server) Path(w http.ResponseWriter, r *http.Request) (Path string, ok bool) {
+	Path = r.URL.Path
+	if s.Opt.BaseURL == "" {
+		return Path, true
+	}
+	if !strings.HasPrefix(Path, s.Opt.BaseURL+"/") {
+		// Send a redirect if the BaseURL was requested without a /
+		if Path == s.Opt.BaseURL {
+			http.Redirect(w, r, s.Opt.BaseURL+"/", http.StatusPermanentRedirect)
+			return Path, false
+		}
+		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		return Path, false
+	}
+	Path = Path[len(s.Opt.BaseURL):]
+	return Path, true
+}
diff --git a/vendor/github.com/rclone/rclone/cmd/serve/httplib/serve/data/assets_vfsdata.go b/vendor/github.com/rclone/rclone/cmd/serve/httplib/serve/data/assets_vfsdata.go
new file mode 100644
index 00000000000..dc96cd755f6
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/cmd/serve/httplib/serve/data/assets_vfsdata.go
@@ -0,0 +1,186 @@
+// Code generated by vfsgen; DO NOT EDIT.
+
+// +build !dev
+
+package data
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	pathpkg "path"
+	"time"
+)
+
+// Assets statically implements the virtual filesystem provided to vfsgen.
+var Assets = func() http.FileSystem {
+	fs := vfsgen۰FS{
+		"/": &vfsgen۰DirInfo{
+			name:    "/",
+			modTime: time.Date(2020, 5, 5, 16, 40, 6, 115915195, time.UTC),
+		},
+		"/index.html": &vfsgen۰CompressedFileInfo{
+			name:             "index.html",
+			modTime:          time.Date(2020, 5, 5, 16, 40, 5, 919909715, time.UTC),
+			uncompressedSize: 15424,
+
+			compressedContent:
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xbc\x7b\x6d\x73\xdb\x46\xf2\xe7\x6b\xe9\x53\x4c\x98\xcd\x8a\x4a\xc0\xe1\x3c\x3f\x48\xa4\xf6\x6c\xc6\x59\xbb\x56\x71\x52\xb1\x9d\xad\x6c\x2a\x2f\x20\x62\x48\xe2\x0c\x02\x0c\x00\xea\xc1\x3a\x55\xdd\x87\xb8\x4f\x78\x9f\xe4\xaa\x67\x00\x12\x90\x28\x27\x7b\xf5\xdf\xbf\xec\xa2\x80\x9e\x99\x9e\x7e\xf8\x75\x4f\x37\x08\x4d\xbe\x18\x8d\xd0\xf1\x78\x8c\x66\xc5\xe6\xae\x4c\x97\xab\x1a\x31\x42\x25\xfa\x3e\xae\xeb\x95\xbb\x41\xaf\x8b\xac\x46\x71\x9e\xa0\xf7\x2b\x87\x66\x71\x92\xdc\xa1\x17\xdb\x7a\x55\x94\xd5\xf1\x78\x0c\xeb\x2e\xd3\xb9\xcb\x2b\x97\xa0\x6d\x9e\xb8\x12\xd5\x2b\x87\x5e\x6c\xe2\xf9\xca\xb5\x23\x11\xfa\xd9\x95\x55\x5a\xe4\x88\x61\x82\x86\x30\x61\xd0\x0c\x0d\x4e\xcf\x81\xc5\x5d\xb1\x45\xeb\xf8\x0e\xe5\x45\x8d\xb6\x95\x43\xf5\x2a\xad\xd0\x22\xcd\x1c\x72\xb7\x73\xb7\xa9\x51\x9a\xa3\x79\xb1\xde\x64\x69\x9c\xcf\x1d\xba\x49\xeb\x95\xdf\xa7\xe1\x82\x81\xc7\x2f\x0d\x8f\xe2\xaa\x8e\xd3\x1c\xc5\x68\x5e\x6c\xee\x50\xb1\xe8\x4e\x44\x71\xdd\x08\x0d\x3f\xab\xba\xde\x9c\x8d\xc7\x37\x37\x37\x38\xf6\x02\xe3\xa2\x5c\x8e\xb3\x30\xb5\x1a\x5f\xbe\x99\xbd\x7a\xfb\xee\xd5\x88\x61\xd2\x2c\xfa\x90\x67\xae\xaa\x50\xe9\x7e\xdf\xa6\xa5\x4b\xd0\xd5\x1d\x8a\x37\x9b\x2c\x9d\xc7\x57\x99\x43\x59\x7c\x83\x8a\x12\xc5\xcb\xd2\xb9\x04\xd5\x05\x08\x7d\x53\xa6\x75\x9a\x2f\x23\x54\x15\x8b\xfa\x26\x2e\x1d\xb0\x49\xd2\xaa\x2e\xd3\xab\x6d\xdd\xb3\x59\x2b\x62\x5a\xf5\x26\x14\x39\x8a\x73\x34\x78\xf1\x0e\xbd\x79\x37\x40\x2f\x5f\xbc\x7b\xf3\x2e\x02\x26\xff\x7c\xf3\xfe\xf5\x0f\x1f\xde\xa3\x7f\xbe\xf8\xe9\xa7\x17\x6f\xdf\xbf\x79\xf5\x0e\xfd\xf0\x13\x9a\xfd\xf0\xf6\xdb\x37\xef\xdf\xfc\xf0\xf6\x1d\xfa\xe1\x3b\xf4\xe2\xed\x2f\xe8\x1f\x6f\xde\x7e\x1b\x21\x97\xd6\x2b\x57\x22\x77\xbb\x29\x41\x83\xa2\x44\x29\x58\xd3\x25\xde\x74\xef\x9c\xeb\x89\xb0\x28\x82\x48\xd5\xc6\xcd\xd3\x45\x3a\x47\x59\x9c\x2f\xb7\xf1\xd2\xa1\x65\x71\xed\xca\x3c\xcd\x97\x68\xe3\xca\x75\x5a\x81\x57\x2b\x40\x07\xb0\xc9\xd2\x75\x5a\xc7\xb5\x27\x3d\xd1\x0b\xa3\xe3\xef\x8b\x04\xb8\x85\x19\x67\x08\xbd\x48\xe2\x4d\x1d\x4c\x55\xce\xb3\x22\x77\x68\x1d\x97\x1f\xb7\x1b\x34\x1a\x5d\x1c\x1f\x4f\xbe\xf8\xf6\x87\xd9\xfb\x5f\x7e\x7c\x85\x56\xf5\x3a\xbb\x38\x9e\x84\x5f\x47\x93\x95\x8b\x93\x8b\xe3\xa3\xa3\x49\x9d\xd6\x99\xbb\xb8\xbf\x87\x01\x84\xdf\xc6\x6b\xf7\xf0\x30\x19\x07\x2a\x8c\xaf\x5d\x1d\xa3\xf9\x2a\x2e\x2b\x57\x4f\x07\xdb\x7a\x31\x32\x83\xfd\x40\x1e\xaf\xdd\x74\x70\x9d\xba\x9b\x4d\x51\xd6\x03\x34\x2f\xf2\xda\xe5\xf5\x74\x70\x93\x26\xf5\x6a\x9a\xb8\xeb\x74\xee\x46\xfe\x26\x42\x69\x9e\xd6\x69\x9c\x8d\xaa\x79\x9c\xb9\x29\xc5\xe4\x09\xa3\x65\x51\x2c\x33\xd7\x61\x93\x17\x75\x19\xe7\x55\x16\xd7\x6e\x70\x71\x3c\xa9\xea\x3b\x10\xeb\x6b\x74\x8f\x36\x71\x92\xa4\xf9\xf2\x0c\x91\x73\xd0\x78\x99\xe6\xfe\xf2\xe1\xf8\xaa\x48\xee\xd0\xfd\xf1\xd1\xa2\xc8\xeb\xd1\x22\x5e\xa7\xd9\xdd\x19\xaa\xe2\xbc\x1a\x55\xae\x4c\x17\xe7\xc7\x47\xb5\xbb\xad\x47\xa5\x03\xe3\x7a\x0e\xc5\xa6\x4e\xd7\xe9\x27\x57\x6d\x9c\x4b\xce\x8f\x8f\xae\xe2\xf9\xc7\x65\x59\x6c\xf3\x64\x34\x2f\xb2\xa2\x3c\x43\x5f\x2e\xfc\xcf\xf9\xf1\xc3\x71\x0c\xbc\x5b\x32\x21\xca\x25\xbc\x65\x99\xb8\x79\x51\x7a\xc7\x9c\xa1\xbc\xc8\x9d\x9f\x7e\xb6\x02\x6f\x47\xc7\x2b\x8a\x9a\xeb\x2e\x03\x4e\xed\x3c\xf0\x05\x87\xc0\xbc\x2f\xab\xed\x7a\x1d\x97\x5e\x85\x46\xc7\x51\xe6\x16\xf5\x19\x92\x5f\x9d\xef\x49\x3e\xc7\x04\xda\xc3\x71\xbd\x3a\x5b\xa4\x65\x55\x8f\xe6\xab\x34\x4b\xa2\xe3\x3a\xe9\xde\x03\x27\xef\x81\x33\x44\xbf\x3a\x47\xe3\xaf\x51\x0d\x8b\x5d\xe9\x21\xba\x2e\xae\x20\x45\x7c\x3d\x0e\x7c\xb2\xb8\xc7\x66\x7f\xfb\xe7\xb9\x04\x4d\xba\xf2\xd7\xc5\xe6\x0c\x31\xb9\xb9\xed\x28\x70\x55\xd4\x75\xb1\x3e\x43\x34\x90\x0
f\xd9\x9c\xc1\x3f\x6f\x1b\xba\x73\x68\x95\x7e\x72\x67\x88\x11\xbf\xc8\x53\x6e\x5c\x30\x45\x5e\x94\xeb\x38\x3b\x3f\x3e\xba\x59\xa5\xb5\x1b\x55\x9b\x78\xee\x80\x7a\x53\xc6\x9b\xf3\xe3\x23\xb0\xfc\x22\x2b\x6e\x46\xb7\x67\x68\x95\x26\x89\xcb\x5b\xb7\xb5\x23\x67\xc8\x65\x59\xba\xa9\xd2\xea\x7c\xef\x20\x6b\x6d\x23\xc1\x23\xc7\x93\xf3\xe3\xa3\x1d\xee\x90\x00\x79\x1e\x1e\x39\xf9\x09\x28\x7c\x3c\x67\x69\x40\x86\x9f\xfb\xc8\x4d\x7b\x20\x1f\x3f\x1c\xaf\x21\x03\xdf\x1f\x1f\x25\x69\xb5\xc9\xe2\xbb\x33\x74\x95\x15\xf3\x8f\x30\x82\x7d\xc8\xf4\x4d\x42\xd9\xde\x24\x2d\xea\x7f\x76\x65\x12\xe7\x71\xd4\x87\xff\x55\x51\x26\xae\xdc\x3b\x60\x73\x8b\xaa\x22\x4b\x13\xf4\xa5\x9d\xc1\xbf\xf3\x47\x8e\xa3\xe4\xb0\xe3\x48\xd0\xd9\x0b\x33\x4a\x6b\xb7\xde\x6b\xd0\xc2\x93\xba\x35\x4c\xf9\x72\x91\x66\x75\x0f\x12\x67\xc1\x62\x8d\x2c\x3d\x21\x66\xb3\x99\xc7\xb4\x3f\x0e\x3a\xa0\x23\xe4\xab\xbd\xf0\xf3\x22\xcb\xe2\x4d\xe5\xce\x50\x7b\xe5\xd7\xf8\x2d\x0e\xe8\x97\xc4\xd5\xca\x25\xe8\xcb\x24\x86\x7f\x7e\xaa\x4f\x13\x75\xb9\xf7\xd6\x33\x51\xef\xe6\x21\xc2\x20\x1c\x76\x4e\x8d\xb3\x74\x99\x9f\x21\x88\xcb\xf3\x8e\x4e\x60\x12\x04\x7e\x80\xf0\x98\xad\xe2\x7c\xe9\x12\xb4\x28\x8b\x35\x22\x90\x9f\x59\x1b\x65\x4f\x62\xe3\xb3\x26\xee\x79\x59\x79\xca\x41\x88\x7b\xce\x5d\x94\x5e\x65\x71\xc0\x4b\xbd\x42\xd5\xf5\x12\x46\xae\x5d\x59\xa7\xf3\x38\x6b\x35\x58\xa7\x49\x92\x05\xdb\x85\x08\x3f\x18\x3b\x5d\x01\x1a\xa4\xd7\xc9\x59\x5e\xaf\x02\x72\x87\xec\xb4\xe3\x28\x43\xbe\x7a\x32\x81\x9f\xf6\x7c\x4f\x7c\x00\x37\xbf\x9a\x04\xb6\x9f\x2c\x4e\xa3\xfe\x6a\x71\xfa\xd8\xf0\x1e\x5e\x87\xc4\x68\xd4\xdc\x14\x55\x1a\x42\x2e\xbe\xaa\x8a\x6c\x5b\xb7\x2a\x62\x38\x67\xbc\x2b\xf1\xb2\xd8\x6e\x3a\x88\x0d\x39\x96\x62\x2d\x01\xb3\x47\x37\x45\x99\x8c\xae\x4a\x17\x7f\x3c\x43\xfe\xd7\x28\xce\xb2\x6e\x1a\x01\xd3\xb4\x43\x30\xf9\xb1\x57\x36\xa5\x1b\xb5\x7e\xc1\xe9\xbc\xc8\x9f\x46\x87\x6c\x02\x08\x46\x71\x55\x94\x75\x2f\xda\xd3\x1c\x32\xc5\xa8\x09\xfa\x5d\x18\x78\xe9\x56\xae\x13\x5f\x1d\x6d\x4b\x97\xc5\x75\x7a\xed\x20\xb5\x01\xae\x30\x0b\x01\xd8\xd9\x02\xd7\xc5\xe6\x39\x13\x1d\x05\x23\x90\x76\xf9\x88\x3e\x91\x10\x07\x6c\x3e\xcb\xa1\x85\x6e\x58\xba\x67\xf8\x70\xbc\x28\x8a\x27\x39\xc0\xc7\xcb\x53\x90\x87\x54\xd6\x75\xf8\xdc\xe5\xb5\x2b\x81\xcd\xff\x58\xbb\x24\x8d\xd1\x70\x1d\xdf\x8e\x1a\x9b\x28\x42\x36\xb7\x80\x91\xf1\xd7\x47\x78\x95\x26\xae\x4d\x1d\x7b\x63\x86\xe3\xf8\xe8\x01\x95\x6e\x5d\x5c\x87\x72\xe9\xa3\x73\x1b\x94\xc4\xb5\xab\xa0\x3e\xdc\x9f\x60\x47\x07\xb0\xdd\x9a\x3f\xde\xd6\x05\xf0\x39\x3e\xea\x41\x96\x9f\x46\x8f\x96\x05\xc4\x1f\x3a\xae\x8f\x0e\x21\x19\x38\x86\x53\xee\xd1\x11\x13\xe8\x3e\xaa\xbb\xa7\x03\xd0\x3b\x59\xf5\xa8\x63\x0d\x4a\x82\x41\x1f\x8e\x1f\x8e\x27\xe3\xa6\x62\x3a\x9a\x8c\x9b\x8a\x6f\xe2\x13\x5f\x91\x67\x45\x9c\x4c\x4f\x02\x8b\xe1\xe9\x79\x5d\x2c\x97\x99\x1b\x0e\x7c\xee\x1c\x9c\x9e\xcf\x7d\xf6\x7a\x97\x7e\x72\xc3\xd3\x13\x5f\xa6\x41\x68\x5d\x87\x16\x64\x3a\xa0\x98\x0e\xd0\xed\x3a\xcb\xab\xe9\xa0\xd3\x01\xdc\x70\x5f\xfd\x33\x42\xc8\xb8\xba\x5e\x36\x53\xce\x6e\xb3\x34\xff\x78\x68\x22\xb5\xd6\x8e\xfd\xe8\x00\x05\x4c\x4f\x07\x64\x80\x42\xf1\x08\x57\x5e\xfc\xe9\xe0\x00\xd4\x7c\xed\x78\x34\x49\xdc\xa2\xf2\x57\x47\xbe\x05\xfb\xae\xc8\xa0\xf6\x80\xda\xd7\xd3\x96\x28\x4d\xa6\x83\x85\xa7\x0e\xa0\x19\xca\x46\xe5\x16\x38\xe6\x45\xfe\xc9\x95\x45\xa0\xf9\x5b\x17\x38\x1e\x1d\x4d\x36\x71\xbd\x42\xc9\x74\xf0\x3d\x33\x12\x33\x86\xb8\xc6\x52\xae\x46\x54\x30\xac\x2e\x29\x25\xd8\x22\xf2\x9a\x53\xac\x67\x54\x60\x26\x11\x41\x04\x51\x05\xd4\x30\xf5\x5a\x4b\x4c\x57\x1c\x48\xec\x67\xb8\x9e\x93\x11\x23\x58\xc9\x11\xcc\x57\x23\x3f\x69\x04\x0c\xc2\xe5\xa7\x56\x8a\x2f\xbf\xfb\xee\x05\x
21\x64\x30\x7e\x56\x12\xd5\xdd\x97\x2b\x44\x90\x24\x98\x19\x44\x90\xd2\x58\x8b\x6b\x2a\x0d\xd6\x73\x82\xa8\xc6\x42\x23\xbf\x1d\x82\x15\xd2\x7f\x86\xcb\xd7\x9e\xd9\x1c\xa6\x08\x10\x19\xe4\xa0\x02\xf3\x70\xe5\xa7\xfc\x0c\xdc\xe4\x9c\x8c\x3c\x9f\x56\x6c\x18\x19\xed\x27\x75\xc5\x9e\xbd\x60\xa6\x15\x7b\x32\x5e\x1e\xb0\xfe\xa8\x5a\x15\x65\x3d\xdf\xd6\xe0\xd4\xb2\xf8\xe8\x1a\xa3\x37\x77\xa3\xc6\xe7\xb4\xe7\x91\xae\xc7\xdc\xb5\xcb\x8b\x24\xd9\x79\xe9\x20\xf3\x11\x9c\xe0\x9b\x83\x9e\x6e\xd6\x3d\xb7\xb0\x5a\xc5\x9b\x1d\x04\x9e\x9a\x5e\x18\xad\x22\xf0\x96\x30\xca\x12\x86\x2e\x3d\x1a\x28\x13\xdc\xf4\xc9\x00\x0f\x46\xb4\x91\x11\x41\x97\x9c\x62\x65\xa9\x92\xcc\x46\x04\x79\xaf\x35\x4b\x08\x22\x11\x55\xd8\x58\x65\x29\x51\x88\xf4\x78\x90\x88\x52\x86\x95\x50\x44\x53\xe0\xa1\x70\xc3\xe3\x19\xb2\x96\x98\x58\xcd\x0d\x91\x68\xd6\x21\x4b\x81\x85\x90\x8a\x10\x83\x38\x61\x58\x49\xc9\x8c\xec\x6e\x74\x58\xb3\x7f\x0d\xbc\x7d\xde\x79\x7b\x3c\x02\xe6\xc5\x64\x0c\x76\xf9\x03\x2b\xa9\x9e\xe2\x5c\xf5\x34\x07\xd0\x46\x1e\xb4\xdc\x48\x65\x10\x89\x3c\x72\xa9\x25\xdc\x82\xea\x8c\x29\x2c\x24\x15\x4c\xa0\x19\x89\x98\xe0\xd8\x12\x2b\x34\x45\x1d\x1e\x4c\x1a\x4c\x2d\xe7\xcc\xa0\xce\x46\x1d\xea\x65\x47\x9c\x0e\x79\xd6\xb1\x43\x8f\xc7\xce\x66\x9d\xfd\xba\xd4\xbd\x4c\x5d\xbb\x77\x04\xef\xd9\x7d\xaf\x5c\xd7\xee\x0a\xf5\x6d\xf4\x8c\x9d\x7d\x24\x3d\xb2\xf3\x2e\xa2\xba\x16\xa7\x4c\x61\x2a\x05\xe5\x22\x62\x92\x60\x29\x2d\x35\x02\xcd\x80\x6c\x24\xb1\x1a\xc8\x14\x1b\xc3\x95\xe6\x88\x32\x8d\x39\x21\x52\x80\x99\x38\x26\x44\x51\xc6\x3c\x55\x6b\xa6\x08\x8b\x98\x14\x98\x06\xea\x8c\x32\x83\x85\xb2\x42\x00\x59\x62\xd6\x4e\x36\xd8\x52\x4b\x28\x98\x54\x61\x4a\x04\x31\x40\xb5\x58\x71\xc3\x39\x58\x54\x63\x42\x18\x11\x14\xcd\x28\xf7\x12\x59\xc5\xbc\xa1\x39\x53\x92\x53\x44\x21\x6f\x30\x63\x24\x4c\xb6\x88\x72\x8e\x29\x21\x44\x6a\x7f\x3b\xa3\x5c\x60\x61\xb9\xe6\xba\x19\x96\x58\x50\xc9\x95\xf0\x3c\xa4\xa4\x84\x21\xca\x15\xa6\x94\x31\x22\xfc\x7e\x4a\x4b\xe9\xb7\x53\xd8\x10\x4b\x84\xe8\x4a\x41\xb9\xc6\x4c\x1a\x45\xad\xd7\xc3\x1e\xa0\x0a\x2c\x75\xcb\xa2\x43\x06\x10\x04\xf5\xba\x54\x86\xcd\x9e\x4a\x38\x37\x9c\x79\x1b\x0b\xa9\xa9\xe0\x41\x0a\x6d\x94\x54\x2a\x62\xc2\x62\x4b\x0c\x55\xdc\x4b\x2c\x15\xd5\xda\x7a\x2a\xf1\xb6\xe8\x53\x0d\x96\xc1\x4d\x9e\x05\x31\x56\x33\x60\xc1\xb0\xa1\x82\x19\xe5\x2d\x61\x94\xb0\xdc\x46\x8c\x6b\xc8\x2f\x82\x98\x3e\x95\x63\xa6\xb9\x50\xde\x8a\x7b\x32\x93\x98\x04\xe1\xba\xb2\x51\x8d\x65\xcb\xd8\x60\x6a\x08\x13\x40\x25\xd8\x08\x65\xb9\x67\x61\xb1\xb6\x46\x53\x1e\x31\x22\x30\x6b\x0c\x27\x00\x4e\x96\x71\x11\x51\x6b\xb0\x82\xed\x04\xa2\x42\x60\xc1\x2c\x67\x26\xa2\x96\x63\xad\x40\x3f\x88\x78\x8d\x19\x55\xca\xd8\x88\x1a\x83\x8d\xb2\xdc\x18\x44\x25\xc1\x4a\x1b\x41\x69\x44\x8d\xc0\x26\x88\x4c\xa5\xc0\x86\x2b\xab\x79\x44\x0d\x6d\xc1\x32\x83\xb3\xcc\x5a\x29\xb9\x8c\xa8\x06\xa0\x5a\x69\x19\xa2\x8a\x63\xc5\x14\x15\x36\xa2\x5a\xec\xf0\xad\x0c\x16\x86\x4a\xc9\x22\xaa\x19\x56\x80\x58\x08\x06\xcd\x31\xe7\xca\x4a\x11\x51\x4d\xb0\xe0\x46\x6b\x85\xa8\xb6\x98\x52\x6e\x0d\x8f\x60\x9d\x52\x92\x13\x85\xa8\x91\x58\x1a\x6d\x80\x85\xd2\x98\x0b\x70\x1f\x9a\x51\xcb\x30\x51\x54\x33\x20\x2b\xcc\x28\x6c\x88\xc0\x00\x5a\x11\xae\x4d\x44\x95\xc4\x5c\x30\x23\x35\x62\x44\x7a\x29\xa8\x88\xa8\x12\x58\x05\xa5\x67\x8c\x32\xb0\x32\xd5\x9e\xca\x82\xf7\x18\xb5\x58\x5a\x43\x85\x8a\x40\x25\x6b\x21\x7e\x11\x63\x06\x53\xc5\xbc\xce\x7b\xea\x25\x13\x0a\x13\x09\x21\xfe\x2c\xd9\xca\xd6\xa9\xb3\x1e\x59\x63\x0d\x16\x92\x08\xa8\x5a\x32\xc1\x81\x6a\x31\x44\x13\x11\x08\xc0\xc7\x35\x31\xd6\x46\x8c\x50\x4c\x9a\x2c\x02\x19\x85\x51\x88\xbe\x88\x41\x0e\x0b\x50\x86\x10\x20\xda\x1a\xa3\x22\x46\x38\
x96\x21\x31\x40\x14\x71\xcd\x34\x95\x5d\xea\x0c\x92\x84\x50\x9c\xf1\x47\x93\x0d\x96\x9c\x32\xad\x7b\x8c\x15\xc1\x60\x62\xc6\xbb\x52\x5c\x72\x48\x71\x84\x31\x00\x11\xd7\x58\x5b\x80\x00\x9a\x71\x48\x5b\x8c\x68\xa9\x23\x80\x35\x13\xc6\x1a\xc4\x99\xc1\x4a\x30\x6e\x44\xe4\xf3\x88\x8f\xea\x1e\x91\x61\x06\x8e\xa6\xc0\xa0\x43\x26\x98\x00\x95\xa1\x2e\x5b\x66\x30\x0b\x81\xd3\x95\x81\x29\xac\x83\xc0\x97\x1d\x89\x15\xc7\x42\xb4\xae\xf6\x89\x8a\x6b\xa9\x22\x45\xb1\xb1\x01\xb3\x1d\x53\x28\x1a\xec\x65\x25\xb5\x02\xee\x66\x1d\xa3\xfa\x41\x82\x19\x57\x8a\xb3\x1e\x03\xf0\x92\xe5\x5c\xeb\xfe\x6e\xe0\x52\x2d\x2c\x8d\x94\x68\x40\x21\xbc\x9f\x89\x36\x44\x47\x4a\x61\x0d\x33\xb5\xe9\x12\x21\xa8\x02\x88\x2f\xf7\x54\x4a\x48\x8b\x88\xcb\x2e\x06\xf7\xe4\x19\xa0\xdf\x28\x4e\xc0\x68\x7b\x32\xe4\x7f\xaa\x14\x33\x2c\xa2\x54\x63\xaa\x34\x87\xc2\x93\x4a\xcc\xb4\x10\x5c\x47\x10\xf2\xa2\x3d\x9b\x28\xc1\x4a\x29\x4e\x00\xc6\x14\x2a\x0e\x6b\x10\x25\x06\x73\x49\xac\xe5\x11\xd5\x12\xf3\x86\x6f\x87\x6a\x29\xd6\x21\xc0\x66\x1d\x32\x04\x1b\x6b\x72\x02\x85\x84\x1d\xa0\xc6\x38\xb6\x10\xfc\x12\x51\x26\x20\x46\x85\x14\x11\x13\xba\x0d\xfe\x19\x85\xa4\x48\xb4\x66\x3e\xf1\xd2\x76\xae\x84\x34\xce\xac\x08\x49\xba\x55\xee\xd0\x11\xfb\xcc\xc1\x0d\x3f\x03\xe4\x1f\x57\x2f\x8a\x72\x3d\x1d\xec\x9e\x5c\x0f\x19\x35\x58\x58\x9f\x0c\x11\x55\x04\x13\xff\x73\x8a\xfc\x83\xf0\xe1\x88\x46\x88\x9e\xa2\xfd\xf4\x51\x77\xfe\xa8\xbb\xe0\x51\x61\xb0\xaf\xb4\x77\x17\xbe\x09\x82\x46\xf6\x71\x0b\x94\x66\x6e\x5f\x79\x43\x73\xf9\xb8\xf2\x66\xb2\xab\xcc\xc1\xd2\xbb\x5d\x91\xa5\xb9\x9b\xc7\x9b\xe9\xc0\x3f\x2e\xeb\x91\xff\x67\x91\xe6\x2d\xfd\x49\x17\x43\x39\x62\x02\x53\x76\xcd\x34\xb8\x66\x4e\x90\xc2\x54\x21\x89\x0d\x40\x06\x53\x38\x59\x31\x6d\xae\x5f\x33\x6e\xe7\x1a\x73\x68\xae\x80\x3a\x12\xd8\xaa\xe6\xd2\x4f\xf8\xd9\xd7\x02\xf2\x1d\x44\x36\x0c\xf8\x0a\x85\x6b\x44\xf9\x6b\xf0\x9b\x9e\x51\xe3\x19\x73\xff\x5f\x87\xd5\x41\x80\x4f\x07\x5a\x2c\x40\xb2\x5f\x7d\x49\x99\x0d\x90\x82\x46\x8a\x60\x69\x90\x86\x3e\x8a\x5a\x4c\xa1\xcf\x63\xda\x5f\xbe\x66\xc2\x5e\xee\x16\x7d\x7a\xb6\xfb\x49\x33\xf7\x9f\xea\x7d\xba\xac\xdb\xce\xe7\x20\x00\x29\x6f\x20\x14\xa1\xdd\xe5\xe9\x93\x8e\xa8\xc7\x2e\xf4\x43\x3d\xc4\xfc\x21\x68\x3c\x6e\xfe\xbf\x30\xd2\x75\x04\xb4\x3f\x98\x32\x2a\x8c\x51\xbe\x23\x30\x00\x10\x23\xb4\xf6\x1d\x01\x9c\xc7\xdc\x5a\x26\x3c\x6e\x84\x35\xbe\xc8\xb7\x0a\x5b\x6b\xad\xe1\x01\x21\xcc\x70\xcd\xbb\xd4\x4b\xa8\x85\xac\xd5\x70\xe8\x77\xc8\x33\x5f\x39\x59\xe9\xcb\xe5\x3d\x99\x71\x8b\xa9\x26\x86\xed\xb6\x13\xac\x4b\xdc\x4b\x74\xb9\xa7\x52\xc6\x31\x15\x32\x64\xe6\x43\x54\x0a\x47\xbe\x91\x82\x46\x0c\x1b\xc1\xa8\x26\x56\xb8\x11\x15\x3e\x5d\x72\x65\x05\x1c\x7f\xfd\x91\xcb\x46\x1b\x09\x3a\xf6\x87\x66\x20\x83\x24\xc4\x12\x1d\x8d\x28\xd6\x54\x68\x6b\x0d\x73\x23\x22\x11\x89\x20\x58\x08\x63\xd6\xc2\x4d\xc7\x9e\x4d\xf2\x2a\xdd\xbc\xa6\x54\xd3\xcf\x74\x74\x94\x2a\xa8\x0c\x88\x6f\x64\x29\x55\x3e\xeb\x5b\x22\xac\xf2\x99\x5c\x45\x94\x52\x2c\x0c\x9c\x55\x08\x94\x64\xd2\x10\x62\x22\xca\x20\x5e\x19\x94\xa3\x70\x5c\xc1\xed\x25\x24\x66\x7f\xf1\x98\xe7\xee\xa6\x2b\x96\xb6\xe2\x4f\x35\x40\x42\x63\x43\x38\x05\x73\x5a\x81\x49\x38\xe8\x66\x02\x52\xa7\xb5\x86\x02\x59\x62\x1a\xca\x7b\x61\xb0\x15\x56\x4a\x19\xbc\x4f\x42\x01\x25\x2c\x16\x8c\x2a\xb2\xc3\x84\xa7\xce\x24\xc1\x94\x1a\x21\x0c\x60\x42\x63\x13\xc8\x70\x00\x28\x43\x18\xd3\x11\xf3\xe5\xaf\x2f\x1a\x24\xc5\x0c\xca\x58\x28\x7e\xac\xc5\x3c\xd4\x92\x33\xc9\x30\x23\xc6\x2a\x63\x22\x4e\x08\x16\xfe\xa4\x93\x1c\x73\xad\x7d\x33\xc1\x09\x45\x52\x60\x2d\x2c\x51\x5c\xf8\xdb\x19\x34\x55\x82\x69\xc1\x55\x18\xd6\x98\x28\xc1\x35\xb1\x9e\x85\x0a\x7d\x83\xd4\x58\x2b\xca
\xbc\x76\x16\x3a\x4e\xce\x34\x9a\x49\x83\x85\x34\x44\x36\xe4\x46\x0a\xa8\x9f\x89\x56\x0c\x0e\x40\x0b\x2d\xdd\x53\xaa\xc6\x1c\xea\x19\xee\x59\xec\xc9\x0a\x9b\x46\xbd\x2e\x55\x62\xbb\xa3\x2a\x03\x3d\x2e\xf3\xa6\x37\x80\x4f\x1a\xa4\xe0\x52\x6a\x06\x93\x39\xb4\x37\x50\x84\x4b\x83\x19\x25\xbe\xae\x86\x60\x32\x8d\x2d\xfa\x54\xd1\x74\x61\xa0\x1e\x37\x9a\x43\x24\x18\x8d\x75\xa8\xc1\x24\x34\x2c\xdc\x0a\x49\x23\x66\x38\xd6\xa1\x8c\xeb\x52\xb5\xc5\xd6\xf7\x87\xb3\x1e\x95\x63\x16\x64\xeb\x8a\xa6\x74\xdb\x14\x49\x8b\x0d\xb3\x4c\x42\xb7\xa5\x28\x56\x4d\xf3\xaa\x28\x16\xc2\x17\x08\xe0\x92\x60\x35\xc5\xb1\xe4\x86\x09\xc2\x7d\xcb\xa7\x42\x81\xa0\x7c\xfd\xc4\x39\xb4\xd5\x42\x63\xc5\x84\xb0\x68\xa6\xa0\xdf\x91\xbe\xeb\x60\x02\xba\x15\x5f\xf0\x6b\x86\x39\xd3\x82\xfa\xc2\x83\x60\xee\xc5\xd5\x0a\x0b\x23\xad\xb6\xcc\x77\x76\xc1\x36\x33\x43\xb0\x12\x42\x0a\x0a\x54\x81\x65\x68\xcb\x0c\xd4\x3b\x92\x4a\x45\x23\xc6\x59\x8b\x6c\x4b\x30\xe5\x44\x4a\xf0\x05\x07\xb6\xa1\xd4\xb2\x02\x5b\x23\xad\x02\x79\x99\xc1\x32\x74\x33\x10\xc2\x5a\x31\x28\xf6\x99\xf6\x8d\x26\x1c\x8a\x44\x43\xc9\x69\x64\x78\xd0\x41\xda\xa7\x00\x94\x63\x4d\x89\x66\x26\xf4\x91\x06\xf6\x43\x94\x11\x2c\x20\xd6\x64\xc4\x18\xd4\xfd\x54\xc0\x69\xc9\xb4\x97\x82\xf9\xfa\xcb\x04\x85\x67\xd0\xdf\x1b\x66\x29\xd4\xfa\x8c\x63\x11\xdc\x06\x6d\x24\x13\x9a\x36\x93\x59\xd3\x19\x0a\x8b\x0d\xa5\x52\xf4\xa8\x97\xd0\x89\x69\x22\xa4\x35\xcf\x92\xa1\x5c\x6b\x1b\xf0\x0e\x59\x12\x48\x8f\x92\x86\xd6\x90\xd0\xf0\xdc\x88\xed\x1e\x45\x68\x82\x09\xb5\x96\x48\xdf\xee\xcb\x26\x7b\x50\x4d\xa1\xc8\xf5\xcf\x38\x04\x36\x01\xc1\xd0\x45\x6a\x66\x0c\x14\x9d\x52\x62\x19\xf2\x01\xd5\x0a\x13\xe6\x1b\xc3\x0e\x75\x46\x75\x53\x54\xf6\xc8\xd4\x10\xdf\x68\x43\x4a\xe9\x30\x36\x14\x1b\x46\x01\xec\x7b\x19\x2e\x01\x49\x5a\x52\x66\x95\x6f\x86\x4c\xe8\xb3\x67\xa0\x28\x34\xc9\x0a\x4a\x5f\xc8\x45\xbe\xd2\xf6\xfd\x82\xa5\xdc\x86\xae\x8e\x86\x84\xd0\xa3\x6a\xcc\x9a\xc6\xa9\x47\x96\x58\xb4\xcd\xc5\x8e\x31\x85\x44\x1a\x22\xa6\x23\x05\xb4\xc0\x3a\x48\x7c\xb9\x17\x19\xfc\xb8\x7b\xdc\x63\x08\x66\x84\x79\x16\xdc\x62\xdd\x3c\x1a\xd8\x9b\x82\x72\x1b\x0c\x26\x04\x83\xea\xdf\x3f\x65\xd8\x9b\x35\x0c\x53\x6c\x8c\x54\xdc\xf6\x79\x10\x4c\x9a\x56\xad\xbb\x21\x38\x95\x71\x0b\x3d\xb5\x60\x3b\x10\x81\xff\x99\x26\x70\xee\x08\x60\x1e\x9e\x93\x74\xa9\x12\xc2\x18\xfa\x80\xcb\x2e\x59\xef\x9e\x3a\x5c\x76\x80\xd8\x21\xcf\x8c\xc1\x92\x32\x62\xe1\x84\xdb\x93\x01\x64\x54\x32\xe8\xdd\xa8\x11\xd8\x86\x67\x54\x5c\x61\xcb\xb8\xef\x7e\x7c\xeb\xdf\x60\x8b\x33\xcc\xa9\xe4\x44\x43\xe8\x50\xcc\x82\x03\x39\xf1\xd1\x2c\x03\x43\xb8\x13\x12\xdb\x10\x56\x33\xb8\x85\x73\x20\x24\x00\x2e\xb1\x94\x60\x4e\x16\x31\xcd\xc0\x93\x86\x6b\x24\x14\x04\xa4\x50\x04\xce\x25\xda\x46\xfa\x4c\x28\xac\xa4\xd2\x4c\xa9\x50\xc3\x34\x93\x35\xa6\x44\x71\x42\x6c\x48\xc6\x61\xd7\x83\x27\x69\xb7\xcd\x19\xcd\x8a\xcd\xdd\xae\xd2\x6b\x4b\xc1\x43\x5f\xa7\x1c\x2e\x3f\x05\x81\x1a\x48\x59\x19\x21\xc6\xfe\xb8\xff\xe9\xce\x1f\x75\x17\xfc\xb9\xfe\xe7\xc3\x06\xc5\x65\x59\xdc\x3c\xee\x81\xb6\x9b\x91\xa7\x3f\x23\xe5\x08\x4e\x11\xc6\xd0\x88\x11\x83\x29\x3b\xed\xf7\x2f\x9d\x25\xeb\xb8\x2e\xd3\xdb\x21\x66\x4c\x50\xee\xbf\xfd\xc1\x14\x4e\x7b\xc4\xb9\xc4\x4a\x23\xaa\x04\xe6\xf2\xf4\x71\xad\x4c\x06\x50\xb6\xac\x47\x10\x64\x54\x23\x41\x19\x26\x74\x35\x62\x06\x1b\xa6\x9b\x5f\x19\x15\x58\x50\x31\x62\x50\xbf\x49\x74\xe8\x0e\x85\xbb\x03\x0d\x07\xe8\xfe\x6d\x71\x93\x1f\xd6\x3e\x29\x6e\xf2\xff\x94\xfe\xa3\xbe\x01\x00\xb3\x96\xff\x77\x1b\x60\x32\x6e\xbf\x0c\x9c\x8c\xab\x6b\x4f\x9b\x84\x77\x91\xc2\xf0\x8a\x86\xf9\xf7\xf7\x65\x9c\x2f\x1d\xfa\x4b\x1a\xa1\xbf\xcc\xcb\xed\xfa\x0a\x9d\x4d\x11\x7e\x59\xba\x38\xf
1\xb7\x0f\x0f\x93\x18\xad\x4a\xb7\x98\x0e\x9a\xf7\xe2\xc2\x34\x7c\x99\xe6\x1f\x1f\x1e\x06\x17\x7d\xea\x7b\x77\x5b\x3f\x3c\x4c\xc6\xf1\xc5\xfd\x7d\xba\x40\x39\x70\x46\xe4\xe1\x61\x7c\x7f\xef\xf2\xe4\xe1\xa1\xf9\x15\x44\x0c\x42\x84\x6f\x63\x83\x60\x93\x75\x9c\xe6\xcd\x97\x99\xe9\x35\x9a\x67\x71\x55\x4d\x07\x6b\x57\xc7\x8d\x03\x3c\x19\x3c\xd8\xbc\x19\xb6\xf3\x4b\xb5\x89\xf3\xee\x7c\xff\x12\xce\xe0\x62\x92\xe6\x9b\x6d\x8d\xea\xbb\x8d\x9b\x0e\x6a\x77\x5b\x0f\xd0\x26\x8b\xe7\x6e\xe5\xbf\xf1\xf2\x7d\x5e\xed\xca\x41\xdb\xf3\xf9\xeb\x22\xff\xe8\xee\xb6\x9b\xfd\x17\xc2\x27\x17\x93\x31\xf0\x6f\x4d\x9c\xa4\xd7\xad\x91\xdb\xab\x8e\xb4\x59\x5a\xd5\x69\xbe\x6c\x05\x0e\xef\xee\xc4\x65\x1a\x8f\x12\x57\xcd\xcb\xf4\xca\x25\x57\x77\x4f\x15\xa8\xdb\xb7\x10\xfd\x4d\xb9\x2b\xf1\xeb\xd5\xc5\x64\xdc\xa9\xfe\xbb\xfd\x49\xeb\x99\xbf\x55\x45\x59\x4f\xf3\x78\xed\x92\xb4\xf4\xaf\x51\xfd\xd5\x7f\x77\x3d\x8d\xab\xf9\xa0\x95\xcb\xbf\x77\xe1\xdf\x5b\x08\xdf\x6b\x5f\xf8\x6f\xb1\x9b\xc1\xba\xd8\xec\xbe\x6a\xa6\x6e\xbd\xff\x06\x1a\x4b\xb8\xeb\x7f\xd7\x7d\x9d\xba\x9b\x97\xc5\xed\x74\xe0\xbf\xec\x65\xd8\x32\x46\xad\x40\x0a\x13\x2e\x8d\xb1\x76\x70\x31\xd9\x56\x0e\xf9\xef\xb2\xcf\x82\x84\x5f\xee\xf2\xcd\xc5\x64\xbc\xad\xdc\x45\x80\x65\x57\x84\xf0\xb6\xc4\x7f\x56\x8a\x4e\xdc\xf7\xe5\x18\xc7\x3b\xab\x7e\xc6\xba\x07\xac\xda\xd8\xf2\x6d\xbc\x76\x1d\x26\x7f\xd2\x63\x55\xfa\xe9\x33\x3c\xdf\xa5\x9f\x3e\xc3\xb3\x9d\xdc\xbe\xe3\x31\x78\x6e\x93\x3a\xfd\x9c\xe0\xe1\x15\x5a\x97\xfc\x3b\x1b\x75\x26\x4c\xc6\x3b\xa8\x02\xb5\x0b\xe1\xab\x22\xb9\x3b\x84\xe7\x04\xd6\x27\xdd\xfb\x27\x82\x63\xbc\xd7\xa6\x1f\xda\xcb\x62\xbb\x19\x5c\xfc\xbd\x40\xdb\x4d\x37\x26\xfd\xf6\x5d\x05\x7a\xfc\xff\xba\x4e\xe2\x6a\x75\xfe\x88\xfc\x54\xaf\x3f\x3b\xaf\x33\xa1\xa3\xff\xfd\xfd\x08\x85\x64\x8a\x5f\xe5\x75\x99\xba\x2a\xe4\x39\xaf\x7e\xcb\xc4\x3f\x7a\x3c\xa0\xfb\x73\x26\x01\xa6\xe9\x02\xe1\x37\xd5\xb7\x69\xd9\xf2\x6b\x5e\x40\x69\x03\x25\x04\x47\x1b\x2a\xf4\x0f\x22\x85\x53\x38\x93\x0e\x46\x47\xf3\x6a\x48\x2f\x32\xba\x82\xb8\xac\x72\xff\x25\x32\x30\x25\x11\x67\xfc\xa0\x0c\x69\xb0\xf0\x33\x12\xb4\x87\xc7\x13\x60\x40\x78\x0e\x2e\x1e\x9f\x55\xf8\xc3\x4f\x97\x9d\x43\x0a\x5f\xba\x78\x11\x8e\xa7\x3e\x7a\xba\xe6\x3f\x6c\x72\x00\x42\x12\xd7\xf1\x28\x44\xd2\x60\x44\x0f\xe2\xe5\x89\x99\x1e\xaf\xbb\xbf\xc7\x10\xd7\x20\xd4\x04\xc2\xff\x62\x47\x98\x8c\xfd\xfd\x13\x6e\x1d\x95\x5b\xd1\xbe\x2f\x92\xf7\xe9\xda\xa1\xff\x85\xe2\x45\xed\xca\x57\x9b\x62\xbe\x42\xbd\x2d\x9f\x62\x16\xd2\x80\x7f\xc3\x0b\x2e\xbc\x1c\x2d\x97\x60\xa0\xce\xed\x64\x0c\x73\x9e\x4a\xf2\x58\xaf\x27\x9b\xfc\xdf\xff\xfd\x7f\x3e\x27\xfe\xbf\x19\x4c\x9d\xa5\x93\x71\x27\x9d\x4c\xc6\xfe\x4c\xed\x1f\xc1\x93\x71\x5b\x3a\x4c\xe0\x90\xdd\xd4\x7e\xf8\x3a\x2e\x51\x38\xc5\x5f\x65\x68\x8a\x92\x62\xbe\x5d\xbb\xbc\xc6\x4b\x57\xbf\xca\x1c\x5c\xbe\xbc\x7b\x93\x0c\x9b\x93\xfe\xe4\xf4\x1c\x16\xb5\x0b\xf0\xa2\x98\x6f\xab\x61\x43\xdc\xe6\xf3\x3a\x2d\x72\xd4\x16\x05\xfe\x55\xb3\xb0\xc3\xef\x68\xba\xdb\x05\x5f\xc7\xd9\xd6\xe1\xba\x4c\xd7\xc3\x53\x5c\x17\x97\xc5\x8d\x2b\x67\x71\xe5\x1a\x3e\x7e\x81\xcb\xdc\xba\xea\xca\xf3\xfb\xd6\x95\x77\xef\x5c\xe6\xe6\x75\x51\xbe\xc8\xb2\xe1\x49\x5d\x62\x08\x85\x46\xa4\x23\xbf\x02\x2f\x8a\xf2\x55\x3c\x5f\x0d\x5b\x61\x86\x2e\x6b\xe5\x38\x4a\x17\x68\xf8\xc5\xef\xbb\xdb\x23\x97\x61\xff\xc2\x18\x6e\xde\xfb\x43\x53\x74\x72\x72\xde\x0c\x96\xae\xde\x96\x79\x73\xd7\xd8\x18\x04\x83\x28\xf2\x96\x72\x59\x5f\xa6\xe1\x89\x7f\x5b\xb4\x15\x67\x37\xf9\xe7\x18\x66\x87\x65\x18\xea\xab\x59\xf8\x6b\x85\xcf\x18\xc0\x4b\xda\xac\xc5\x69\x9e\xb8\xdb\x1f\x16\xc3\xdf\x4f\xd1\x17\xd3\x29\x1a\xd1\x3f\xa7\xc0\x83\x07\xe3\x67\xa7\xe6\x45\x
ee\x4e\x7a\x1a\x3e\x04\x01\x1e\x7a\xee\xcc\x8a\x79\x9c\xa5\x9f\xdc\xb7\x4d\x64\x0c\x5d\x84\xbc\x50\x11\x8a\xcb\x56\x18\x90\xd8\x75\xd5\x43\xd3\xe9\xd4\xbf\xc1\xbe\x48\x73\x97\xec\x64\xee\x9a\xf5\x61\xe7\xed\x04\x2c\xe4\x6e\x10\x6c\x31\x74\x80\xbd\x17\x75\xf3\xd7\x38\xc3\x93\x36\x22\x4f\x4e\x1b\xf3\xc0\x5e\x69\xf5\x36\x7e\x3b\x4c\x4e\x77\x8c\x1f\xb1\xe8\x48\xd2\x35\xea\x93\x65\x87\xfc\x1c\x3e\x1f\x69\x83\x12\xef\x29\x68\x41\xdf\xd5\x65\x9a\x2f\x87\xbf\xfe\x16\xa1\xfb\x24\xbe\x3b\x43\x03\x36\x4a\xd2\x65\x5a\x0f\x22\xb4\x2e\xf2\x7a\xd5\xa3\xdc\xb9\xb8\x3c\x43\x83\x7c\xbb\x76\x65\x3a\x1f\x44\x68\x55\x6c\xcb\xfe\x9a\x34\xdf\xd6\xae\x47\xaa\xdc\xbc\xc8\x93\x0e\xa9\xeb\x19\xb0\x18\x18\xe4\x32\xad\x40\xb0\x17\x65\x19\xdf\xe1\x4d\x59\xd4\x05\x14\xf1\xb8\xca\xd2\xb9\xc3\xf3\x38\xcb\x86\x07\xa2\xb9\x7a\x79\xf7\x3e\x5e\x42\x31\x36\x1c\x00\x93\x41\x63\xd5\x96\xe1\x2e\x82\x1e\xbb\xfd\xf4\xfc\xb8\xdd\x7c\xe9\xea\x0f\x65\xf6\x63\x5c\xc6\x6b\x57\xbb\x12\x62\xbb\x05\xcb\xa3\xa1\x61\xe5\x2f\xbb\xa9\xa0\xfa\x31\x5e\xba\x0f\x3f\x5d\xa2\x29\xba\x49\xf3\xa4\xb8\xc1\xb0\x13\x2c\xc6\x95\x8b\xcb\xf9\x0a\x57\xdb\xab\x2a\x98\x98\x9e\x46\x7e\x5d\xf5\xe1\xa7\xcb\x9f\xa1\x41\xb8\xca\x1c\x64\x85\x96\x07\xae\x36\x59\x5a\x0f\x4f\xfe\x7a\xd2\x4e\xdc\xed\xfc\xd6\xbf\xb9\xed\xfd\x1e\x04\x3f\x5a\x14\x25\x1a\xa6\x68\x8a\xc8\x39\x4a\xd1\x04\xf5\x98\xe2\xcc\xe5\xcb\x7a\x75\x8e\xd2\x6f\xbe\xd9\x81\xa3\xcf\x0d\xf6\xed\x2e\xf9\x35\xfd\xad\xdd\x7f\x7a\xd2\x58\x27\xa0\xac\xbf\xee\x57\xf2\x9b\x0f\x86\xbe\x29\x5a\xe4\xa1\x47\x93\xe9\x6f\xfd\xc8\x41\x7f\x43\x75\xb9\x75\xe8\x0c\x25\x6e\x5e\x24\xee\xc3\x4f\x6f\x66\xc5\x7a\x53\xe4\x2e\xaf\x1f\x6f\x44\x7f\x3b\x7d\x0a\xe4\x87\x7e\x72\x6e\xde\xdc\xf5\x87\x0c\x2c\x3a\xdd\x7b\xc6\x9f\xbf\x68\xfa\xc4\x87\x27\x7e\xe0\xe4\x51\x76\x06\x2c\x1d\x3e\x30\xaa\x97\x77\xb3\x96\x7d\x67\xa3\xf3\xd6\x0b\x43\x60\xe1\x1d\x11\xa1\x60\x76\x9f\x4e\xc3\xda\xbd\x23\xd0\x04\x1d\x72\x0a\x2c\x9e\x6f\xcb\xf2\x75\xe9\x16\x9d\x75\xe0\x0d\x28\x6c\x76\xc1\x3e\x0c\xe5\xc4\xf4\x04\x7a\xca\x93\xd3\x7b\xd4\x98\xdd\xaf\x5f\x2d\xd1\x74\xc7\x05\x97\xce\x77\xbc\xc3\x30\x35\x42\x27\x31\xac\x38\xdf\xa5\xce\xfe\x0e\xb0\x72\xb5\xec\x9f\x0c\x9d\xed\xe2\x3f\xbd\x5b\x1c\x36\x0b\xf2\xfd\x1b\xbb\x1d\x72\x6b\xe9\xe2\x04\x50\xf9\x5d\x9a\x85\xd7\xb0\xa1\x52\xea\x86\xdd\x36\x4f\xbd\xbf\x7e\x3d\x79\x09\x9b\xfe\xc3\x7f\x7e\xef\x3f\xff\xee\x3f\xdf\xfb\xcf\x1f\xfd\xe7\x2b\xff\xf9\x2f\xff\xf9\xcb\xcb\x93\xdf\xf6\x9e\x0f\xf1\xe3\x6f\x6f\x56\x69\x16\xf6\x41\x17\x53\x44\x09\x13\xfb\xc0\x01\xe2\x38\x10\x1b\xd1\xbf\xf9\x26\xed\x66\xfd\x06\xfc\x9b\xb8\xac\xdc\x77\x59\x11\xd7\x41\x60\x5c\x17\xdf\xa5\xb7\xce\xbf\x47\xff\x0d\x3a\x41\x27\xe8\x9b\x20\xf9\xaf\xe9\x6f\x4d\x02\xec\xa9\xdd\x7d\xef\xbc\x9b\x63\xd2\x4f\xee\x79\x70\xee\xf2\x1f\x4c\x1b\x9c\x76\xd3\xc3\x5e\xc5\x90\x22\x80\xcf\xc1\xd4\xb0\xda\xae\xe3\x1c\xf6\x45\xd3\xc3\xb6\xf7\x0e\x4c\xf3\xdc\x95\xaf\xdf\x7f\x7f\xd9\xba\xf7\xe9\x08\x9a\xa2\x1d\xaf\x8e\x77\xc3\x63\xa9\xb6\x4c\x9b\x8c\x43\x6d\x37\x19\x87\x3f\xc8\xfc\x7f\x01\x00\x00\xff\xff\xd1\xe7\x7f\xef\x40\x3c\x00\x00"), + }, + } + fs["/"].(*vfsgen۰DirInfo).entries = []os.FileInfo{ + fs["/index.html"].(os.FileInfo), + } + + return fs +}() + +type vfsgen۰FS map[string]interface{} + +func (fs vfsgen۰FS) Open(path string) (http.File, error) { + path = pathpkg.Clean("/" + path) + f, ok := fs[path] + if !ok { + return nil, &os.PathError{Op: "open", Path: path, Err: os.ErrNotExist} + } + + switch f := f.(type) { + case *vfsgen۰CompressedFileInfo: + gr, err := gzip.NewReader(bytes.NewReader(f.compressedContent)) + if err != nil { + // This should never 
happen because we generate the gzip bytes such that they are always valid. + panic("unexpected error reading own gzip compressed bytes: " + err.Error()) + } + return &vfsgen۰CompressedFile{ + vfsgen۰CompressedFileInfo: f, + gr: gr, + }, nil + case *vfsgen۰DirInfo: + return &vfsgen۰Dir{ + vfsgen۰DirInfo: f, + }, nil + default: + // This should never happen because we generate only the above types. + panic(fmt.Sprintf("unexpected type %T", f)) + } +} + +// vfsgen۰CompressedFileInfo is a static definition of a gzip compressed file. +type vfsgen۰CompressedFileInfo struct { + name string + modTime time.Time + compressedContent []byte + uncompressedSize int64 +} + +func (f *vfsgen۰CompressedFileInfo) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot Readdir from file %s", f.name) +} +func (f *vfsgen۰CompressedFileInfo) Stat() (os.FileInfo, error) { return f, nil } + +func (f *vfsgen۰CompressedFileInfo) GzipBytes() []byte { + return f.compressedContent +} + +func (f *vfsgen۰CompressedFileInfo) Name() string { return f.name } +func (f *vfsgen۰CompressedFileInfo) Size() int64 { return f.uncompressedSize } +func (f *vfsgen۰CompressedFileInfo) Mode() os.FileMode { return 0444 } +func (f *vfsgen۰CompressedFileInfo) ModTime() time.Time { return f.modTime } +func (f *vfsgen۰CompressedFileInfo) IsDir() bool { return false } +func (f *vfsgen۰CompressedFileInfo) Sys() interface{} { return nil } + +// vfsgen۰CompressedFile is an opened compressedFile instance. +type vfsgen۰CompressedFile struct { + *vfsgen۰CompressedFileInfo + gr *gzip.Reader + grPos int64 // Actual gr uncompressed position. + seekPos int64 // Seek uncompressed position. +} + +func (f *vfsgen۰CompressedFile) Read(p []byte) (n int, err error) { + if f.grPos > f.seekPos { + // Rewind to beginning. + err = f.gr.Reset(bytes.NewReader(f.compressedContent)) + if err != nil { + return 0, err + } + f.grPos = 0 + } + if f.grPos < f.seekPos { + // Fast-forward. + _, err = io.CopyN(ioutil.Discard, f.gr, f.seekPos-f.grPos) + if err != nil { + return 0, err + } + f.grPos = f.seekPos + } + n, err = f.gr.Read(p) + f.grPos += int64(n) + f.seekPos = f.grPos + return n, err +} +func (f *vfsgen۰CompressedFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + f.seekPos = 0 + offset + case io.SeekCurrent: + f.seekPos += offset + case io.SeekEnd: + f.seekPos = f.uncompressedSize + offset + default: + panic(fmt.Errorf("invalid whence value: %v", whence)) + } + return f.seekPos, nil +} +func (f *vfsgen۰CompressedFile) Close() error { + return f.gr.Close() +} + +// vfsgen۰DirInfo is a static definition of a directory. +type vfsgen۰DirInfo struct { + name string + modTime time.Time + entries []os.FileInfo +} + +func (d *vfsgen۰DirInfo) Read([]byte) (int, error) { + return 0, fmt.Errorf("cannot Read from directory %s", d.name) +} +func (d *vfsgen۰DirInfo) Close() error { return nil } +func (d *vfsgen۰DirInfo) Stat() (os.FileInfo, error) { return d, nil } + +func (d *vfsgen۰DirInfo) Name() string { return d.name } +func (d *vfsgen۰DirInfo) Size() int64 { return 0 } +func (d *vfsgen۰DirInfo) Mode() os.FileMode { return 0755 | os.ModeDir } +func (d *vfsgen۰DirInfo) ModTime() time.Time { return d.modTime } +func (d *vfsgen۰DirInfo) IsDir() bool { return true } +func (d *vfsgen۰DirInfo) Sys() interface{} { return nil } + +// vfsgen۰Dir is an opened dir instance. +type vfsgen۰Dir struct { + *vfsgen۰DirInfo + pos int // Position within entries for Seek and Readdir. 
+}
+
+func (d *vfsgen۰Dir) Seek(offset int64, whence int) (int64, error) {
+ if offset == 0 && whence == io.SeekStart {
+ d.pos = 0
+ return 0, nil
+ }
+ return 0, fmt.Errorf("unsupported Seek in directory %s", d.name)
+}
+
+func (d *vfsgen۰Dir) Readdir(count int) ([]os.FileInfo, error) {
+ if d.pos >= len(d.entries) && count > 0 {
+ return nil, io.EOF
+ }
+ if count <= 0 || count > len(d.entries)-d.pos {
+ count = len(d.entries) - d.pos
+ }
+ e := d.entries[d.pos : d.pos+count]
+ d.pos += count
+ return e, nil
+}
diff --git a/vendor/github.com/rclone/rclone/cmd/serve/httplib/serve/data/data.go b/vendor/github.com/rclone/rclone/cmd/serve/httplib/serve/data/data.go
new file mode 100644
index 00000000000..629835d0579
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/cmd/serve/httplib/serve/data/data.go
@@ -0,0 +1,56 @@
+//go:generate go run assets_generate.go
+// The "go:generate" directive compiles static assets by running assets_generate.go
+
+package data
+
+import (
+ "html/template"
+ "io/ioutil"
+ "time"
+
+ "github.com/pkg/errors"
+ "github.com/rclone/rclone/fs"
+)
+
+// AfterEpoch returns true if the given time is after the epoch
+func AfterEpoch(t time.Time) bool {
+ return t.After(time.Time{})
+}
+
+// GetTemplate returns the HTML template for serving directories via HTTP/Webdav
+func GetTemplate(tmpl string) (tpl *template.Template, err error) {
+ var templateString string
+ if tmpl == "" {
+ templateFile, err := Assets.Open("index.html")
+ if err != nil {
+ return nil, errors.Wrap(err, "get template open")
+ }
+
+ defer fs.CheckClose(templateFile, &err)
+
+ templateBytes, err := ioutil.ReadAll(templateFile)
+ if err != nil {
+ return nil, errors.Wrap(err, "get template read")
+ }
+
+ templateString = string(templateBytes)
+
+ } else {
+ templateFile, err := ioutil.ReadFile(tmpl)
+ if err != nil {
+ return nil, errors.Wrap(err, "get template open")
+ }
+
+ templateString = string(templateFile)
+ }
+
+ funcMap := template.FuncMap{
+ "afterEpoch": AfterEpoch,
+ }
+ tpl, err = template.New("index").Funcs(funcMap).Parse(templateString)
+ if err != nil {
+ return nil, errors.Wrap(err, "get template parse")
+ }
+
+ return
+}
diff --git a/vendor/github.com/rclone/rclone/fs/accounting/accounting.go b/vendor/github.com/rclone/rclone/fs/accounting/accounting.go
new file mode 100644
index 00000000000..2ffa37d4976
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/accounting/accounting.go
@@ -0,0 +1,607 @@
+// Package accounting provides an accounting and limiting reader
+package accounting
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "github.com/rclone/rclone/fs/rc"
+
+ "github.com/pkg/errors"
+ "github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fs/asyncreader"
+ "github.com/rclone/rclone/fs/fserrors"
+)
+
+// ErrorMaxTransferLimitReached defines the error when the transfer limit is reached.
+// Used for checking on exit and matching to correct exit code.
+var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set by --max-transfer")
+
+// ErrorMaxTransferLimitReachedFatal is returned from Read when the max
+// transfer limit is reached.
+var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)
+
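Both wrappers share one underlying sentinel: the Fatal variant aborts the whole run, while the graceful NoRetry variant (declared just below) stops the transfer without triggering retries. A sketch of how a caller might tell them apart, using only the fserrors predicates this package already relies on (the classify helper itself is hypothetical, not vendored code):

    package accounting

    import "github.com/rclone/rclone/fs/fserrors"

    // classify is a hypothetical helper mapping the error wrappers above
    // to the action a caller would take.
    func classify(err error) string {
        switch {
        case fserrors.IsFatalError(err):
            return "abort the whole run"
        case fserrors.IsNoRetryError(err):
            return "stop gracefully, do not retry"
        default:
            return "retryable"
        }
    }

+// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
+// transfer limit is reached and a graceful stop is required.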
+var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached) + +// Account limits and accounts for one transfer +type Account struct { + stats *StatsInfo + // The mutex is to make sure Read() and Close() aren't called + // concurrently. Unfortunately the persistent connection loop + // in http transport calls Read() after Do() returns on + // CancelRequest so this race can happen when it apparently + // shouldn't. + mu sync.Mutex // mutex protects these values + in io.Reader + ctx context.Context // current context for transfer - may change + ci *fs.ConfigInfo + origIn io.ReadCloser + close io.Closer + size int64 + name string + closed bool // set if the file is closed + exit chan struct{} // channel that will be closed when transfer is finished + withBuf bool // is using a buffered in + + tokenBucket buckets // per file bandwidth limiter (may be nil) + + values accountValues +} + +// accountValues holds statistics for this Account +type accountValues struct { + mu sync.Mutex // Mutex for stat values. + bytes int64 // Total number of bytes read + max int64 // if >=0 the max number of bytes to transfer + start time.Time // Start time of first read + lpTime time.Time // Time of last average measurement + lpBytes int // Number of bytes read since last measurement + avg float64 // Moving average of last few measurements in bytes/s +} + +const averagePeriod = 16 // period to do exponentially weighted averages over + +// newAccountSizeName makes an Account reader for an io.ReadCloser of +// the given size and name +func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser, size int64, name string) *Account { + acc := &Account{ + stats: stats, + in: in, + ctx: ctx, + ci: fs.GetConfig(ctx), + close: in, + origIn: in, + size: size, + name: name, + exit: make(chan struct{}), + values: accountValues{ + avg: 0, + lpTime: time.Now(), + max: -1, + }, + } + if acc.ci.CutoffMode == fs.CutoffModeHard { + acc.values.max = int64((acc.ci.MaxTransfer)) + } + currLimit := acc.ci.BwLimitFile.LimitAt(time.Now()) + if currLimit.Bandwidth.IsSet() { + fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth) + acc.tokenBucket = newTokenBucket(currLimit.Bandwidth) + } + + go acc.averageLoop() + stats.inProgress.set(acc.name, acc) + return acc +} + +// WithBuffer - If the file is above a certain size it adds an Async reader +func (acc *Account) WithBuffer() *Account { + // if already have a buffer then just return + if acc.withBuf { + return acc + } + acc.withBuf = true + var buffers int + if acc.size >= int64(acc.ci.BufferSize) || acc.size == -1 { + buffers = int(int64(acc.ci.BufferSize) / asyncreader.BufferSize) + } else { + buffers = int(acc.size / asyncreader.BufferSize) + } + // On big files add a buffer + if buffers > 0 { + rc, err := asyncreader.New(acc.ctx, acc.origIn, buffers) + if err != nil { + fs.Errorf(acc.name, "Failed to make buffer: %v", err) + } else { + acc.in = rc + acc.close = rc + } + } + return acc +} + +// HasBuffer - returns true if this Account has an AsyncReader with a buffer +func (acc *Account) HasBuffer() bool { + acc.mu.Lock() + defer acc.mu.Unlock() + _, ok := acc.in.(*asyncreader.AsyncReader) + return ok +} + +// GetReader returns the underlying io.ReadCloser under any Buffer +func (acc *Account) GetReader() io.ReadCloser { + acc.mu.Lock() + defer acc.mu.Unlock() + return acc.origIn +} + +// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered +func (acc *Account) GetAsyncReader() 
*asyncreader.AsyncReader {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
+ return asyncIn
+ }
+ return nil
+}
+
+// StopBuffering stops the async buffer doing any more buffering
+func (acc *Account) StopBuffering() {
+ if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
+ asyncIn.StopBuffering()
+ }
+}
+
+// Abandon stops the async buffer doing any more buffering
+func (acc *Account) Abandon() {
+ if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
+ asyncIn.Abandon()
+ }
+}
+
+// UpdateReader updates the underlying io.ReadCloser stopping the
+// async buffer (if any) and re-adding it
+func (acc *Account) UpdateReader(ctx context.Context, in io.ReadCloser) {
+ acc.mu.Lock()
+ withBuf := acc.withBuf
+ if withBuf {
+ acc.Abandon()
+ acc.withBuf = false
+ }
+ acc.in = in
+ acc.ctx = ctx
+ acc.close = in
+ acc.origIn = in
+ acc.closed = false
+ if withBuf {
+ acc.WithBuffer()
+ }
+ acc.mu.Unlock()
+
+ // Reset counter to stop percentage going over 100%
+ acc.values.mu.Lock()
+ acc.values.lpBytes = 0
+ acc.values.bytes = 0
+ acc.values.mu.Unlock()
+}
+
+// averageLoop calculates averages for the stats in the background
+func (acc *Account) averageLoop() {
+ tick := time.NewTicker(time.Second)
+ var period float64
+ defer tick.Stop()
+ for {
+ select {
+ case now := <-tick.C:
+ acc.values.mu.Lock()
+ // Add average of last second.
+ elapsed := now.Sub(acc.values.lpTime).Seconds()
+ avg := float64(acc.values.lpBytes) / elapsed
+ // Soft start the moving average
+ if period < averagePeriod {
+ period++
+ }
+ acc.values.avg = (avg + (period-1)*acc.values.avg) / period
+ acc.values.lpBytes = 0
+ acc.values.lpTime = now
+ // Unlock stats
+ acc.values.mu.Unlock()
+ case <-acc.exit:
+ return
+ }
+ }
+}
+
+// Check before the read happens that it is valid, returning the number
+// of bytes remaining to read.
+func (acc *Account) checkReadBefore() (bytesUntilLimit int64, err error) {
+ // Check to see if context is cancelled
+ if err = acc.ctx.Err(); err != nil {
+ return 0, err
+ }
+ acc.values.mu.Lock()
+ if acc.values.max >= 0 {
+ bytesUntilLimit = acc.values.max - acc.stats.GetBytes()
+ if bytesUntilLimit < 0 {
+ acc.values.mu.Unlock()
+ return bytesUntilLimit, ErrorMaxTransferLimitReachedFatal
+ }
+ } else {
+ bytesUntilLimit = 1 << 62
+ }
+ // Set start time.
+ if acc.values.start.IsZero() {
+ acc.values.start = time.Now()
+ }
+ acc.values.mu.Unlock()
+ return bytesUntilLimit, nil
+}
+
+// Check the read call after the read has happened
+func (acc *Account) checkReadAfter(bytesUntilLimit int64, n int, err error) (outN int, outErr error) {
+ bytesUntilLimit -= int64(n)
+ if bytesUntilLimit < 0 {
+ // chop the overage off
+ n += int(bytesUntilLimit)
+ if n < 0 {
+ n = 0
+ }
+ err = ErrorMaxTransferLimitReachedFatal
+ }
+ return n, err
+}
+
+// ServerSideCopyStart should be called at the start of a server-side copy
+//
+// This pretends a transfer has started
+func (acc *Account) ServerSideCopyStart() {
+ acc.values.mu.Lock()
+ // Set start time.
+ if acc.values.start.IsZero() {
+ acc.values.start = time.Now()
+ }
+ acc.values.mu.Unlock()
+}
+
+// ServerSideCopyEnd accounts for a read of n bytes in a server-side copy
+func (acc *Account) ServerSideCopyEnd(n int64) {
+ // Update Stats
+ acc.values.mu.Lock()
+ acc.values.bytes += n
+ acc.values.mu.Unlock()
+
+ acc.stats.Bytes(n)
+}
+
+// DryRun accounts for statistics without running the operation
+func (acc *Account) DryRun(n int64) {
+ acc.ServerSideCopyStart()
+ acc.ServerSideCopyEnd(n)
+}
+
+// Account for n bytes from the current file bandwidth limit (if any)
+func (acc *Account) limitPerFileBandwidth(n int) {
+ acc.values.mu.Lock()
+ tokenBucket := acc.tokenBucket[TokenBucketSlotAccounting]
+ acc.values.mu.Unlock()
+
+ if tokenBucket != nil {
+ err := tokenBucket.WaitN(context.Background(), n)
+ if err != nil {
+ fs.Errorf(nil, "Token bucket error: %v", err)
+ }
+ }
+}
+
+// Account the read and limit bandwidth
+func (acc *Account) accountRead(n int) {
+ // Update Stats
+ acc.values.mu.Lock()
+ acc.values.lpBytes += n
+ acc.values.bytes += int64(n)
+ acc.values.mu.Unlock()
+
+ acc.stats.Bytes(int64(n))
+
+ TokenBucket.LimitBandwidth(TokenBucketSlotAccounting, n)
+ acc.limitPerFileBandwidth(n)
+}
+
+// read bytes from the io.Reader passed in and account them
+func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
+ bytesUntilLimit, err := acc.checkReadBefore()
+ if err == nil {
+ n, err = in.Read(p)
+ acc.accountRead(n)
+ n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
+ }
+ return n, err
+}
+
+// Read bytes from the object - see io.Reader
+func (acc *Account) Read(p []byte) (n int, err error) {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ return acc.read(acc.in, p)
+}
+
+// Thin wrapper for w
+type accountWriteTo struct {
+ w io.Writer
+ acc *Account
+}
+
+// Write writes len(p) bytes from p to the underlying data stream. It
+// returns the number of bytes written from p (0 <= n <= len(p)) and
+// any error encountered that caused the write to stop early. Write
+// must return a non-nil error if it returns n < len(p). Write must
+// not modify the slice data, even temporarily.
+//
+// Implementations must not retain p.
+func (awt *accountWriteTo) Write(p []byte) (n int, err error) {
+ bytesUntilLimit, err := awt.acc.checkReadBefore()
+ if err == nil {
+ n, err = awt.w.Write(p)
+ n, err = awt.acc.checkReadAfter(bytesUntilLimit, n, err)
+ awt.acc.accountRead(n)
+ }
+ return n, err
+}
+
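accountRead above brackets every read with checkReadBefore/checkReadAfter and then waits on the global and per-file token buckets. The same throttling idea in a self-contained sketch, using golang.org/x/time/rate in place of the package's own tokenBucket type (throttledCopy is illustrative only, not vendored code):

    package throttle

    import (
        "context"
        "io"

        "golang.org/x/time/rate"
    )

    // throttledCopy copies src to dst while holding the stream to roughly
    // bytesPerSec, waiting for n tokens after each read just as
    // limitPerFileBandwidth waits on its bucket.
    func throttledCopy(ctx context.Context, dst io.Writer, src io.Reader, bytesPerSec int) error {
        buf := make([]byte, 32*1024)
        lim := rate.NewLimiter(rate.Limit(bytesPerSec), len(buf)) // burst covers one buffer
        for {
            n, err := src.Read(buf)
            if n > 0 {
                if werr := lim.WaitN(ctx, n); werr != nil {
                    return werr
                }
                if _, werr := dst.Write(buf[:n]); werr != nil {
                    return werr
                }
            }
            if err == io.EOF {
                return nil
            }
            if err != nil {
                return err
            }
        }
    }

+// WriteTo writes data to w until there's no more data to write or
+// when an error occurs. The return value n is the number of bytes
+// written. Any error encountered during the write is also returned.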
+func (acc *Account) WriteTo(w io.Writer) (n int64, err error) {
+ acc.mu.Lock()
+ in := acc.in
+ acc.mu.Unlock()
+ wrappedWriter := accountWriteTo{w: w, acc: acc}
+ if do, ok := in.(io.WriterTo); ok {
+ n, err = do.WriteTo(&wrappedWriter)
+ } else {
+ n, err = io.Copy(&wrappedWriter, in)
+ }
+ return
+}
+
+// AccountRead accounts for having read n bytes
+func (acc *Account) AccountRead(n int) (err error) {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ bytesUntilLimit, err := acc.checkReadBefore()
+ if err == nil {
+ n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
+ acc.accountRead(n)
+ }
+ return err
+}
+
+// Close the object
+func (acc *Account) Close() error {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ if acc.closed {
+ return nil
+ }
+ acc.closed = true
+ if acc.close == nil {
+ return nil
+ }
+ return acc.close.Close()
+}
+
+// Done with accounting - must be called to free accounting goroutine
+func (acc *Account) Done() {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ close(acc.exit)
+ acc.stats.inProgress.clear(acc.name)
+}
+
+// progress returns bytes read as well as the size.
+// Size can be <= 0 if the size is unknown.
+func (acc *Account) progress() (bytes, size int64) {
+ if acc == nil {
+ return 0, 0
+ }
+ acc.values.mu.Lock()
+ bytes, size = acc.values.bytes, acc.size
+ acc.values.mu.Unlock()
+ return bytes, size
+}
+
+// speed returns the speed of the current file transfer
+// in bytes per second, as well as an exponentially weighted moving average.
+// If no read has completed yet, 0 is returned for both values.
+func (acc *Account) speed() (bps, current float64) {
+ if acc == nil {
+ return 0, 0
+ }
+ acc.values.mu.Lock()
+ defer acc.values.mu.Unlock()
+ if acc.values.bytes == 0 {
+ return 0, 0
+ }
+ // Calculate speed from first read.
+ total := float64(time.Now().Sub(acc.values.start)) / float64(time.Second)
+ bps = float64(acc.values.bytes) / total
+ current = acc.values.avg
+ return
+}
+
+// eta returns the ETA of the current operation,
+// rounded to full seconds.
+// If the ETA cannot be determined 'ok' returns false.
+func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
+ if acc == nil {
+ return 0, false
+ }
+ acc.values.mu.Lock()
+ defer acc.values.mu.Unlock()
+ return eta(acc.values.bytes, acc.size, acc.values.avg)
+}
+
+// shortenName shortens in to size runes long
+// If size <= 0 then in is left untouched
+func shortenName(in string, size int) string {
+ if size <= 0 {
+ return in
+ }
+ if utf8.RuneCountInString(in) <= size {
+ return in
+ }
+ name := []rune(in)
+ size-- // don't count ellipsis rune
+ suffixLength := size / 2
+ prefixLength := size - suffixLength
+ suffixStart := len(name) - suffixLength
+ name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
+ return string(name)
+}
+
+// String produces stats for this file
+func (acc *Account) String() string {
+ a, b := acc.progress()
+ _, cur := acc.speed()
+ eta, etaok := acc.eta()
+ etas := "-"
+ if etaok {
+ if eta > 0 {
+ etas = fmt.Sprintf("%v", eta)
+ } else {
+ etas = "0s"
+ }
+ }
+
+ if acc.ci.DataRateUnit == "bits" {
+ cur = cur * 8
+ }
+
+ percentageDone := 0
+ if b > 0 {
+ percentageDone = int(100 * float64(a) / float64(b))
+ }
+
+ return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
+ acc.ci.StatsFileNameLength,
+ shortenName(acc.name, acc.ci.StatsFileNameLength),
+ percentageDone,
+ fs.SizeSuffix(b),
+ fs.SizeSuffix(cur),
+ etas,
+ )
+}
+
+// rcStats produces remote control stats for this file
+func (acc *Account) rcStats() (out rc.Params) {
+ out = make(rc.Params)
+ a, b := acc.progress()
+ out["bytes"] = a
+ out["size"] = b
+ spd, cur := acc.speed()
+ out["speed"] = spd
+ out["speedAvg"] = cur
+
+ eta, etaok := acc.eta()
+ out["eta"] = nil
+ if etaok {
+ if eta > 0 {
+ out["eta"] = eta.Seconds()
+ } else {
+ out["eta"] = 0
+ }
+ }
+ out["name"] = acc.name
+
+ percentageDone := 0
+ if b > 0 {
+ percentageDone = int(100 * float64(a) / float64(b))
+ }
+ out["percentage"] = percentageDone
+ out["group"] = acc.stats.group
+
+ return out
+}
+
+// OldStream returns the top io.Reader
+func (acc *Account) OldStream() io.Reader {
+ acc.mu.Lock()
+ defer acc.mu.Unlock()
+ return acc.in
+}
+
+// SetStream updates the top io.Reader
+func (acc *Account) SetStream(in io.Reader) {
+ acc.mu.Lock()
+ acc.in = in
+ acc.mu.Unlock()
+}
+
+// WrapStream wraps an io Reader so it will be accounted in the same
+// way as account
+func (acc *Account) WrapStream(in io.Reader) io.Reader {
+ return &accountStream{
+ acc: acc,
+ in: in,
+ }
+}
+
+// accountStream accounts a single io.Reader into a parent *Account
+type accountStream struct {
+ acc *Account
+ in io.Reader
+}
+
+// OldStream returns the underlying stream
+func (a *accountStream) OldStream() io.Reader {
+ return a.in
+}
+
+// SetStream sets the underlying stream
+func (a *accountStream) SetStream(in io.Reader) {
+ a.in = in
+}
+
+// WrapStream wraps in in an accounter
+func (a *accountStream) WrapStream(in io.Reader) io.Reader {
+ return a.acc.WrapStream(in)
+}
+
+// Read bytes from the object - see io.Reader
+func (a *accountStream) Read(p []byte) (n int, err error) {
+ return a.acc.read(a.in, p)
+}
+
+// Accounter accounts a stream allowing the accounting to be removed and re-added
+type Accounter interface {
+ io.Reader
+ OldStream() io.Reader
+ SetStream(io.Reader)
+ WrapStream(io.Reader) io.Reader
+}
+
+// WrapFn wraps an io.Reader (for accounting purposes usually)
+type WrapFn func(io.Reader) io.Reader
+
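The UnWrap helper declared next enables the pattern its comment describes: peel the accounting off a reader, wrap the raw stream (for example with buffering), then re-apply the accounting on top so it stays at the end of the chain. A sketch under that assumption (bufferAccounted is hypothetical, not part of the vendored file):

    package accounting

    import (
        "bufio"
        "io"
    )

    // bufferAccounted inserts a buffer underneath any accounting wrapper so
    // the accounting keeps measuring what is actually consumed downstream.
    func bufferAccounted(r io.Reader) io.Reader {
        unwrapped, wrap := UnWrap(r) // wrap is a no-op if r is not an Accounter
        return wrap(bufio.NewReaderSize(unwrapped, 64*1024))
    }

+// UnWrap unwraps a reader returning unwrapped and wrap, a function to
+// wrap it back up again. If `in` is an Accounter then this function
+// will take the accounting unwrapped and wrap will put it back on
+// again on the new Reader passed in.
+//
+// This allows functions which wrap io.Readers to move the accounting
+// to the end of the wrapped chain of readers. This is very important
+// if buffering is being introduced and if the Reader might be wrapped
+// again.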
+func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) {
+ acc, ok := in.(Accounter)
+ if !ok {
+ return in, func(r io.Reader) io.Reader { return r }
+ }
+ return acc.OldStream(), acc.WrapStream
+}
diff --git a/vendor/github.com/rclone/rclone/fs/accounting/accounting_other.go b/vendor/github.com/rclone/rclone/fs/accounting/accounting_other.go
new file mode 100644
index 00000000000..294677a3c8f
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/accounting/accounting_other.go
@@ -0,0 +1,10 @@
+// Accounting and limiting reader
+// Non-unix specific functions.
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
+
+package accounting
+
+// startSignalHandler() is Unix specific and does nothing under non-Unix
+// platforms.
+func (tb *tokenBucket) startSignalHandler() {}
diff --git a/vendor/github.com/rclone/rclone/fs/accounting/accounting_unix.go b/vendor/github.com/rclone/rclone/fs/accounting/accounting_unix.go
new file mode 100644
index 00000000000..a59fe83e15e
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/accounting/accounting_unix.go
@@ -0,0 +1,36 @@
+// Accounting and limiting reader
+// Unix specific functions.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package accounting
+
+import (
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/rclone/rclone/fs"
+)
+
+// startSignalHandler() sets a signal handler to catch SIGUSR2 and toggle throttling.
+func (tb *tokenBucket) startSignalHandler() {
+ signals := make(chan os.Signal, 1)
+ signal.Notify(signals, syscall.SIGUSR2)
+
+ go func() {
+ // This runs forever, but blocks until the signal is received.
+ for {
+ <-signals
+ tb.mu.Lock()
+ tb.toggledOff = !tb.toggledOff
+ tb.curr, tb.prev = tb.prev, tb.curr
+ s := "disabled"
+ if !tb.curr._isOff() {
+ s = "enabled"
+ }
+ tb.mu.Unlock()
+ fs.Logf(nil, "Bandwidth limit %s by user", s)
+ }
+ }()
+}
diff --git a/vendor/github.com/rclone/rclone/fs/accounting/inprogress.go b/vendor/github.com/rclone/rclone/fs/accounting/inprogress.go
new file mode 100644
index 00000000000..7fcbad7816d
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/accounting/inprogress.go
@@ -0,0 +1,54 @@
+package accounting
+
+import (
+ "context"
+ "sync"
+
+ "github.com/rclone/rclone/fs"
+)
+
+// inProgress holds a synchronized map of in progress transfers
+type inProgress struct {
+ mu sync.Mutex
+ m map[string]*Account
+}
+
+// newInProgress makes a new inProgress object
+func newInProgress(ctx context.Context) *inProgress {
+ ci := fs.GetConfig(ctx)
+ return &inProgress{
+ m: make(map[string]*Account, ci.Transfers),
+ }
+}
+
+// set marks the name as in progress
+func (ip *inProgress) set(name string, acc *Account) {
+ ip.mu.Lock()
+ defer ip.mu.Unlock()
+ ip.m[name] = acc
+}
+
+// clear marks the name as no longer in progress
+func (ip *inProgress) clear(name string) {
+ ip.mu.Lock()
+ defer ip.mu.Unlock()
+ delete(ip.m, name)
+}
+
+// get gets the account for name, or nil if not found
+func (ip *inProgress) get(name string) *Account {
+ ip.mu.Lock()
+ defer ip.mu.Unlock()
+ return ip.m[name]
+}
+
+// merge adds items from another inProgress
+func (ip *inProgress) merge(m *inProgress) {
+ ip.mu.Lock()
+ defer ip.mu.Unlock()
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ for key, val := range m.m {
+ ip.m[key] = val
+ }
+}
diff --git a/vendor/github.com/rclone/rclone/fs/accounting/prometheus.go b/vendor/github.com/rclone/rclone/fs/accounting/prometheus.go
new file mode 100644
index 00000000000..43a6f2fbee6
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/accounting/prometheus.go
@@ -0,0 +1,115 @@
+package accounting
+
+import (
+ "context"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// RcloneCollector is a Prometheus collector for Rclone
+type RcloneCollector struct {
+ ctx context.Context
+ bytesTransferred *prometheus.Desc
+ transferSpeed *prometheus.Desc
+ numOfErrors *prometheus.Desc
+ numOfCheckFiles *prometheus.Desc
+ transferredFiles *prometheus.Desc
+ deletes *prometheus.Desc
+ deletedDirs *prometheus.Desc
+ renames *prometheus.Desc
+ fatalError *prometheus.Desc
+ retryError *prometheus.Desc
+}
+
+// NewRcloneCollector makes a new RcloneCollector
+func NewRcloneCollector(ctx context.Context, namespace string) *RcloneCollector {
+ if !strings.HasSuffix(namespace, "_") {
+ namespace = namespace + "_"
+ }
+
+ return &RcloneCollector{
+ ctx: ctx,
+ bytesTransferred: prometheus.NewDesc(namespace+"bytes_transferred_total",
+ "Total transferred bytes since the start of the Rclone process",
+ nil, nil,
+ ),
+ transferSpeed: prometheus.NewDesc(namespace+"speed",
+ "Average speed in bytes/sec since the start of the Rclone process",
+ nil, nil,
+ ),
+ numOfErrors: prometheus.NewDesc(namespace+"errors_total",
+ "Number of errors thrown",
+ nil, nil,
+ ),
+ numOfCheckFiles: prometheus.NewDesc(namespace+"checked_files_total",
+ "Number of checked files",
+ nil, nil,
+ ),
+ transferredFiles: prometheus.NewDesc(namespace+"files_transferred_total",
+ "Number of transferred files",
+ nil, nil,
+ ),
+ deletes: prometheus.NewDesc(namespace+"files_deleted_total",
+ "Total number of files deleted",
+ nil, nil,
+ ),
+ deletedDirs: prometheus.NewDesc(namespace+"dirs_deleted_total",
+ "Total number of directories deleted",
+ nil, nil,
+ ),
+ renames: prometheus.NewDesc(namespace+"files_renamed_total",
+ "Total number of files renamed",
+ nil, nil,
+ ),
+ fatalError: prometheus.NewDesc(namespace+"fatal_error",
+ "Whether a fatal error has occurred",
+ nil, nil,
+ ),
+ retryError: prometheus.NewDesc(namespace+"retry_error",
+ "Whether there has been an error that will be retried",
+ nil, nil,
+ ),
+ }
+}
+
+// Describe is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
+func (c *RcloneCollector) Describe(ch chan<- *prometheus.Desc) {
+ ch <- c.bytesTransferred
+ ch <- c.transferSpeed
+ ch <- c.numOfErrors
+ ch <- c.numOfCheckFiles
+ ch <- c.transferredFiles
+ ch <- c.deletes
+ ch <- c.deletedDirs
+ ch <- c.renames
+ ch <- c.fatalError
+ ch <- c.retryError
+}
+
+// Collect is part of the Collector interface: https://godoc.org/github.com/prometheus/client_golang/prometheus#Collector
+func (c *RcloneCollector) Collect(ch chan<- prometheus.Metric) {
+ s := groups.sum(c.ctx)
+ s.mu.RLock()
+
+ ch <- prometheus.MustNewConstMetric(c.bytesTransferred, prometheus.CounterValue, float64(s.bytes))
+ ch <- prometheus.MustNewConstMetric(c.transferSpeed, prometheus.GaugeValue, s.Speed())
+ ch <- prometheus.MustNewConstMetric(c.numOfErrors, prometheus.CounterValue, float64(s.errors))
+ ch <- prometheus.MustNewConstMetric(c.numOfCheckFiles, prometheus.CounterValue, float64(s.checks))
+ ch <- prometheus.MustNewConstMetric(c.transferredFiles, prometheus.CounterValue, float64(s.transfers))
+ ch <- prometheus.MustNewConstMetric(c.deletes, prometheus.CounterValue, float64(s.deletes))
+ ch <- prometheus.MustNewConstMetric(c.deletedDirs, prometheus.CounterValue, float64(s.deletedDirs))
+ ch <- prometheus.MustNewConstMetric(c.renames, prometheus.CounterValue, float64(s.renames))
+ ch <- prometheus.MustNewConstMetric(c.fatalError, prometheus.GaugeValue, bool2Float(s.fatalError))
+ ch <- prometheus.MustNewConstMetric(c.retryError, prometheus.GaugeValue, bool2Float(s.retryError))
+
+ s.mu.RUnlock()
+}
+
+// bool2Float is a small function to convert a boolean into a float64 value that can be used for Prometheus
+func bool2Float(e bool) float64 {
+ if e {
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/github.com/rclone/rclone/fs/accounting/stats.go b/vendor/github.com/rclone/rclone/fs/accounting/stats.go
new file mode 100644
index 00000000000..fc810fc0a5b
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/accounting/stats.go
@@ -0,0 +1,727 @@
+package accounting
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/rclone/rclone/fs"
+ "github.com/rclone/rclone/fs/fserrors"
+ "github.com/rclone/rclone/fs/rc"
+ "github.com/rclone/rclone/lib/terminal"
+)
+
+// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
+var MaxCompletedTransfers = 100
+
+var startTime = time.Now()
+
+// StatsInfo accounts all transfers
+type StatsInfo struct {
+ mu sync.RWMutex
+ ctx context.Context
+ ci *fs.ConfigInfo
+ bytes int64
+ errors int64
+ lastError error
+ fatalError bool
+ retryError bool
+ retryAfter time.Time
+ checks int64
+ checking *transferMap
+ checkQueue int
+ checkQueueSize int64
+ transfers int64
+ transferring *transferMap
+ transferQueue int
+ transferQueueSize int64
+ renames int64
+ renameQueue int
+ renameQueueSize int64
+ deletes int64
+ deletedDirs int64
+ inProgress *inProgress
+
+ // The root cause of SM issue #3298 was that transfer stats (startedTransfers)
+ // accumulated and were never pruned during the whole backup run.
+ // Even though by default MaxCompletedTransfers should take care of that,
+ // SM relies on full access to transfer statistics and disables it by setting it to -1 in agent setup.
+ // The solution is to keep aggregated stats for transfers pruned from startedTransfers in oldTransfers.
+ startedTransfers []*Transfer // currently active transfers + oldTransfers AggregatedTransferInfo + oldTimeRanges timeRanges // a merged list of time ranges for the transfers + oldDuration time.Duration // duration of transfers we have culled + group string +} + +// NewStats creates an initialised StatsInfo +func NewStats(ctx context.Context) *StatsInfo { + ci := fs.GetConfig(ctx) + return &StatsInfo{ + ctx: ctx, + ci: ci, + checking: newTransferMap(ci.Checkers, "checking"), + transferring: newTransferMap(ci.Transfers, "transferring"), + inProgress: newInProgress(ctx), + } +} + +// RemoteStats returns stats for rc +func (s *StatsInfo) RemoteStats() (out rc.Params, err error) { + out = make(rc.Params) + s.mu.RLock() + out["speed"] = s.Speed() + out["bytes"] = s.bytes + out["errors"] = s.errors + out["fatalError"] = s.fatalError + out["retryError"] = s.retryError + out["checks"] = s.checks + out["transfers"] = s.transfers + out["deletes"] = s.deletes + out["deletedDirs"] = s.deletedDirs + out["renames"] = s.renames + out["transferTime"] = s.totalDuration().Seconds() + out["elapsedTime"] = time.Since(startTime).Seconds() + s.mu.RUnlock() + if !s.checking.empty() { + out["checking"] = s.checking.remotes() + } + if !s.transferring.empty() { + out["transferring"] = s.transferring.rcStats(s.inProgress) + } + if s.errors > 0 { + out["lastError"] = s.lastError.Error() + } + return out, nil +} + +// Speed returns the average speed of the transfer in bytes/second +func (s *StatsInfo) Speed() float64 { + dt := s.totalDuration() + dtSeconds := dt.Seconds() + speed := 0.0 + if dt > 0 { + speed = float64(s.bytes) / dtSeconds + } + return speed +} + +// timeRange is a start and end time of a transfer +type timeRange struct { + start time.Time + end time.Time +} + +// timeRanges is a list of non-overlapping start and end times for +// transfers +type timeRanges []timeRange + +// merge all the overlapping time ranges +func (trs *timeRanges) merge() { + Trs := *trs + + // Sort by the starting time. + sort.Slice(Trs, func(i, j int) bool { + return Trs[i].start.Before(Trs[j].start) + }) + + // Merge overlaps and add distinctive ranges together + var ( + newTrs = Trs[:0] + i, j = 0, 1 + ) + for i < len(Trs) { + if j < len(Trs) { + if !Trs[i].end.Before(Trs[j].start) { + if Trs[i].end.Before(Trs[j].end) { + Trs[i].end = Trs[j].end + } + j++ + continue + } + } + newTrs = append(newTrs, Trs[i]) + i = j + j++ + } + + *trs = newTrs +} + +// cull remove any ranges whose start and end are before cutoff +// returning their duration sum +func (trs *timeRanges) cull(cutoff time.Time) (d time.Duration) { + var newTrs = (*trs)[:0] + for _, tr := range *trs { + if cutoff.Before(tr.start) || cutoff.Before(tr.end) { + newTrs = append(newTrs, tr) + } else { + d += tr.end.Sub(tr.start) + } + } + *trs = newTrs + return d +} + +// total the time out of the time ranges +func (trs timeRanges) total() (total time.Duration) { + for _, tr := range trs { + total += tr.end.Sub(tr.start) + } + return total +} + +// Total duration is union of durations of all transfers belonging to this +// object. +// Needs to be protected by mutex. +func (s *StatsInfo) totalDuration() time.Duration { + // copy of s.oldTimeRanges with extra room for the current transfers + timeRanges := make(timeRanges, len(s.oldTimeRanges), len(s.oldTimeRanges)+len(s.startedTransfers)) + copy(timeRanges, s.oldTimeRanges) + + // Extract time ranges of all transfers. 
+ now := time.Now() + for i := range s.startedTransfers { + start, end := s.startedTransfers[i].TimeRange() + if end.IsZero() { + end = now + } + timeRanges = append(timeRanges, timeRange{start, end}) + } + + timeRanges.merge() + return s.oldDuration + timeRanges.total() +} + +// eta returns the ETA of the current operation, +// rounded to full seconds. +// If the ETA cannot be determined 'ok' returns false. +func eta(size, total int64, rate float64) (eta time.Duration, ok bool) { + if total <= 0 || size < 0 || rate <= 0 { + return 0, false + } + remaining := total - size + if remaining < 0 { + return 0, false + } + seconds := float64(remaining) / rate + return time.Second * time.Duration(seconds), true +} + +// etaString returns the ETA of the current operation, +// rounded to full seconds. +// If the ETA cannot be determined it returns "-" +func etaString(done, total int64, rate float64) string { + d, ok := eta(done, total, rate) + if !ok { + return "-" + } + return fs.Duration(d).ReadableString() +} + +// percent returns a/b as a percentage rounded to the nearest integer +// as a string +// +// if the percentage is invalid it returns "-" +func percent(a int64, b int64) string { + if a < 0 || b <= 0 { + return "-" + } + return fmt.Sprintf("%d%%", int(float64(a)*100/float64(b)+0.5)) +} + +// String convert the StatsInfo to a string for printing +func (s *StatsInfo) String() string { + // checking and transferring have their own locking so read + // here before lock to prevent deadlock on GetBytes + transferring, checking := s.transferring.count(), s.checking.count() + transferringBytesDone, transferringBytesTotal := s.transferring.progress(s) + + s.mu.RLock() + + elapsedTime := time.Since(startTime) + elapsedTimeSecondsOnly := elapsedTime.Truncate(time.Second/10) % time.Minute + dt := s.totalDuration() + dtSeconds := dt.Seconds() + speed := 0.0 + if dt > 0 { + speed = float64(s.bytes) / dtSeconds + } + + displaySpeed := speed + if s.ci.DataRateUnit == "bits" { + displaySpeed *= 8 + } + + var ( + totalChecks = int64(s.checkQueue) + s.checks + int64(checking) + totalTransfer = int64(s.transferQueue) + s.transfers + int64(transferring) + // note that s.bytes already includes transferringBytesDone so + // we take it off here to avoid double counting + totalSize = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone + currentSize = s.bytes + buf = &bytes.Buffer{} + xfrchkString = "" + dateString = "" + ) + + if !s.ci.StatsOneLine { + _, _ = fmt.Fprintf(buf, "\nTransferred: ") + } else { + xfrchk := []string{} + if totalTransfer > 0 && s.transferQueue > 0 { + xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer)) + } + if totalChecks > 0 && s.checkQueue > 0 { + xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks)) + } + if len(xfrchk) > 0 { + xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", ")) + } + if s.ci.StatsOneLineDate { + t := time.Now() + dateString = t.Format(s.ci.StatsOneLineDateFormat) // Including the separator so people can customize it + } + } + + _, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s", + dateString, + fs.SizeSuffix(s.bytes), + fs.SizeSuffix(totalSize).Unit("Bytes"), + percent(s.bytes, totalSize), + fs.SizeSuffix(displaySpeed).Unit(strings.Title(s.ci.DataRateUnit)+"/s"), + etaString(currentSize, totalSize, speed), + xfrchkString, + ) + + if s.ci.ProgressTerminalTitle { + // Writes ETA to the terminal title + terminal.WriteTerminalTitle("ETA: " + etaString(currentSize, totalSize, speed)) 
+ } + + if !s.ci.StatsOneLine { + _, _ = buf.WriteRune('\n') + errorDetails := "" + switch { + case s.fatalError: + errorDetails = " (fatal error encountered)" + case s.retryError: + errorDetails = " (retrying may help)" + case s.errors != 0: + errorDetails = " (no need to retry)" + + } + + // Add only non zero stats + if s.errors != 0 { + _, _ = fmt.Fprintf(buf, "Errors: %10d%s\n", + s.errors, errorDetails) + } + if s.checks != 0 || totalChecks != 0 { + _, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n", + s.checks, totalChecks, percent(s.checks, totalChecks)) + } + if s.deletes != 0 || s.deletedDirs != 0 { + _, _ = fmt.Fprintf(buf, "Deleted: %10d (files), %d (dirs)\n", s.deletes, s.deletedDirs) + } + if s.renames != 0 { + _, _ = fmt.Fprintf(buf, "Renamed: %10d\n", s.renames) + } + if s.transfers != 0 || totalTransfer != 0 { + _, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n", + s.transfers, totalTransfer, percent(s.transfers, totalTransfer)) + } + _, _ = fmt.Fprintf(buf, "Elapsed time: %10ss\n", strings.TrimRight(elapsedTime.Truncate(time.Minute).String(), "0s")+fmt.Sprintf("%.1f", elapsedTimeSecondsOnly.Seconds())) + } + + // checking and transferring have their own locking so unlock + // here to prevent deadlock on GetBytes + s.mu.RUnlock() + + // Add per transfer stats if required + if !s.ci.StatsOneLine { + if !s.checking.empty() { + _, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.ctx, s.inProgress, s.transferring)) + } + if !s.transferring.empty() { + _, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.ctx, s.inProgress, nil)) + } + } + + return buf.String() +} + +// Transferred returns list of all completed transfers including checked and +// failed ones. +func (s *StatsInfo) Transferred() []TransferSnapshot { + s.mu.RLock() + defer s.mu.RUnlock() + ts := make([]TransferSnapshot, 0, len(s.startedTransfers)) + + for _, tr := range s.startedTransfers { + if tr.IsDone() { + ts = append(ts, tr.Snapshot()) + } + } + + return ts +} + +// Aggregated returns aggregated stats for all completed and running transfers. 
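Transferred above (and Aggregated, declared next) expose snapshots that external tooling can poll, and RemoteStats earlier in this file returns the same numbers as rc.Params. A hypothetical polling loop, assuming the GlobalStats accessor from this package's stats_groups.go (pollStats is illustrative only):

    package accounting

    import (
        "context"
        "fmt"
        "time"
    )

    // pollStats prints a one-line stats summary every second until
    // ctx is cancelled.
    func pollStats(ctx context.Context) {
        t := time.NewTicker(time.Second)
        defer t.Stop()
        for {
            select {
            case <-ctx.Done():
                return
            case <-t.C:
                if out, err := GlobalStats().RemoteStats(); err == nil {
                    fmt.Printf("bytes=%v speed=%v errors=%v\n", out["bytes"], out["speed"], out["errors"])
                }
            }
        }
    }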
+func (s *StatsInfo) Aggregated() AggregatedTransferInfo { + s.mu.RLock() + defer s.mu.RUnlock() + ai := s.oldTransfers + for _, tr := range s.startedTransfers { + ai.update(tr) + } + return ai +} + +// Log outputs the StatsInfo to the log +func (s *StatsInfo) Log() { + if s.ci.UseJSONLog { + out, _ := s.RemoteStats() + fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v%v\n", s, fs.LogValueHide("stats", out)) + } else { + fs.LogLevelPrintf(s.ci.StatsLogLevel, nil, "%v\n", s) + } + +} + +// Bytes updates the stats for bytes bytes +func (s *StatsInfo) Bytes(bytes int64) { + s.mu.Lock() + defer s.mu.Unlock() + s.bytes += bytes +} + +// GetBytes returns the number of bytes transferred so far +func (s *StatsInfo) GetBytes() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + return s.bytes +} + +// GetBytesWithPending returns the number of bytes transferred and remaining transfers +func (s *StatsInfo) GetBytesWithPending() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + pending := int64(0) + for _, tr := range s.startedTransfers { + if tr.acc != nil { + bytes, size := tr.acc.progress() + if bytes < size { + pending += size - bytes + } + } + } + return s.bytes + pending +} + +// Errors updates the stats for errors +func (s *StatsInfo) Errors(errors int64) { + s.mu.Lock() + defer s.mu.Unlock() + s.errors += errors +} + +// GetErrors reads the number of errors +func (s *StatsInfo) GetErrors() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + return s.errors +} + +// GetLastError returns the lastError +func (s *StatsInfo) GetLastError() error { + s.mu.RLock() + defer s.mu.RUnlock() + return s.lastError +} + +// GetChecks returns the number of checks +func (s *StatsInfo) GetChecks() int64 { + s.mu.RLock() + defer s.mu.RUnlock() + return s.checks +} + +// FatalError sets the fatalError flag +func (s *StatsInfo) FatalError() { + s.mu.Lock() + defer s.mu.Unlock() + s.fatalError = true +} + +// HadFatalError returns whether there has been at least one FatalError +func (s *StatsInfo) HadFatalError() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return s.fatalError +} + +// RetryError sets the retryError flag +func (s *StatsInfo) RetryError() { + s.mu.Lock() + defer s.mu.Unlock() + s.retryError = true +} + +// HadRetryError returns whether there has been at least one non-NoRetryError +func (s *StatsInfo) HadRetryError() bool { + s.mu.RLock() + defer s.mu.RUnlock() + return s.retryError +} + +// Deletes updates the stats for deletes +func (s *StatsInfo) Deletes(deletes int64) int64 { + s.mu.Lock() + defer s.mu.Unlock() + s.deletes += deletes + return s.deletes +} + +// DeletedDirs updates the stats for deletedDirs +func (s *StatsInfo) DeletedDirs(deletedDirs int64) int64 { + s.mu.Lock() + defer s.mu.Unlock() + s.deletedDirs += deletedDirs + return s.deletedDirs +} + +// Renames updates the stats for renames +func (s *StatsInfo) Renames(renames int64) int64 { + s.mu.Lock() + defer s.mu.Unlock() + s.renames += renames + return s.renames +} + +// ResetCounters sets the counters (bytes, checks, errors, transfers, deletes, renames) to 0 and resets lastError, fatalError and retryError +func (s *StatsInfo) ResetCounters() { + s.mu.Lock() + defer s.mu.Unlock() + s.bytes = 0 + s.errors = 0 + s.lastError = nil + s.fatalError = false + s.retryError = false + s.retryAfter = time.Time{} + s.checks = 0 + s.transfers = 0 + s.deletes = 0 + s.deletedDirs = 0 + s.renames = 0 + s.startedTransfers = nil + s.oldTransfers = AggregatedTransferInfo{} + s.oldDuration = 0 +} + +// ResetErrors sets the errors count to 0 and resets lastError, 
fatalError and retryError
+func (s *StatsInfo) ResetErrors() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.errors = 0
+ s.lastError = nil
+ s.fatalError = false
+ s.retryError = false
+ s.retryAfter = time.Time{}
+ s.oldTransfers.Failed = 0
+ s.oldTransfers.Error = nil
+}
+
+// Errored returns whether there have been any errors
+func (s *StatsInfo) Errored() bool {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.errors != 0
+}
+
+// Error adds a single error into the stats, assigns lastError and eventually sets fatalError or retryError
+func (s *StatsInfo) Error(err error) error {
+ if err == nil || fserrors.IsCounted(err) {
+ return err
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.errors++
+ s.lastError = err
+ err = fserrors.FsError(err)
+ fserrors.Count(err)
+ switch {
+ case fserrors.IsFatalError(err):
+ s.fatalError = true
+ case fserrors.IsRetryAfterError(err):
+ retryAfter := fserrors.RetryAfterErrorTime(err)
+ if s.retryAfter.IsZero() || retryAfter.Sub(s.retryAfter) > 0 {
+ s.retryAfter = retryAfter
+ }
+ s.retryError = true
+ case !fserrors.IsNoRetryError(err):
+ s.retryError = true
+ }
+ return err
+}
+
+// RetryAfter returns the time to retry after if it is set. It will
+// be Zero if it isn't set.
+func (s *StatsInfo) RetryAfter() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.retryAfter
+}
+
+// NewCheckingTransfer adds a checking transfer to the stats, from the object.
+func (s *StatsInfo) NewCheckingTransfer(obj fs.Object) *Transfer {
+ tr := newCheckingTransfer(s, obj)
+ s.checking.add(tr)
+ return tr
+}
+
+// DoneChecking removes a check from the stats
+func (s *StatsInfo) DoneChecking(remote string) {
+ s.checking.del(remote)
+ s.mu.Lock()
+ s.checks++
+ s.mu.Unlock()
+}
+
+// GetTransfers reads the number of transfers
+func (s *StatsInfo) GetTransfers() int64 {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.transfers
+}
+
+// NewTransfer adds a transfer to the stats from the object.
+func (s *StatsInfo) NewTransfer(obj fs.Object) *Transfer {
+ tr := newTransfer(s, obj)
+ s.transferring.add(tr)
+ return tr
+}
+
+// NewTransferRemoteSize adds a transfer to the stats based on remote and size.
+func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64) *Transfer {
+ tr := newTransferRemoteSize(s, remote, size, false)
+ s.transferring.add(tr)
+ return tr
+}
+
+// DoneTransferring removes a transfer from the stats
+//
+// if ok is true then it increments the transfers count
+func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
+ s.transferring.del(remote)
+ if ok {
+ s.mu.Lock()
+ s.transfers++
+ s.mu.Unlock()
+ }
+}
+
+// UpdateSkipped marks file bytes as skipped in oldTransfers.
+func (s *StatsInfo) UpdateSkipped(size int64) {
+ s.mu.Lock()
+ s.oldTransfers.Skipped += size
+ s.mu.Unlock()
+}
+
+// SetCheckQueue sets the number of queued checks
+func (s *StatsInfo) SetCheckQueue(n int, size int64) {
+ s.mu.Lock()
+ s.checkQueue = n
+ s.checkQueueSize = size
+ s.mu.Unlock()
+}
+
+// SetTransferQueue sets the number of queued transfers
+func (s *StatsInfo) SetTransferQueue(n int, size int64) {
+ s.mu.Lock()
+ s.transferQueue = n
+ s.transferQueueSize = size
+ s.mu.Unlock()
+}
+
+// SetRenameQueue sets the number of queued renames
+func (s *StatsInfo) SetRenameQueue(n int, size int64) {
+ s.mu.Lock()
+ s.renameQueue = n
+ s.renameQueueSize = size
+ s.mu.Unlock()
+}
+
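Taken together, the constructors and the Done* callbacks above define the lifecycle of one accounted transfer. A rough sketch of how a caller drives it, assuming the Transfer.Account and Transfer.Done helpers from this package's transfer.go, which is not part of this hunk (copyAccounted is hypothetical):

    package accounting

    import (
        "context"
        "io"
        "io/ioutil"
    )

    // copyAccounted registers a transfer, reads through the accounted
    // (bandwidth-limited) reader, and reports the result to the stats.
    func copyAccounted(ctx context.Context, s *StatsInfo, name string, in io.ReadCloser, size int64) error {
        tr := s.NewTransferRemoteSize(name, size) // shows up under "transferring"
        acc := tr.Account(ctx, in)                // accounted reader
        _, err := io.Copy(ioutil.Discard, acc)
        tr.Done(ctx, err) // moves the transfer to the completed list
        return err
    }

+// AddTransfer adds a reference to the started transfer.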
+func (s *StatsInfo) AddTransfer(transfer *Transfer) { + s.mu.Lock() + s.startedTransfers = append(s.startedTransfers, transfer) + s.mu.Unlock() +} + +// removeTransfer removes a reference to the started transfer in +// position i. +// +// Must be called with the lock held +func (s *StatsInfo) removeTransfer(transfer *Transfer, i int) { + now := time.Now() + + // add finished transfer onto old time ranges + start, end := transfer.TimeRange() + if end.IsZero() { + end = now + } + s.oldTimeRanges = append(s.oldTimeRanges, timeRange{start, end}) + s.oldTimeRanges.merge() + s.oldTransfers.update(transfer) + + // remove the found entry + s.startedTransfers = append(s.startedTransfers[:i], s.startedTransfers[i+1:]...) + + // Find the youngest active transfer + oldestStart := now + for i := range s.startedTransfers { + start, _ := s.startedTransfers[i].TimeRange() + if start.Before(oldestStart) { + oldestStart = start + } + } + + // remove old entries older than that + s.oldDuration += s.oldTimeRanges.cull(oldestStart) +} + +// RemoveTransfer removes a reference to the started transfer. +func (s *StatsInfo) RemoveTransfer(transfer *Transfer) { + s.mu.Lock() + for i, tr := range s.startedTransfers { + if tr == transfer { + s.removeTransfer(tr, i) + break + } + } + s.mu.Unlock() +} + +// PruneTransfers makes sure there aren't too many old transfers by removing +// single finished transfer. +func (s *StatsInfo) PruneTransfers() { + if MaxCompletedTransfers < 0 { + return + } + s.mu.Lock() + // remove a transfer from the start if we are over quota + if len(s.startedTransfers) > MaxCompletedTransfers+s.ci.Transfers { + for i, tr := range s.startedTransfers { + if tr.IsDone() { + s.removeTransfer(tr, i) + break + } + } + } + s.mu.Unlock() +} diff --git a/vendor/github.com/rclone/rclone/fs/accounting/stats_groups.go b/vendor/github.com/rclone/rclone/fs/accounting/stats_groups.go new file mode 100644 index 00000000000..c1bf64bbe80 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/accounting/stats_groups.go @@ -0,0 +1,467 @@ +package accounting + +import ( + "context" + "sync" + + "github.com/rclone/rclone/fs/rc" + + "github.com/rclone/rclone/fs" +) + +const globalStats = "global_stats" + +var groups *statsGroups + +func init() { + // Init stats container + groups = newStatsGroups() + + // Set the function pointer up in fs + fs.CountError = GlobalStats().Error +} + +func rcListStats(ctx context.Context, in rc.Params) (rc.Params, error) { + out := make(rc.Params) + + out["groups"] = groups.names() + + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "core/group-list", + Fn: rcListStats, + Title: "Returns list of stats.", + Help: ` +This returns list of stats groups currently in memory. + +Returns the following values: +` + "```" + ` +{ + "groups": an array of group names: + [ + "group1", + "group2", + ... + ] +} +` + "```" + ` +`, + }) +} + +func rcRemoteStats(ctx context.Context, in rc.Params) (rc.Params, error) { + // Check to see if we should filter by group. + group, err := in.GetString("group") + if rc.NotErrParamNotFound(err) { + return rc.Params{}, err + } + if group != "" { + return StatsGroup(ctx, group).RemoteStats() + } + + return groups.sum(ctx).RemoteStats() +} + +func init() { + rc.Add(rc.Call{ + Path: "core/stats", + Fn: rcRemoteStats, + Title: "Returns stats about current transfers.", + Help: ` +This returns all available stats: + + rclone rc core/stats + +If group is not provided then summed up stats for all groups will be +returned. 
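Beyond the HTTP remote control shown above, the same payload can be fetched in process. A hedged sketch — the group name "job/1" is hypothetical, and the payload keys mirror the core/stats help:

```go
package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs/accounting"
)

func main() {
	ctx := context.Background()

	// "job/1" is a hypothetical group name; any string tag works.
	stats := accounting.StatsGroup(ctx, "job/1")
	stats.Bytes(1 << 20) // pretend 1 MiB was moved

	// RemoteStats builds the same rc.Params payload that core/stats serves.
	out, err := stats.RemoteStats()
	if err != nil {
		panic(err)
	}
	fmt.Println(out["bytes"]) // 1048576
}
```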
+ +Parameters + +- group - name of the stats group (string) + +Returns the following values: + +` + "```" + ` +{ + "speed": average speed in bytes/sec since start of the process, + "bytes": total transferred bytes since the start of the process, + "errors": number of errors, + "fatalError": whether there has been at least one FatalError, + "retryError": whether there has been at least one non-NoRetryError, + "checks": number of checked files, + "transfers": number of transferred files, + "deletes" : number of deleted files, + "renames" : number of renamed files, + "transferTime" : total time spent on running jobs, + "elapsedTime": time in seconds since the start of the process, + "lastError": last occurred error, + "transferring": an array of currently active file transfers: + [ + { + "bytes": total transferred bytes for this file, + "eta": estimated time in seconds until file transfer completion + "name": name of the file, + "percentage": progress of the file transfer in percent, + "speed": average speed over the whole transfer in bytes/sec, + "speedAvg": current speed in bytes/sec as an exponentially weighted moving average, + "size": size of the file in bytes + } + ], + "checking": an array of names of currently active file checks + [] +} +` + "```" + ` +Values for "transferring", "checking" and "lastError" are only assigned if data is available. +The value for "eta" is null if an eta cannot be determined. +`, + }) +} + +func rcTransferredStats(ctx context.Context, in rc.Params) (rc.Params, error) { + // Check to see if we should filter by group. + group, err := in.GetString("group") + if rc.NotErrParamNotFound(err) { + return rc.Params{}, err + } + + out := make(rc.Params) + if group != "" { + out["transferred"] = StatsGroup(ctx, group).Transferred() + } else { + out["transferred"] = groups.sum(ctx).Transferred() + } + + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "core/transferred", + Fn: rcTransferredStats, + Title: "Returns stats about completed transfers.", + Help: ` +This returns stats about completed transfers: + + rclone rc core/transferred + +If group is not provided then completed transfers for all groups will be +returned. + +Note only the last 100 completed transfers are returned. + +Parameters + +- group - name of the stats group (string) + +Returns the following values: +` + "```" + ` +{ + "transferred": an array of completed transfers (including failed ones): + [ + { + "name": name of the file, + "size": size of the file in bytes, + "bytes": total transferred bytes for this file, + "checked": if the transfer is only checked (skipped, deleted), + "timestamp": integer representing millisecond unix epoch, + "error": string description of the error (empty if successful), + "jobid": id of the job that this transfer belongs to + } + ] +} +` + "```" + ` +`, + }) +} + +func rcAggregatedStats(ctx context.Context, in rc.Params) (rc.Params, error) { + // Check to see if we should filter by group. 
+ group, err := in.GetString("group") + if rc.NotErrParamNotFound(err) { + return rc.Params{}, err + } + + out := make(rc.Params) + if group != "" { + out["aggregated"] = StatsGroup(ctx, group).Aggregated() + } else { + out["aggregated"] = groups.sum(ctx).Aggregated() + } + + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "core/aggregated", + Fn: rcAggregatedStats, + Title: "Returns aggregated stats about all transfers since last stats reset.", + Help: ` +This returns all stats about transfers since last stats reset: + + rclone rc core/aggregated + +If group is not provided then completed transfers for all groups will be +returned. + +Parameters + +- group - name of the stats group (string) + +Returns the following values: +` + "```" + ` +{ + "aggregated": aggregated stats of all transfers: + { + "uploaded": total transferred bytes, + "skipped": total skipped bytes because file has already been present at destination, + "failed": total failed bytes, + "size": total size in bytes, + "error": string description of the error (empty if successful), + "checked": if the transfer is only checked (skipped, deleted), + "started_at": the earliest transfer start time, + "completed_at": the latest transfer end time, + } +} +` + "```" + ` +`, + }) +} + +func rcResetStats(ctx context.Context, in rc.Params) (rc.Params, error) { + // Check to see if we should filter by group. + group, err := in.GetString("group") + if rc.NotErrParamNotFound(err) { + return rc.Params{}, err + } + + if group != "" { + stats := groups.get(group) + stats.ResetErrors() + stats.ResetCounters() + } else { + groups.reset() + } + + return rc.Params{}, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "core/stats-reset", + Fn: rcResetStats, + Title: "Reset stats.", + Help: ` +This clears counters, errors and finished transfers for all stats or specific +stats group if group is provided. + +Parameters + +- group - name of the stats group (string) +`, + }) +} + +func rcDeleteStats(ctx context.Context, in rc.Params) (rc.Params, error) { + // Group name required because we only do single group. + group, err := in.GetString("group") + if rc.NotErrParamNotFound(err) { + return rc.Params{}, err + } + + if group != "" { + groups.delete(group) + } + + return rc.Params{}, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "core/stats-delete", + Fn: rcDeleteStats, + Title: "Delete stats group.", + Help: ` +This deletes entire stats group + +Parameters + +- group - name of the stats group (string) +`, + }) +} + +type statsGroupCtx int64 + +const statsGroupKey statsGroupCtx = 1 + +// WithStatsGroup returns copy of the parent context with assigned group. +func WithStatsGroup(parent context.Context, group string) context.Context { + return context.WithValue(parent, statsGroupKey, group) +} + +// StatsGroupFromContext returns group from the context if it's available. +// Returns false if group is empty. +func StatsGroupFromContext(ctx context.Context) (string, bool) { + statsGroup, ok := ctx.Value(statsGroupKey).(string) + if statsGroup == "" { + ok = false + } + return statsGroup, ok +} + +// Stats gets stats by extracting group from context. +func Stats(ctx context.Context) *StatsInfo { + group, ok := StatsGroupFromContext(ctx) + if !ok { + return GlobalStats() + } + return StatsGroup(ctx, group) +} + +// StatsGroup gets stats by group name. 
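The context helpers just defined tie the grouping machinery together; a small usage sketch (the group name is hypothetical):

```go
package main

import (
	"context"

	"github.com/rclone/rclone/fs/accounting"
)

func main() {
	// Tag a context with a stats group (the name is arbitrary).
	ctx := accounting.WithStatsGroup(context.Background(), "nightly-sync")

	// Anywhere downstream, Stats(ctx) resolves to that group's StatsInfo;
	// with an untagged context it falls back to GlobalStats().
	accounting.Stats(ctx).Bytes(512)
}
```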
+func StatsGroup(ctx context.Context, group string) *StatsInfo { + stats := groups.get(group) + if stats == nil { + return NewStatsGroup(ctx, group) + } + return stats +} + +// GlobalStats returns special stats used for global accounting. +func GlobalStats() *StatsInfo { + return StatsGroup(context.Background(), globalStats) +} + +// NewStatsGroup creates new stats under named group. +func NewStatsGroup(ctx context.Context, group string) *StatsInfo { + stats := NewStats(ctx) + stats.group = group + groups.set(ctx, group, stats) + return stats +} + +// statsGroups holds a synchronized map of stats +type statsGroups struct { + mu sync.Mutex + m map[string]*StatsInfo + order []string +} + +// newStatsGroups makes a new statsGroups object +func newStatsGroups() *statsGroups { + return &statsGroups{ + m: make(map[string]*StatsInfo), + } +} + +// set marks the stats as belonging to a group +func (sg *statsGroups) set(ctx context.Context, group string, stats *StatsInfo) { + sg.mu.Lock() + defer sg.mu.Unlock() + ci := fs.GetConfig(ctx) + + // Limit number of groups kept in memory. + if len(sg.order) >= ci.MaxStatsGroups { + group := sg.order[0] + fs.LogPrintf(fs.LogLevelDebug, nil, "Max number of stats groups reached removing %s", group) + delete(sg.m, group) + r := (len(sg.order) - ci.MaxStatsGroups) + 1 + sg.order = sg.order[r:] + } + + // Exclude global stats from listing + if group != globalStats { + sg.order = append(sg.order, group) + } + sg.m[group] = stats +} + +// get gets the stats for group, or nil if not found +func (sg *statsGroups) get(group string) *StatsInfo { + sg.mu.Lock() + defer sg.mu.Unlock() + stats, ok := sg.m[group] + if !ok { + return nil + } + return stats +} + +func (sg *statsGroups) names() []string { + sg.mu.Lock() + defer sg.mu.Unlock() + return sg.order +} + +// sum returns aggregate stats that contains summation of all groups. +func (sg *statsGroups) sum(ctx context.Context) *StatsInfo { + sg.mu.Lock() + defer sg.mu.Unlock() + + sum := NewStats(ctx) + for _, stats := range sg.m { + stats.mu.RLock() + { + sum.bytes += stats.bytes + sum.errors += stats.errors + sum.fatalError = sum.fatalError || stats.fatalError + sum.retryError = sum.retryError || stats.retryError + sum.checks += stats.checks + sum.transfers += stats.transfers + sum.deletes += stats.deletes + sum.deletedDirs += stats.deletedDirs + sum.renames += stats.renames + sum.checking.merge(stats.checking) + sum.transferring.merge(stats.transferring) + sum.inProgress.merge(stats.inProgress) + if sum.lastError == nil && stats.lastError != nil { + sum.lastError = stats.lastError + } + sum.startedTransfers = append(sum.startedTransfers, stats.startedTransfers...) + sum.oldDuration += stats.oldDuration + sum.oldTimeRanges = append(sum.oldTimeRanges, stats.oldTimeRanges...) + sum.oldTransfers.merge(stats.oldTransfers) + } + stats.mu.RUnlock() + } + return sum +} + +func (sg *statsGroups) reset() { + sg.mu.Lock() + defer sg.mu.Unlock() + + for _, stats := range sg.m { + stats.ResetErrors() + stats.ResetCounters() + } + + sg.m = make(map[string]*StatsInfo) + sg.order = nil +} + +// delete removes all references to the group. +func (sg *statsGroups) delete(group string) { + sg.mu.Lock() + defer sg.mu.Unlock() + stats := sg.m[group] + if stats == nil { + return + } + stats.ResetErrors() + stats.ResetCounters() + delete(sg.m, group) + + // Remove group reference from the ordering slice. 
+ tmp := sg.order[:0] + for _, g := range sg.order { + if g != group { + tmp = append(tmp, g) + } + } + sg.order = tmp +} diff --git a/vendor/github.com/rclone/rclone/fs/accounting/token_bucket.go b/vendor/github.com/rclone/rclone/fs/accounting/token_bucket.go new file mode 100644 index 00000000000..377d9af44f8 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/accounting/token_bucket.go @@ -0,0 +1,275 @@ +package accounting + +import ( + "context" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/rc" + "golang.org/x/time/rate" +) + +// TokenBucket holds the global token bucket limiter +var TokenBucket tokenBucket + +// TokenBucketSlot is the type to select which token bucket to use +type TokenBucketSlot int + +// Slots for the token bucket +const ( + TokenBucketSlotAccounting TokenBucketSlot = iota + TokenBucketSlotTransportRx + TokenBucketSlotTransportTx + TokenBucketSlots +) + +type buckets [TokenBucketSlots]*rate.Limiter + +// tokenBucket holds info about the rate limiters in use +type tokenBucket struct { + mu sync.RWMutex // protects the token bucket variables + curr buckets + prev buckets + toggledOff bool + currLimitMu sync.Mutex // protects changes to the timeslot + currLimit fs.BwTimeSlot +} + +// Return true if limit is disabled +// +// Call with lock held +func (bs *buckets) _isOff() bool { + return bs[0] == nil +} + +// Disable the limits +// +// Call with lock held +func (bs *buckets) _setOff() { + for i := range bs { + bs[i] = nil + } +} + +const maxBurstSize = 4 * 1024 * 1024 // must be bigger than the biggest request + +// make a new empty token bucket with the bandwidth(s) given +func newTokenBucket(bandwidth fs.BwPair) (tbs buckets) { + bandwidthAccounting := fs.SizeSuffix(-1) + if bandwidth.Tx > 0 { + tbs[TokenBucketSlotTransportTx] = rate.NewLimiter(rate.Limit(bandwidth.Tx), maxBurstSize) + bandwidthAccounting = bandwidth.Tx + } + if bandwidth.Rx > 0 { + tbs[TokenBucketSlotTransportRx] = rate.NewLimiter(rate.Limit(bandwidth.Rx), maxBurstSize) + if bandwidth.Rx > bandwidthAccounting { + bandwidthAccounting = bandwidth.Rx + } + } + if bandwidthAccounting > 0 { + tbs[TokenBucketSlotAccounting] = rate.NewLimiter(rate.Limit(bandwidthAccounting), maxBurstSize) + } + for _, tb := range tbs { + if tb != nil { + // empty the bucket + err := tb.WaitN(context.Background(), maxBurstSize) + if err != nil { + fs.Errorf(nil, "Failed to empty token bucket: %v", err) + } + } + } + return tbs +} + +// StartTokenBucket starts the token bucket if necessary +func (tb *tokenBucket) StartTokenBucket(ctx context.Context) { + tb.mu.Lock() + defer tb.mu.Unlock() + ci := fs.GetConfig(ctx) + tb.currLimit = ci.BwLimit.LimitAt(time.Now()) + if tb.currLimit.Bandwidth.IsSet() { + tb.curr = newTokenBucket(tb.currLimit.Bandwidth) + fs.Infof(nil, "Starting bandwidth limiter at %vBytes/s", &tb.currLimit.Bandwidth) + + // Start the SIGUSR2 signal handler to toggle bandwidth. + // This function does nothing in windows systems. + tb.startSignalHandler() + } +} + +// StartTokenTicker creates a ticker to update the bandwidth limiter every minute. +func (tb *tokenBucket) StartTokenTicker(ctx context.Context) { + ci := fs.GetConfig(ctx) + // If the timetable has a single entry or was not specified, we don't need + // a ticker to update the bandwidth. 
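Stepping back to `newTokenBucket` above: it leans on `golang.org/x/time/rate` and immediately drains the initial burst so limiting starts from an empty bucket rather than giving the first `maxBurstSize` bytes away for free. A standalone sketch of that pattern (the numbers are illustrative):

```go
package main

import (
	"context"

	"golang.org/x/time/rate"
)

func main() {
	const burst = 4 * 1024 * 1024 // mirrors maxBurstSize above

	// A limiter allowing ~1 MiB/s with a large burst so single big
	// requests are not starved.
	lim := rate.NewLimiter(rate.Limit(1<<20), burst)

	// Drain the initial burst so the limiter starts "empty", exactly as
	// newTokenBucket does.
	_ = lim.WaitN(context.Background(), burst)

	// Each transfer of n bytes then waits for n tokens.
	_ = lim.WaitN(context.Background(), 64*1024)
}
```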
+ if len(ci.BwLimit) <= 1 { + return + } + + ticker := time.NewTicker(time.Minute) + go func() { + for range ticker.C { + limitNow := ci.BwLimit.LimitAt(time.Now()) + tb.currLimitMu.Lock() + + if tb.currLimit.Bandwidth != limitNow.Bandwidth { + tb.mu.Lock() + + // If bwlimit is toggled off, the change should only + // become active on the next toggle, which causes + // an exchange of tb.curr <-> tb.prev + var targetBucket *buckets + if tb.toggledOff { + targetBucket = &tb.prev + } else { + targetBucket = &tb.curr + } + + // Set new bandwidth. If unlimited, set tokenbucket to nil. + if limitNow.Bandwidth.IsSet() { + *targetBucket = newTokenBucket(limitNow.Bandwidth) + if tb.toggledOff { + fs.Logf(nil, "Scheduled bandwidth change. "+ + "Limit will be set to %vBytes/s when toggled on again.", &limitNow.Bandwidth) + } else { + fs.Logf(nil, "Scheduled bandwidth change. Limit set to %vBytes/s", &limitNow.Bandwidth) + } + } else { + targetBucket._setOff() + fs.Logf(nil, "Scheduled bandwidth change. Bandwidth limits disabled") + } + + tb.currLimit = limitNow + tb.mu.Unlock() + } + tb.currLimitMu.Unlock() + } + }() +} + +// LimitBandwidth sleeps for the correct amount of time for the passage +// of n bytes according to the current bandwidth limit +func (tb *tokenBucket) LimitBandwidth(i TokenBucketSlot, n int) { + tb.mu.RLock() + + // Limit the transfer speed if required + if tb.curr[i] != nil { + err := tb.curr[i].WaitN(context.Background(), n) + if err != nil { + fs.Errorf(nil, "Token bucket error: %v", err) + } + } + + tb.mu.RUnlock() +} + +// SetBwLimit sets the current bandwidth limit +func (tb *tokenBucket) SetBwLimit(bandwidth fs.BwPair) { + tb.mu.Lock() + defer tb.mu.Unlock() + if bandwidth.IsSet() { + tb.curr = newTokenBucket(bandwidth) + fs.Logf(nil, "Bandwidth limit set to %v", bandwidth) + } else { + tb.curr._setOff() + fs.Logf(nil, "Bandwidth limit reset to unlimited") + } +} + +// read and set the bandwidth limits +func (tb *tokenBucket) rcBwlimit(ctx context.Context, in rc.Params) (out rc.Params, err error) { + if in["rate"] != nil { + bwlimit, err := in.GetString("rate") + if err != nil { + return out, err + } + var bws fs.BwTimetable + err = bws.Set(bwlimit) + if err != nil { + return out, errors.Wrap(err, "bad bwlimit") + } + if len(bws) != 1 { + return out, errors.New("need exactly 1 bandwidth setting") + } + bw := bws[0] + tb.SetBwLimit(bw.Bandwidth) + } + tb.mu.RLock() + bytesPerSecond := int64(-1) + if tb.curr[TokenBucketSlotAccounting] != nil { + bytesPerSecond = int64(tb.curr[TokenBucketSlotAccounting].Limit()) + } + var bp = fs.BwPair{Tx: -1, Rx: -1} + if tb.curr[TokenBucketSlotTransportTx] != nil { + bp.Tx = fs.SizeSuffix(tb.curr[TokenBucketSlotTransportTx].Limit()) + } + if tb.curr[TokenBucketSlotTransportRx] != nil { + bp.Rx = fs.SizeSuffix(tb.curr[TokenBucketSlotTransportRx].Limit()) + } + tb.mu.RUnlock() + out = rc.Params{ + "rate": bp.String(), + "bytesPerSecond": bytesPerSecond, + "bytesPerSecondTx": int64(bp.Tx), + "bytesPerSecondRx": int64(bp.Rx), + } + return out, nil +} + +// Remote control for the token bucket +func init() { + rc.Add(rc.Call{ + Path: "core/bwlimit", + Fn: func(ctx context.Context, in rc.Params) (out rc.Params, err error) { + return TokenBucket.rcBwlimit(ctx, in) + }, + Title: "Set the bandwidth limit.", + Help: ` +This sets the bandwidth limit to the string passed in. This should be +a single bandwidth limit entry or a pair of upload:download bandwidth. 
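`LimitBandwidth` above is called with the byte count of each chunk moved. A hedged, generic sketch (not rclone code) of wiring such a per-read wait into an `io.Reader`:

```go
package main

import (
	"context"
	"io"
	"strings"

	"golang.org/x/time/rate"
)

// limitedReader is a hypothetical sketch of the same shape as
// tokenBucket.LimitBandwidth: every chunk read waits for that many tokens.
type limitedReader struct {
	r   io.Reader
	lim *rate.Limiter
}

func (l *limitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		// Wait for n tokens; an error here only means the context died.
		_ = l.lim.WaitN(context.Background(), n)
	}
	return n, err
}

func main() {
	src := strings.NewReader("hello world")
	lr := &limitedReader{r: src, lim: rate.NewLimiter(rate.Limit(1024), 4096)}
	_, _ = io.ReadAll(lr)
}
```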
+ +Eg + + rclone rc core/bwlimit rate=off + { + "bytesPerSecond": -1, + "bytesPerSecondTx": -1, + "bytesPerSecondRx": -1, + "rate": "off" + } + rclone rc core/bwlimit rate=1M + { + "bytesPerSecond": 1048576, + "bytesPerSecondTx": 1048576, + "bytesPerSecondRx": 1048576, + "rate": "1M" + } + rclone rc core/bwlimit rate=1M:100k + { + "bytesPerSecond": 1048576, + "bytesPerSecondTx": 1048576, + "bytesPerSecondRx": 131072, + "rate": "1M" + } + + +If the rate parameter is not supplied then the bandwidth is queried + + rclone rc core/bwlimit + { + "bytesPerSecond": 1048576, + "bytesPerSecondTx": 1048576, + "bytesPerSecondRx": 1048576, + "rate": "1M" + } + +The format of the parameter is exactly the same as passed to --bwlimit +except only one bandwidth may be specified. + +In either case "rate" is returned as a human readable string, and +"bytesPerSecond" is returned as a number. +`, + }) +} diff --git a/vendor/github.com/rclone/rclone/fs/accounting/tpslimit.go b/vendor/github.com/rclone/rclone/fs/accounting/tpslimit.go new file mode 100644 index 00000000000..b4b834ae04d --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/accounting/tpslimit.go @@ -0,0 +1,37 @@ +package accounting + +import ( + "context" + + "github.com/rclone/rclone/fs" + "golang.org/x/time/rate" +) + +var ( + tpsBucket *rate.Limiter // for limiting number of http transactions per second +) + +// StartLimitTPS starts the token bucket for transactions per second +// limiting if necessary +func StartLimitTPS(ctx context.Context) { + ci := fs.GetConfig(ctx) + if ci.TPSLimit > 0 { + tpsBurst := ci.TPSLimitBurst + if tpsBurst < 1 { + tpsBurst = 1 + } + tpsBucket = rate.NewLimiter(rate.Limit(ci.TPSLimit), tpsBurst) + fs.Infof(nil, "Starting transaction limiter: max %g transactions/s with burst %d", ci.TPSLimit, tpsBurst) + } +} + +// LimitTPS limits the number of transactions per second if enabled. +// It should be called once per transaction. +func LimitTPS(ctx context.Context) { + if tpsBucket != nil { + tbErr := tpsBucket.Wait(ctx) + if tbErr != nil && tbErr != context.Canceled { + fs.Errorf(nil, "HTTP token bucket error: %v", tbErr) + } + } +} diff --git a/vendor/github.com/rclone/rclone/fs/accounting/transfer.go b/vendor/github.com/rclone/rclone/fs/accounting/transfer.go new file mode 100644 index 00000000000..24e087e1c7e --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/accounting/transfer.go @@ -0,0 +1,253 @@ +package accounting + +import ( + "context" + "encoding/json" + "errors" + "io" + "sync" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/rc" +) + +// AggregatedTransferInfo aggregated transfer statistics. 
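`LimitTPS` above expects to be called once per HTTP transaction. One plausible way to honour that contract — the RoundTripper here is a hypothetical sketch, not how rclone wires it internally:

```go
package main

import (
	"net/http"

	"github.com/rclone/rclone/fs/accounting"
)

// tpsTripper calls LimitTPS once per request, which is exactly the
// "once per transaction" contract LimitTPS documents.
type tpsTripper struct{ next http.RoundTripper }

func (t tpsTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	accounting.LimitTPS(req.Context()) // no-op unless a TPS limit was started
	return t.next.RoundTrip(req)
}

func main() {
	client := &http.Client{Transport: tpsTripper{next: http.DefaultTransport}}
	_ = client
}
```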
+type AggregatedTransferInfo struct { + Uploaded int64 `json:"uploaded"` + Skipped int64 `json:"skipped"` + Failed int64 `json:"failed"` + Size int64 `json:"size"` + Error error `json:"error"` + StartedAt time.Time `json:"started_at"` + CompletedAt time.Time `json:"completed_at,omitempty"` +} + +func (ai *AggregatedTransferInfo) update(t *Transfer) { + if t.checking { + return + } + + if ai.StartedAt.IsZero() { + ai.StartedAt = t.startedAt + } + if !t.startedAt.IsZero() && t.startedAt.Before(ai.StartedAt) { + ai.StartedAt = t.startedAt + } + + t.mu.RLock() + defer t.mu.RUnlock() + + if !t.completedAt.IsZero() && ai.CompletedAt.Before(t.completedAt) { + ai.CompletedAt = t.completedAt + } + + var b, s int64 = 0, t.size + if t.acc != nil { + b, s = t.acc.progress() + } + ai.Size += s + if t.err != nil { + ai.Failed += s + ai.Error = errors.Join(ai.Error, t.err) + } else { + ai.Uploaded += b + } +} + +func (ai *AggregatedTransferInfo) merge(other AggregatedTransferInfo) { + ai.Uploaded += other.Uploaded + ai.Skipped += other.Skipped + ai.Failed += other.Failed + ai.Size += other.Size + ai.Error = errors.Join(ai.Error, other.Error) + if !other.StartedAt.IsZero() && other.StartedAt.Before(ai.StartedAt) { + ai.StartedAt = other.StartedAt + } + if ai.CompletedAt.Before(other.CompletedAt) { + ai.CompletedAt = other.CompletedAt + } +} + +// TransferSnapshot represents state of an account at point in time. +type TransferSnapshot struct { + Name string `json:"name"` + Size int64 `json:"size"` + Bytes int64 `json:"bytes"` + Checked bool `json:"checked"` + StartedAt time.Time `json:"started_at"` + CompletedAt time.Time `json:"completed_at,omitempty"` + Error error `json:"-"` + Group string `json:"group"` +} + +// MarshalJSON implements json.Marshaler interface. +func (as TransferSnapshot) MarshalJSON() ([]byte, error) { + err := "" + if as.Error != nil { + err = as.Error.Error() + } + + type Alias TransferSnapshot + return json.Marshal(&struct { + Error string `json:"error"` + Alias + }{ + Error: err, + Alias: (Alias)(as), + }) +} + +// Transfer keeps track of initiated transfers and provides access to +// accounting functions. +// Transfer needs to be closed on completion. +type Transfer struct { + // these are initialised at creation and may be accessed without locking + stats *StatsInfo + remote string + size int64 + startedAt time.Time + checking bool + + // Protects all below + // + // NB to avoid deadlocks we must release this lock before + // calling any methods on Transfer.stats. This is because + // StatsInfo calls back into Transfer. + mu sync.RWMutex + acc *Account + err error + completedAt time.Time +} + +// newCheckingTransfer instantiates new checking of the object. +func newCheckingTransfer(stats *StatsInfo, obj fs.Object) *Transfer { + return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true) +} + +// newTransfer instantiates new transfer. +func newTransfer(stats *StatsInfo, obj fs.Object) *Transfer { + return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false) +} + +func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool) *Transfer { + tr := &Transfer{ + stats: stats, + remote: remote, + size: size, + startedAt: time.Now(), + checking: checking, + } + stats.AddTransfer(tr) + return tr +} + +// Done ends the transfer. +// Must be called after transfer is finished to run proper cleanups. 
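`TransferSnapshot.MarshalJSON` above uses the classic alias-type trick: the local `Alias` type has the same fields but none of the methods, so marshalling it cannot recurse back into `MarshalJSON`. A self-contained sketch with a hypothetical `Event` type:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// Event is a hypothetical type using the same pattern as TransferSnapshot:
// the error is excluded from default marshalling and rendered as a string.
type Event struct {
	Name string `json:"name"`
	Err  error  `json:"-"`
}

func (e Event) MarshalJSON() ([]byte, error) {
	msg := ""
	if e.Err != nil {
		msg = e.Err.Error()
	}
	type Alias Event // Alias has Event's fields but not its methods
	return json.Marshal(&struct {
		Error string `json:"error"`
		Alias
	}{Error: msg, Alias: Alias(e)})
}

func main() {
	b, _ := json.Marshal(Event{Name: "upload", Err: errors.New("timeout")})
	fmt.Println(string(b)) // {"error":"timeout","name":"upload"}
}
```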
+func (tr *Transfer) Done(ctx context.Context, err error) { + if err != nil { + err = tr.stats.Error(err) + + tr.mu.Lock() + tr.err = err + tr.mu.Unlock() + } + + tr.mu.RLock() + acc := tr.acc + tr.mu.RUnlock() + + ci := fs.GetConfig(ctx) + if acc != nil { + // Close the file if it is still open + if err := acc.Close(); err != nil { + fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err) + } + // Signal done with accounting + acc.Done() + // free the account since we may keep the transfer + acc = nil + } + + tr.mu.Lock() + tr.completedAt = time.Now() + tr.mu.Unlock() + + if tr.checking { + tr.stats.DoneChecking(tr.remote) + } else { + tr.stats.DoneTransferring(tr.remote, err == nil) + } + tr.stats.PruneTransfers() +} + +// Reset allows to switch the Account to another transfer method. +func (tr *Transfer) Reset(ctx context.Context) { + tr.mu.RLock() + acc := tr.acc + tr.acc = nil + tr.mu.RUnlock() + ci := fs.GetConfig(ctx) + + if acc != nil { + if err := acc.Close(); err != nil { + fs.LogLevelPrintf(ci.StatsLogLevel, nil, "can't close account: %+v\n", err) + } + } +} + +// Account returns reader that knows how to keep track of transfer progress. +func (tr *Transfer) Account(ctx context.Context, in io.ReadCloser) *Account { + tr.mu.Lock() + if tr.acc == nil { + tr.acc = newAccountSizeName(ctx, tr.stats, in, tr.size, tr.remote) + } else { + tr.acc.UpdateReader(ctx, in) + } + tr.mu.Unlock() + return tr.acc +} + +// TimeRange returns the time transfer started and ended at. If not completed +// it will return zero time for end time. +func (tr *Transfer) TimeRange() (time.Time, time.Time) { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.startedAt, tr.completedAt +} + +// IsDone returns true if transfer is completed. +func (tr *Transfer) IsDone() bool { + tr.mu.RLock() + defer tr.mu.RUnlock() + return !tr.completedAt.IsZero() +} + +// Snapshot produces stats for this account at point in time. 
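Putting `Done`, `Account` and the constructors together, a hedged sketch of the intended call pattern — `copyObject` and its arguments are assumptions for illustration, not rclone's actual operations code:

```go
package main

import (
	"context"
	"io"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
)

// copyObject sketches the Transfer lifecycle: create the transfer, wrap the
// source reader with Account for progress/bandwidth accounting, then always
// call Done, which records success or failure and prunes old transfers.
func copyObject(ctx context.Context, obj fs.Object, in io.ReadCloser, dst io.Writer) (err error) {
	tr := accounting.Stats(ctx).NewTransfer(obj)
	defer func() { tr.Done(ctx, err) }()

	acc := tr.Account(ctx, in) // accounted reader
	_, err = io.Copy(dst, acc)
	return err
}
```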
+func (tr *Transfer) Snapshot() TransferSnapshot { + tr.mu.RLock() + defer tr.mu.RUnlock() + + var s, b int64 = tr.size, 0 + if tr.acc != nil { + b, s = tr.acc.progress() + } + return TransferSnapshot{ + Name: tr.remote, + Checked: tr.checking, + Size: s, + Bytes: b, + StartedAt: tr.startedAt, + CompletedAt: tr.completedAt, + Error: tr.err, + Group: tr.stats.group, + } +} + +// rcStats returns stats for the transfer suitable for the rc +func (tr *Transfer) rcStats() rc.Params { + return rc.Params{ + "name": tr.remote, // no locking needed to access these + "size": tr.size, + } +} diff --git a/vendor/github.com/rclone/rclone/fs/accounting/transfermap.go b/vendor/github.com/rclone/rclone/fs/accounting/transfermap.go new file mode 100644 index 00000000000..ed64bf36974 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/accounting/transfermap.go @@ -0,0 +1,159 @@ +package accounting + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/rc" +) + +// transferMap holds name to transfer map +type transferMap struct { + mu sync.RWMutex + items map[string]*Transfer + name string +} + +// newTransferMap creates a new empty transfer map of capacity size +func newTransferMap(size int, name string) *transferMap { + return &transferMap{ + items: make(map[string]*Transfer, size), + name: name, + } +} + +// add adds a new transfer to the map +func (tm *transferMap) add(tr *Transfer) { + tm.mu.Lock() + tm.items[tr.remote] = tr + tm.mu.Unlock() +} + +// del removes a transfer from the map by name +func (tm *transferMap) del(remote string) { + tm.mu.Lock() + delete(tm.items, remote) + tm.mu.Unlock() +} + +// merge adds items from another map +func (tm *transferMap) merge(m *transferMap) { + tm.mu.Lock() + m.mu.Lock() + for name, tr := range m.items { + tm.items[name] = tr + } + m.mu.Unlock() + tm.mu.Unlock() +} + +// empty returns whether the map has any items +func (tm *transferMap) empty() bool { + tm.mu.RLock() + defer tm.mu.RUnlock() + return len(tm.items) == 0 +} + +// count returns the number of items in the map +func (tm *transferMap) count() int { + tm.mu.RLock() + defer tm.mu.RUnlock() + return len(tm.items) +} + +// _sortedSlice returns all transfers sorted by start time +// +// Call with mu.RLock held +func (tm *transferMap) _sortedSlice() []*Transfer { + s := make([]*Transfer, 0, len(tm.items)) + for _, tr := range tm.items { + s = append(s, tr) + } + // sort by time first and if equal by name. Note that the relatively + // low time resolution on Windows can cause equal times. + sort.Slice(s, func(i, j int) bool { + a, b := s[i], s[j] + if a.startedAt.Before(b.startedAt) { + return true + } else if !a.startedAt.Equal(b.startedAt) { + return false + } + return a.remote < b.remote + }) + return s +}
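The two-key ordering in `_sortedSlice` is worth seeing in isolation; a generic, self-contained sketch (the `item` type is hypothetical):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

type item struct {
	name    string
	started time.Time
}

func main() {
	t0 := time.Now()
	items := []item{{"b", t0}, {"a", t0}, {"c", t0.Add(-time.Second)}}

	// Same rule as _sortedSlice above: start time first, then name, so
	// equal timestamps (common on Windows) still sort deterministically.
	sort.Slice(items, func(i, j int) bool {
		a, b := items[i], items[j]
		if !a.started.Equal(b.started) {
			return a.started.Before(b.started)
		}
		return a.name < b.name
	})
	fmt.Println(items[0].name, items[1].name, items[2].name) // c a b
}
```

+// String returns a string representation of map items excluding any in +// exclude (if set).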
+func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude *transferMap) string { + tm.mu.RLock() + defer tm.mu.RUnlock() + ci := fs.GetConfig(ctx) + stringList := make([]string, 0, len(tm.items)) + for _, tr := range tm._sortedSlice() { + if exclude != nil { + exclude.mu.RLock() + _, found := exclude.items[tr.remote] + exclude.mu.RUnlock() + if found { + continue + } + } + var out string + if acc := progress.get(tr.remote); acc != nil { + out = acc.String() + } else { + out = fmt.Sprintf("%*s: %s", + ci.StatsFileNameLength, + shortenName(tr.remote, ci.StatsFileNameLength), + tm.name, + ) + } + stringList = append(stringList, " * "+out) + } + return strings.Join(stringList, "\n") +} + +// progress returns total bytes read as well as the size. +func (tm *transferMap) progress(stats *StatsInfo) (totalBytes, totalSize int64) { + tm.mu.RLock() + defer tm.mu.RUnlock() + for name := range tm.items { + if acc := stats.inProgress.get(name); acc != nil { + bytes, size := acc.progress() + if size >= 0 && bytes >= 0 { + totalBytes += bytes + totalSize += size + } + } + } + return totalBytes, totalSize +} + +// remotes returns a []string of the remote names for the transferMap +func (tm *transferMap) remotes() (c []string) { + tm.mu.RLock() + defer tm.mu.RUnlock() + for _, tr := range tm._sortedSlice() { + c = append(c, tr.remote) + } + return c +} + +// rcStats returns a []rc.Params of the stats for the transferMap +func (tm *transferMap) rcStats(progress *inProgress) (t []rc.Params) { + tm.mu.RLock() + defer tm.mu.RUnlock() + for _, tr := range tm._sortedSlice() { + if acc := progress.get(tr.remote); acc != nil { + t = append(t, acc.rcStats()) + } else { + t = append(t, tr.rcStats()) + } + } + return t +} diff --git a/vendor/github.com/rclone/rclone/fs/asyncreader/asyncreader.go b/vendor/github.com/rclone/rclone/fs/asyncreader/asyncreader.go new file mode 100644 index 00000000000..92d389002ec --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/asyncreader/asyncreader.go @@ -0,0 +1,365 @@ +// Package asyncreader provides an asynchronous reader which reads +// independently of write +package asyncreader + +import ( + "context" + "io" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/lib/pool" + "github.com/rclone/rclone/lib/readers" +) + +const ( + // BufferSize is the default size of the async buffer + BufferSize = 1024 * 1024 + softStartInitial = 4 * 1024 + bufferCacheSize = 64 // max number of buffers to keep in cache + bufferCacheFlushTime = 5 * time.Second // flush the cached buffers after this long +) + +// ErrorStreamAbandoned is returned when the input is closed before the end of the stream +var ErrorStreamAbandoned = errors.New("stream abandoned") + +// AsyncReader will do async read-ahead from the input reader +// and make the data available as an io.Reader. +// This should be fully transparent, except that once an error +// has been returned from the Reader, it will not recover. 
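Before the implementation, a minimal usage sketch of the API defined below — the file path is hypothetical, and `Close` also closes the wrapped input, per its doc further down:

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/rclone/rclone/fs/asyncreader"
)

func main() {
	f, err := os.Open("/tmp/example.dat") // hypothetical input file
	if err != nil {
		panic(err)
	}

	// Wrap the file in a 4-buffer read-ahead; reading starts immediately
	// in a background goroutine.
	ar, err := asyncreader.New(context.Background(), f, 4)
	if err != nil {
		panic(err)
	}
	defer func() { _ = ar.Close() }() // also closes f

	_, _ = io.Copy(io.Discard, ar)
}
```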
+type AsyncReader struct { + in io.ReadCloser // Input reader + ready chan *buffer // Buffers ready to be handed to the reader + token chan struct{} // Tokens which allow a buffer to be taken + exit chan struct{} // Closes when finished + buffers int // Number of buffers + err error // If an error has occurred it is here + cur *buffer // Current buffer being served + exited chan struct{} // Channel is closed when the async reader shuts down + size int // size of buffer to use + closed bool // whether we have closed the underlying stream + mu sync.Mutex // lock for Read/WriteTo/Abandon/Close + ci *fs.ConfigInfo // for reading config +} + +// New returns a reader that will asynchronously read from +// the supplied Reader into a number of buffers each of size BufferSize. +// It will start reading from the input at once, maybe even before this +// function has returned. +// The input can be read from the returned reader. +// When done use Close to release the buffers and close the supplied input. +func New(ctx context.Context, rd io.ReadCloser, buffers int) (*AsyncReader, error) { + if buffers <= 0 { + return nil, errors.New("number of buffers too small") + } + if rd == nil { + return nil, errors.New("nil reader supplied") + } + a := &AsyncReader{ + ci: fs.GetConfig(ctx), + } + a.init(rd, buffers) + return a, nil +} + +func (a *AsyncReader) init(rd io.ReadCloser, buffers int) { + a.in = rd + a.ready = make(chan *buffer, buffers) + a.token = make(chan struct{}, buffers) + a.exit = make(chan struct{}) + a.exited = make(chan struct{}) + a.buffers = buffers + a.cur = nil + a.size = softStartInitial + + // Create tokens + for i := 0; i < buffers; i++ { + a.token <- struct{}{} + } + + // Start async reader + go func() { + // Ensure that when we exit this is signalled. + defer close(a.exited) + defer close(a.ready) + for { + select { + case <-a.token: + b := a.getBuffer() + if a.size < BufferSize { + b.buf = b.buf[:a.size] + a.size <<= 1 + } + err := b.read(a.in) + a.ready <- b + if err != nil { + return + } + case <-a.exit: + return + } + } + }() +} + +// bufferPool is a global pool of buffers +var bufferPool *pool.Pool +var bufferPoolOnce sync.Once + +// return the buffer to the pool (clearing it) +func (a *AsyncReader) putBuffer(b *buffer) { + bufferPool.Put(b.buf) + b.buf = nil +} + +// get a buffer from the pool +func (a *AsyncReader) getBuffer() *buffer { + bufferPoolOnce.Do(func() { + // Initialise the buffer pool when used + bufferPool = pool.New(bufferCacheFlushTime, BufferSize, bufferCacheSize, a.ci.UseMmap) + }) + return &buffer{ + buf: bufferPool.Get(), + } +} + +// fill will ensure that the current buffer a.cur holds unread data, +// fetching the next ready buffer when the current one is exhausted. +func (a *AsyncReader) fill() (err error) { + if a.cur.isEmpty() { + if a.cur != nil { + a.putBuffer(a.cur) + a.token <- struct{}{} + a.cur = nil + } + b, ok := <-a.ready + if !ok { + // Return an error to show fill failed + if a.err == nil { + return ErrorStreamAbandoned + } + return a.err + } + a.cur = b + } + return nil +} + +// Read will return the next available data. +func (a *AsyncReader) Read(p []byte) (n int, err error) { + a.mu.Lock() + defer a.mu.Unlock() + + // Swap buffer and maybe return error + err = a.fill() + if err != nil { + return 0, err + } + + // Copy what we can + n = copy(p, a.cur.buffer()) + a.cur.increment(n) + + // If at end of buffer, return any error, if present + if a.cur.isEmpty() { + a.err = a.cur.err + return n, a.err + } + return n, nil +} + +// WriteTo writes data to w until there's no more data to write or when an error occurs.
+// The return value n is the number of bytes written. +// Any error encountered during the write is also returned. +func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) { + a.mu.Lock() + defer a.mu.Unlock() + + n = 0 + for { + err = a.fill() + if err == io.EOF { + return n, nil + } + if err != nil { + return n, err + } + n2, err := w.Write(a.cur.buffer()) + a.cur.increment(n2) + n += int64(n2) + if err != nil { + return n, err + } + if a.cur.err == io.EOF { + a.err = a.cur.err + return n, err + } + if a.cur.err != nil { + a.err = a.cur.err + return n, a.cur.err + } + } +} + +// SkipBytes will try to seek 'skip' bytes relative to the current position. +// On success it returns true. If 'skip' is outside the current buffer data or +// an error occurs, Abandon is called and false is returned. +func (a *AsyncReader) SkipBytes(skip int) (ok bool) { + a.mu.Lock() + defer func() { + a.mu.Unlock() + if !ok { + a.Abandon() + } + }() + + if a.err != nil { + return false + } + if skip < 0 { + // seek backwards if skip is inside current buffer + if a.cur != nil && a.cur.offset+skip >= 0 { + a.cur.offset += skip + return true + } + return false + } + // early return if skip is past the maximum buffer capacity + if skip >= (len(a.ready)+1)*BufferSize { + return false + } + + refillTokens := 0 + for { + if a.cur.isEmpty() { + if a.cur != nil { + a.putBuffer(a.cur) + refillTokens++ + a.cur = nil + } + select { + case b, ok := <-a.ready: + if !ok { + return false + } + a.cur = b + default: + return false + } + } + + n := len(a.cur.buffer()) + if n > skip { + n = skip + } + a.cur.increment(n) + skip -= n + if skip == 0 { + for ; refillTokens > 0; refillTokens-- { + a.token <- struct{}{} + } + // If at end of buffer, store any error, if present + if a.cur.isEmpty() && a.cur.err != nil { + a.err = a.cur.err + } + return true + } + if a.cur.err != nil { + a.err = a.cur.err + return false + } + } +} + +// StopBuffering will ensure that the underlying async reader is shut +// down so no more is read from the input. +// +// This does not free the memory so Abandon() or Close() need to be +// called on the input. +// +// This does not wait for Read/WriteTo to complete so can be called +// concurrently to those. +func (a *AsyncReader) StopBuffering() { + select { + case <-a.exit: + // Do nothing if reader routine already exited + return + default: + } + // Close and wait for go routine + close(a.exit) + <-a.exited +} + +// Abandon will ensure that the underlying async reader is shut down +// and memory is returned. It does everything but close the input. +// +// It will NOT close the input supplied on New. +func (a *AsyncReader) Abandon() { + a.StopBuffering() + // take the lock to wait for Read/WriteTo to complete + a.mu.Lock() + defer a.mu.Unlock() + // Return any outstanding buffers to the Pool + if a.cur != nil { + a.putBuffer(a.cur) + a.cur = nil + } + for b := range a.ready { + a.putBuffer(b) + } +} + +// Close will ensure that the underlying async reader is shut down. +// It will also close the input supplied on New. +func (a *AsyncReader) Close() (err error) { + a.Abandon() + if a.closed { + return nil + } + a.closed = true + return a.in.Close() +} + +// Internal buffer +// If an error is present, it must be returned +// once all buffer content has been served. 
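The buffer/token scheme used throughout `AsyncReader` reduces to a few lines; a standalone sketch of the same producer/consumer shape (illustrative only, much simplified from the real thing):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	src := strings.NewReader("some data to stream in small chunks")

	// Tokens bound how many buffers are in flight; the producer blocks on
	// a token, fills a buffer, and posts it to ready for the consumer.
	const buffers = 4
	token := make(chan struct{}, buffers)
	ready := make(chan []byte, buffers)
	for i := 0; i < buffers; i++ {
		token <- struct{}{}
	}

	go func() {
		defer close(ready) // signals end of stream, like close(a.ready)
		for range token {
			buf := make([]byte, 8)
			n, err := src.Read(buf)
			if n > 0 {
				ready <- buf[:n]
			}
			if err != nil { // io.EOF ends the producer
				return
			}
		}
	}()

	for chunk := range ready {
		fmt.Printf("%q ", chunk)
		token <- struct{}{} // hand the slot back, like putBuffer + token
	}
}
```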
+type buffer struct { + buf []byte + err error + offset int +} + +// isEmpty returns true if the offset is at the end of the buffer +func (b *buffer) isEmpty() bool { + if b == nil { + return true + } + if len(b.buf)-b.offset <= 0 { + return true + } + return false +} + +// read into start of the buffer from the supplied reader, +// resets the offset and updates the size of the buffer. +// Any error encountered during the read is returned. +func (b *buffer) read(rd io.Reader) error { + var n int + n, b.err = readers.ReadFill(rd, b.buf) + b.buf = b.buf[0:n] + b.offset = 0 + return b.err +} + +// Return the buffer at current offset +func (b *buffer) buffer() []byte { + return b.buf[b.offset:] +} + +// increment the offset +func (b *buffer) increment(n int) { + b.offset += n +} diff --git a/vendor/github.com/rclone/rclone/fs/bwtimetable.go b/vendor/github.com/rclone/rclone/fs/bwtimetable.go new file mode 100644 index 00000000000..86a330de5a7 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/bwtimetable.go @@ -0,0 +1,266 @@ +package fs + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" +) + +// BwPair represents an upload and a download bandwidth +type BwPair struct { + Tx SizeSuffix // upload bandwidth + Rx SizeSuffix // download bandwidth +} + +// String returns a printable representation of a BwPair +func (bp *BwPair) String() string { + var out strings.Builder + out.WriteString(bp.Tx.String()) + if bp.Rx != bp.Tx { + out.WriteRune(':') + out.WriteString(bp.Rx.String()) + } + return out.String() +} + +// Set the bandwidth from a string which is either +// SizeSuffix or SizeSuffix:SizeSuffix (for tx:rx bandwidth) +func (bp *BwPair) Set(s string) (err error) { + colon := strings.Index(s, ":") + stx, srx := s, "" + if colon >= 0 { + stx, srx = s[:colon], s[colon+1:] + } + err = bp.Tx.Set(stx) + if err != nil { + return err + } + if colon < 0 { + bp.Rx = bp.Tx + } else { + err = bp.Rx.Set(srx) + if err != nil { + return err + } + } + return nil +} + +// IsSet returns true if either of the bandwidth limits are set +func (bp *BwPair) IsSet() bool { + return bp.Tx > 0 || bp.Rx > 0 +} + +// BwTimeSlot represents a bandwidth configuration at a point in time. +type BwTimeSlot struct { + DayOfTheWeek int + HHMM int + Bandwidth BwPair +} + +// BwTimetable contains all configured time slots. +type BwTimetable []BwTimeSlot
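A usage sketch for the two types just defined — the values are illustrative, and `"off"` parses to an unset bandwidth as the `Set` doc below describes:

```go
package main

import (
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	// Upload:download pair — "10M:1M" means 10 MiB/s up, 1 MiB/s down.
	var bp fs.BwPair
	if err := bp.Set("10M:1M"); err != nil {
		panic(err)
	}
	fmt.Println(bp.IsSet(), bp.String())

	// A timetable: 10 MiB/s from Monday 09:00, unlimited from Friday 18:00.
	var tt fs.BwTimetable
	if err := tt.Set("Mon-09:00,10M Fri-18:00,off"); err != nil {
		panic(err)
	}
	fmt.Println(tt.String())
}
```

+// String returns a printable representation of BwTimetable.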
+func (x BwTimetable) String() string { + var out strings.Builder + bwOnly := len(x) == 1 && x[0].DayOfTheWeek == 0 && x[0].HHMM == 0 + for _, ts := range x { + if out.Len() != 0 { + out.WriteRune(' ') + } + if !bwOnly { + _, _ = fmt.Fprintf(&out, "%s-%02d:%02d,", time.Weekday(ts.DayOfTheWeek).String()[:3], ts.HHMM/100, ts.HHMM%100) + } + out.WriteString(ts.Bandwidth.String()) + } + return out.String() +} + +// Basic hour format checking +func validateHour(HHMM string) error { + if len(HHMM) != 5 { + return errors.Errorf("invalid time specification (hh:mm): %q", HHMM) + } + hh, err := strconv.Atoi(HHMM[0:2]) + if err != nil { + return errors.Errorf("invalid hour in time specification %q: %v", HHMM, err) + } + if hh < 0 || hh > 23 { + return errors.Errorf("invalid hour (must be between 00 and 23): %d", hh) + } + mm, err := strconv.Atoi(HHMM[3:]) + if err != nil { + return errors.Errorf("invalid minute in time specification: %q: %v", HHMM, err) + } + if mm < 0 || mm > 59 { + return errors.Errorf("invalid minute (must be between 00 and 59): %d", mm) + } + return nil +} + +// Basic weekday format checking +func parseWeekday(dayOfWeek string) (int, error) { + dayOfWeek = strings.ToLower(dayOfWeek) + if dayOfWeek == "sun" || dayOfWeek == "sunday" { + return 0, nil + } + if dayOfWeek == "mon" || dayOfWeek == "monday" { + return 1, nil + } + if dayOfWeek == "tue" || dayOfWeek == "tuesday" { + return 2, nil + } + if dayOfWeek == "wed" || dayOfWeek == "wednesday" { + return 3, nil + } + if dayOfWeek == "thu" || dayOfWeek == "thursday" { + return 4, nil + } + if dayOfWeek == "fri" || dayOfWeek == "friday" { + return 5, nil + } + if dayOfWeek == "sat" || dayOfWeek == "saturday" { + return 6, nil + } + return 0, errors.Errorf("invalid weekday: %q", dayOfWeek) +} + +// Set the bandwidth timetable. +func (x *BwTimetable) Set(s string) error { + // The timetable is formatted as: + // "dayOfWeek-hh:mm,bandwidth dayOfWeek-hh:mm,bandwidth..." ex: "Mon-10:00,10G Mon-11:30,1G Tue-18:00,off" + // If only a single bandwidth identifier is provided, we assume constant bandwidth. + + if len(s) == 0 { + return errors.New("empty string") + } + // Single value without time specification. + if !strings.Contains(s, " ") && !strings.Contains(s, ",") { + ts := BwTimeSlot{} + if err := ts.Bandwidth.Set(s); err != nil { + return err + } + ts.DayOfTheWeek = 0 + ts.HHMM = 0 + *x = BwTimetable{ts} + return nil + } + + for _, tok := range strings.Split(s, " ") { + tv := strings.Split(tok, ",") + + // Format must be dayOfWeek-HH:MM,BW + if len(tv) != 2 { + return errors.Errorf("invalid time/bandwidth specification: %q", tok) + } + + weekday := 0 + HHMM := "" + if !strings.Contains(tv[0], "-") { + HHMM = tv[0] + if err := validateHour(HHMM); err != nil { + return err + } + for i := 0; i < 7; i++ { + hh, _ := strconv.Atoi(HHMM[0:2]) + mm, _ := strconv.Atoi(HHMM[3:]) + ts := BwTimeSlot{ + DayOfTheWeek: i, + HHMM: (hh * 100) + mm, + } + if err := ts.Bandwidth.Set(tv[1]); err != nil { + return err + } + *x = append(*x, ts) + } + } else { + timespec := strings.Split(tv[0], "-") + if len(timespec) != 2 { + return errors.Errorf("invalid time specification: %q", tv[0]) + } + var err error + weekday, err = parseWeekday(timespec[0]) + if err != nil { + return err + } + HHMM = timespec[1] + if err := validateHour(HHMM); err != nil { + return err + } + + hh, _ := strconv.Atoi(HHMM[0:2]) + mm, _ := strconv.Atoi(HHMM[3:]) + ts := BwTimeSlot{ + DayOfTheWeek: weekday, + HHMM: (hh * 100) + mm, + } + // Bandwidth limit for this time slot.
+ if err := ts.Bandwidth.Set(tv[1]); err != nil { + return err + } + *x = append(*x, ts) + } + } + return nil +} + +// Difference in minutes between lateDayOfWeekHHMM and earlyDayOfWeekHHMM +func timeDiff(lateDayOfWeekHHMM int, earlyDayOfWeekHHMM int) int { + + lateTimeMinutes := (lateDayOfWeekHHMM / 10000) * 24 * 60 + lateTimeMinutes += ((lateDayOfWeekHHMM / 100) % 100) * 60 + lateTimeMinutes += lateDayOfWeekHHMM % 100 + + earlyTimeMinutes := (earlyDayOfWeekHHMM / 10000) * 24 * 60 + earlyTimeMinutes += ((earlyDayOfWeekHHMM / 100) % 100) * 60 + earlyTimeMinutes += earlyDayOfWeekHHMM % 100 + + return lateTimeMinutes - earlyTimeMinutes +} + +// LimitAt returns a BwTimeSlot for the time requested. +func (x BwTimetable) LimitAt(tt time.Time) BwTimeSlot { + // If the timetable is empty, we return an unlimited BwTimeSlot starting at Sunday midnight. + if len(x) == 0 { + return BwTimeSlot{Bandwidth: BwPair{-1, -1}} + } + + dayOfWeekHHMM := int(tt.Weekday())*10000 + tt.Hour()*100 + tt.Minute() + + // By default, we return the last element in the timetable. This + // satisfies two conditions: 1) If there's only one element it + // will always be selected, and 2) The last element of the table + // will "wrap around" until overridden by an earlier time slot. + ret := x[len(x)-1] + mindif := 0 + first := true + + // Look for most recent time slot. + for _, ts := range x { + // Ignore the past + if dayOfWeekHHMM < (ts.DayOfTheWeek*10000)+ts.HHMM { + continue + } + dif := timeDiff(dayOfWeekHHMM, (ts.DayOfTheWeek*10000)+ts.HHMM) + if first { + mindif = dif + first = false + } + if dif <= mindif { + mindif = dif + ret = ts + } + } + + return ret +} + +// Type of the value +func (x BwTimetable) Type() string { + return "BwTimetable" +} diff --git a/vendor/github.com/rclone/rclone/fs/cache/cache.go b/vendor/github.com/rclone/rclone/fs/cache/cache.go new file mode 100644 index 00000000000..8b8d5eabc97 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/cache/cache.go @@ -0,0 +1,117 @@ +// Package cache implements the Fs cache +package cache + +import ( + "context" + "runtime" + "sync" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/lib/cache" +) + +var ( + c = cache.NewPermanentCache() + mu sync.Mutex // mutex to protect remap + remap = map[string]string{} // map user supplied names to canonical names +) + +// Canonicalize looks up fsString in the mapping from user supplied +// names to canonical names and returns the canonical form +func Canonicalize(fsString string) string { + mu.Lock() + canonicalName, ok := remap[fsString] + mu.Unlock() + if !ok { + return fsString + } + fs.Debugf(nil, "fs cache: switching user supplied name %q for canonical name %q", fsString, canonicalName) + return canonicalName +} + +// Put in a mapping from fsString => canonicalName if they are different +func addMapping(fsString, canonicalName string) { + if canonicalName == fsString { + return + } + mu.Lock() + remap[fsString] = canonicalName + mu.Unlock() +} + +// GetFn gets an fs.Fs named fsString either from the cache or creates +// it afresh with the create function +func GetFn(ctx context.Context, fsString string, create func(ctx context.Context, fsString string) (fs.Fs, error)) (f fs.Fs, err error) { + fsString = Canonicalize(fsString) + created := false + value, err := c.Get(fsString, func(fsString string) (f interface{}, ok bool, err error) { + f, err = create(ctx, fsString) + ok = err == nil || err == fs.ErrorIsFile + created = ok + return f, ok, err + }) +
if err != nil && err != fs.ErrorIsFile { + return nil, err + } + f = value.(fs.Fs) + // Check we stored the Fs at the canonical name + if created { + canonicalName := fs.ConfigString(f) + if canonicalName != fsString { + // Note that if err == fs.ErrorIsFile at this moment + // then we can't rename the remote as it will have the + // wrong error status, we need to add a new one. + if err == nil { + fs.Debugf(nil, "fs cache: renaming cache item %q to be canonical %q", fsString, canonicalName) + value, found := c.Rename(fsString, canonicalName) + if found { + f = value.(fs.Fs) + } + addMapping(fsString, canonicalName) + } else { + fs.Debugf(nil, "fs cache: adding new entry for parent of %q, %q", fsString, canonicalName) + Put(canonicalName, f) + } + } + } + return f, err +} + +// Pin f into the cache until Unpin is called +func Pin(f fs.Fs) { + c.Pin(fs.ConfigString(f)) +} + +// PinUntilFinalized pins f into the cache until x is garbage collected +// +// This calls runtime.SetFinalizer on x so it shouldn't have a +// finalizer already. +func PinUntilFinalized(f fs.Fs, x interface{}) { + Pin(f) + runtime.SetFinalizer(x, func(_ interface{}) { + Unpin(f) + }) + +} + +// Unpin f from the cache +func Unpin(f fs.Fs) { + c.Unpin(fs.ConfigString(f)) +} + +// Get gets an fs.Fs named fsString either from the cache or creates it afresh +func Get(ctx context.Context, fsString string) (f fs.Fs, err error) { + return GetFn(ctx, fsString, fs.NewFs) +} + +// Put puts an fs.Fs named fsString into the cache +func Put(fsString string, f fs.Fs) { + canonicalName := fs.ConfigString(f) + c.Put(canonicalName, f) + addMapping(fsString, canonicalName) +} + +// Clear removes everything from the cache +func Clear() { + c.Clear() +} diff --git a/vendor/github.com/rclone/rclone/fs/config.go b/vendor/github.com/rclone/rclone/fs/config.go new file mode 100644 index 00000000000..22e3682f995 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/config.go @@ -0,0 +1,205 @@ +package fs + +import ( + "context" + "net" + "strings" + "time" + + "github.com/pkg/errors" +) + +// Global +var ( + // globalConfig for rclone + globalConfig = NewConfig() + + // Read a value from the config file + // + // This is a function pointer to decouple the config + // implementation from the fs + ConfigFileGet = func(section, key string) (string, bool) { return "", false } + + // Set a value into the config file and persist it + // + // This is a function pointer to decouple the config + // implementation from the fs + ConfigFileSet = func(section, key, value string) (err error) { + return errors.New("no config file set handler") + } + + // CountError counts an error. If any errors have been + // counted then rclone will exit with a non-zero error code.
+ // + // This is a function pointer to decouple the config + // implementation from the fs + CountError = func(err error) error { return err } + + // ConfigProvider is the config key used for provider options + ConfigProvider = "provider" +) + +// ConfigInfo is filesystem config options +type ConfigInfo struct { + LogLevel LogLevel `yaml:"log_level"` + StatsLogLevel LogLevel `yaml:"stats_log_level"` + UseJSONLog bool `yaml:"use_json_log"` + DryRun bool `yaml:"dry_run"` + Interactive bool `yaml:"interactive"` + CheckSum bool `yaml:"check_sum"` + SizeOnly bool `yaml:"size_only"` + IgnoreTimes bool `yaml:"ignore_times"` + IgnoreExisting bool `yaml:"ignore_existing"` + IgnoreErrors bool `yaml:"ignore_errors"` + ModifyWindow time.Duration `yaml:"modify_window"` + Checkers int `yaml:"checkers"` + Transfers int `yaml:"transfers"` + ConnectTimeout time.Duration `yaml:"connect_timeout"` // Connect timeout + Timeout time.Duration `yaml:"timeout"` // Data channel timeout + ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout"` + Dump DumpFlags `yaml:"dump"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` // Skip server certificate verification + DeleteMode DeleteMode `yaml:"delete_mode"` + MaxDelete int64 `yaml:"max_delete"` + TrackRenames bool `yaml:"track_renames"` // Track file renames + TrackRenamesStrategy string `yaml:"track_renames_strategy"` // Comma separated list of strategies used to track renames + LowLevelRetries int `yaml:"low_level_retries"` + UpdateOlder bool `yaml:"update_older"` // Skip files that are newer on the destination + NoGzip bool `yaml:"no_gzip"` // Disable compression + MaxDepth int `yaml:"max_depth"` + IgnoreSize bool `yaml:"ignore_size"` + IgnoreChecksum bool `yaml:"ignore_checksum"` + IgnoreCaseSync bool `yaml:"ignore_case_sync"` + NoTraverse bool `yaml:"no_traverse"` + CheckFirst bool `yaml:"check_first"` + NoCheckDest bool `yaml:"no_check_dest"` + NoUnicodeNormalization bool `yaml:"no_unicode_normalization"` + NoUpdateModTime bool `yaml:"no_update_mod_time"` + DataRateUnit string `yaml:"data_rate_unit"` + CompareDest string `yaml:"compare_dest"` + CopyDest string `yaml:"copy_dest"` + BackupDir string `yaml:"backup_dir"` + Suffix string `yaml:"suffix"` + SuffixKeepExtension bool `yaml:"suffix_keep_extension"` + UseListR bool `yaml:"use_list_r"` + BufferSize SizeSuffix `yaml:"buffer_size"` + BwLimit BwTimetable `yaml:"bw_limit"` + BwLimitFile BwTimetable `yaml:"bw_limit_file"` + TPSLimit float64 `yaml:"tps_limit"` + TPSLimitBurst int `yaml:"tps_limit_burst"` + BindAddr net.IP `yaml:"bind_addr"` + DisableFeatures []string `yaml:"disable_features"` + UserAgent string `yaml:"user_agent"` + Immutable bool `yaml:"immutable"` + AutoConfirm bool `yaml:"auto_confirm"` + StreamingUploadCutoff SizeSuffix `yaml:"streaming_upload_cutoff"` + StatsFileNameLength int `yaml:"stats_file_name_length"` + AskPassword bool `yaml:"ask_password"` + PasswordCommand SpaceSepList `yaml:"password_command"` + UseServerModTime bool `yaml:"use_server_mod_time"` + MaxTransfer SizeSuffix `yaml:"max_transfer"` + MaxDuration time.Duration `yaml:"max_duration"` + CutoffMode CutoffMode `yaml:"cutoff_mode"` + MaxBacklog int `yaml:"max_backlog"` + MaxStatsGroups int `yaml:"max_stats_groups"` + StatsOneLine bool `yaml:"stats_one_line"` + StatsOneLineDate bool `yaml:"stats_one_line_date"` // If we want a date prefix at all + StatsOneLineDateFormat string `yaml:"stats_one_line_date_format"` // If we want to customize the prefix + ErrorOnNoTransfer bool `yaml:"error_on_no_transfer"` // 
Set appropriate exit code if no files transferred + Progress bool `yaml:"progress"` + ProgressTerminalTitle bool `yaml:"progress_terminal_title"` + Cookie bool `yaml:"cookie"` + UseMmap bool `yaml:"use_mmap"` + CaCert string `yaml:"ca_cert"` // Client Side CA + ClientCert string `yaml:"client_cert"` // Client Side Cert + ClientKey string `yaml:"client_key"` // Client Side Key + MultiThreadCutoff SizeSuffix `yaml:"multi_thread_cutoff"` + MultiThreadStreams int `yaml:"multi_thread_streams"` + MultiThreadSet bool `yaml:"multi_thread_set"` // whether MultiThreadStreams was set (set in fs/config/configflags) + OrderBy string `yaml:"order_by"` // instructions on how to order the transfer + UploadHeaders []*HTTPOption `yaml:"upload_headers"` + DownloadHeaders []*HTTPOption `yaml:"download_headers"` + Headers []*HTTPOption `yaml:"headers"` + RefreshTimes bool `yaml:"refresh_times"` + NoConsole bool `yaml:"no_console"` +} + +// NewConfig creates a new config with everything set to the default +// value. These are the ultimate defaults and are overridden by the +// config module. +func NewConfig() *ConfigInfo { + c := new(ConfigInfo) + + // Set any values which aren't the zero for the type + c.LogLevel = LogLevelNotice + c.StatsLogLevel = LogLevelInfo + c.ModifyWindow = time.Nanosecond + c.Checkers = 8 + c.Transfers = 4 + c.ConnectTimeout = 60 * time.Second + c.Timeout = 5 * 60 * time.Second + c.ExpectContinueTimeout = 1 * time.Second + c.DeleteMode = DeleteModeDefault + c.MaxDelete = -1 + c.LowLevelRetries = 10 + c.MaxDepth = -1 + c.DataRateUnit = "bytes" + c.BufferSize = SizeSuffix(16 << 20) + c.UserAgent = "rclone/" + Version + c.StreamingUploadCutoff = SizeSuffix(100 * 1024) + c.MaxStatsGroups = 1000 + c.StatsFileNameLength = 45 + c.AskPassword = true + c.TPSLimitBurst = 1 + c.MaxTransfer = -1 + c.MaxBacklog = 10000 + // We do not want to set the default here. We use this variable being empty as part of the fall-through of options. + // c.StatsOneLineDateFormat = "2006/01/02 15:04:05 - " + c.MultiThreadCutoff = SizeSuffix(250 * 1024 * 1024) + c.MultiThreadStreams = 4 + + c.TrackRenamesStrategy = "hash" + + return c +} + +type configContextKeyType struct{} + +// Context key for config +var configContextKey = configContextKeyType{} + +// GetConfig returns the global or context sensitive context +func GetConfig(ctx context.Context) *ConfigInfo { + if ctx == nil { + return globalConfig + } + c := ctx.Value(configContextKey) + if c == nil { + return globalConfig + } + return c.(*ConfigInfo) +} + +// AddConfig returns a mutable config structure based on a shallow +// copy of that found in ctx and returns a new context with that added +// to it. +func AddConfig(ctx context.Context) (context.Context, *ConfigInfo) { + c := GetConfig(ctx) + cCopy := new(ConfigInfo) + *cCopy = *c + newCtx := context.WithValue(ctx, configContextKey, cCopy) + return newCtx, cCopy +} + +// ConfigToEnv converts a config section and name, e.g. ("myremote", +// "ignore-size") into an environment name +// "RCLONE_CONFIG_MYREMOTE_IGNORE_SIZE" +func ConfigToEnv(section, name string) string { + return "RCLONE_CONFIG_" + strings.ToUpper(strings.Replace(section+"_"+name, "-", "_", -1)) +} + +// OptionToEnv converts an option name, e.g. 
"ignore-size" into an +// environment name "RCLONE_IGNORE_SIZE" +func OptionToEnv(name string) string { + return "RCLONE_" + strings.ToUpper(strings.Replace(name, "-", "_", -1)) +} diff --git a/vendor/github.com/rclone/rclone/fs/config/config.go b/vendor/github.com/rclone/rclone/fs/config/config.go new file mode 100644 index 00000000000..5e70ea7f773 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/config/config.go @@ -0,0 +1,1536 @@ +// Package config reads, writes and edits the config file and deals with command line flags +package config + +import ( + "bufio" + "bytes" + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + mathrand "math/rand" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/Unknwon/goconfig" + "github.com/mitchellh/go-homedir" + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/fs/driveletter" + "github.com/rclone/rclone/fs/fspath" + "github.com/rclone/rclone/fs/rc" + "github.com/rclone/rclone/lib/random" + "github.com/rclone/rclone/lib/terminal" + "golang.org/x/crypto/nacl/secretbox" + "golang.org/x/text/unicode/norm" +) + +const ( + configFileName = "rclone.conf" + hiddenConfigFileName = "." + configFileName + + // ConfigToken is the key used to store the token under + ConfigToken = "token" + + // ConfigClientID is the config key used to store the client id + ConfigClientID = "client_id" + + // ConfigClientSecret is the config key used to store the client secret + ConfigClientSecret = "client_secret" + + // ConfigAuthURL is the config key used to store the auth server endpoint + ConfigAuthURL = "auth_url" + + // ConfigTokenURL is the config key used to store the token server endpoint + ConfigTokenURL = "token_url" + + // ConfigEncoding is the config key to change the encoding for a backend + ConfigEncoding = "encoding" + + // ConfigEncodingHelp is the help for ConfigEncoding + ConfigEncodingHelp = "This sets the encoding for the backend.\n\nSee: the [encoding section in the overview](/overview/#encoding) for more info." + + // ConfigAuthorize indicates that we just want "rclone authorize" + ConfigAuthorize = "config_authorize" + + // ConfigAuthNoBrowser indicates that we do not want to open browser + ConfigAuthNoBrowser = "config_auth_no_browser" +) + +// Global +var ( + // configFile is the global config data structure. Don't read it directly, use getConfigData() + configFile *goconfig.ConfigFile + + // ConfigPath points to the config file + ConfigPath = makeConfigPath() + + // CacheDir points to the cache directory. Users of this + // should make a subdirectory and use MkdirAll() to create it + // and any parents. + CacheDir = makeCacheDir() + + // Key to use for password en/decryption. + // When nil, no encryption will be used for saving. + configKey []byte + + // output of prompt for password + PasswordPromptOutput = os.Stderr + + // If set to true, the configKey is obscured with obscure.Obscure and saved to a temp file when it is + // calculated from the password. The path of that temp file is then written to the environment variable + // `_RCLONE_CONFIG_KEY_FILE`. If `_RCLONE_CONFIG_KEY_FILE` is present, password prompt is skipped and `RCLONE_CONFIG_PASS` ignored. 
+ // For security reasons, the temp file is deleted once the configKey is successfully loaded. + // This can be used to pass the configKey to a child process. + PassConfigKeyForDaemonization = false + + // Password can be used to configure the random password generator + Password = random.Password +) + +func init() { + // Set the function pointers up in fs + fs.ConfigFileGet = FileGetFlag + fs.ConfigFileSet = SetValueAndSave +} + +func getConfigData() *goconfig.ConfigFile { + if configFile == nil { + LoadConfig(context.Background()) + } + return configFile +} + +// Return the path to the configuration file +func makeConfigPath() string { + + // Use rclone.conf from rclone executable directory if already existing + exe, err := os.Executable() + if err == nil { + exedir := filepath.Dir(exe) + cfgpath := filepath.Join(exedir, configFileName) + _, err := os.Stat(cfgpath) + if err == nil { + return cfgpath + } + } + + // Find user's home directory + homeDir, err := homedir.Dir() + + // Find user's configuration directory. + // Prefer XDG config path, with fallback to $HOME/.config. + // See XDG Base Directory specification + // https://specifications.freedesktop.org/basedir-spec/latest/), + xdgdir := os.Getenv("XDG_CONFIG_HOME") + var cfgdir string + if xdgdir != "" { + // User's configuration directory for rclone is $XDG_CONFIG_HOME/rclone + cfgdir = filepath.Join(xdgdir, "rclone") + } else if homeDir != "" { + // User's configuration directory for rclone is $HOME/.config/rclone + cfgdir = filepath.Join(homeDir, ".config", "rclone") + } + + // Use rclone.conf from user's configuration directory if already existing + var cfgpath string + if cfgdir != "" { + cfgpath = filepath.Join(cfgdir, configFileName) + _, err := os.Stat(cfgpath) + if err == nil { + return cfgpath + } + } + + // Use .rclone.conf from user's home directory if already existing + var homeconf string + if homeDir != "" { + homeconf = filepath.Join(homeDir, hiddenConfigFileName) + _, err := os.Stat(homeconf) + if err == nil { + return homeconf + } + } + + // Check to see if user supplied a --config variable or environment + // variable. We can't use pflag for this because it isn't initialised + // yet so we search the command line manually. + _, configSupplied := os.LookupEnv("RCLONE_CONFIG") + if !configSupplied { + for _, item := range os.Args { + if item == "--config" || strings.HasPrefix(item, "--config=") { + configSupplied = true + break + } + } + } + + // If user's configuration directory was found, then try to create it + // and assume rclone.conf can be written there. If user supplied config + // then skip creating the directory since it will not be used. + if cfgpath != "" { + // cfgpath != "" implies cfgdir != "" + if configSupplied { + return cfgpath + } + err := os.MkdirAll(cfgdir, os.ModePerm) + if err == nil { + return cfgpath + } + } + + // Assume .rclone.conf can be written to user's home directory. + if homeconf != "" { + return homeconf + } + + // Default to ./.rclone.conf (current working directory) if everything else fails. 
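+ // In summary, the search order implemented above is:
+ //  1. rclone.conf in the same directory as the rclone executable
+ //  2. $XDG_CONFIG_HOME/rclone/rclone.conf (or $HOME/.config/rclone/rclone.conf
+ //     when XDG_CONFIG_HOME is unset)
+ //  3. $HOME/.rclone.conf
+ //  4. ./.rclone.conf in the current working directory, as a last resort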
+ if !configSupplied { + fs.Errorf(nil, "Couldn't find home directory or read HOME or XDG_CONFIG_HOME environment variables.") + fs.Errorf(nil, "Defaulting to storing config in current directory.") + fs.Errorf(nil, "Use --config flag to workaround.") + fs.Errorf(nil, "Error was: %v", err) + } + return hiddenConfigFileName +} + +// LoadConfig loads the config file +func LoadConfig(ctx context.Context) { + panic("loading config from file") +} + +var errorConfigFileNotFound = errors.New("config file not found") + +// loadConfigFile will load a config file, and +// automatically decrypt it. +func loadConfigFile() (*goconfig.ConfigFile, error) { + ctx := context.Background() + ci := fs.GetConfig(ctx) + var usingPasswordCommand bool + + b, err := ioutil.ReadFile(ConfigPath) + if err != nil { + if os.IsNotExist(err) { + return nil, errorConfigFileNotFound + } + return nil, err + } + // Find first non-empty line + r := bufio.NewReader(bytes.NewBuffer(b)) + for { + line, _, err := r.ReadLine() + if err != nil { + if err == io.EOF { + return goconfig.LoadFromReader(bytes.NewBuffer(b)) + } + return nil, err + } + l := strings.TrimSpace(string(line)) + if len(l) == 0 || strings.HasPrefix(l, ";") || strings.HasPrefix(l, "#") { + continue + } + // First non-empty or non-comment must be ENCRYPT_V0 + if l == "RCLONE_ENCRYPT_V0:" { + break + } + if strings.HasPrefix(l, "RCLONE_ENCRYPT_V") { + return nil, errors.New("unsupported configuration encryption - update rclone for support") + } + return goconfig.LoadFromReader(bytes.NewBuffer(b)) + } + + if len(configKey) == 0 { + if len(ci.PasswordCommand) != 0 { + var stdout bytes.Buffer + var stderr bytes.Buffer + + cmd := exec.Command(ci.PasswordCommand[0], ci.PasswordCommand[1:]...) + + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmd.Stdin = os.Stdin + + if err := cmd.Run(); err != nil { + // One does not always get the stderr returned in the wrapped error. + fs.Errorf(nil, "Using --password-command returned: %v", err) + if ers := strings.TrimSpace(stderr.String()); ers != "" { + fs.Errorf(nil, "--password-command stderr: %s", ers) + } + return nil, errors.Wrap(err, "password command failed") + } + if pass := strings.Trim(stdout.String(), "\r\n"); pass != "" { + err := setConfigPassword(pass) + if err != nil { + return nil, errors.Wrap(err, "incorrect password") + } + } else { + return nil, errors.New("password-command returned empty string") + } + + if len(configKey) == 0 { + return nil, errors.New("unable to decrypt configuration: incorrect password") + } + usingPasswordCommand = true + } else { + usingPasswordCommand = false + + envpw := os.Getenv("RCLONE_CONFIG_PASS") + + if envpw != "" { + err := setConfigPassword(envpw) + if err != nil { + fs.Errorf(nil, "Using RCLONE_CONFIG_PASS returned: %v", err) + } else { + fs.Debugf(nil, "Using RCLONE_CONFIG_PASS password.") + } + } + } + } + + // Encrypted content is base64 encoded. 
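+ // The layout written by saveConfig below is: the plaintext header
+ // "RCLONE_ENCRYPT_V0:", then base64 data whose first 24 bytes are the
+ // NaCl secretbox nonce and whose remainder is the secretbox ciphertext.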
+ dec := base64.NewDecoder(base64.StdEncoding, r) + box, err := ioutil.ReadAll(dec) + if err != nil { + return nil, errors.Wrap(err, "failed to load base64 encoded data") + } + if len(box) < 24+secretbox.Overhead { + return nil, errors.New("Configuration data too short") + } + + var out []byte + for { + if envKeyFile := os.Getenv("_RCLONE_CONFIG_KEY_FILE"); len(envKeyFile) > 0 { + fs.Debugf(nil, "attempting to obtain configKey from temp file %s", envKeyFile) + obscuredKey, err := ioutil.ReadFile(envKeyFile) + if err != nil { + errRemove := os.Remove(envKeyFile) + if errRemove != nil { + log.Fatalf("unable to read obscured config key and unable to delete the temp file: %v", err) + } + log.Fatalf("unable to read obscured config key: %v", err) + } + errRemove := os.Remove(envKeyFile) + if errRemove != nil { + log.Fatalf("unable to delete temp file with configKey: %v", err) + } + configKey = []byte(obscure.MustReveal(string(obscuredKey))) + fs.Debugf(nil, "using _RCLONE_CONFIG_KEY_FILE for configKey") + } else { + if len(configKey) == 0 { + if usingPasswordCommand { + return nil, errors.New("using --password-command derived password, unable to decrypt configuration") + } + if !ci.AskPassword { + return nil, errors.New("unable to decrypt configuration and not allowed to ask for password - set RCLONE_CONFIG_PASS to your configuration password") + } + getConfigPassword("Enter configuration password:") + } + } + + // Nonce is first 24 bytes of the ciphertext + var nonce [24]byte + copy(nonce[:], box[:24]) + var key [32]byte + copy(key[:], configKey[:32]) + + // Attempt to decrypt + var ok bool + out, ok = secretbox.Open(nil, box[24:], &nonce, &key) + if ok { + break + } + + // Retry + fs.Errorf(nil, "Couldn't decrypt configuration, most likely wrong password.") + configKey = nil + } + return goconfig.LoadFromReader(bytes.NewBuffer(out)) +} + +// checkPassword normalises and validates the password +func checkPassword(password string) (string, error) { + if !utf8.ValidString(password) { + return "", errors.New("password contains invalid utf8 characters") + } + // Check for leading/trailing whitespace + trimmedPassword := strings.TrimSpace(password) + // Warn user if password has leading+trailing whitespace + if len(password) != len(trimmedPassword) { + _, _ = fmt.Fprintln(os.Stderr, "Your password contains leading/trailing whitespace - in previous versions of rclone this was stripped") + } + // Normalize to reduce weird variations. + password = norm.NFKC.String(password) + if len(password) == 0 || len(trimmedPassword) == 0 { + return "", errors.New("no characters in password") + } + return password, nil +} + +// GetPassword asks the user for a password with the prompt given. +func GetPassword(prompt string) string { + _, _ = fmt.Fprintln(PasswordPromptOutput, prompt) + for { + _, _ = fmt.Fprint(PasswordPromptOutput, "password:") + password := ReadPassword() + password, err := checkPassword(password) + if err == nil { + return password + } + _, _ = fmt.Fprintf(os.Stderr, "Bad password: %v\n", err) + } +} + +// ChangePassword will query the user twice for the named password. If +// the same password is entered it is returned. +func ChangePassword(name string) string { + for { + a := GetPassword(fmt.Sprintf("Enter %s password:", name)) + b := GetPassword(fmt.Sprintf("Confirm %s password:", name)) + if a == b { + return a + } + fmt.Println("Passwords do not match!") + } +} + +// getConfigPassword will query the user for a password the +// first time it is required. 
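+// It keeps prompting until setConfigPassword accepts the password.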
+func getConfigPassword(q string) {
+ if len(configKey) != 0 {
+ return
+ }
+ for {
+ password := GetPassword(q)
+ err := setConfigPassword(password)
+ if err == nil {
+ return
+ }
+ _, _ = fmt.Fprintln(os.Stderr, "Error:", err)
+ }
+}
+
+// setConfigPassword will set the configKey to the hash of
+// the password. If the length of the password is
+// zero after trimming+normalization, an error is returned.
+func setConfigPassword(password string) error {
+ password, err := checkPassword(password)
+ if err != nil {
+ return err
+ }
+ // Create SHA256 hash of the password
+ sha := sha256.New()
+ _, err = sha.Write([]byte("[" + password + "][rclone-config]"))
+ if err != nil {
+ return err
+ }
+ configKey = sha.Sum(nil)
+ if PassConfigKeyForDaemonization {
+ tempFile, err := ioutil.TempFile("", "rclone")
+ if err != nil {
+ log.Fatalf("cannot create temp file to store configKey: %v", err)
+ }
+ _, err = tempFile.WriteString(obscure.MustObscure(string(configKey)))
+ if err != nil {
+ errRemove := os.Remove(tempFile.Name())
+ if errRemove != nil {
+ log.Fatalf("error writing configKey to temp file and also error deleting it: %v", err)
+ }
+ log.Fatalf("error writing configKey to temp file: %v", err)
+ }
+ err = tempFile.Close()
+ if err != nil {
+ errRemove := os.Remove(tempFile.Name())
+ if errRemove != nil {
+ log.Fatalf("error closing temp file with configKey and also error deleting it: %v", err)
+ }
+ log.Fatalf("error closing temp file with configKey: %v", err)
+ }
+ fs.Debugf(nil, "saving configKey to temp file")
+ err = os.Setenv("_RCLONE_CONFIG_KEY_FILE", tempFile.Name())
+ if err != nil {
+ errRemove := os.Remove(tempFile.Name())
+ if errRemove != nil {
+ log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE and unable to delete the temp file: %v", err)
+ }
+ log.Fatalf("unable to set environment variable _RCLONE_CONFIG_KEY_FILE: %v", err)
+ }
+ }
+ return nil
+}
+
+// changeConfigPassword will query the user twice
+// for a password. If the same password is entered
+// twice the key is updated.
+func changeConfigPassword() {
+ err := setConfigPassword(ChangePassword("NEW configuration"))
+ if err != nil {
+ fmt.Printf("Failed to set config password: %v\n", err)
+ return
+ }
+}
+
+// saveConfig saves the configuration file.
+// If configKey has been set, the file will be encrypted.
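+//
+// The write is atomic: the config is serialised to a temp file in the
+// same directory, permissions (and on Unix the group) are copied from
+// the existing file where possible, the old file is moved to
+// ConfigPath+".old", and the temp file is renamed into place before
+// the backup is removed.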
+func saveConfig() error {
+ dir, name := filepath.Split(ConfigPath)
+ err := os.MkdirAll(dir, os.ModePerm)
+ if err != nil {
+ return errors.Wrap(err, "failed to create config directory")
+ }
+ f, err := ioutil.TempFile(dir, name)
+ if err != nil {
+ return errors.Errorf("Failed to create temp file for new config: %v", err)
+ }
+ defer func() {
+ if err := os.Remove(f.Name()); err != nil && !os.IsNotExist(err) {
+ fs.Errorf(nil, "Failed to remove temp config file: %v", err)
+ }
+ }()
+
+ var buf bytes.Buffer
+ err = goconfig.SaveConfigData(getConfigData(), &buf)
+ if err != nil {
+ return errors.Errorf("Failed to save config file: %v", err)
+ }
+
+ if len(configKey) == 0 {
+ if _, err := buf.WriteTo(f); err != nil {
+ return errors.Errorf("Failed to write temp config file: %v", err)
+ }
+ } else {
+ _, _ = fmt.Fprintln(f, "# Encrypted rclone configuration File")
+ _, _ = fmt.Fprintln(f, "")
+ _, _ = fmt.Fprintln(f, "RCLONE_ENCRYPT_V0:")
+
+ // Generate new nonce and write it to the start of the ciphertext
+ var nonce [24]byte
+ n, _ := rand.Read(nonce[:])
+ if n != 24 {
+ return errors.Errorf("nonce short read: %d", n)
+ }
+ enc := base64.NewEncoder(base64.StdEncoding, f)
+ _, err = enc.Write(nonce[:])
+ if err != nil {
+ return errors.Errorf("Failed to write temp config file: %v", err)
+ }
+
+ var key [32]byte
+ copy(key[:], configKey[:32])
+
+ b := secretbox.Seal(nil, buf.Bytes(), &nonce, &key)
+ _, err = enc.Write(b)
+ if err != nil {
+ return errors.Errorf("Failed to write temp config file: %v", err)
+ }
+ _ = enc.Close()
+ }
+
+ _ = f.Sync()
+ err = f.Close()
+ if err != nil {
+ return errors.Errorf("Failed to close config file: %v", err)
+ }
+
+ var fileMode os.FileMode = 0600
+ info, err := os.Stat(ConfigPath)
+ if err != nil {
+ fs.Debugf(nil, "Using default permissions for config file: %v", fileMode)
+ } else if info.Mode() != fileMode {
+ fs.Debugf(nil, "Keeping previous permissions for config file: %v", info.Mode())
+ fileMode = info.Mode()
+ }
+
+ attemptCopyGroup(ConfigPath, f.Name())
+
+ err = os.Chmod(f.Name(), fileMode)
+ if err != nil {
+ fs.Errorf(nil, "Failed to set permissions on config file: %v", err)
+ }
+
+ if err = os.Rename(ConfigPath, ConfigPath+".old"); err != nil && !os.IsNotExist(err) {
+ return errors.Errorf("Failed to move previous config to backup location: %v", err)
+ }
+ if err = os.Rename(f.Name(), ConfigPath); err != nil {
+ return errors.Errorf("Failed to move newly written config from %s to final location: %v", f.Name(), err)
+ }
+ if err := os.Remove(ConfigPath + ".old"); err != nil && !os.IsNotExist(err) {
+ fs.Errorf(nil, "Failed to remove backup config file: %v", err)
+ }
+ return nil
+}
+
+// SaveConfig calls saveConfig to save the configuration file.
+// If saveConfig returns an error it retries after a short sleep.
+func SaveConfig() {
+ ctx := context.Background()
+ ci := fs.GetConfig(ctx)
+ var err error
+ for i := 0; i < ci.LowLevelRetries+1; i++ {
+ if err = saveConfig(); err == nil {
+ return
+ }
+ waitingTimeMs := mathrand.Intn(1000)
+ time.Sleep(time.Duration(waitingTimeMs) * time.Millisecond)
+ }
+ log.Fatalf("Failed to save config after %d tries: %v", ci.LowLevelRetries, err)
+
+ return
+}
+
+// SetValueAndSave sets the key to the value and saves just that
+// value in the config file. It loads the old config file in from
+// disk first and overwrites the given value only.
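+//
+// For example (illustrative; the remote name and key are placeholders):
+//
+//	err := SetValueAndSave("myremote", "region", "us-east-1")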
+func SetValueAndSave(name, key, value string) (err error) {
+ // Set the value in config in case we fail to reload it
+ getConfigData().SetValue(name, key, value)
+ // Reload the config file
+ reloadedConfigFile, err := loadConfigFile()
+ if err == errorConfigFileNotFound {
+ // Config file not written yet so ignore reload
+ return nil
+ } else if err != nil {
+ return err
+ }
+ _, err = reloadedConfigFile.GetSection(name)
+ if err != nil {
+ // Section doesn't exist yet so ignore reload
+ return nil
+ }
+ // Update the config file with the reloaded version
+ configFile = reloadedConfigFile
+ // Set the value in the reloaded version
+ reloadedConfigFile.SetValue(name, key, value)
+ // Save it again
+ SaveConfig()
+ return nil
+}
+
+// FileGetFresh reads the config key under section, returning the value
+// or an error if the config file was not found or that value couldn't
+// be read.
+func FileGetFresh(section, key string) (value string, err error) {
+ reloadedConfigFile, err := loadConfigFile()
+ if err != nil {
+ return "", err
+ }
+ return reloadedConfigFile.GetValue(section, key)
+}
+
+// ShowRemotes shows an overview of the config file
+func ShowRemotes() {
+ remotes := getConfigData().GetSectionList()
+ if len(remotes) == 0 {
+ return
+ }
+ sort.Strings(remotes)
+ fmt.Printf("%-20s %s\n", "Name", "Type")
+ fmt.Printf("%-20s %s\n", "====", "====")
+ for _, remote := range remotes {
+ fmt.Printf("%-20s %s\n", remote, FileGet(remote, "type"))
+ }
+}
+
+// ChooseRemote chooses a remote name
+func ChooseRemote() string {
+ remotes := getConfigData().GetSectionList()
+ sort.Strings(remotes)
+ return Choose("remote", remotes, nil, false)
+}
+
+// ReadLine reads some input
+var ReadLine = func() string {
+ buf := bufio.NewReader(os.Stdin)
+ line, err := buf.ReadString('\n')
+ if err != nil {
+ log.Fatalf("Failed to read line: %v", err)
+ }
+ return strings.TrimSpace(line)
+}
+
+// ReadNonEmptyLine prints prompt and calls ReadLine until it returns
+// something non-empty
+func ReadNonEmptyLine(prompt string) string {
+ result := ""
+ for result == "" {
+ fmt.Print(prompt)
+ result = strings.TrimSpace(ReadLine())
+ }
+ return result
+}
+
+// CommandDefault - choose one. If return is pressed then it will
+// choose the defaultIndex if it is >= 0
+func CommandDefault(commands []string, defaultIndex int) byte {
+ opts := []string{}
+ for i, text := range commands {
+ def := ""
+ if i == defaultIndex {
+ def = " (default)"
+ }
+ fmt.Printf("%c) %s%s\n", text[0], text[1:], def)
+ opts = append(opts, text[:1])
+ }
+ optString := strings.Join(opts, "")
+ optHelp := strings.Join(opts, "/")
+ for {
+ fmt.Printf("%s> ", optHelp)
+ result := strings.ToLower(ReadLine())
+ if len(result) == 0 && defaultIndex >= 0 {
+ return optString[defaultIndex]
+ }
+ if len(result) != 1 {
+ continue
+ }
+ i := strings.Index(optString, string(result[0]))
+ if i >= 0 {
+ return result[0]
+ }
+ }
+}
+
+// Command - choose one
+func Command(commands []string) byte {
+ return CommandDefault(commands, -1)
+}
+
+// Confirm asks the user for Yes or No and returns true or false
+//
+// If the user presses enter then the Default will be used
+func Confirm(Default bool) bool {
+ defaultIndex := 0
+ if !Default {
+ defaultIndex = 1
+ }
+ return CommandDefault([]string{"yYes", "nNo"}, defaultIndex) == 'y'
+}
+
+// ConfirmWithConfig asks the user for Yes or No and returns true or
+// false.
+// +// If AutoConfirm is set, it will look up the value in m and return +// that, but if it isn't set then it will return the Default value +// passed in +func ConfirmWithConfig(ctx context.Context, m configmap.Getter, configName string, Default bool) bool { + ci := fs.GetConfig(ctx) + if ci.AutoConfirm { + configString, ok := m.Get(configName) + if ok { + configValue, err := strconv.ParseBool(configString) + if err != nil { + fs.Errorf(nil, "Failed to parse config parameter %s=%q as boolean - using default %v: %v", configName, configString, Default, err) + } else { + Default = configValue + } + } + answer := "No" + if Default { + answer = "Yes" + } + fmt.Printf("Auto confirm is set: answering %s, override by setting config parameter %s=%v\n", answer, configName, !Default) + return Default + } + return Confirm(Default) +} + +// Choose one of the defaults or type a new string if newOk is set +func Choose(what string, defaults, help []string, newOk bool) string { + valueDescription := "an existing" + if newOk { + valueDescription = "your own" + } + fmt.Printf("Choose a number from below, or type in %s value\n", valueDescription) + attributes := []string{terminal.HiRedFg, terminal.HiGreenFg} + for i, text := range defaults { + var lines []string + if help != nil { + parts := strings.Split(help[i], "\n") + lines = append(lines, parts...) + } + lines = append(lines, fmt.Sprintf("%q", text)) + pos := i + 1 + terminal.WriteString(attributes[i%len(attributes)]) + if len(lines) == 1 { + fmt.Printf("%2d > %s\n", pos, text) + } else { + mid := (len(lines) - 1) / 2 + for i, line := range lines { + var sep rune + switch i { + case 0: + sep = '/' + case len(lines) - 1: + sep = '\\' + default: + sep = '|' + } + number := " " + if i == mid { + number = fmt.Sprintf("%2d", pos) + } + fmt.Printf("%s %c %s\n", number, sep, line) + } + } + terminal.WriteString(terminal.Reset) + } + for { + fmt.Printf("%s> ", what) + result := ReadLine() + i, err := strconv.Atoi(result) + if err != nil { + if newOk { + return result + } + for _, v := range defaults { + if result == v { + return result + } + } + continue + } + if i >= 1 && i <= len(defaults) { + return defaults[i-1] + } + } +} + +// ChooseNumber asks the user to enter a number between min and max +// inclusive prompting them with what. 
+func ChooseNumber(what string, min, max int) int {
+ for {
+ fmt.Printf("%s> ", what)
+ result := ReadLine()
+ i, err := strconv.Atoi(result)
+ if err != nil {
+ fmt.Printf("Bad number: %v\n", err)
+ continue
+ }
+ if i < min || i > max {
+ fmt.Printf("Out of range - %d to %d inclusive\n", min, max)
+ continue
+ }
+ return i
+ }
+}
+
+// ShowRemote shows the contents of the remote
+func ShowRemote(name string) {
+ fmt.Printf("--------------------\n")
+ fmt.Printf("[%s]\n", name)
+ fs := MustFindByName(name)
+ for _, key := range getConfigData().GetKeyList(name) {
+ isPassword := false
+ for _, option := range fs.Options {
+ if option.Name == key && option.IsPassword {
+ isPassword = true
+ break
+ }
+ }
+ value := FileGet(name, key)
+ if isPassword && value != "" {
+ fmt.Printf("%s = *** ENCRYPTED ***\n", key)
+ } else {
+ fmt.Printf("%s = %s\n", key, value)
+ }
+ }
+ fmt.Printf("--------------------\n")
+}
+
+// OkRemote prints the contents of the remote and asks if it is OK
+func OkRemote(name string) bool {
+ ShowRemote(name)
+ switch i := CommandDefault([]string{"yYes this is OK", "eEdit this remote", "dDelete this remote"}, 0); i {
+ case 'y':
+ return true
+ case 'e':
+ return false
+ case 'd':
+ getConfigData().DeleteSection(name)
+ return true
+ default:
+ fs.Errorf(nil, "Bad choice %c", i)
+ }
+ return false
+}
+
+// MustFindByName finds the RegInfo for the remote name passed in or
+// exits with a fatal error.
+func MustFindByName(name string) *fs.RegInfo {
+ fsType := FileGet(name, "type")
+ if fsType == "" {
+ log.Fatalf("Couldn't find type of fs for %q", name)
+ }
+ return fs.MustFind(fsType)
+}
+
+// RemoteConfig runs the config helper for the remote if needed
+func RemoteConfig(ctx context.Context, name string) {
+ fmt.Printf("Remote config\n")
+ f := MustFindByName(name)
+ if f.Config != nil {
+ m := fs.ConfigMap(f, name)
+ f.Config(ctx, name, m)
+ }
+}
+
+// matchProvider returns true if provider matches the providerConfig string.
+//
+// The providerConfig string can either be a list of providers to
+// match, or if it starts with "!" it will be a list of providers not
+// to match.
+// +// If either providerConfig or provider is blank then it will return true +func matchProvider(providerConfig, provider string) bool { + if providerConfig == "" || provider == "" { + return true + } + negate := false + if strings.HasPrefix(providerConfig, "!") { + providerConfig = providerConfig[1:] + negate = true + } + providers := strings.Split(providerConfig, ",") + matched := false + for _, p := range providers { + if p == provider { + matched = true + break + } + } + if negate { + return !matched + } + return matched +} + +// ChooseOption asks the user to choose an option +func ChooseOption(o *fs.Option, name string) string { + var subProvider = getConfigData().MustValue(name, fs.ConfigProvider, "") + fmt.Println(o.Help) + if o.IsPassword { + actions := []string{"yYes type in my own password", "gGenerate random password"} + defaultAction := -1 + if !o.Required { + defaultAction = len(actions) + actions = append(actions, "nNo leave this optional password blank") + } + var password string + var err error + switch i := CommandDefault(actions, defaultAction); i { + case 'y': + password = ChangePassword("the") + case 'g': + for { + fmt.Printf("Password strength in bits.\n64 is just about memorable\n128 is secure\n1024 is the maximum\n") + bits := ChooseNumber("Bits", 64, 1024) + password, err = Password(bits) + if err != nil { + log.Fatalf("Failed to make password: %v", err) + } + fmt.Printf("Your password is: %s\n", password) + fmt.Printf("Use this password? Please note that an obscured version of this \npassword (and not the " + + "password itself) will be stored under your \nconfiguration file, so keep this generated password " + + "in a safe place.\n") + if Confirm(true) { + break + } + } + case 'n': + return "" + default: + fs.Errorf(nil, "Bad choice %c", i) + } + return obscure.MustObscure(password) + } + what := fmt.Sprintf("%T value", o.Default) + switch o.Default.(type) { + case bool: + what = "boolean value (true or false)" + case fs.SizeSuffix: + what = "size with suffix k,M,G,T" + case fs.Duration: + what = "duration s,m,h,d,w,M,y" + case int, int8, int16, int32, int64: + what = "signed integer" + case uint, byte, uint16, uint32, uint64: + what = "unsigned integer" + } + var in string + for { + fmt.Printf("Enter a %s. Press Enter for the default (%q).\n", what, fmt.Sprint(o.Default)) + if len(o.Examples) > 0 { + var values []string + var help []string + for _, example := range o.Examples { + if matchProvider(example.Provider, subProvider) { + values = append(values, example.Value) + help = append(help, example.Help) + } + } + in = Choose(o.Name, values, help, true) + } else { + fmt.Printf("%s> ", o.Name) + in = ReadLine() + } + if in == "" { + if o.Required && fmt.Sprint(o.Default) == "" { + fmt.Printf("This value is required and it has no default.\n") + continue + } + break + } + newIn, err := configstruct.StringToInterface(o.Default, in) + if err != nil { + fmt.Printf("Failed to parse %q: %v\n", in, err) + continue + } + in = fmt.Sprint(newIn) // canonicalise + break + } + return in +} + +// Suppress the confirm prompts by altering the context config +func suppressConfirm(ctx context.Context) context.Context { + newCtx, ci := fs.AddConfig(ctx) + ci.AutoConfirm = true + return newCtx +} + +// UpdateRemote adds the keyValues passed in to the remote of name. +// keyValues should be key, value pairs. 
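+//
+// For example (illustrative; the remote name and key are placeholders):
+//
+//	err := UpdateRemote(ctx, "myremote", rc.Params{"env_auth": "true"}, false, false)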
+func UpdateRemote(ctx context.Context, name string, keyValues rc.Params, doObscure, noObscure bool) error { + if doObscure && noObscure { + return errors.New("can't use --obscure and --no-obscure together") + } + err := fspath.CheckConfigName(name) + if err != nil { + return err + } + ctx = suppressConfirm(ctx) + + // Work out which options need to be obscured + needsObscure := map[string]struct{}{} + if !noObscure { + if fsType := FileGet(name, "type"); fsType != "" { + if ri, err := fs.Find(fsType); err != nil { + fs.Debugf(nil, "Couldn't find fs for type %q", fsType) + } else { + for _, opt := range ri.Options { + if opt.IsPassword { + needsObscure[opt.Name] = struct{}{} + } + } + } + } else { + fs.Debugf(nil, "UpdateRemote: Couldn't find fs type") + } + } + + // Set the config + for k, v := range keyValues { + vStr := fmt.Sprint(v) + // Obscure parameter if necessary + if _, ok := needsObscure[k]; ok { + _, err := obscure.Reveal(vStr) + if err != nil || doObscure { + // If error => not already obscured, so obscure it + // or we are forced to obscure + vStr, err = obscure.Obscure(vStr) + if err != nil { + return errors.Wrap(err, "UpdateRemote: obscure failed") + } + } + } + getConfigData().SetValue(name, k, vStr) + } + RemoteConfig(ctx, name) + SaveConfig() + return nil +} + +// CreateRemote creates a new remote with name, provider and a list of +// parameters which are key, value pairs. If update is set then it +// adds the new keys rather than replacing all of them. +func CreateRemote(ctx context.Context, name string, provider string, keyValues rc.Params, doObscure, noObscure bool) error { + err := fspath.CheckConfigName(name) + if err != nil { + return err + } + // Delete the old config if it exists + getConfigData().DeleteSection(name) + // Set the type + getConfigData().SetValue(name, "type", provider) + // Set the remaining values + return UpdateRemote(ctx, name, keyValues, doObscure, noObscure) +} + +// PasswordRemote adds the keyValues passed in to the remote of name. +// keyValues should be key, value pairs. 
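+// Each value is obscured with obscure.MustObscure before being passed
+// on to UpdateRemote, so callers supply plaintext secrets.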
+func PasswordRemote(ctx context.Context, name string, keyValues rc.Params) error { + ctx = suppressConfirm(ctx) + err := fspath.CheckConfigName(name) + if err != nil { + return err + } + for k, v := range keyValues { + keyValues[k] = obscure.MustObscure(fmt.Sprint(v)) + } + return UpdateRemote(ctx, name, keyValues, false, true) +} + +// JSONListProviders prints all the providers and options in JSON format +func JSONListProviders() error { + b, err := json.MarshalIndent(fs.Registry, "", " ") + if err != nil { + return errors.Wrap(err, "failed to marshal examples") + } + _, err = os.Stdout.Write(b) + if err != nil { + return errors.Wrap(err, "failed to write providers list") + } + return nil +} + +// fsOption returns an Option describing the possible remotes +func fsOption() *fs.Option { + o := &fs.Option{ + Name: "Storage", + Help: "Type of storage to configure.", + Default: "", + } + for _, item := range fs.Registry { + example := fs.OptionExample{ + Value: item.Name, + Help: item.Description, + } + o.Examples = append(o.Examples, example) + } + o.Examples.Sort() + return o +} + +// NewRemoteName asks the user for a name for a new remote +func NewRemoteName() (name string) { + for { + fmt.Printf("name> ") + name = ReadLine() + _, err := getConfigData().GetSection(name) + if err == nil { + fmt.Printf("Remote %q already exists.\n", name) + continue + } + err = fspath.CheckConfigName(name) + switch { + case name == "": + fmt.Printf("Can't use empty name.\n") + case driveletter.IsDriveLetter(name): + fmt.Printf("Can't use %q as it can be confused with a drive letter.\n", name) + case err != nil: + fmt.Printf("Can't use %q as %v.\n", name, err) + default: + return name + } + } +} + +// editOptions edits the options. If new is true then it just allows +// entry and doesn't show any old values. +func editOptions(ri *fs.RegInfo, name string, isNew bool) { + fmt.Printf("** See help for %s backend at: https://rclone.org/%s/ **\n\n", ri.Name, ri.FileName()) + hasAdvanced := false + for _, advanced := range []bool{false, true} { + if advanced { + if !hasAdvanced { + break + } + fmt.Printf("Edit advanced config? (y/n)\n") + if !Confirm(false) { + break + } + } + for _, option := range ri.Options { + isVisible := option.Hide&fs.OptionHideConfigurator == 0 + hasAdvanced = hasAdvanced || (option.Advanced && isVisible) + if option.Advanced != advanced { + continue + } + subProvider := getConfigData().MustValue(name, fs.ConfigProvider, "") + if matchProvider(option.Provider, subProvider) && isVisible { + if !isNew { + fmt.Printf("Value %q = %q\n", option.Name, FileGet(name, option.Name)) + fmt.Printf("Edit? 
(y/n)>\n") + if !Confirm(false) { + continue + } + } + FileSet(name, option.Name, ChooseOption(&option, name)) + } + } + } +} + +// NewRemote make a new remote from its name +func NewRemote(ctx context.Context, name string) { + var ( + newType string + ri *fs.RegInfo + err error + ) + + // Set the type first + for { + newType = ChooseOption(fsOption(), name) + ri, err = fs.Find(newType) + if err != nil { + fmt.Printf("Bad remote %q: %v\n", newType, err) + continue + } + break + } + getConfigData().SetValue(name, "type", newType) + + editOptions(ri, name, true) + RemoteConfig(ctx, name) + if OkRemote(name) { + SaveConfig() + return + } + EditRemote(ctx, ri, name) +} + +// EditRemote gets the user to edit a remote +func EditRemote(ctx context.Context, ri *fs.RegInfo, name string) { + ShowRemote(name) + fmt.Printf("Edit remote\n") + for { + editOptions(ri, name, false) + if OkRemote(name) { + break + } + } + SaveConfig() + RemoteConfig(ctx, name) +} + +// DeleteRemote gets the user to delete a remote +func DeleteRemote(name string) { + getConfigData().DeleteSection(name) + SaveConfig() +} + +// copyRemote asks the user for a new remote name and copies name into +// it. Returns the new name. +func copyRemote(name string) string { + newName := NewRemoteName() + // Copy the keys + for _, key := range getConfigData().GetKeyList(name) { + value := getConfigData().MustValue(name, key, "") + getConfigData().SetValue(newName, key, value) + } + return newName +} + +// RenameRemote renames a config section +func RenameRemote(name string) { + fmt.Printf("Enter new name for %q remote.\n", name) + newName := copyRemote(name) + if name != newName { + getConfigData().DeleteSection(name) + SaveConfig() + } +} + +// CopyRemote copies a config section +func CopyRemote(name string) { + fmt.Printf("Enter name for copy of %q remote.\n", name) + copyRemote(name) + SaveConfig() +} + +// ShowConfigLocation prints the location of the config file in use +func ShowConfigLocation() { + if _, err := os.Stat(ConfigPath); os.IsNotExist(err) { + fmt.Println("Configuration file doesn't exist, but rclone will use this path:") + } else { + fmt.Println("Configuration file is stored at:") + } + fmt.Printf("%s\n", ConfigPath) +} + +// ShowConfig prints the (unencrypted) config options +func ShowConfig() { + var buf bytes.Buffer + if err := goconfig.SaveConfigData(getConfigData(), &buf); err != nil { + log.Fatalf("Failed to serialize config: %v", err) + } + str := buf.String() + if str == "" { + str = "; empty config\n" + } + fmt.Printf("%s", str) +} + +// EditConfig edits the config file interactively +func EditConfig(ctx context.Context) { + for { + haveRemotes := len(getConfigData().GetSectionList()) != 0 + what := []string{"eEdit existing remote", "nNew remote", "dDelete remote", "rRename remote", "cCopy remote", "sSet configuration password", "qQuit config"} + if haveRemotes { + fmt.Printf("Current remotes:\n\n") + ShowRemotes() + fmt.Printf("\n") + } else { + fmt.Printf("No remotes found - make a new one\n") + // take 2nd item and last 2 items of menu list + what = append(what[1:2], what[len(what)-2:]...) 
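+ // With the menu above this leaves "nNew remote",
+ // "sSet configuration password" and "qQuit config".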
+ }
+ switch i := Command(what); i {
+ case 'e':
+ name := ChooseRemote()
+ fs := MustFindByName(name)
+ EditRemote(ctx, fs, name)
+ case 'n':
+ NewRemote(ctx, NewRemoteName())
+ case 'd':
+ name := ChooseRemote()
+ DeleteRemote(name)
+ case 'r':
+ RenameRemote(ChooseRemote())
+ case 'c':
+ CopyRemote(ChooseRemote())
+ case 's':
+ SetPassword()
+ case 'q':
+ return
+ }
+ }
+}
+
+// SetPassword will allow the user to modify the current
+// configuration encryption settings.
+func SetPassword() {
+ for {
+ if len(configKey) > 0 {
+ fmt.Println("Your configuration is encrypted.")
+ what := []string{"cChange Password", "uUnencrypt configuration", "qQuit to main menu"}
+ switch i := Command(what); i {
+ case 'c':
+ changeConfigPassword()
+ SaveConfig()
+ fmt.Println("Password changed")
+ continue
+ case 'u':
+ configKey = nil
+ SaveConfig()
+ continue
+ case 'q':
+ return
+ }
+ } else {
+ fmt.Println("Your configuration is not encrypted.")
+ fmt.Println("If you add a password, you will protect your login information to cloud services.")
+ what := []string{"aAdd Password", "qQuit to main menu"}
+ switch i := Command(what); i {
+ case 'a':
+ changeConfigPassword()
+ SaveConfig()
+ fmt.Println("Password set")
+ continue
+ case 'q':
+ return
+ }
+ }
+ }
+}
+
+// Authorize is for remote authorization of headless machines.
+//
+// It expects 1 or 3 arguments
+//
+// rclone authorize "fs name"
+// rclone authorize "fs name" "client id" "client secret"
+func Authorize(ctx context.Context, args []string, noAutoBrowser bool) {
+ ctx = suppressConfirm(ctx)
+ switch len(args) {
+ case 1, 3:
+ default:
+ log.Fatalf("Invalid number of arguments: %d", len(args))
+ }
+ newType := args[0]
+ f := fs.MustFind(newType)
+ if f.Config == nil {
+ log.Fatalf("Can't authorize fs %q", newType)
+ }
+ // Name used for temporary fs
+ name := "**temp-fs**"
+
+ // Make sure we delete it
+ defer DeleteRemote(name)
+
+ // Indicate that we are running rclone authorize
+ getConfigData().SetValue(name, ConfigAuthorize, "true")
+ if noAutoBrowser {
+ getConfigData().SetValue(name, ConfigAuthNoBrowser, "true")
+ }
+
+ if len(args) == 3 {
+ getConfigData().SetValue(name, ConfigClientID, args[1])
+ getConfigData().SetValue(name, ConfigClientSecret, args[2])
+ }
+
+ m := fs.ConfigMap(f, name)
+ f.Config(ctx, name, m)
+}
+
+// FileGetFlag gets the config key under section, returning the
+// value and true if found, or ("", false) otherwise
+func FileGetFlag(section, key string) (string, bool) {
+ newValue, err := getConfigData().GetValue(section, key)
+ return newValue, err == nil
+}
+
+// FileGet gets the config key under section returning the
+// default or empty string if not set.
+//
+// It looks up defaults in the environment if they are present
+func FileGet(section, key string, defaultVal ...string) string {
+ envKey := fs.ConfigToEnv(section, key)
+ newValue, found := os.LookupEnv(envKey)
+ if found {
+ defaultVal = []string{newValue}
+ }
+ return getConfigData().MustValue(section, key, defaultVal...)
+}
+
+// FileSet sets the key in section to value. It doesn't save
+// the config file.
+func FileSet(section, key, value string) {
+ if value != "" {
+ getConfigData().SetValue(section, key, value)
+ } else {
+ FileDeleteKey(section, key)
+ }
+}
+
+// FileDeleteKey deletes the config key in the config file.
+// It returns true if the key was deleted,
+// or returns false if the section or key didn't exist.
+func FileDeleteKey(section, key string) bool { + return getConfigData().DeleteKey(section, key) +} + +var matchEnv = regexp.MustCompile(`^RCLONE_CONFIG_(.*?)_TYPE=.*$`) + +// FileRefresh ensures the latest configFile is loaded from disk +func FileRefresh() error { + reloadedConfigFile, err := loadConfigFile() + if err != nil { + return err + } + configFile = reloadedConfigFile + return nil +} + +// FileSections returns the sections in the config file +// including any defined by environment variables. +func FileSections() []string { + sections := getConfigData().GetSectionList() + for _, item := range os.Environ() { + matches := matchEnv.FindStringSubmatch(item) + if len(matches) == 2 { + sections = append(sections, strings.ToLower(matches[1])) + } + } + return sections +} + +// DumpRcRemote dumps the config for a single remote +func DumpRcRemote(name string) (dump rc.Params) { + params := rc.Params{} + for _, key := range getConfigData().GetKeyList(name) { + params[key] = FileGet(name, key) + } + return params +} + +// DumpRcBlob dumps all the config as an unstructured blob suitable +// for the rc +func DumpRcBlob() (dump rc.Params) { + dump = rc.Params{} + for _, name := range getConfigData().GetSectionList() { + dump[name] = DumpRcRemote(name) + } + return dump +} + +// Dump dumps all the config as a JSON file +func Dump() error { + dump := DumpRcBlob() + b, err := json.MarshalIndent(dump, "", " ") + if err != nil { + return errors.Wrap(err, "failed to marshal config dump") + } + _, err = os.Stdout.Write(b) + if err != nil { + return errors.Wrap(err, "failed to write config dump") + } + return nil +} + +// makeCacheDir returns a directory to use for caching. +// +// Code borrowed from go stdlib until it is made public +func makeCacheDir() (dir string) { + // Compute default location. + switch runtime.GOOS { + case "windows": + dir = os.Getenv("LocalAppData") + + case "darwin": + dir = os.Getenv("HOME") + if dir != "" { + dir += "/Library/Caches" + } + + case "plan9": + dir = os.Getenv("home") + if dir != "" { + // Plan 9 has no established per-user cache directory, + // but $home/lib/xyz is the usual equivalent of $HOME/.xyz on Unix. + dir += "/lib/cache" + } + + default: // Unix + // https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html + dir = os.Getenv("XDG_CACHE_HOME") + if dir == "" { + dir = os.Getenv("HOME") + if dir != "" { + dir += "/.cache" + } + } + } + + // if no dir found then use TempDir - we will have a cachedir! + if dir == "" { + dir = os.TempDir() + } + return filepath.Join(dir, "rclone") +} diff --git a/vendor/github.com/rclone/rclone/fs/config/config_other.go b/vendor/github.com/rclone/rclone/fs/config/config_other.go new file mode 100644 index 00000000000..e9024a8010e --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/config/config_other.go @@ -0,0 +1,10 @@ +// Read, write and edit the config file +// Non-unix specific functions. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package config + +// attemptCopyGroups tries to keep the group the same, which only makes sense +// for system with user-group-world permission model. 
+func attemptCopyGroup(fromPath, toPath string) {}
diff --git a/vendor/github.com/rclone/rclone/fs/config/config_read_password.go b/vendor/github.com/rclone/rclone/fs/config/config_read_password.go
new file mode 100644
index 00000000000..023053c7e22
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/config/config_read_password.go
@@ -0,0 +1,29 @@
+// ReadPassword for OSes which are supported by golang.org/x/crypto/ssh/terminal
+// See https://github.com/golang/go/issues/14441 - plan9
+// https://github.com/golang/go/issues/13085 - solaris
+
+// +build !solaris,!plan9
+
+package config
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ "github.com/rclone/rclone/lib/terminal"
+)
+
+// ReadPassword reads a password without echoing it to the terminal.
+func ReadPassword() string {
+ stdin := int(os.Stdin.Fd())
+ if !terminal.IsTerminal(stdin) {
+ return ReadLine()
+ }
+ line, err := terminal.ReadPassword(stdin)
+ _, _ = fmt.Fprintln(os.Stderr)
+ if err != nil {
+ log.Fatalf("Failed to read password: %v", err)
+ }
+ return string(line)
+}
diff --git a/vendor/github.com/rclone/rclone/fs/config/config_read_password_unsupported.go b/vendor/github.com/rclone/rclone/fs/config/config_read_password_unsupported.go
new file mode 100644
index 00000000000..eb762448ce3
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/config/config_read_password_unsupported.go
@@ -0,0 +1,12 @@
+// ReadPassword for OSes which are not supported by golang.org/x/crypto/ssh/terminal
+// See https://github.com/golang/go/issues/14441 - plan9
+// https://github.com/golang/go/issues/13085 - solaris
+
+// +build solaris plan9
+
+package config
+
+// ReadPassword reads a password, echoing it to the terminal.
+func ReadPassword() string {
+ return ReadLine()
+}
diff --git a/vendor/github.com/rclone/rclone/fs/config/config_unix.go b/vendor/github.com/rclone/rclone/fs/config/config_unix.go
new file mode 100644
index 00000000000..8db2c4c2bbe
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/config/config_unix.go
@@ -0,0 +1,37 @@
+// Read, write and edit the config file
+// Unix specific functions.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package config
+
+import (
+ "os"
+ "os/user"
+ "strconv"
+ "syscall"
+
+ "github.com/rclone/rclone/fs"
+)
+
+// attemptCopyGroup tries to keep the group the same. The user will be
+// the one currently running this process.
+func attemptCopyGroup(fromPath, toPath string) {
+ info, err := os.Stat(fromPath)
+ if err != nil || info.Sys() == nil {
+ return
+ }
+ if stat, ok := info.Sys().(*syscall.Stat_t); ok {
+ uid := int(stat.Uid)
+ // prefer self over previous owner of file, because it has a higher chance
+ // of success
+ if user, err := user.Current(); err == nil {
+ if tmpUID, err := strconv.Atoi(user.Uid); err == nil {
+ uid = tmpUID
+ }
+ }
+ if err = os.Chown(toPath, uid, int(stat.Gid)); err != nil {
+ fs.Debugf(nil, "Failed to keep previous owner of config file: %v", err)
+ }
+ }
+}
diff --git a/vendor/github.com/rclone/rclone/fs/config/configmap/configmap.go b/vendor/github.com/rclone/rclone/fs/config/configmap/configmap.go
new file mode 100644
index 00000000000..2d1267a22af
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/config/configmap/configmap.go
@@ -0,0 +1,86 @@
+// Package configmap provides an abstraction for reading and writing config
+package configmap
+
+// Getter provides an interface to get config items
+type Getter interface {
+ // Get should get an item with the key passed in and return
+ // the value.
If the item is found then it should return true, + // otherwise false. + Get(key string) (value string, ok bool) +} + +// Setter provides an interface to set config items +type Setter interface { + // Set should set an item into persistent config store. + Set(key, value string) +} + +// Mapper provides an interface to read and write config +type Mapper interface { + Getter + Setter +} + +// Map provides a wrapper around multiple Setter and +// Getter interfaces. +type Map struct { + setters []Setter + getters []Getter +} + +// New returns an empty Map +func New() *Map { + return &Map{} +} + +// AddGetter appends a getter onto the end of the getters +func (c *Map) AddGetter(getter Getter) *Map { + c.getters = append(c.getters, getter) + return c +} + +// AddGetters appends multiple getters onto the end of the getters +func (c *Map) AddGetters(getters ...Getter) *Map { + c.getters = append(c.getters, getters...) + return c +} + +// AddSetter appends a setter onto the end of the setters +func (c *Map) AddSetter(setter Setter) *Map { + c.setters = append(c.setters, setter) + return c +} + +// Get gets an item with the key passed in and return the value from +// the first getter. If the item is found then it returns true, +// otherwise false. +func (c *Map) Get(key string) (value string, ok bool) { + for _, do := range c.getters { + value, ok = do.Get(key) + if ok { + return value, ok + } + } + return "", false +} + +// Set sets an item into all the stored setters. +func (c *Map) Set(key, value string) { + for _, do := range c.setters { + do.Set(key, value) + } +} + +// Simple is a simple Mapper for testing +type Simple map[string]string + +// Get the value +func (c Simple) Get(key string) (value string, ok bool) { + value, ok = c[key] + return value, ok +} + +// Set the value +func (c Simple) Set(key, value string) { + c[key] = value +} diff --git a/vendor/github.com/rclone/rclone/fs/config/configstruct/configstruct.go b/vendor/github.com/rclone/rclone/fs/config/configstruct/configstruct.go new file mode 100644 index 00000000000..181f25b5e03 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/config/configstruct/configstruct.go @@ -0,0 +1,127 @@ +// Package configstruct parses unstructured maps into structures +package configstruct + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs/config/configmap" +) + +var matchUpper = regexp.MustCompile("([A-Z]+)") + +// camelToSnake converts CamelCase to snake_case +func camelToSnake(in string) string { + out := matchUpper.ReplaceAllString(in, "_$1") + out = strings.ToLower(out) + out = strings.Trim(out, "_") + return out +} + +// StringToInterface turns in into an interface{} the same type as def +func StringToInterface(def interface{}, in string) (newValue interface{}, err error) { + typ := reflect.TypeOf(def) + switch typ.Kind() { + case reflect.String: + // Pass strings unmodified + return in, nil + } + // Otherwise parse with Sscanln + // + // This means any types we use here must implement fmt.Scanner + o := reflect.New(typ) + n, err := fmt.Sscanln(in, o.Interface()) + if err != nil { + return newValue, errors.Wrapf(err, "parsing %q as %T failed", in, def) + } + if n != 1 { + return newValue, errors.New("no items parsed") + } + return o.Elem().Interface(), nil +} + +// Item describes a single entry in the options structure +type Item struct { + Name string // snake_case + Field string // CamelCase + Num int // number of the field in the struct + Value interface{} +} + +// Items parses 
the opt struct and returns a slice of Item objects. +// +// opt must be a pointer to a struct. The struct should have entirely +// public fields. +// +// The config_name is looked up in a struct tag called "config" or if +// not found is the field name converted from CamelCase to snake_case. +func Items(opt interface{}) (items []Item, err error) { + def := reflect.ValueOf(opt) + if def.Kind() != reflect.Ptr { + return nil, errors.New("argument must be a pointer") + } + def = def.Elem() // indirect the pointer + if def.Kind() != reflect.Struct { + return nil, errors.New("argument must be a pointer to a struct") + } + defType := def.Type() + for i := 0; i < def.NumField(); i++ { + field := defType.Field(i) + fieldName := field.Name + configName, ok := field.Tag.Lookup("config") + if !ok { + configName = camelToSnake(fieldName) + } + defaultItem := Item{ + Name: configName, + Field: fieldName, + Num: i, + Value: def.Field(i).Interface(), + } + items = append(items, defaultItem) + } + return items, nil +} + +// Set interprets the field names in defaults and looks up config +// values in the config passed in. Any values found in config will be +// set in the opt structure. +// +// opt must be a pointer to a struct. The struct should have entirely +// public fields. The field names are converted from CamelCase to +// snake_case and looked up in the config supplied or a +// `config:"field_name"` is looked up. +// +// If items are found then they are converted from string to native +// types and set in opt. +// +// All the field types in the struct must implement fmt.Scanner. +func Set(config configmap.Getter, opt interface{}) (err error) { + defaultItems, err := Items(opt) + if err != nil { + return err + } + defStruct := reflect.ValueOf(opt).Elem() + for _, defaultItem := range defaultItems { + newValue := defaultItem.Value + if configValue, ok := config.Get(defaultItem.Name); ok { + var newNewValue interface{} + newNewValue, err = StringToInterface(newValue, configValue) + if err != nil { + // Mask errors if setting an empty string as + // it isn't valid for all types. This makes + // empty string be the equivalent of unset. + if configValue != "" { + return errors.Wrapf(err, "couldn't parse config item %q = %q as %T", defaultItem.Name, configValue, defaultItem.Value) + } + } else { + newValue = newNewValue + } + } + defStruct.Field(defaultItem.Num).Set(reflect.ValueOf(newValue)) + } + return nil +} diff --git a/vendor/github.com/rclone/rclone/fs/config/obscure/obscure.go b/vendor/github.com/rclone/rclone/fs/config/obscure/obscure.go new file mode 100644 index 00000000000..2f2261f3d3a --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/config/obscure/obscure.go @@ -0,0 +1,94 @@ +// Package obscure contains the Obscure and Reveal commands +package obscure + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "io" + "log" + + "github.com/pkg/errors" +) + +// crypt internals +var ( + cryptKey = []byte{ + 0x9c, 0x93, 0x5b, 0x48, 0x73, 0x0a, 0x55, 0x4d, + 0x6b, 0xfd, 0x7c, 0x63, 0xc8, 0x86, 0xa9, 0x2b, + 0xd3, 0x90, 0x19, 0x8e, 0xb8, 0x12, 0x8a, 0xfb, + 0xf4, 0xde, 0x16, 0x2b, 0x8b, 0x95, 0xf6, 0x38, + } + cryptBlock cipher.Block + cryptRand = rand.Reader +) + +// crypt transforms in to out using iv under AES-CTR. +// +// in and out may be the same buffer. 
+// +// Note encryption and decryption are the same operation +func crypt(out, in, iv []byte) error { + if cryptBlock == nil { + var err error + cryptBlock, err = aes.NewCipher(cryptKey) + if err != nil { + return err + } + } + stream := cipher.NewCTR(cryptBlock, iv) + stream.XORKeyStream(out, in) + return nil +} + +// Obscure a value +// +// This is done by encrypting with AES-CTR +func Obscure(x string) (string, error) { + plaintext := []byte(x) + ciphertext := make([]byte, aes.BlockSize+len(plaintext)) + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(cryptRand, iv); err != nil { + return "", errors.Wrap(err, "failed to read iv") + } + if err := crypt(ciphertext[aes.BlockSize:], plaintext, iv); err != nil { + return "", errors.Wrap(err, "encrypt failed") + } + return base64.RawURLEncoding.EncodeToString(ciphertext), nil +} + +// MustObscure obscures a value, exiting with a fatal error if it failed +func MustObscure(x string) string { + out, err := Obscure(x) + if err != nil { + log.Fatalf("Obscure failed: %v", err) + } + return out +} + +// Reveal an obscured value +func Reveal(x string) (string, error) { + ciphertext, err := base64.RawURLEncoding.DecodeString(x) + if err != nil { + return "", errors.Wrap(err, "base64 decode failed when revealing password - is it obscured?") + } + if len(ciphertext) < aes.BlockSize { + return "", errors.New("input too short when revealing password - is it obscured?") + } + buf := ciphertext[aes.BlockSize:] + iv := ciphertext[:aes.BlockSize] + if err := crypt(buf, buf, iv); err != nil { + return "", errors.Wrap(err, "decrypt failed when revealing password - is it obscured?") + } + return string(buf), nil +} + +// MustReveal reveals an obscured value, exiting with a fatal error if it failed +func MustReveal(x string) string { + out, err := Reveal(x) + if err != nil { + log.Fatalf("Reveal failed: %v", err) + } + return out +} diff --git a/vendor/github.com/rclone/rclone/fs/config/rc.go b/vendor/github.com/rclone/rclone/fs/config/rc.go new file mode 100644 index 00000000000..3a08448cea5 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/config/rc.go @@ -0,0 +1,188 @@ +package config + +import ( + "context" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/rc" +) + +func init() { + rc.Add(rc.Call{ + Path: "config/dump", + Fn: rcDump, + Title: "Dumps the config file.", + AuthRequired: true, + Help: ` +Returns a JSON object: +- key: value + +Where keys are remote names and values are the config parameters. + +See the [config dump command](/commands/rclone_config_dump/) command for more information on the above. +`, + }) +} + +// Return the config file dump +func rcDump(ctx context.Context, in rc.Params) (out rc.Params, err error) { + return DumpRcBlob(), nil +} + +func init() { + rc.Add(rc.Call{ + Path: "config/get", + Fn: rcGet, + Title: "Get a remote in the config file.", + AuthRequired: true, + Help: ` +Parameters: + +- name - name of remote to get + +See the [config dump command](/commands/rclone_config_dump/) command for more information on the above. 
+`, + }) +} + +// Return the config file get +func rcGet(ctx context.Context, in rc.Params) (out rc.Params, err error) { + name, err := in.GetString("name") + if err != nil { + return nil, err + } + return DumpRcRemote(name), nil +} + +func init() { + rc.Add(rc.Call{ + Path: "config/listremotes", + Fn: rcListRemotes, + Title: "Lists the remotes in the config file.", + AuthRequired: true, + Help: ` +Returns +- remotes - array of remote names + +See the [listremotes command](/commands/rclone_listremotes/) command for more information on the above. +`, + }) +} + +// Return the a list of remotes in the config file +func rcListRemotes(ctx context.Context, in rc.Params) (out rc.Params, err error) { + var remotes = []string{} + for _, remote := range getConfigData().GetSectionList() { + remotes = append(remotes, remote) + } + out = rc.Params{ + "remotes": remotes, + } + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "config/providers", + Fn: rcProviders, + Title: "Shows how providers are configured in the config file.", + AuthRequired: true, + Help: ` +Returns a JSON object: +- providers - array of objects + +See the [config providers command](/commands/rclone_config_providers/) command for more information on the above. +`, + }) +} + +// Return the config file providers +func rcProviders(ctx context.Context, in rc.Params) (out rc.Params, err error) { + out = rc.Params{ + "providers": fs.Registry, + } + return out, nil +} + +func init() { + for _, name := range []string{"create", "update", "password"} { + name := name + extraHelp := "" + if name == "create" { + extraHelp = "- type - type of the new remote\n" + } + if name == "create" || name == "update" { + extraHelp += "- obscure - optional bool - forces obscuring of passwords\n" + extraHelp += "- noObscure - optional bool - forces passwords not to be obscured\n" + } + rc.Add(rc.Call{ + Path: "config/" + name, + AuthRequired: true, + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcConfig(ctx, in, name) + }, + Title: name + " the config for a remote.", + Help: `This takes the following parameters + +- name - name of remote +- parameters - a map of \{ "key": "value" \} pairs +` + extraHelp + ` + +See the [config ` + name + ` command](/commands/rclone_config_` + name + `/) command for more information on the above.`, + }) + } +} + +// Manipulate the config file +func rcConfig(ctx context.Context, in rc.Params, what string) (out rc.Params, err error) { + name, err := in.GetString("name") + if err != nil { + return nil, err + } + parameters := rc.Params{} + err = in.GetStruct("parameters", ¶meters) + if err != nil { + return nil, err + } + doObscure, _ := in.GetBool("obscure") + noObscure, _ := in.GetBool("noObscure") + switch what { + case "create": + remoteType, err := in.GetString("type") + if err != nil { + return nil, err + } + return nil, CreateRemote(ctx, name, remoteType, parameters, doObscure, noObscure) + case "update": + return nil, UpdateRemote(ctx, name, parameters, doObscure, noObscure) + case "password": + return nil, PasswordRemote(ctx, name, parameters) + } + panic("unknown rcConfig type") +} + +func init() { + rc.Add(rc.Call{ + Path: "config/delete", + Fn: rcDelete, + Title: "Delete a remote in the config file.", + AuthRequired: true, + Help: ` +Parameters: + +- name - name of remote to delete + +See the [config delete command](/commands/rclone_config_delete/) command for more information on the above. 
+`, + }) +} + +// Return the config file delete +func rcDelete(ctx context.Context, in rc.Params) (out rc.Params, err error) { + name, err := in.GetString("name") + if err != nil { + return nil, err + } + DeleteRemote(name) + return nil, nil +} diff --git a/vendor/github.com/rclone/rclone/fs/config_list.go b/vendor/github.com/rclone/rclone/fs/config_list.go new file mode 100644 index 00000000000..31ab0eba098 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/config_list.go @@ -0,0 +1,94 @@ +package fs + +import ( + "bytes" + "encoding/csv" + "fmt" +) + +// CommaSepList is a comma separated config value +// It uses the encoding/csv rules for quoting and escaping +type CommaSepList []string + +// SpaceSepList is a space separated config value +// It uses the encoding/csv rules for quoting and escaping +type SpaceSepList []string + +type genericList []string + +func (l CommaSepList) String() string { + return genericList(l).string(',') +} + +// Set the List entries +func (l *CommaSepList) Set(s string) error { + return (*genericList)(l).set(',', []byte(s)) +} + +// Type of the value +func (CommaSepList) Type() string { + return "CommaSepList" +} + +// Scan implements the fmt.Scanner interface +func (l *CommaSepList) Scan(s fmt.ScanState, ch rune) error { + return (*genericList)(l).scan(',', s, ch) +} + +func (l SpaceSepList) String() string { + return genericList(l).string(' ') +} + +// Set the List entries +func (l *SpaceSepList) Set(s string) error { + return (*genericList)(l).set(' ', []byte(s)) +} + +// Type of the value +func (SpaceSepList) Type() string { + return "SpaceSepList" +} + +// Scan implements the fmt.Scanner interface +func (l *SpaceSepList) Scan(s fmt.ScanState, ch rune) error { + return (*genericList)(l).scan(' ', s, ch) +} + +func (gl genericList) string(sep rune) string { + var buf bytes.Buffer + w := csv.NewWriter(&buf) + w.Comma = sep + err := w.Write(gl) + if err != nil { + // can only happen if w.Comma is invalid + panic(err) + } + w.Flush() + return string(bytes.TrimSpace(buf.Bytes())) +} + +func (gl *genericList) set(sep rune, b []byte) error { + if len(b) == 0 { + *gl = nil + return nil + } + r := csv.NewReader(bytes.NewReader(b)) + r.Comma = sep + + record, err := r.Read() + switch _err := err.(type) { + case nil: + *gl = record + case *csv.ParseError: + err = _err.Err // remove line numbers from the error message + } + return err +} + +func (gl *genericList) scan(sep rune, s fmt.ScanState, ch rune) error { + token, err := s.Token(true, func(rune) bool { return true }) + if err != nil { + return err + } + return gl.set(sep, bytes.TrimSpace(token)) +} diff --git a/vendor/github.com/rclone/rclone/fs/cutoffmode.go b/vendor/github.com/rclone/rclone/fs/cutoffmode.go new file mode 100644 index 00000000000..19ec2b0c4b5 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/cutoffmode.go @@ -0,0 +1,49 @@ +package fs + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" +) + +// CutoffMode describes the possible delete modes in the config +type CutoffMode byte + +// MaxTransferMode constants +const ( + CutoffModeHard CutoffMode = iota + CutoffModeSoft + CutoffModeCautious + CutoffModeDefault = CutoffModeHard +) + +var cutoffModeToString = []string{ + CutoffModeHard: "HARD", + CutoffModeSoft: "SOFT", + CutoffModeCautious: "CAUTIOUS", +} + +// String turns a LogLevel into a string +func (m CutoffMode) String() string { + if m >= CutoffMode(len(cutoffModeToString)) { + return fmt.Sprintf("CutoffMode(%d)", m) + } + return cutoffModeToString[m] +} + +// Set a LogLevel 
+func (m *CutoffMode) Set(s string) error { + for n, name := range cutoffModeToString { + if s != "" && name == strings.ToUpper(s) { + *m = CutoffMode(n) + return nil + } + } + return errors.Errorf("Unknown cutoff mode %q", s) +} + +// Type of the value +func (m *CutoffMode) Type() string { + return "string" +} diff --git a/vendor/github.com/rclone/rclone/fs/deletemode.go b/vendor/github.com/rclone/rclone/fs/deletemode.go new file mode 100644 index 00000000000..9e16373d91c --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/deletemode.go @@ -0,0 +1,14 @@ +package fs + +// DeleteMode describes the possible delete modes in the config +type DeleteMode byte + +// DeleteMode constants +const ( + DeleteModeOff DeleteMode = iota + DeleteModeBefore + DeleteModeDuring + DeleteModeAfter + DeleteModeOnly + DeleteModeDefault = DeleteModeAfter +) diff --git a/vendor/github.com/rclone/rclone/fs/dir.go b/vendor/github.com/rclone/rclone/fs/dir.go new file mode 100644 index 00000000000..d876d89a61a --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/dir.go @@ -0,0 +1,101 @@ +package fs + +import ( + "context" + "time" +) + +// Dir describes an unspecialized directory for directory/container/bucket lists +type Dir struct { + remote string // name of the directory + modTime time.Time // modification or creation time - IsZero for unknown + size int64 // size of directory and contents or -1 if unknown + items int64 // number of objects or -1 for unknown + id string // optional ID +} + +// NewDir creates an unspecialized Directory object +func NewDir(remote string, modTime time.Time) *Dir { + return &Dir{ + remote: remote, + modTime: modTime, + size: -1, + items: -1, + } +} + +// NewDirCopy creates an unspecialized copy of the Directory object passed in +func NewDirCopy(ctx context.Context, d Directory) *Dir { + return &Dir{ + remote: d.Remote(), + modTime: d.ModTime(ctx), + size: d.Size(), + items: d.Items(), + id: d.ID(), + } +} + +// String returns the name +func (d *Dir) String() string { + return d.remote +} + +// Remote returns the remote path +func (d *Dir) Remote() string { + return d.remote +} + +// SetRemote sets the remote +func (d *Dir) SetRemote(remote string) *Dir { + d.remote = remote + return d +} + +// ID gets the optional ID +func (d *Dir) ID() string { + return d.id +} + +// SetID sets the optional ID +func (d *Dir) SetID(id string) *Dir { + d.id = id + return d +} + +// ModTime returns the modification date of the file +// It should return a best guess if one isn't available +func (d *Dir) ModTime(ctx context.Context) time.Time { + if !d.modTime.IsZero() { + return d.modTime + } + return time.Now() +} + +// Size returns the size of the file +func (d *Dir) Size() int64 { + return d.size +} + +// SetSize sets the size of the directory +func (d *Dir) SetSize(size int64) *Dir { + d.size = size + return d +} + +// Items returns the count of items in this directory or this +// directory and subdirectories if known, -1 for unknown +func (d *Dir) Items() int64 { + return d.items +} + +// SetItems sets the number of items in the directory +func (d *Dir) SetItems(items int64) *Dir { + d.items = items + return d +} + +// Check interfaces +var ( + _ DirEntry = (*Dir)(nil) + _ Directory = (*Dir)(nil) +) diff --git a/vendor/github.com/rclone/rclone/fs/direntries.go b/vendor/github.com/rclone/rclone/fs/direntries.go new file mode 100644 index 00000000000..72839a417b7 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/direntries.go @@ -0,0 +1,106 @@ +package fs + +import "fmt" + +// DirEntries is 
a slice of Object or *Dir +type DirEntries []DirEntry + +// Len is part of sort.Interface. +func (ds DirEntries) Len() int { + return len(ds) +} + +// Swap is part of sort.Interface. +func (ds DirEntries) Swap(i, j int) { + ds[i], ds[j] = ds[j], ds[i] +} + +// Less is part of sort.Interface. +func (ds DirEntries) Less(i, j int) bool { + return CompareDirEntries(ds[i], ds[j]) < 0 +} + +// ForObject runs the function supplied on every object in the entries +func (ds DirEntries) ForObject(fn func(o Object)) { + for _, entry := range ds { + o, ok := entry.(Object) + if ok { + fn(o) + } + } +} + +// ForObjectError runs the function supplied on every object in the entries +func (ds DirEntries) ForObjectError(fn func(o Object) error) error { + for _, entry := range ds { + o, ok := entry.(Object) + if ok { + err := fn(o) + if err != nil { + return err + } + } + } + return nil +} + +// ForDir runs the function supplied on every Directory in the entries +func (ds DirEntries) ForDir(fn func(dir Directory)) { + for _, entry := range ds { + dir, ok := entry.(Directory) + if ok { + fn(dir) + } + } +} + +// ForDirError runs the function supplied on every Directory in the entries +func (ds DirEntries) ForDirError(fn func(dir Directory) error) error { + for _, entry := range ds { + dir, ok := entry.(Directory) + if ok { + err := fn(dir) + if err != nil { + return err + } + } + } + return nil +} + +// DirEntryType returns a string description of the DirEntry, either +// "object", "directory" or "unknown type XXX" +func DirEntryType(d DirEntry) string { + switch d.(type) { + case Object: + return "object" + case Directory: + return "directory" + } + return fmt.Sprintf("unknown type %T", d) +} + +// CompareDirEntries returns 1 if a > b, 0 if a == b and -1 if a < b +// If two dir entries have the same name, compare their types (directories are before objects) +func CompareDirEntries(a, b DirEntry) int { + aName := a.Remote() + bName := b.Remote() + + if aName > bName { + return 1 + } else if aName < bName { + return -1 + } + + typeA := DirEntryType(a) + typeB := DirEntryType(b) + + // same name, compare types + if typeA > typeB { + return 1 + } else if typeA < typeB { + return -1 + } + + return 0 +} diff --git a/vendor/github.com/rclone/rclone/fs/dirtree/dirtree.go b/vendor/github.com/rclone/rclone/fs/dirtree/dirtree.go new file mode 100644 index 00000000000..64f5b2512a6 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/dirtree/dirtree.go @@ -0,0 +1,209 @@ +// Package dirtree contains the DirTree type which is used for +// building filesystem heirachies in memory. +package dirtree + +import ( + "bytes" + "fmt" + "path" + "sort" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/lib/errors" +) + +// DirTree is a map of directories to entries +type DirTree map[string]fs.DirEntries + +// New returns a fresh DirTree +func New() DirTree { + return make(DirTree) +} + +// parentDir finds the parent directory of path +func parentDir(entryPath string) string { + dirPath := path.Dir(entryPath) + if dirPath == "." 
{ + dirPath = "" + } + return dirPath +} + +// Add an entry to the tree +// it doesn't create parents +func (dt DirTree) Add(entry fs.DirEntry) { + dirPath := parentDir(entry.Remote()) + dt[dirPath] = append(dt[dirPath], entry) +} + +// AddDir adds a directory entry to the tree +// this creates the directory itself if required +// it doesn't create parents +func (dt DirTree) AddDir(entry fs.DirEntry) { + dt.Add(entry) + // create the directory itself if it doesn't exist already + dirPath := entry.Remote() + if _, ok := dt[dirPath]; !ok { + dt[dirPath] = nil + } +} + +// AddEntry adds the entry and creates the parents for it regardless +// of whether it is a file or a directory. +func (dt DirTree) AddEntry(entry fs.DirEntry) { + switch entry.(type) { + case fs.Directory: + dt.AddDir(entry) + case fs.Object: + dt.Add(entry) + default: + panic("unknown entry type") + } + remoteParent := parentDir(entry.Remote()) + dt.CheckParent("", remoteParent) +} + +// Find returns the DirEntry for filePath or nil if not found +func (dt DirTree) Find(filePath string) (parentPath string, entry fs.DirEntry) { + parentPath = parentDir(filePath) + for _, entry := range dt[parentPath] { + if entry.Remote() == filePath { + return parentPath, entry + } + } + return parentPath, nil +} + +// CheckParent checks that dirPath has a *Dir in its parent +func (dt DirTree) CheckParent(root, dirPath string) { + if dirPath == root { + return + } + parentPath, entry := dt.Find(dirPath) + if entry != nil { + return + } + dt[parentPath] = append(dt[parentPath], fs.NewDir(dirPath, time.Now())) + dt.CheckParent(root, parentPath) +} + +// CheckParents checks every directory in the tree has *Dir in its parent +func (dt DirTree) CheckParents(root string) { + for dirPath := range dt { + dt.CheckParent(root, dirPath) + } +} + +// Sort sorts all the Entries +func (dt DirTree) Sort() { + for _, entries := range dt { + sort.Stable(entries) + } +} + +// Dirs returns the directories in sorted order +func (dt DirTree) Dirs() (dirNames []string) { + for dirPath := range dt { + dirNames = append(dirNames, dirPath) + } + sort.Strings(dirNames) + return dirNames +} + +// Prune remove directories from a directory tree. dirNames contains +// all directories to remove as keys, with true as values. dirNames +// will be modified in the function. +func (dt DirTree) Prune(dirNames map[string]bool) error { + // We use map[string]bool to avoid recursion (and potential + // stack exhaustion). + + // First we need delete directories from their parents. + for dName, remove := range dirNames { + if !remove { + // Currently all values should be + // true, therefore this should not + // happen. But this makes function + // more predictable. + fs.Infof(dName, "Directory in the map for prune, but the value is false") + continue + } + if dName == "" { + // if dName is root, do nothing (no parent exist) + continue + } + parent := parentDir(dName) + // It may happen that dt does not have a dName key, + // since directory was excluded based on a filter. In + // such case the loop will be skipped. 
+ for i, entry := range dt[parent] { + switch x := entry.(type) { + case fs.Directory: + if x.Remote() == dName { + // the slice is not sorted yet + // to delete item + // a) replace it with the last one + dt[parent][i] = dt[parent][len(dt[parent])-1] + // b) remove last + dt[parent] = dt[parent][:len(dt[parent])-1] + // we modify a slice within a loop, but we stop + // iterating immediately + break + } + case fs.Object: + // do nothing + default: + return errors.Errorf("unknown object type %T", entry) + + } + } + } + + for len(dirNames) > 0 { + // According to golang specs, if new keys were added + // during range iteration, they may be skipped. + for dName, remove := range dirNames { + if !remove { + fs.Infof(dName, "Directory in the map for prune, but the value is false") + continue + } + // First, add all subdirectories to dirNames. + + // It may happen that dt[dName] does not exist. + // If so, the loop will be skipped. + for _, entry := range dt[dName] { + switch x := entry.(type) { + case fs.Directory: + excludeDir := x.Remote() + dirNames[excludeDir] = true + case fs.Object: + // do nothing + default: + return errors.Errorf("unknown object type %T", entry) + + } + } + // Then remove current directory from DirTree + delete(dt, dName) + // and from dirNames + delete(dirNames, dName) + } + } + return nil +} + +// String emits a simple representation of the DirTree +func (dt DirTree) String() string { + out := new(bytes.Buffer) + for _, dir := range dt.Dirs() { + _, _ = fmt.Fprintf(out, "%s/\n", dir) + for _, entry := range dt[dir] { + flag := "" + if _, ok := entry.(fs.Directory); ok { + flag = "/" + } + _, _ = fmt.Fprintf(out, " %s%s\n", path.Base(entry.Remote()), flag) + } + } + return out.String() +} diff --git a/vendor/github.com/rclone/rclone/fs/driveletter/driveletter.go b/vendor/github.com/rclone/rclone/fs/driveletter/driveletter.go new file mode 100644 index 00000000000..322b244a4f8 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/driveletter/driveletter.go @@ -0,0 +1,14 @@ +// Package driveletter returns whether a name is a valid drive letter + +// +build !windows + +package driveletter + +// IsDriveLetter returns a bool indicating whether name is a valid +// Windows drive letter +// +// On non windows platforms we don't have drive letters so we always +// return false +func IsDriveLetter(name string) bool { + return false +} diff --git a/vendor/github.com/rclone/rclone/fs/driveletter/driveletter_windows.go b/vendor/github.com/rclone/rclone/fs/driveletter/driveletter_windows.go new file mode 100644 index 00000000000..7f63b94d7f3 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/driveletter/driveletter_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package driveletter + +// IsDriveLetter returns a bool indicating whether name is a valid +// Windows drive letter +func IsDriveLetter(name string) bool { + if len(name) != 1 { + return false + } + c := name[0] + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} diff --git a/vendor/github.com/rclone/rclone/fs/dump.go b/vendor/github.com/rclone/rclone/fs/dump.go new file mode 100644 index 00000000000..5d89b5abd2b --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/dump.go @@ -0,0 +1,93 @@ +package fs + +import ( + "fmt" + "strings" + + "github.com/pkg/errors" +) + +// DumpFlags describes the Dump options in force +type DumpFlags int + +// DumpFlags definitions +const ( + DumpHeaders DumpFlags = 1 << iota + DumpBodies + DumpRequests + DumpResponses + DumpAuth + DumpFilters + DumpGoRoutines + DumpOpenFiles +) 
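DumpFlags is a bitmask built with `1 << iota`, so dump categories combine with bitwise OR and are tested with AND; the String and Set methods below rely on exactly that. A minimal standalone sketch of the idiom (type and constants duplicated here purely for illustration, not part of the vendored code):

```go
package main

import "fmt"

// Illustrative copy of the bitmask pattern used by DumpFlags.
type DumpFlags int

const (
	DumpHeaders DumpFlags = 1 << iota // 1
	DumpBodies                        // 2
	DumpRequests                      // 4
)

func main() {
	f := DumpHeaders | DumpBodies   // combine categories with OR
	fmt.Println(f&DumpHeaders != 0) // true: membership test with AND
	f &^= DumpHeaders               // clear a flag with AND NOT, as String does
	fmt.Println(f&DumpHeaders != 0) // false
}
```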
+ +var dumpFlags = []struct { + flag DumpFlags + name string +}{ + {DumpHeaders, "headers"}, + {DumpBodies, "bodies"}, + {DumpRequests, "requests"}, + {DumpResponses, "responses"}, + {DumpAuth, "auth"}, + {DumpFilters, "filters"}, + {DumpGoRoutines, "goroutines"}, + {DumpOpenFiles, "openfiles"}, +} + +// DumpFlagsList is a list of dump flags used in the help +var DumpFlagsList string + +func init() { + // calculate the dump flags list + var out []string + for _, info := range dumpFlags { + out = append(out, info.name) + } + DumpFlagsList = strings.Join(out, ",") +} + +// String turns a DumpFlags into a string +func (f DumpFlags) String() string { + var out []string + for _, info := range dumpFlags { + if f&info.flag != 0 { + out = append(out, info.name) + f &^= info.flag + } + } + if f != 0 { + out = append(out, fmt.Sprintf("Unknown-0x%X", int(f))) + } + return strings.Join(out, ",") +} + +// Set a DumpFlags as a comma separated list of flags +func (f *DumpFlags) Set(s string) error { + var flags DumpFlags + parts := strings.Split(s, ",") + for _, part := range parts { + found := false + part = strings.ToLower(strings.TrimSpace(part)) + if part == "" { + continue + } + for _, info := range dumpFlags { + if part == info.name { + found = true + flags |= info.flag + } + } + if !found { + return errors.Errorf("Unknown dump flag %q", part) + } + } + *f = flags + return nil +} + +// Type of the value +func (f *DumpFlags) Type() string { + return "DumpFlags" +} diff --git a/vendor/github.com/rclone/rclone/fs/filter/filter.go b/vendor/github.com/rclone/rclone/fs/filter/filter.go new file mode 100644 index 00000000000..1714dabf51d --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/filter/filter.go @@ -0,0 +1,630 @@ +// Package filter controls the filtering of files +package filter + +import ( + "bufio" + "context" + "fmt" + "log" + "os" + "path" + "regexp" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "golang.org/x/sync/errgroup" +) + +// This is the globally active filter +// +// This is accessed through GetConfig and AddConfig +var globalConfig = mustNewFilter(nil) + +// rule is one filter rule +type rule struct { + Include bool + Regexp *regexp.Regexp +} + +// Match returns true if rule matches path +func (r *rule) Match(path string) bool { + return r.Regexp.MatchString(path) +} + +// String the rule +func (r *rule) String() string { + c := "-" + if r.Include { + c = "+" + } + return fmt.Sprintf("%s %s", c, r.Regexp.String()) +} + +// rules is a slice of rules +type rules struct { + rules []rule + existing map[string]struct{} +} + +// add adds a rule if it doesn't exist already +func (rs *rules) add(Include bool, re *regexp.Regexp) { + if rs.existing == nil { + rs.existing = make(map[string]struct{}) + } + newRule := rule{ + Include: Include, + Regexp: re, + } + newRuleString := newRule.String() + if _, ok := rs.existing[newRuleString]; ok { + return // rule already exists + } + rs.rules = append(rs.rules, newRule) + rs.existing[newRuleString] = struct{}{} +} + +// clear clears all the rules +func (rs *rules) clear() { + rs.rules = nil + rs.existing = nil +} + +// len returns the number of rules +func (rs *rules) len() int { + return len(rs.rules) +} + +// FilesMap describes the map of files to transfer +type FilesMap map[string]struct{} + +// Opt configures the filter +type Opt struct { + DeleteExcluded bool + FilterRule []string + FilterFrom []string + ExcludeRule []string + ExcludeFrom []string + ExcludeFile string + IncludeRule []string + IncludeFrom 
[]string + FilesFrom []string + FilesFromRaw []string + MinAge fs.Duration + MaxAge fs.Duration + MinSize fs.SizeSuffix + MaxSize fs.SizeSuffix + IgnoreCase bool +} + +// DefaultOpt is the default config for the filter +var DefaultOpt = Opt{ + MinAge: fs.DurationOff, + MaxAge: fs.DurationOff, + MinSize: fs.SizeSuffix(-1), + MaxSize: fs.SizeSuffix(-1), +} + +// Filter describes any filtering in operation +type Filter struct { + Opt Opt + ModTimeFrom time.Time + ModTimeTo time.Time + fileRules rules + dirRules rules + files FilesMap // files if filesFrom + dirs FilesMap // dirs from filesFrom +} + +// NewFilter parses the command line options and creates a Filter +// object. If opt is nil, then DefaultOpt will be used +func NewFilter(opt *Opt) (f *Filter, err error) { + f = &Filter{} + + // Make a copy of the options + if opt != nil { + f.Opt = *opt + } else { + f.Opt = DefaultOpt + } + + // Filter flags + if f.Opt.MinAge.IsSet() { + f.ModTimeTo = time.Now().Add(-time.Duration(f.Opt.MinAge)) + fs.Debugf(nil, "--min-age %v to %v", f.Opt.MinAge, f.ModTimeTo) + } + if f.Opt.MaxAge.IsSet() { + f.ModTimeFrom = time.Now().Add(-time.Duration(f.Opt.MaxAge)) + if !f.ModTimeTo.IsZero() && f.ModTimeTo.Before(f.ModTimeFrom) { + log.Fatal("filter: --min-age can't be larger than --max-age") + } + fs.Debugf(nil, "--max-age %v to %v", f.Opt.MaxAge, f.ModTimeFrom) + } + + addImplicitExclude := false + foundExcludeRule := false + + for _, rule := range f.Opt.IncludeRule { + err = f.Add(true, rule) + if err != nil { + return nil, err + } + addImplicitExclude = true + } + for _, rule := range f.Opt.IncludeFrom { + err := forEachLine(rule, false, func(line string) error { + return f.Add(true, line) + }) + if err != nil { + return nil, err + } + addImplicitExclude = true + } + for _, rule := range f.Opt.ExcludeRule { + err = f.Add(false, rule) + if err != nil { + return nil, err + } + foundExcludeRule = true + } + for _, rule := range f.Opt.ExcludeFrom { + err := forEachLine(rule, false, func(line string) error { + return f.Add(false, line) + }) + if err != nil { + return nil, err + } + foundExcludeRule = true + } + + if addImplicitExclude && foundExcludeRule { + fs.Errorf(nil, "Using --filter is recommended instead of both --include and --exclude as the order they are parsed in is indeterminate") + } + + for _, rule := range f.Opt.FilterRule { + err = f.AddRule(rule) + if err != nil { + return nil, err + } + } + for _, rule := range f.Opt.FilterFrom { + err := forEachLine(rule, false, f.AddRule) + if err != nil { + return nil, err + } + } + + inActive := f.InActive() + + for _, rule := range f.Opt.FilesFrom { + if !inActive { + return nil, fmt.Errorf("The usage of --files-from overrides all other filters, it should be used alone or with --files-from-raw") + } + f.initAddFile() // init to show --files-from set even if no files within + err := forEachLine(rule, false, func(line string) error { + return f.AddFile(line) + }) + if err != nil { + return nil, err + } + } + + for _, rule := range f.Opt.FilesFromRaw { + // --files-from-raw can be used with --files-from, hence we do + // not need to get the value of f.InActive again + if !inActive { + return nil, fmt.Errorf("The usage of --files-from-raw overrides all other filters, it should be used alone or with --files-from") + } + f.initAddFile() // init to show --files-from set even if no files within + err := forEachLine(rule, true, func(line string) error { + return f.AddFile(line) + }) + if err != nil { + return nil, err + } + } + + if addImplicitExclude { + err = 
f.Add(false, "/**") + if err != nil { + return nil, err + } + } + if fs.GetConfig(context.Background()).Dump&fs.DumpFilters != 0 { + fmt.Println("--- start filters ---") + fmt.Println(f.DumpFilters()) + fmt.Println("--- end filters ---") + } + return f, nil +} + +func mustNewFilter(opt *Opt) *Filter { + f, err := NewFilter(opt) + if err != nil { + panic(err) + } + return f +} + +// addDirGlobs adds directory globs from the file glob passed in +func (f *Filter) addDirGlobs(Include bool, glob string) error { + for _, dirGlob := range globToDirGlobs(glob) { + // Don't add "/" as we always include the root + if dirGlob == "/" { + continue + } + dirRe, err := globToRegexp(dirGlob, f.Opt.IgnoreCase) + if err != nil { + return err + } + f.dirRules.add(Include, dirRe) + } + return nil +} + +// Add adds a filter rule with include or exclude status indicated +func (f *Filter) Add(Include bool, glob string) error { + isDirRule := strings.HasSuffix(glob, "/") + isFileRule := !isDirRule + if strings.Contains(glob, "**") { + isDirRule, isFileRule = true, true + } + re, err := globToRegexp(glob, f.Opt.IgnoreCase) + if err != nil { + return err + } + if isFileRule { + f.fileRules.add(Include, re) + // If include rule work out what directories are needed to scan + // if exclude rule, we can't rule anything out + // Unless it is `*` which matches everything + // NB ** and /** are DirRules + if Include || glob == "*" { + err = f.addDirGlobs(Include, glob) + if err != nil { + return err + } + } + } + if isDirRule { + f.dirRules.add(Include, re) + } + return nil +} + +// AddRule adds a filter rule with include/exclude indicated by the prefix +// +// These are +// +// + glob +// - glob +// ! +// +// '+' includes the glob, '-' excludes it and '!' resets the filter list +// +// Line comments may be introduced with '#' or ';' +func (f *Filter) AddRule(rule string) error { + switch { + case rule == "!": + f.Clear() + return nil + case strings.HasPrefix(rule, "- "): + return f.Add(false, rule[2:]) + case strings.HasPrefix(rule, "+ "): + return f.Add(true, rule[2:]) + } + return errors.Errorf("malformed rule %q", rule) +} + +// initAddFile creates f.files and f.dirs +func (f *Filter) initAddFile() { + if f.files == nil { + f.files = make(FilesMap) + f.dirs = make(FilesMap) + } +} + +// AddFile adds a single file to the files from list +func (f *Filter) AddFile(file string) error { + f.initAddFile() + file = strings.Trim(file, "/") + f.files[file] = struct{}{} + // Put all the parent directories into f.dirs + for { + file = path.Dir(file) + if file == "." { + break + } + if _, found := f.dirs[file]; found { + break + } + f.dirs[file] = struct{}{} + } + return nil +} + +// Files returns all the files from the `--files-from` list +// +// It may be nil if the list is empty +func (f *Filter) Files() FilesMap { + return f.files +} + +// Clear clears all the filter rules +func (f *Filter) Clear() { + f.fileRules.clear() + f.dirRules.clear() +} + +// InActive returns false if any filters are active +func (f *Filter) InActive() bool { + return (f.files == nil && + f.ModTimeFrom.IsZero() && + f.ModTimeTo.IsZero() && + f.Opt.MinSize < 0 && + f.Opt.MaxSize < 0 && + f.fileRules.len() == 0 && + f.dirRules.len() == 0 && + len(f.Opt.ExcludeFile) == 0) +} + +// includeRemote returns whether this remote passes the filter rules. 
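The evaluation below is first-match-wins with include-by-default. A minimal standalone sketch of that semantics, with the regexps written as globToRegexp (later in this diff) would produce them:

```go
package main

import (
	"fmt"
	"regexp"
)

// Trimmed copy of the rule type above, for illustration only.
type rule struct {
	Include bool
	Regexp  *regexp.Regexp
}

// include mirrors includeRemote: the first matching rule decides,
// and a path matching no rule at all is included.
func include(remote string, rules []rule) bool {
	for _, r := range rules {
		if r.Regexp.MatchString(remote) {
			return r.Include
		}
	}
	return true
}

func main() {
	rules := []rule{
		{true, regexp.MustCompile(`(^|/)[^/]*\.jpg$`)}, // like "+ *.jpg"
		{false, regexp.MustCompile(`^.*$`)},            // like "- /**"
	}
	fmt.Println(include("photo.jpg", rules)) // true
	fmt.Println(include("notes.txt", rules)) // false
}
```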
+func (f *Filter) includeRemote(remote string) bool { + for _, rule := range f.fileRules.rules { + if rule.Match(remote) { + return rule.Include + } + } + return true +} + +// ListContainsExcludeFile checks if exclude file is present in the list. +func (f *Filter) ListContainsExcludeFile(entries fs.DirEntries) bool { + if len(f.Opt.ExcludeFile) == 0 { + return false + } + for _, entry := range entries { + obj, ok := entry.(fs.Object) + if ok { + basename := path.Base(obj.Remote()) + if basename == f.Opt.ExcludeFile { + return true + } + } + } + return false +} + +// IncludeDirectory returns a function which checks whether this +// directory should be included in the sync or not. +func (f *Filter) IncludeDirectory(ctx context.Context, fs fs.Fs) func(string) (bool, error) { + return func(remote string) (bool, error) { + remote = strings.Trim(remote, "/") + // first check if we need to remove directory based on + // the exclude file + excl, err := f.DirContainsExcludeFile(ctx, fs, remote) + if err != nil { + return false, err + } + if excl { + return false, nil + } + + // filesFrom takes precedence + if f.files != nil { + _, include := f.dirs[remote] + return include, nil + } + remote += "/" + for _, rule := range f.dirRules.rules { + if rule.Match(remote) { + return rule.Include, nil + } + } + + return true, nil + } +} + +// DirContainsExcludeFile checks if exclude file is present in a +// directory. If fs is nil, it works properly if ExcludeFile is an +// empty string (for testing). +func (f *Filter) DirContainsExcludeFile(ctx context.Context, fremote fs.Fs, remote string) (bool, error) { + if len(f.Opt.ExcludeFile) > 0 { + exists, err := fs.FileExists(ctx, fremote, path.Join(remote, f.Opt.ExcludeFile)) + if err != nil { + return false, err + } + if exists { + return true, nil + } + } + return false, nil +} + +// Include returns whether this object should be included into the +// sync or not +func (f *Filter) Include(remote string, size int64, modTime time.Time) bool { + // filesFrom takes precedence + if f.files != nil { + _, include := f.files[remote] + return include + } + if !f.ModTimeFrom.IsZero() && modTime.Before(f.ModTimeFrom) { + return false + } + if !f.ModTimeTo.IsZero() && modTime.After(f.ModTimeTo) { + return false + } + if f.Opt.MinSize >= 0 && size < int64(f.Opt.MinSize) { + return false + } + if f.Opt.MaxSize >= 0 && size > int64(f.Opt.MaxSize) { + return false + } + return f.includeRemote(remote) +} + +// IncludeObject returns whether this object should be included into +// the sync or not. This is a convenience function to avoid calling +// o.ModTime(), which is an expensive operation. 
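The filter is now complete enough to use end to end. A hedged usage sketch of the pieces defined above (Opt, NewFilter, Include), with import paths as vendored in this tree and illustrative values; note that Include applies the cheap size and age bounds before consulting the compiled rules:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/filter"
)

func main() {
	opt := filter.DefaultOpt
	opt.MinSize = fs.SizeSuffix(1024)  // drop files under 1 KiB
	opt.IncludeRule = []string{"*.go"} // include *.go; "- /**" is added implicitly
	f, err := filter.NewFilter(&opt)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now()
	fmt.Println(f.Include("main.go", 2048, now))   // true
	fmt.Println(f.Include("main.go", 10, now))     // false: below MinSize
	fmt.Println(f.Include("README.md", 2048, now)) // false: excluded by "- /**"
}
```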
+func (f *Filter) IncludeObject(ctx context.Context, o fs.Object) bool { + var modTime time.Time + + if !f.ModTimeFrom.IsZero() || !f.ModTimeTo.IsZero() { + modTime = o.ModTime(ctx) + } else { + modTime = time.Unix(0, 0) + } + + return f.Include(o.Remote(), o.Size(), modTime) +} + +// forEachLine calls fn on every line in the file pointed to by path +// +// It ignores empty lines and lines starting with '#' or ';' if raw is false +func forEachLine(path string, raw bool, fn func(string) error) (err error) { + var scanner *bufio.Scanner + if path == "-" { + scanner = bufio.NewScanner(os.Stdin) + } else { + in, err := os.Open(path) + if err != nil { + return err + } + scanner = bufio.NewScanner(in) + defer fs.CheckClose(in, &err) + } + for scanner.Scan() { + line := scanner.Text() + if !raw { + line = strings.TrimSpace(line) + if len(line) == 0 || line[0] == '#' || line[0] == ';' { + continue + } + } + err := fn(line) + if err != nil { + return err + } + } + return scanner.Err() +} + +// DumpFilters dumps the filters in textual form, 1 per line +func (f *Filter) DumpFilters() string { + rules := []string{} + if !f.ModTimeFrom.IsZero() { + rules = append(rules, fmt.Sprintf("Last-modified date must be equal or greater than: %s", f.ModTimeFrom.String())) + } + if !f.ModTimeTo.IsZero() { + rules = append(rules, fmt.Sprintf("Last-modified date must be equal or less than: %s", f.ModTimeTo.String())) + } + rules = append(rules, "--- File filter rules ---") + for _, rule := range f.fileRules.rules { + rules = append(rules, rule.String()) + } + rules = append(rules, "--- Directory filter rules ---") + for _, dirRule := range f.dirRules.rules { + rules = append(rules, dirRule.String()) + } + return strings.Join(rules, "\n") +} + +// HaveFilesFrom returns true if --files-from has been supplied +func (f *Filter) HaveFilesFrom() bool { + return f.files != nil +} + +var errFilesFromNotSet = errors.New("--files-from not set so can't use Filter.ListR") + +// MakeListR makes function to return all the files set using --files-from +func (f *Filter) MakeListR(ctx context.Context, NewObject func(ctx context.Context, remote string) (fs.Object, error)) fs.ListRFn { + return func(ctx context.Context, dir string, callback fs.ListRCallback) error { + ci := fs.GetConfig(ctx) + if !f.HaveFilesFrom() { + return errFilesFromNotSet + } + var ( + checkers = ci.Checkers + remotes = make(chan string, checkers) + g errgroup.Group + ) + for i := 0; i < checkers; i++ { + g.Go(func() (err error) { + var entries = make(fs.DirEntries, 1) + for remote := range remotes { + entries[0], err = NewObject(ctx, remote) + if err == fs.ErrorObjectNotFound { + // Skip files that are not found + } else if err != nil { + return err + } else { + err = callback(entries) + if err != nil { + return err + } + } + } + return nil + }) + } + for remote := range f.files { + remotes <- remote + } + close(remotes) + return g.Wait() + } +} + +// UsesDirectoryFilters returns true if the filter uses directory +// filters and false if it doesn't. 
+// +// This is used in deciding whether to walk directories or use ListR +func (f *Filter) UsesDirectoryFilters() bool { + if len(f.dirRules.rules) == 0 { + return false + } + rule := f.dirRules.rules[0] + re := rule.Regexp.String() + if rule.Include == true && re == "^.*$" { + return false + } + return true +} + +type configContextKeyType struct{} + +// Context key for config +var configContextKey = configContextKeyType{} + +// GetConfig returns the global or context sensitive config +func GetConfig(ctx context.Context) *Filter { + if ctx == nil { + return globalConfig + } + c := ctx.Value(configContextKey) + if c == nil { + return globalConfig + } + return c.(*Filter) +} + +// AddConfig returns a mutable config structure based on a shallow +// copy of that found in ctx and returns a new context with that added +// to it. +func AddConfig(ctx context.Context) (context.Context, *Filter) { + c := GetConfig(ctx) + cCopy := new(Filter) + *cCopy = *c + newCtx := context.WithValue(ctx, configContextKey, cCopy) + return newCtx, cCopy +} + +// ReplaceConfig replaces the filter config in the ctx with the one +// passed in and returns a new context with that added to it. +func ReplaceConfig(ctx context.Context, f *Filter) context.Context { + newCtx := context.WithValue(ctx, configContextKey, f) + return newCtx +} diff --git a/vendor/github.com/rclone/rclone/fs/filter/glob.go b/vendor/github.com/rclone/rclone/fs/filter/glob.go new file mode 100644 index 00000000000..96a48d3d454 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/filter/glob.go @@ -0,0 +1,169 @@ +// rsync style glob parser + +package filter + +import ( + "bytes" + "regexp" + "strings" + + "github.com/pkg/errors" +) + +// globToRegexp converts an rsync style glob to a regexp +// +// documented in filtering.md +func globToRegexp(glob string, ignoreCase bool) (*regexp.Regexp, error) { + var re bytes.Buffer + if ignoreCase { + _, _ = re.WriteString("(?i)") + } + if strings.HasPrefix(glob, "/") { + glob = glob[1:] + _, _ = re.WriteRune('^') + } else { + _, _ = re.WriteString("(^|/)") + } + consecutiveStars := 0 + insertStars := func() error { + if consecutiveStars > 0 { + switch consecutiveStars { + case 1: + _, _ = re.WriteString(`[^/]*`) + case 2: + _, _ = re.WriteString(`.*`) + default: + return errors.Errorf("too many stars in %q", glob) + } + } + consecutiveStars = 0 + return nil + } + inBraces := false + inBrackets := 0 + slashed := false + for _, c := range glob { + if slashed { + _, _ = re.WriteRune(c) + slashed = false + continue + } + if c != '*' { + err := insertStars() + if err != nil { + return nil, err + } + } + if inBrackets > 0 { + _, _ = re.WriteRune(c) + if c == '[' { + inBrackets++ + } + if c == ']' { + inBrackets-- + } + continue + } + switch c { + case '\\': + _, _ = re.WriteRune(c) + slashed = true + case '*': + consecutiveStars++ + case '?': + _, _ = re.WriteString(`[^/]`) + case '[': + _, _ = re.WriteRune(c) + inBrackets++ + case ']': + return nil, errors.Errorf("mismatched ']' in glob %q", glob) + case '{': + if inBraces { + return nil, errors.Errorf("can't nest '{' '}' in glob %q", glob) + } + inBraces = true + _, _ = re.WriteRune('(') + case '}': + if !inBraces { + return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob) + } + _, _ = re.WriteRune(')') + inBraces = false + case ',': + if inBraces { + _, _ = re.WriteRune('|') + } else { + _, _ = re.WriteRune(c) + } + case '.', '+', '(', ')', '|', '^', '$': // regexp meta characters not dealt with above + _, _ = re.WriteRune('\\') + _, _ = 
re.WriteRune(c) + default: + _, _ = re.WriteRune(c) + } + } + err := insertStars() + if err != nil { + return nil, err + } + if inBrackets > 0 { + return nil, errors.Errorf("mismatched '[' and ']' in glob %q", glob) + } + if inBraces { + return nil, errors.Errorf("mismatched '{' and '}' in glob %q", glob) + } + _, _ = re.WriteRune('$') + result, err := regexp.Compile(re.String()) + if err != nil { + return nil, errors.Wrapf(err, "bad glob pattern %q (regexp %q)", glob, re.String()) + } + return result, nil +} + +var ( + // Can't deal with / or ** in {} + tooHardRe = regexp.MustCompile(`{[^{}]*(\*\*|/)[^{}]*}`) + + // Squash all / + squashSlash = regexp.MustCompile(`/{2,}`) +) + +// globToDirGlobs takes a file glob and turns it into a series of +// directory globs. When matched with a directory (with a trailing /) +// this should answer the question as to whether this glob could be in +// this directory. +func globToDirGlobs(glob string) (out []string) { + if tooHardRe.MatchString(glob) { + // Can't figure this one out so return any directory might match + out = append(out, "/**") + return out + } + + // Get rid of multiple /s + glob = squashSlash.ReplaceAllString(glob, "/") + + // Split on / or ** + // (** can contain /) + for { + i := strings.LastIndex(glob, "/") + j := strings.LastIndex(glob, "**") + what := "" + if j > i { + i = j + what = "**" + } + if i < 0 { + if len(out) == 0 { + out = append(out, "/**") + } + break + } + glob = glob[:i] + newGlob := glob + what + "/" + if len(out) == 0 || out[len(out)-1] != newGlob { + out = append(out, newGlob) + } + } + + return out +} diff --git a/vendor/github.com/rclone/rclone/fs/fingerprint.go b/vendor/github.com/rclone/rclone/fs/fingerprint.go new file mode 100644 index 00000000000..75d3256db8e --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/fingerprint.go @@ -0,0 +1,54 @@ +package fs + +import ( + "context" + "fmt" + "strings" + + "github.com/rclone/rclone/fs/hash" +) + +// Fingerprint produces a unique-ish string for an object. +// +// This is for detecting whether an object has changed since we last +// saw it, not for checking object identity between two different +// remotes - operations.Equal should be used for that. +// +// If fast is set then Fingerprint will only include attributes where +// usually another operation is not required to fetch them. For +// example if fast is set then this won't include hashes on the local +// backend. +func Fingerprint(ctx context.Context, o ObjectInfo, fast bool) string { + var ( + out strings.Builder + f = o.Fs() + features = f.Features() + ) + fmt.Fprintf(&out, "%d", o.Size()) + // Whether we want to do a slow operation or not + // + // fast true false true false + // opIsSlow true true false false + // do Op false true true true + // + // If !fast (slow) do the operation or if !OpIsSlow == + // OpIsFast do the operation. + // + // Eg don't do this for S3 where modtimes are expensive + if !fast || !features.SlowModTime { + if f.Precision() != ModTimeNotSupported { + fmt.Fprintf(&out, ",%v", o.ModTime(ctx).UTC()) + } + } + // Eg don't do this for SFTP/local where hashes are expensive? 
+ if !fast || !features.SlowHash { + hashType := f.Hashes().GetOne() + if hashType != hash.None { + hash, err := o.Hash(ctx, hashType) + if err == nil { + fmt.Fprintf(&out, ",%v", hash) + } + } + } + return out.String() +} diff --git a/vendor/github.com/rclone/rclone/fs/fs.go b/vendor/github.com/rclone/rclone/fs/fs.go new file mode 100644 index 00000000000..f91192261cc --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/fs.go @@ -0,0 +1,1512 @@ +// Package fs is a generic file system interface for rclone object storage systems +package fs + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "os" + "path/filepath" + "reflect" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/config/configstruct" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fspath" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/lib/pacer" +) + +// EntryType can be associated with remote paths to identify their type +type EntryType int + +// Constants +const ( + // ModTimeNotSupported is a very large precision value to show + // mod time isn't supported on this Fs + ModTimeNotSupported = 100 * 365 * 24 * time.Hour + // MaxLevel is a sentinel representing an infinite depth for listings + MaxLevel = math.MaxInt32 + // EntryDirectory should be used to classify remote paths in directories + EntryDirectory EntryType = iota // 0 + // EntryObject should be used to classify remote paths in objects + EntryObject // 1 +) + +// Globals +var ( + // Filesystem registry + Registry []*RegInfo + // ErrorNotFoundInConfigFile is returned by NewFs if not found in config file + ErrorNotFoundInConfigFile = errors.New("didn't find section in config file") + ErrorCantPurge = errors.New("can't purge directory") + ErrorCantCopy = errors.New("can't copy object - incompatible remotes") + ErrorCantMove = errors.New("can't move object - incompatible remotes") + ErrorCantDirMove = errors.New("can't move directory - incompatible remotes") + ErrorCantUploadEmptyFiles = errors.New("can't upload empty files to this remote") + ErrorDirExists = errors.New("can't copy directory - destination already exists") + ErrorCantSetModTime = errors.New("can't set modified time") + ErrorCantSetModTimeWithoutDelete = errors.New("can't set modified time without deleting existing object") + ErrorDirNotFound = errors.New("directory not found") + ErrorObjectNotFound = errors.New("object not found") + ErrorLevelNotSupported = errors.New("level value not supported") + ErrorListAborted = errors.New("list aborted") + ErrorListBucketRequired = errors.New("bucket or container name is needed in remote") + ErrorIsFile = errors.New("is a file not a directory") + ErrorNotAFile = errors.New("is not a regular file") + ErrorNotDeleting = errors.New("not deleting files as there were IO errors") + ErrorNotDeletingDirs = errors.New("not deleting directories as there were IO errors") + ErrorOverlapping = errors.New("can't sync or move files on overlapping remotes") + ErrorDirectoryNotEmpty = errors.New("directory not empty") + ErrorImmutableModified = errors.New("immutable file modified") + ErrorPermissionDenied = errors.New("permission denied") + ErrorCantShareDirectories = errors.New("this backend can't share directories with link") + ErrorNotImplemented = errors.New("optional feature not implemented") + ErrorCommandNotFound = errors.New("command not found") + ErrorFileNameTooLong = errors.New("file name too long") +) + 
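These sentinels are compared with plain equality by callers, as MakeListR above does with ErrorObjectNotFound. A self-contained sketch of the idiom, using a local stand-in error rather than the real fs package:

```go
package main

import (
	"errors"
	"fmt"
)

// Local stand-in for a sentinel such as fs.ErrorObjectNotFound.
var errObjectNotFound = errors.New("object not found")

func lookup(remote string) error {
	if remote != "present" {
		return errObjectNotFound
	}
	return nil
}

func main() {
	// Direct equality against the sentinel, mirroring MakeListR.
	if err := lookup("missing"); err == errObjectNotFound {
		fmt.Println("skipping object that is not found")
	}
}
```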
+// RegInfo provides information about a filesystem
+type RegInfo struct {
+	// Name of this fs
+	Name string
+	// Description of this fs - defaults to Name
+	Description string
+	// Prefix for command line flags for this fs - defaults to Name if not set
+	Prefix string
+	// Create a new file system. If root refers to an existing
+	// object, then it should return an Fs which points to
+	// the parent of that object and ErrorIsFile.
+	NewFs func(ctx context.Context, name string, root string, config configmap.Mapper) (Fs, error) `json:"-"`
+	// Function to call to help with config
+	Config func(ctx context.Context, name string, config configmap.Mapper) `json:"-"`
+	// Options for the Fs configuration
+	Options Options
+	// The command help, if any
+	CommandHelp []CommandHelp
+}
+
+// FileName returns the on disk file name for this backend
+func (ri *RegInfo) FileName() string {
+	return strings.Replace(ri.Name, " ", "", -1)
+}
+
+// Options is a slice of configuration Option for a backend
+type Options []Option
+
+// Set the default values for the options
+func (os Options) setValues() {
+	for i := range os {
+		o := &os[i]
+		if o.Default == nil {
+			o.Default = ""
+		}
+	}
+}
+
+// Get the Option corresponding to name or return nil if not found
+func (os Options) Get(name string) *Option {
+	for i := range os {
+		opt := &os[i]
+		if opt.Name == name {
+			return opt
+		}
+	}
+	return nil
+}
+
+// OptionVisibility controls whether the options are visible in the
+// configurator or the command line.
+type OptionVisibility byte
+
+// Constants for Option.Hide
+const (
+	OptionHideCommandLine OptionVisibility = 1 << iota
+	OptionHideConfigurator
+	OptionHideBoth = OptionHideCommandLine | OptionHideConfigurator
+)
+
+// Option describes an option for the config wizard
+//
+// This also describes command line options and environment variables
+type Option struct {
+	Name       string           // name of the option in snake_case
+	Help       string           // Help, the first line only is used for the command line help
+	Provider   string           // Set to filter on provider
+	Default    interface{}      // default value, nil => ""
+	Value      interface{}      // value to be set by flags
+	Examples   OptionExamples   `json:",omitempty"` // config examples
+	ShortOpt   string           // the short option for this if required
+	Hide       OptionVisibility // set this to hide the config from the configurator or the command line
+	Required   bool             // this option is required
+	IsPassword bool             // set if the option is a password
+	NoPrefix   bool             // set if the option for this should not use the backend prefix
+	Advanced   bool             // set if this is an advanced config option
+}
+
+// BaseOption is an alias for Option used internally
+type BaseOption Option
+
+// MarshalJSON turns an Option into JSON
+//
+// It adds some generated fields for ease of use
+// - DefaultStr - a string rendering of Default
+// - ValueStr - a string rendering of Value
+// - Type - the type of the option
+func (o *Option) MarshalJSON() ([]byte, error) {
+	return json.Marshal(struct {
+		BaseOption
+		DefaultStr string
+		ValueStr   string
+		Type       string
+	}{
+		BaseOption: BaseOption(*o),
+		DefaultStr: fmt.Sprint(o.Default),
+		ValueStr:   o.String(),
+		Type:       o.Type(),
+	})
+}
+
+// GetValue gets the current value, which is the default if not set
+func (o *Option) GetValue() interface{} {
+	val := o.Value
+	if val == nil {
+		val = o.Default
+		if val == nil {
+			val = ""
+		}
+	}
+	return val
+}
+
+// String turns Option into a string
+func (o *Option) String() string {
+	return fmt.Sprint(o.GetValue())
+}
+
+// Set an
Option from a string +func (o *Option) Set(s string) (err error) { + newValue, err := configstruct.StringToInterface(o.GetValue(), s) + if err != nil { + return err + } + o.Value = newValue + return nil +} + +// Type of the value +func (o *Option) Type() string { + return reflect.TypeOf(o.GetValue()).Name() +} + +// FlagName for the option +func (o *Option) FlagName(prefix string) string { + name := strings.Replace(o.Name, "_", "-", -1) // convert snake_case to kebab-case + if !o.NoPrefix { + name = prefix + "-" + name + } + return name +} + +// EnvVarName for the option +func (o *Option) EnvVarName(prefix string) string { + return OptionToEnv(prefix + "-" + o.Name) +} + +// OptionExamples is a slice of examples +type OptionExamples []OptionExample + +// Len is part of sort.Interface. +func (os OptionExamples) Len() int { return len(os) } + +// Swap is part of sort.Interface. +func (os OptionExamples) Swap(i, j int) { os[i], os[j] = os[j], os[i] } + +// Less is part of sort.Interface. +func (os OptionExamples) Less(i, j int) bool { return os[i].Help < os[j].Help } + +// Sort sorts an OptionExamples +func (os OptionExamples) Sort() { sort.Sort(os) } + +// OptionExample describes an example for an Option +type OptionExample struct { + Value string + Help string + Provider string +} + +// Register a filesystem +// +// Fs modules should use this in an init() function +func Register(info *RegInfo) { + info.Options.setValues() + if info.Prefix == "" { + info.Prefix = info.Name + } + Registry = append(Registry, info) +} + +// Fs is the interface a cloud storage system must provide +type Fs interface { + Info + + // List the objects and directories in dir into entries. The + // entries can be returned in any order but should be for a + // complete directory. + // + // dir should be "" to list the root, and should not have + // trailing slashes. + // + // This should return ErrDirNotFound if the directory isn't + // found. + List(ctx context.Context, dir string) (entries DirEntries, err error) + + // NewObject finds the Object at remote. If it can't be found + // it returns the error ErrorObjectNotFound. + NewObject(ctx context.Context, remote string) (Object, error) + + // Put in to the remote path with the modTime given of the given size + // + // When called from outside an Fs by rclone, src.Size() will always be >= 0. + // But for unknown-sized objects (indicated by src.Size() == -1), Put should either + // return an error or upload it properly (rather than e.g. calling panic). + // + // May create the object even if it returns an error - if so + // will return the object and the error, otherwise will return + // nil and the error + Put(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error) + + // Mkdir makes the directory (container, bucket) + // + // Shouldn't return an error if it already exists + Mkdir(ctx context.Context, dir string) error + + // Rmdir removes the directory (container, bucket) if empty + // + // Return an error if it doesn't exist or isn't empty + Rmdir(ctx context.Context, dir string) error +} + +// Info provides a read only interface to information about a filesystem. 
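As a hedged illustration of the registry above, a minimal backend registration might look like the sketch below; the package and backend name are invented, and a real NewFs would construct and return its Fs:

```go
package mybackend // hypothetical backend package

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config/configmap"
)

func init() {
	fs.Register(&fs.RegInfo{
		Name:        "mybackend", // illustrative name
		Description: "Example backend",
		NewFs: func(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
			// A real backend would build its Fs from root and m here.
			return nil, fs.ErrorNotImplemented
		},
	})
}
```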
+type Info interface { + // Name of the remote (as passed into NewFs) + Name() string + + // Root of the remote (as passed into NewFs) + Root() string + + // String returns a description of the FS + String() string + + // Precision of the ModTimes in this Fs + Precision() time.Duration + + // Returns the supported hash types of the filesystem + Hashes() hash.Set + + // Features returns the optional features of this Fs + Features() *Features +} + +// Object is a filesystem like object provided by an Fs +type Object interface { + ObjectInfo + + // SetModTime sets the metadata on the object to set the modification date + SetModTime(ctx context.Context, t time.Time) error + + // Open opens the file for read. Call Close() on the returned io.ReadCloser + Open(ctx context.Context, options ...OpenOption) (io.ReadCloser, error) + + // Update in to the object with the modTime given of the given size + // + // When called from outside an Fs by rclone, src.Size() will always be >= 0. + // But for unknown-sized objects (indicated by src.Size() == -1), Upload should either + // return an error or update the object properly (rather than e.g. calling panic). + Update(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) error + + // Removes this object + Remove(ctx context.Context) error +} + +// ObjectInfo provides read only information about an object. +type ObjectInfo interface { + DirEntry + + // Fs returns read only access to the Fs that this object is part of + Fs() Info + + // Hash returns the selected checksum of the file + // If no checksum is available it returns "" + Hash(ctx context.Context, ty hash.Type) (string, error) + + // Storable says whether this object can be stored + Storable() bool +} + +// DirEntry provides read only information about the common subset of +// a Dir or Object. These are returned from directory listings - type +// assert them into the correct type. 
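As the comment above says, listing consumers type-assert each DirEntry into the concrete interface. A hedged sketch using Fs.List and the Object and Directory interfaces of this file (the helper name is invented):

```go
package example

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
)

// printEntries is a hypothetical helper showing the type switch
// used to tell objects from directories in a listing.
func printEntries(ctx context.Context, f fs.Fs) error {
	entries, err := f.List(ctx, "") // "" lists the root
	if err != nil {
		return err
	}
	for _, entry := range entries {
		switch x := entry.(type) {
		case fs.Object:
			fmt.Println("object:", x.Remote(), x.Size())
		case fs.Directory:
			fmt.Println("dir:", x.Remote(), "items:", x.Items())
		}
	}
	return nil
}
```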
+type DirEntry interface { + // String returns a description of the Object + String() string + + // Remote returns the remote path + Remote() string + + // ModTime returns the modification date of the file + // It should return a best guess if one isn't available + ModTime(context.Context) time.Time + + // Size returns the size of the file + Size() int64 +} + +// Directory is a filesystem like directory provided by an Fs +type Directory interface { + DirEntry + + // Items returns the count of items in this directory or this + // directory and subdirectories if known, -1 for unknown + Items() int64 + + // ID returns the internal ID of this directory if known, or + // "" otherwise + ID() string +} + +// MimeTyper is an optional interface for Object +type MimeTyper interface { + // MimeType returns the content type of the Object if + // known, or "" if not + MimeType(ctx context.Context) string +} + +// IDer is an optional interface for Object +type IDer interface { + // ID returns the ID of the Object if known, or "" if not + ID() string +} + +// ObjectUnWrapper is an optional interface for Object +type ObjectUnWrapper interface { + // UnWrap returns the Object that this Object is wrapping or + // nil if it isn't wrapping anything + UnWrap() Object +} + +// SetTierer is an optional interface for Object +type SetTierer interface { + // SetTier performs changing storage tier of the Object if + // multiple storage classes supported + SetTier(tier string) error +} + +// GetTierer is an optional interface for Object +type GetTierer interface { + // GetTier returns storage tier or class of the Object + GetTier() string +} + +// FullObjectInfo contains all the read-only optional interfaces +// +// Use for checking making wrapping ObjectInfos implement everything +type FullObjectInfo interface { + ObjectInfo + MimeTyper + IDer + ObjectUnWrapper + GetTierer +} + +// FullObject contains all the optional interfaces for Object +// +// Use for checking making wrapping Objects implement everything +type FullObject interface { + Object + MimeTyper + IDer + ObjectUnWrapper + GetTierer + SetTierer +} + +// ObjectOptionalInterfaces returns the names of supported and +// unsupported optional interfaces for an Object +func ObjectOptionalInterfaces(o Object) (supported, unsupported []string) { + store := func(ok bool, name string) { + if ok { + supported = append(supported, name) + } else { + unsupported = append(unsupported, name) + } + } + + _, ok := o.(MimeTyper) + store(ok, "MimeType") + + _, ok = o.(IDer) + store(ok, "ID") + + _, ok = o.(ObjectUnWrapper) + store(ok, "UnWrap") + + _, ok = o.(SetTierer) + store(ok, "SetTier") + + _, ok = o.(GetTierer) + store(ok, "GetTier") + + return supported, unsupported +} + +// ListRCallback defines a callback function for ListR to use +// +// It is called for each tranche of entries read from the listing and +// if it returns an error, the listing stops. 
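Callers discover these optional capabilities with runtime type assertions, just as ObjectOptionalInterfaces above does. A hedged one-capability sketch (helper name invented):

```go
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
)

// mimeTypeOf is a hypothetical helper: it returns the object's MIME
// type if the backend implements the optional MimeTyper interface,
// and "" otherwise.
func mimeTypeOf(ctx context.Context, o fs.Object) string {
	if mt, ok := o.(fs.MimeTyper); ok {
		return mt.MimeType(ctx)
	}
	return ""
}
```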
+type ListRCallback func(entries DirEntries) error
+
+// ListRFn defines the call used to recursively list a directory
+type ListRFn func(ctx context.Context, dir string, callback ListRCallback) error
+
+// NewUsageValue makes a pointer to an int64 usage value
+func NewUsageValue(value int64) *int64 {
+	p := new(int64)
+	*p = value
+	return p
+}
+
+// Usage is returned by the About call
+//
+// If a value is nil then it isn't supported by that backend
+type Usage struct {
+	Total   *int64 `json:"total,omitempty"`   // quota of bytes that can be used
+	Used    *int64 `json:"used,omitempty"`    // bytes in use
+	Trashed *int64 `json:"trashed,omitempty"` // bytes in trash
+	Other   *int64 `json:"other,omitempty"`   // other usage e.g. gmail in drive
+	Free    *int64 `json:"free,omitempty"`    // bytes which can be uploaded before reaching the quota
+	Objects *int64 `json:"objects,omitempty"` // objects in the storage system
+}
+
+// WriterAtCloser wraps io.WriterAt and io.Closer
+type WriterAtCloser interface {
+	io.WriterAt
+	io.Closer
+}
+
+// Features describe the optional features of the Fs
+type Features struct {
+	// Feature flags, whether the Fs
+	CaseInsensitive         bool // has case insensitive files
+	DuplicateFiles          bool // allows duplicate files
+	ReadMimeType            bool // can read the mime type of objects
+	WriteMimeType           bool // can set the mime type of objects
+	CanHaveEmptyDirectories bool // can have empty directories
+	BucketBased             bool // is bucket based (like s3, swift, etc.)
+	BucketBasedRootOK       bool // is bucket based and can use from root
+	SetTier                 bool // allows set tier functionality on objects
+	GetTier                 bool // allows retrieving the storage tier of objects
+	ServerSideAcrossConfigs bool // can server-side copy between different remotes of the same type
+	IsLocal                 bool // is the local backend
+	SlowModTime             bool // if calling ModTime() generally takes an extra transaction
+	SlowHash                bool // if calling Hash() generally takes an extra transaction
+
+	// Purge all files in the directory specified
+	//
+	// Implement this if you have a way of deleting all the files
+	// quicker than just running Remove() on the result of List()
+	//
+	// Return an error if it doesn't exist
+	Purge func(ctx context.Context, dir string) error
+
+	// Copy src to this remote using server-side copy operations.
+	//
+	// This is stored with the remote path given
+	//
+	// It returns the destination Object and a possible error
+	//
+	// Will only be called if src.Fs().Name() == f.Name()
+	//
+	// If it isn't possible then return fs.ErrorCantCopy
+	Copy func(ctx context.Context, src Object, remote string) (Object, error)
+
+	// Move src to this remote using server-side move operations.
+	//
+	// This is stored with the remote path given
+	//
+	// It returns the destination Object and a possible error
+	//
+	// Will only be called if src.Fs().Name() == f.Name()
+	//
+	// If it isn't possible then return fs.ErrorCantMove
+	Move func(ctx context.Context, src Object, remote string) (Object, error)
+
+	// DirMove moves src, srcRemote to this remote at dstRemote
+	// using server-side move operations.
+	//
+	// Will only be called if src.Fs().Name() == f.Name()
+	//
+	// If it isn't possible then return fs.ErrorCantDirMove
+	//
+	// If destination exists then return fs.ErrorDirExists
+	DirMove func(ctx context.Context, src Fs, srcRemote, dstRemote string) error
+
+	// ChangeNotify calls the passed function with a path
+	// that has had changes. If the implementation
+	// uses polling, it should adhere to the given interval.
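// A minimal polling skeleton for a backend ChangeNotify (illustrative only;
// real backends differ). It honours the interval channel: a new value resets
// the ticker, 0 pauses polling, and a closed channel stops the goroutine:
//
//	func (f *Fs) ChangeNotify(ctx context.Context, notify func(string, fs.EntryType), ch <-chan time.Duration) {
//		go func() {
//			var ticker *time.Ticker
//			var tickerC <-chan time.Time
//			for {
//				select {
//				case interval, ok := <-ch:
//					if ticker != nil {
//						ticker.Stop()
//						ticker, tickerC = nil, nil
//					}
//					if !ok {
//						return // channel closed - stop polling
//					}
//					if interval != 0 {
//						ticker = time.NewTicker(interval)
//						tickerC = ticker.C
//					}
//				case <-tickerC:
//					// poll the backend here and call notify(path, fs.EntryObject)
//				}
//			}
//		}()
//	}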
+	ChangeNotify func(context.Context, func(string, EntryType), <-chan time.Duration)
+
+	// UnWrap returns the Fs that this Fs is wrapping
+	UnWrap func() Fs
+
+	// WrapFs returns the Fs that is wrapping this Fs
+	WrapFs func() Fs
+
+	// SetWrapper sets the Fs that is wrapping this Fs
+	SetWrapper func(f Fs)
+
+	// DirCacheFlush resets the directory cache - used in testing
+	// as an optional interface
+	DirCacheFlush func()
+
+	// PublicLink generates a public link to the remote path (usually readable by anyone)
+	PublicLink func(ctx context.Context, remote string, expire Duration, unlink bool) (string, error)
+
+	// Put in to the remote path with the modTime given of the given size
+	//
+	// May create the object even if it returns an error - if so
+	// will return the object and the error, otherwise will return
+	// nil and the error
+	//
+	// May create duplicates or return errors if src already
+	// exists.
+	PutUnchecked func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+
+	// PutStream uploads to the remote path with the modTime given of indeterminate size
+	//
+	// May create the object even if it returns an error - if so
+	// will return the object and the error, otherwise will return
+	// nil and the error
+	PutStream func(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+
+	// MergeDirs merges the contents of all the directories passed
+	// in into the first one and rmdirs the other directories.
+	MergeDirs func(ctx context.Context, dirs []Directory) error
+
+	// CleanUp the trash in the Fs
+	//
+	// Implement this if you have a way of emptying the trash or
+	// otherwise cleaning up old versions of files.
+	CleanUp func(ctx context.Context) error
+
+	// ListR lists the objects and directories of the Fs starting
+	// from dir recursively into out.
+	//
+	// dir should be "" to start from the root, and should not
+	// have trailing slashes.
+	//
+	// This should return ErrDirNotFound if the directory isn't
+	// found.
+	//
+	// It should call callback for each tranche of entries read.
+	// These need not be returned in any particular order. If
+	// callback returns an error then the listing will stop
+	// immediately.
+	//
+	// Don't implement this unless you have a more efficient way
+	// of listing recursively than doing a directory traversal.
+	ListR ListRFn
+
+	// About gets quota information from the Fs
+	About func(ctx context.Context) (*Usage, error)
+
+	// OpenWriterAt opens with a handle for random access writes
+	//
+	// Pass in the remote desired and the size if known.
+	//
+	// It truncates any existing object
+	OpenWriterAt func(ctx context.Context, remote string, size int64) (WriterAtCloser, error)
+
+	// UserInfo returns info about the connected user
+	UserInfo func(ctx context.Context) (map[string]string, error)
+
+	// Disconnect the current user
+	Disconnect func(ctx context.Context) error
+
+	// Command the backend to run a named command
+	//
+	// The command run is name
+	// args may be used to read arguments from
+	// opts may be used to read optional arguments from
+	//
+	// The result should be capable of being JSON encoded
+	// If it is a string or a []string it will be shown to the user
+	// otherwise it will be JSON encoded and shown to the user like that
+	Command func(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error)
+
+	// Shutdown the backend, closing any background tasks and any
+	// cached connections.
+ Shutdown func(ctx context.Context) error +} + +// Disable nil's out the named feature. If it isn't found then it +// will log a message. +func (ft *Features) Disable(name string) *Features { + v := reflect.ValueOf(ft).Elem() + vType := v.Type() + for i := 0; i < v.NumField(); i++ { + vName := vType.Field(i).Name + field := v.Field(i) + if strings.EqualFold(name, vName) { + if !field.CanSet() { + Errorf(nil, "Can't set Feature %q", name) + } else { + zero := reflect.Zero(field.Type()) + field.Set(zero) + Debugf(nil, "Reset feature %q", name) + } + } + } + return ft +} + +// List returns a slice of all the possible feature names +func (ft *Features) List() (out []string) { + v := reflect.ValueOf(ft).Elem() + vType := v.Type() + for i := 0; i < v.NumField(); i++ { + out = append(out, vType.Field(i).Name) + } + return out +} + +// Enabled returns a map of features with keys showing whether they +// are enabled or not +func (ft *Features) Enabled() (features map[string]bool) { + v := reflect.ValueOf(ft).Elem() + vType := v.Type() + features = make(map[string]bool, v.NumField()) + for i := 0; i < v.NumField(); i++ { + vName := vType.Field(i).Name + field := v.Field(i) + if field.Kind() == reflect.Func { + // Can't compare functions + features[vName] = !field.IsNil() + } else { + zero := reflect.Zero(field.Type()) + features[vName] = field.Interface() != zero.Interface() + } + } + return features +} + +// DisableList nil's out the comma separated list of named features. +// If it isn't found then it will log a message. +func (ft *Features) DisableList(list []string) *Features { + for _, feature := range list { + ft.Disable(strings.TrimSpace(feature)) + } + return ft +} + +// Fill fills in the function pointers in the Features struct from the +// optional interfaces. It returns the original updated Features +// struct passed in. +func (ft *Features) Fill(ctx context.Context, f Fs) *Features { + if do, ok := f.(Purger); ok { + ft.Purge = do.Purge + } + if do, ok := f.(Copier); ok { + ft.Copy = do.Copy + } + if do, ok := f.(Mover); ok { + ft.Move = do.Move + } + if do, ok := f.(DirMover); ok { + ft.DirMove = do.DirMove + } + if do, ok := f.(ChangeNotifier); ok { + ft.ChangeNotify = do.ChangeNotify + } + if do, ok := f.(UnWrapper); ok { + ft.UnWrap = do.UnWrap + } + if do, ok := f.(Wrapper); ok { + ft.WrapFs = do.WrapFs + ft.SetWrapper = do.SetWrapper + } + if do, ok := f.(DirCacheFlusher); ok { + ft.DirCacheFlush = do.DirCacheFlush + } + if do, ok := f.(PublicLinker); ok { + ft.PublicLink = do.PublicLink + } + if do, ok := f.(PutUncheckeder); ok { + ft.PutUnchecked = do.PutUnchecked + } + if do, ok := f.(PutStreamer); ok { + ft.PutStream = do.PutStream + } + if do, ok := f.(MergeDirser); ok { + ft.MergeDirs = do.MergeDirs + } + if do, ok := f.(CleanUpper); ok { + ft.CleanUp = do.CleanUp + } + if do, ok := f.(ListRer); ok { + ft.ListR = do.ListR + } + if do, ok := f.(Abouter); ok { + ft.About = do.About + } + if do, ok := f.(OpenWriterAter); ok { + ft.OpenWriterAt = do.OpenWriterAt + } + if do, ok := f.(UserInfoer); ok { + ft.UserInfo = do.UserInfo + } + if do, ok := f.(Disconnecter); ok { + ft.Disconnect = do.Disconnect + } + if do, ok := f.(Commander); ok { + ft.Command = do.Command + } + if do, ok := f.(Shutdowner); ok { + ft.Shutdown = do.Shutdown + } + return ft.DisableList(GetConfig(ctx).DisableFeatures) +} + +// Mask the Features with the Fs passed in +// +// Only optional features which are implemented in both the original +// Fs AND the one passed in will be advertised. 
Any features which +// aren't in both will be set to false/nil, except for UnWrap/Wrap which +// will be left untouched. +func (ft *Features) Mask(ctx context.Context, f Fs) *Features { + mask := f.Features() + ft.CaseInsensitive = ft.CaseInsensitive && mask.CaseInsensitive + ft.DuplicateFiles = ft.DuplicateFiles && mask.DuplicateFiles + ft.ReadMimeType = ft.ReadMimeType && mask.ReadMimeType + ft.WriteMimeType = ft.WriteMimeType && mask.WriteMimeType + ft.CanHaveEmptyDirectories = ft.CanHaveEmptyDirectories && mask.CanHaveEmptyDirectories + ft.BucketBased = ft.BucketBased && mask.BucketBased + ft.BucketBasedRootOK = ft.BucketBasedRootOK && mask.BucketBasedRootOK + ft.SetTier = ft.SetTier && mask.SetTier + ft.GetTier = ft.GetTier && mask.GetTier + ft.ServerSideAcrossConfigs = ft.ServerSideAcrossConfigs && mask.ServerSideAcrossConfigs + // ft.IsLocal = ft.IsLocal && mask.IsLocal Don't propagate IsLocal + ft.SlowModTime = ft.SlowModTime && mask.SlowModTime + ft.SlowHash = ft.SlowHash && mask.SlowHash + + if mask.Purge == nil { + ft.Purge = nil + } + if mask.Copy == nil { + ft.Copy = nil + } + if mask.Move == nil { + ft.Move = nil + } + if mask.DirMove == nil { + ft.DirMove = nil + } + if mask.ChangeNotify == nil { + ft.ChangeNotify = nil + } + // if mask.UnWrap == nil { + // ft.UnWrap = nil + // } + // if mask.Wrapper == nil { + // ft.Wrapper = nil + // } + if mask.DirCacheFlush == nil { + ft.DirCacheFlush = nil + } + if mask.PublicLink == nil { + ft.PublicLink = nil + } + if mask.PutUnchecked == nil { + ft.PutUnchecked = nil + } + if mask.PutStream == nil { + ft.PutStream = nil + } + if mask.MergeDirs == nil { + ft.MergeDirs = nil + } + if mask.CleanUp == nil { + ft.CleanUp = nil + } + if mask.ListR == nil { + ft.ListR = nil + } + if mask.About == nil { + ft.About = nil + } + if mask.OpenWriterAt == nil { + ft.OpenWriterAt = nil + } + if mask.UserInfo == nil { + ft.UserInfo = nil + } + if mask.Disconnect == nil { + ft.Disconnect = nil + } + // Command is always local so we don't mask it + if mask.Shutdown == nil { + ft.Shutdown = nil + } + return ft.DisableList(GetConfig(ctx).DisableFeatures) +} + +// Wrap makes a Copy of the features passed in, overriding the UnWrap/Wrap +// method only if available in f. +func (ft *Features) Wrap(f Fs) *Features { + ftCopy := new(Features) + *ftCopy = *ft + if do, ok := f.(UnWrapper); ok { + ftCopy.UnWrap = do.UnWrap + } + if do, ok := f.(Wrapper); ok { + ftCopy.WrapFs = do.WrapFs + ftCopy.SetWrapper = do.SetWrapper + } + return ftCopy +} + +// WrapsFs adds extra information between `f` which wraps `w` +func (ft *Features) WrapsFs(f Fs, w Fs) *Features { + wFeatures := w.Features() + if wFeatures.WrapFs != nil && wFeatures.SetWrapper != nil { + wFeatures.SetWrapper(f) + } + return ft +} + +// Purger is an optional interfaces for Fs +type Purger interface { + // Purge all files in the directory specified + // + // Implement this if you have a way of deleting all the files + // quicker than just running Remove() on the result of List() + // + // Return an error if it doesn't exist + Purge(ctx context.Context, dir string) error +} + +// Copier is an optional interface for Fs +type Copier interface { + // Copy src to this remote using server-side copy operations. 
+	//
+	// This is stored with the remote path given
+	//
+	// It returns the destination Object and a possible error
+	//
+	// Will only be called if src.Fs().Name() == f.Name()
+	//
+	// If it isn't possible then return fs.ErrorCantCopy
+	Copy(ctx context.Context, src Object, remote string) (Object, error)
+}
+
+// Mover is an optional interface for Fs
+type Mover interface {
+	// Move src to this remote using server-side move operations.
+	//
+	// This is stored with the remote path given
+	//
+	// It returns the destination Object and a possible error
+	//
+	// Will only be called if src.Fs().Name() == f.Name()
+	//
+	// If it isn't possible then return fs.ErrorCantMove
+	Move(ctx context.Context, src Object, remote string) (Object, error)
+}
+
+// DirMover is an optional interface for Fs
+type DirMover interface {
+	// DirMove moves src, srcRemote to this remote at dstRemote
+	// using server-side move operations.
+	//
+	// Will only be called if src.Fs().Name() == f.Name()
+	//
+	// If it isn't possible then return fs.ErrorCantDirMove
+	//
+	// If destination exists then return fs.ErrorDirExists
+	DirMove(ctx context.Context, src Fs, srcRemote, dstRemote string) error
+}
+
+// ChangeNotifier is an optional interface for Fs
+type ChangeNotifier interface {
+	// ChangeNotify calls the passed function with a path
+	// that has had changes. If the implementation
+	// uses polling, it should adhere to the given interval.
+	// At least one value will be written to the channel,
+	// specifying the initial value and updated values might
+	// follow. A 0 Duration should pause the polling.
+	// The ChangeNotify implementation must empty the channel
+	// regularly. When the channel gets closed, the implementation
+	// should stop polling and release resources.
+	ChangeNotify(context.Context, func(string, EntryType), <-chan time.Duration)
+}
+
+// UnWrapper is an optional interface for Fs
+type UnWrapper interface {
+	// UnWrap returns the Fs that this Fs is wrapping
+	UnWrap() Fs
+}
+
+// Wrapper is an optional interface for Fs
+type Wrapper interface {
+	// WrapFs returns the Fs that is wrapping this Fs
+	WrapFs() Fs
+	// SetWrapper sets the Fs that is wrapping this Fs
+	SetWrapper(f Fs)
+}
+
+// DirCacheFlusher is an optional interface for Fs
+type DirCacheFlusher interface {
+	// DirCacheFlush resets the directory cache - used in testing
+	// as an optional interface
+	DirCacheFlush()
+}
+
+// PutUncheckeder is an optional interface for Fs
+type PutUncheckeder interface {
+	// Put in to the remote path with the modTime given of the given size
+	//
+	// May create the object even if it returns an error - if so
+	// will return the object and the error, otherwise will return
+	// nil and the error
+	//
+	// May create duplicates or return errors if src already
+	// exists.
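// Callers normally reach PutUnchecked through the feature function pointer,
// which is nil when unsupported - an illustrative guard (names assumed):
//
//	if do := f.Features().PutUnchecked; do != nil {
//		obj, err := do(ctx, in, src)
//		if err != nil {
//			return err
//		}
//		_ = obj
//	}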
+	PutUnchecked(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+}
+
+// PutStreamer is an optional interface for Fs
+type PutStreamer interface {
+	// PutStream uploads to the remote path with the modTime given of indeterminate size
+	//
+	// May create the object even if it returns an error - if so
+	// will return the object and the error, otherwise will return
+	// nil and the error
+	PutStream(ctx context.Context, in io.Reader, src ObjectInfo, options ...OpenOption) (Object, error)
+}
+
+// PublicLinker is an optional interface for Fs
+type PublicLinker interface {
+	// PublicLink generates a public link to the remote path (usually readable by anyone)
+	PublicLink(ctx context.Context, remote string, expire Duration, unlink bool) (string, error)
+}
+
+// MergeDirser is an optional interface for Fs
+type MergeDirser interface {
+	// MergeDirs merges the contents of all the directories passed
+	// in into the first one and rmdirs the other directories.
+	MergeDirs(ctx context.Context, dirs []Directory) error
+}
+
+// CleanUpper is an optional interface for Fs
+type CleanUpper interface {
+	// CleanUp the trash in the Fs
+	//
+	// Implement this if you have a way of emptying the trash or
+	// otherwise cleaning up old versions of files.
+	CleanUp(ctx context.Context) error
+}
+
+// ListRer is an optional interface for Fs
+type ListRer interface {
+	// ListR lists the objects and directories of the Fs starting
+	// from dir recursively into out.
+	//
+	// dir should be "" to start from the root, and should not
+	// have trailing slashes.
+	//
+	// This should return ErrDirNotFound if the directory isn't
+	// found.
+	//
+	// It should call callback for each tranche of entries read.
+	// These need not be returned in any particular order. If
+	// callback returns an error then the listing will stop
+	// immediately.
+	//
+	// Don't implement this unless you have a more efficient way
+	// of listing recursively than doing a directory traversal.
+	ListR(ctx context.Context, dir string, callback ListRCallback) error
+}
+
+// RangeSeeker is the interface that wraps the RangeSeek method.
+//
+// Some of the returns from Object.Open() may optionally implement
+// this method for efficiency purposes.
+type RangeSeeker interface {
+	// RangeSeek behaves like a call to Seek(offset int64, whence
+	// int) with the output wrapped in an io.LimitedReader
+	// limiting the total length to limit.
+	//
+	// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
+	RangeSeek(ctx context.Context, offset int64, whence int, length int64) (int64, error)
+}
+
+// Abouter is an optional interface for Fs
+type Abouter interface {
+	// About gets quota information from the Fs
+	About(ctx context.Context) (*Usage, error)
+}
+
+// OpenWriterAter is an optional interface for Fs
+type OpenWriterAter interface {
+	// OpenWriterAt opens with a handle for random access writes
+	//
+	// Pass in the remote desired and the size if known.
+ // + // It truncates any existing object + OpenWriterAt(ctx context.Context, remote string, size int64) (WriterAtCloser, error) +} + +// UserInfoer is an optional interface for Fs +type UserInfoer interface { + // UserInfo returns info about the connected user + UserInfo(ctx context.Context) (map[string]string, error) +} + +// Disconnecter is an optional interface for Fs +type Disconnecter interface { + // Disconnect the current user + Disconnect(ctx context.Context) error +} + +// CommandHelp describes a single backend Command +// +// These are automatically inserted in the docs +type CommandHelp struct { + Name string // Name of the command, e.g. "link" + Short string // Single line description + Long string // Long multi-line description + Opts map[string]string // maps option name to a single line help +} + +// Commander is an interface to wrap the Command function +type Commander interface { + // Command the backend to run a named command + // + // The command run is name + // args may be used to read arguments from + // opts may be used to read optional arguments from + // + // The result should be capable of being JSON encoded + // If it is a string or a []string it will be shown to the user + // otherwise it will be JSON encoded and shown to the user like that + Command(ctx context.Context, name string, arg []string, opt map[string]string) (interface{}, error) +} + +// Shutdowner is an interface to wrap the Shutdown function +type Shutdowner interface { + // Shutdown the backend, closing any background tasks and any + // cached connections. + Shutdown(ctx context.Context) error +} + +// ObjectsChan is a channel of Objects +type ObjectsChan chan Object + +// Objects is a slice of Object~s +type Objects []Object + +// ObjectPair is a pair of Objects used to describe a potential copy +// operation. +type ObjectPair struct { + Src, Dst Object + Name string +} + +// UnWrapFs unwraps f as much as possible and returns the base Fs +func UnWrapFs(f Fs) Fs { + for { + unwrap := f.Features().UnWrap + if unwrap == nil { + break // not a wrapped Fs, use current + } + next := unwrap() + if next == nil { + break // no base Fs found, use current + } + f = next + } + return f +} + +// UnWrapObject unwraps o as much as possible and returns the base object +func UnWrapObject(o Object) Object { + for { + u, ok := o.(ObjectUnWrapper) + if !ok { + break // not a wrapped object, use current + } + next := u.UnWrap() + if next == nil { + break // no base object found, use current + } + o = next + } + return o +} + +// UnWrapObjectInfo returns the underlying Object unwrapped as much as +// possible or nil. +func UnWrapObjectInfo(oi ObjectInfo) Object { + o, ok := oi.(Object) + if !ok { + return nil + } + return UnWrapObject(o) +} + +// Find looks for a RegInfo object for the name passed in. The name +// can be either the Name or the Prefix. 
+//
+// Services are looked up in the config file
+func Find(name string) (*RegInfo, error) {
+	for _, item := range Registry {
+		if item.Name == name || item.Prefix == name || item.FileName() == name {
+			return item, nil
+		}
+	}
+	return nil, errors.Errorf("didn't find backend called %q", name)
+}
+
+// MustFind looks for an Info object for the type name passed in
+//
+// Services are looked up in the config file
+//
+// Exits with a fatal error if not found
+func MustFind(name string) *RegInfo {
+	fs, err := Find(name)
+	if err != nil {
+		log.Fatalf("Failed to find remote: %v", err)
+	}
+	return fs
+}
+
+// ParseRemote deconstructs a path into configName, fsPath, looking up
+// the fsName in the config file (returning NotFoundInConfigFile if not found)
+func ParseRemote(path string) (fsInfo *RegInfo, configName, fsPath string, err error) {
+	configName, fsPath, err = fspath.Parse(path)
+	if err != nil {
+		return nil, "", "", err
+	}
+	var fsName string
+	var ok bool
+	if configName != "" {
+		if strings.HasPrefix(configName, ":") {
+			fsName = configName[1:]
+		} else {
+			m := ConfigMap(nil, configName)
+			fsName, ok = m.Get("type")
+			if !ok {
+				return nil, "", "", ErrorNotFoundInConfigFile
+			}
+		}
+	} else {
+		fsName = "local"
+		configName = "local"
+	}
+	fsInfo, err = Find(fsName)
+	return fsInfo, configName, fsPath, err
+}
+
+// A configmap.Getter to read from the environment RCLONE_CONFIG_backend_option_name
+type configEnvVars string
+
+// Get a config item from the environment variables if possible
+func (configName configEnvVars) Get(key string) (value string, ok bool) {
+	return os.LookupEnv(ConfigToEnv(string(configName), key))
+}
+
+// A configmap.Getter to read from the environment RCLONE_option_name
+type optionEnvVars struct {
+	fsInfo *RegInfo
+}
+
+// Get a config item from the option environment variables if possible
+func (oev optionEnvVars) Get(key string) (value string, ok bool) {
+	opt := oev.fsInfo.Options.Get(key)
+	if opt == nil {
+		return "", false
+	}
+	// For options with NoPrefix set, check without prefix too
+	if opt.NoPrefix {
+		value, ok = os.LookupEnv(OptionToEnv(key))
+		if ok {
+			return value, ok
+		}
+	}
+	return os.LookupEnv(OptionToEnv(oev.fsInfo.Prefix + "-" + key))
+}
+
+// A configmap.Getter to read either the default value or the set
+// value from the RegInfo.Options
+type regInfoValues struct {
+	fsInfo     *RegInfo
+	useDefault bool
+}
+
+// override the values in configMap with either the flag values or
+// the default values
+func (r *regInfoValues) Get(key string) (value string, ok bool) {
+	opt := r.fsInfo.Options.Get(key)
+	if opt != nil && (r.useDefault || opt.Value != nil) {
+		return opt.String(), true
+	}
+	return "", false
+}
+
+// A configmap.Setter to write to the config file
+type setConfigFile string
+
+// Set a config item into the config file
+func (section setConfigFile) Set(key, value string) {
+	Debugf(nil, "Saving config %q = %q in section %q of the config file", key, value, section)
+	err := ConfigFileSet(string(section), key, value)
+	if err != nil {
+		Errorf(nil, "Failed saving config %q = %q in section %q of the config file: %v", key, value, section, err)
+	}
+}
+
+// A configmap.Getter to read from the config file
+type getConfigFile string
+
+// Get a config item from the config file
+func (section getConfigFile) Get(key string) (value string, ok bool) {
+	value, ok = ConfigFileGet(string(section), key)
+	// Ignore empty values in the config file
+	if value == "" {
+		ok = false
+	}
+	return value, ok
+}
+
+// ConfigMap creates
a configmap.Map from the *RegInfo and the +// configName passed in. +// +// If fsInfo is nil then the returned configmap.Map should only be +// used for reading non backend specific parameters, such as "type". +func ConfigMap(fsInfo *RegInfo, configName string) (config *configmap.Map) { + // Create the config + config = configmap.New() + + // Read the config, more specific to least specific + + // flag values + if fsInfo != nil { + config.AddGetter(®InfoValues{fsInfo, false}) + } + + // remote specific environment vars + config.AddGetter(configEnvVars(configName)) + + // backend specific environment vars + if fsInfo != nil { + config.AddGetter(optionEnvVars{fsInfo: fsInfo}) + } + + // config file + config.AddGetter(getConfigFile(configName)) + + // default values + if fsInfo != nil { + config.AddGetter(®InfoValues{fsInfo, true}) + } + + // Set Config + config.AddSetter(setConfigFile(configName)) + return config +} + +// ConfigFs makes the config for calling NewFs with. +// +// It parses the path which is of the form remote:path +// +// Remotes are looked up in the config file. If the remote isn't +// found then NotFoundInConfigFile will be returned. +func ConfigFs(path string) (fsInfo *RegInfo, configName, fsPath string, config *configmap.Map, err error) { + // Parse the remote path + fsInfo, configName, fsPath, err = ParseRemote(path) + if err != nil { + return + } + config = ConfigMap(fsInfo, configName) + return +} + +// NewFs makes a new Fs object from the path +// +// The path is of the form remote:path +// +// Remotes are looked up in the config file. If the remote isn't +// found then NotFoundInConfigFile will be returned. +// +// On Windows avoid single character remote names as they can be mixed +// up with drive letters. +func NewFs(ctx context.Context, path string) (Fs, error) { + Debugf(nil, "Creating backend with remote %q", path) + fsInfo, configName, fsPath, config, err := ConfigFs(path) + if err != nil { + return nil, err + } + return fsInfo.NewFs(ctx, configName, fsPath, config) +} + +// ConfigString returns a canonical version of the config string used +// to configure the Fs as passed to fs.NewFs +func ConfigString(f Fs) string { + name := f.Name() + root := f.Root() + if name == "local" && f.Features().IsLocal { + return root + } + return name + ":" + root +} + +// TemporaryLocalFs creates a local FS in the OS's temporary directory. +// +// No cleanup is performed, the caller must call Purge on the Fs themselves. +func TemporaryLocalFs(ctx context.Context) (Fs, error) { + path, err := ioutil.TempDir("", "rclone-spool") + if err == nil { + err = os.Remove(path) + } + if err != nil { + return nil, err + } + path = filepath.ToSlash(path) + return NewFs(ctx, path) +} + +// CheckClose is a utility function used to check the return from +// Close in a defer statement. +func CheckClose(c io.Closer, err *error) { + cerr := c.Close() + if *err == nil { + *err = cerr + } +} + +// FileExists returns true if a file remote exists. +// If remote is a directory, FileExists returns false. +func FileExists(ctx context.Context, fs Fs, remote string) (bool, error) { + _, err := fs.NewObject(ctx, remote) + if err != nil { + if err == ErrorObjectNotFound || err == ErrorNotAFile || err == ErrorPermissionDenied { + return false, nil + } + return false, err + } + return true, nil +} + +// GetModifyWindow calculates the maximum modify window between the given Fses +// and the Config.ModifyWindow parameter. 
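// A hedged usage sketch (not from the file): open two backends with NewFs and
// derive the comparison window for sync-style tools; the paths are made up:
//
//	src, err := fs.NewFs(ctx, "remote:path")
//	if err != nil {
//		return err
//	}
//	dst, err := fs.NewFs(ctx, "/tmp/dir")
//	if err != nil {
//		return err
//	}
//	window := fs.GetModifyWindow(ctx, src, dst)
//	fs.Debugf(nil, "modify window is %v", window)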
+func GetModifyWindow(ctx context.Context, fss ...Info) time.Duration { + window := GetConfig(ctx).ModifyWindow + for _, f := range fss { + if f != nil { + precision := f.Precision() + if precision == ModTimeNotSupported { + return ModTimeNotSupported + } + if precision > window { + window = precision + } + } + } + return window +} + +// Pacer is a simple wrapper around a pacer.Pacer with logging. +type Pacer struct { + *pacer.Pacer +} + +type logCalculator struct { + pacer.Calculator +} + +// NewPacer creates a Pacer for the given Fs and Calculator. +func NewPacer(ctx context.Context, c pacer.Calculator) *Pacer { + p := &Pacer{ + Pacer: pacer.New( + pacer.InvokerOption(pacerInvoker), + pacer.MaxConnectionsOption(GetConfig(ctx).Checkers+GetConfig(ctx).Transfers), + pacer.RetriesOption(GetConfig(ctx).LowLevelRetries), + pacer.CalculatorOption(c), + ), + } + p.SetCalculator(c) + return p +} + +func (d *logCalculator) Calculate(state pacer.State) time.Duration { + oldSleepTime := state.SleepTime + newSleepTime := d.Calculator.Calculate(state) + if state.ConsecutiveRetries > 0 { + if newSleepTime != oldSleepTime { + Debugf("pacer", "Rate limited, increasing sleep to %v", newSleepTime) + } + } else { + if newSleepTime != oldSleepTime { + Debugf("pacer", "Reducing sleep to %v", newSleepTime) + } + } + return newSleepTime +} + +// SetCalculator sets the pacing algorithm. Don't modify the Calculator object +// afterwards, use the ModifyCalculator method when needed. +// +// It will choose the default algorithm if nil is passed in. +func (p *Pacer) SetCalculator(c pacer.Calculator) { + switch c.(type) { + case *logCalculator: + Logf("pacer", "Invalid Calculator in fs.Pacer.SetCalculator") + case nil: + c = &logCalculator{pacer.NewDefault()} + default: + c = &logCalculator{c} + } + + p.Pacer.SetCalculator(c) +} + +// ModifyCalculator calls the given function with the currently configured +// Calculator and the Pacer lock held. 
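// Typical Pacer usage is via Call, which retries while the paced function
// returns true (a sketch under assumptions; doRequest is hypothetical and
// fserrors.ShouldRetry is defined later in this patch):
//
//	p := fs.NewPacer(ctx, pacer.NewDefault())
//	err := p.Call(func() (bool, error) {
//		err := doRequest()
//		return fserrors.ShouldRetry(err), err
//	})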
+func (p *Pacer) ModifyCalculator(f func(pacer.Calculator)) {
+	p.Pacer.ModifyCalculator(func(c pacer.Calculator) {
+		switch _c := c.(type) {
+		case *logCalculator:
+			f(_c.Calculator)
+		default:
+			Logf("pacer", "Invalid Calculator in fs.Pacer: %T", c)
+			f(c)
+		}
+	})
+}
+
+func pacerInvoker(try, retries int, f pacer.Paced) (retry bool, err error) {
+	retry, err = f()
+	if retry {
+		Debugf("pacer", "low level retry %d/%d (error %v)", try, retries, err)
+		err = fserrors.RetryError(err)
+	}
+	return
+}
diff --git a/vendor/github.com/rclone/rclone/fs/fserrors/enospc_error.go b/vendor/github.com/rclone/rclone/fs/fserrors/enospc_error.go
new file mode 100644
index 00000000000..a2cb1d496a6
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/fserrors/enospc_error.go
@@ -0,0 +1,23 @@
+// +build !plan9
+
+package fserrors
+
+import (
+	"syscall"
+
+	"github.com/rclone/rclone/lib/errors"
+)
+
+// IsErrNoSpace checks a possibly wrapped error to
+// see if it contains an ENOSPC error
+func IsErrNoSpace(cause error) (isNoSpc bool) {
+	errors.Walk(cause, func(c error) bool {
+		if c == syscall.ENOSPC {
+			isNoSpc = true
+			return true
+		}
+		isNoSpc = false
+		return false
+	})
+	return
+}
diff --git a/vendor/github.com/rclone/rclone/fs/fserrors/enospc_error_notsupported.go b/vendor/github.com/rclone/rclone/fs/fserrors/enospc_error_notsupported.go
new file mode 100644
index 00000000000..c4cd1e94094
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/fserrors/enospc_error_notsupported.go
@@ -0,0 +1,10 @@
+// +build plan9
+
+package fserrors
+
+// IsErrNoSpace on plan9 returns false because
+// plan9 does not support the syscall.ENOSPC error.
+func IsErrNoSpace(cause error) (isNoSpc bool) {
+	isNoSpc = false
+	return
+}
diff --git a/vendor/github.com/rclone/rclone/fs/fserrors/error.go b/vendor/github.com/rclone/rclone/fs/fserrors/error.go
new file mode 100644
index 00000000000..76922e55c0e
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/fserrors/error.go
@@ -0,0 +1,448 @@
+// Package fserrors provides errors and error handling
+package fserrors
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/rclone/rclone/lib/errors"
+)
+
+// Retrier is an optional interface for error as to whether the
+// operation should be retried at a high level.
+//
+// This should be returned from Update or Put methods as required
+type Retrier interface {
+	error
+	Retry() bool
+}
+
+// retryError is a type of error
+type retryError string
+
+// Error interface
+func (r retryError) Error() string {
+	return string(r)
+}
+
+// Retry interface
+func (r retryError) Retry() bool {
+	return true
+}
+
+// Check interface
+var _ Retrier = retryError("")
+
+// RetryErrorf makes an error which indicates it would like to be retried
+func RetryErrorf(format string, a ...interface{}) error {
+	return retryError(fmt.Sprintf(format, a...))
+}
+
+// wrappedRetryError is an error wrapped so it will satisfy the
+// Retrier interface and return true
+type wrappedRetryError struct {
+	error
+}
+
+// Retry interface
+func (err wrappedRetryError) Retry() bool {
+	return true
+}
+
+// Check interface
+var _ Retrier = wrappedRetryError{error(nil)}
+
+// RetryError makes an error which indicates it would like to be retried
+func RetryError(err error) error {
+	if err == nil {
+		err = errors.New("needs retry")
+	}
+	return wrappedRetryError{err}
+}
+
+func (err wrappedRetryError) Cause() error {
+	return err.error
+}
+
+// IsRetryError returns true if err conforms to the Retrier interface
+// and calling the Retry method returns true.
+func IsRetryError(err error) (isRetry bool) {
+	errors.Walk(err, func(err error) bool {
+		if r, ok := err.(Retrier); ok {
+			isRetry = r.Retry()
+			return true
+		}
+		return false
+	})
+	return
+}
+
+// Fataler is an optional interface for error as to whether the
+// operation should cause the entire operation to finish immediately.
+//
+// This should be returned from Update or Put methods as required
+type Fataler interface {
+	error
+	Fatal() bool
+}
+
+// wrappedFatalError is an error wrapped so it will satisfy the
+// Fataler interface and return true
+type wrappedFatalError struct {
+	error
+}
+
+// Fatal interface
+func (err wrappedFatalError) Fatal() bool {
+	return true
+}
+
+// Check interface
+var _ Fataler = wrappedFatalError{error(nil)}
+
+// FatalError makes an error which indicates it is a fatal error and
+// the sync should stop.
+func FatalError(err error) error {
+	if err == nil {
+		err = errors.New("fatal error")
+	}
+	return wrappedFatalError{err}
+}
+
+func (err wrappedFatalError) Cause() error {
+	return err.error
+}
+
+// IsFatalError returns true if err conforms to the Fataler interface
+// and calling the Fatal method returns true.
+func IsFatalError(err error) (isFatal bool) {
+	errors.Walk(err, func(err error) bool {
+		if r, ok := err.(Fataler); ok {
+			isFatal = r.Fatal()
+			return true
+		}
+		return false
+	})
+	return
+}
+
+// NoRetrier is an optional interface for error as to whether the
+// operation should not be retried at a high level.
+//
+// If only NoRetry errors are returned in a sync then the sync won't
+// be retried.
+//
+// This should be returned from Update or Put methods as required
+type NoRetrier interface {
+	error
+	NoRetry() bool
+}
+
+// wrappedNoRetryError is an error wrapped so it will satisfy the
+// NoRetrier interface and return true
+type wrappedNoRetryError struct {
+	error
+}
+
+// NoRetry interface
+func (err wrappedNoRetryError) NoRetry() bool {
+	return true
+}
+
+// Check interface
+var _ NoRetrier = wrappedNoRetryError{error(nil)}
+
+// NoRetryError makes an error which indicates the sync shouldn't be
+// retried.
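// An illustrative sketch of how a backend might classify failures (doUpload
// and isTransient are hypothetical helpers, not part of this file):
//
//	err := doUpload()
//	if err != nil {
//		if isTransient(err) {
//			return fserrors.RetryError(err) // high level retry wanted
//		}
//		return fserrors.NoRetryError(err) // sync should not be retried
//	}
//
// and the caller can test the classification with fserrors.IsRetryError(err)
// or fserrors.IsNoRetryError(err).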
+func NoRetryError(err error) error { + return wrappedNoRetryError{err} +} + +func (err wrappedNoRetryError) Cause() error { + return err.error +} + +// IsNoRetryError returns true if err conforms to the NoRetry +// interface and calling the NoRetry method returns true. +func IsNoRetryError(err error) (isNoRetry bool) { + errors.Walk(err, func(err error) bool { + if r, ok := err.(NoRetrier); ok { + isNoRetry = r.NoRetry() + return true + } + return false + }) + return +} + +// NoLowLevelRetrier is an optional interface for error as to whether +// the operation should not be retried at a low level. +// +// NoLowLevelRetry errors won't be retried by low level retry loops. +type NoLowLevelRetrier interface { + error + NoLowLevelRetry() bool +} + +// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the +// NoLowLevelRetrier interface and return true +type wrappedNoLowLevelRetryError struct { + error +} + +// NoLowLevelRetry interface +func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool { + return true +} + +// Check interface +var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)} + +// NoLowLevelRetryError makes an error which indicates the sync +// shouldn't be low level retried. +func NoLowLevelRetryError(err error) error { + return wrappedNoLowLevelRetryError{err} +} + +// Cause returns the underlying error +func (err wrappedNoLowLevelRetryError) Cause() error { + return err.error +} + +// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry +// interface and calling the NoLowLevelRetry method returns true. +func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) { + errors.Walk(err, func(err error) bool { + if r, ok := err.(NoLowLevelRetrier); ok { + isNoLowLevelRetry = r.NoLowLevelRetry() + return true + } + return false + }) + return +} + +// RetryAfter is an optional interface for error as to whether the +// operation should be retried after a given delay +// +// This should be returned from Update or Put methods as required and +// will cause the entire sync to be retried after a delay. +type RetryAfter interface { + error + RetryAfter() time.Time +} + +// ErrorRetryAfter is an error which expresses a time that should be +// waited for until trying again +type ErrorRetryAfter time.Time + +// NewErrorRetryAfter returns an ErrorRetryAfter with the given +// duration as an endpoint +func NewErrorRetryAfter(d time.Duration) ErrorRetryAfter { + return ErrorRetryAfter(time.Now().Add(d)) +} + +// Error returns the textual version of the error +func (e ErrorRetryAfter) Error() string { + return fmt.Sprintf("try again after %v (%v)", time.Time(e).Format(time.RFC3339Nano), time.Time(e).Sub(time.Now())) +} + +// RetryAfter returns the time the operation should be retried at or +// after +func (e ErrorRetryAfter) RetryAfter() time.Time { + return time.Time(e) +} + +// Check interface +var _ RetryAfter = ErrorRetryAfter{} + +// RetryAfterErrorTime returns the time that the RetryAfter error +// indicates or a Zero time.Time +func RetryAfterErrorTime(err error) (retryAfter time.Time) { + errors.Walk(err, func(err error) bool { + if r, ok := err.(RetryAfter); ok { + retryAfter = r.RetryAfter() + return true + } + return false + }) + return +} + +// IsRetryAfterError returns true if err is an ErrorRetryAfter +func IsRetryAfterError(err error) bool { + return !RetryAfterErrorTime(err).IsZero() +} + +// CountableError is an optional interface for error. 
It stores a boolean
+// which signifies if the error has already been counted or not
+type CountableError interface {
+	error
+	Count()
+	IsCounted() bool
+}
+
+// wrappedCountableError is an error wrapped so it will satisfy the
+// CountableError interface
+type wrappedCountableError struct {
+	error
+	isCounted bool
+}
+
+// CountableError interface
+func (err *wrappedCountableError) Count() {
+	err.isCounted = true
+}
+
+// CountableError interface
+func (err *wrappedCountableError) IsCounted() bool {
+	return err.isCounted
+}
+
+func (err *wrappedCountableError) Cause() error {
+	return err.error
+}
+
+// IsCounted returns true if err conforms to the CountableError interface
+// and has already been counted
+func IsCounted(err error) bool {
+	if r, ok := err.(CountableError); ok {
+		return r.IsCounted()
+	}
+	return false
+}
+
+// Count sets the isCounted variable on the error if it conforms to the
+// CountableError interface
+func Count(err error) {
+	if r, ok := err.(CountableError); ok {
+		r.Count()
+	}
+}
+
+// Check interface
+var _ CountableError = &wrappedCountableError{error: error(nil)}
+
+// FsError makes an error which can keep a record that it is already counted
+// or not
+func FsError(err error) error {
+	if err == nil {
+		err = errors.New("countable error")
+	}
+	return &wrappedCountableError{error: err}
+}
+
+// Cause is a souped up errors.Cause which can unwrap some standard
+// library errors too. It returns true if any of the intermediate
+// errors had a Timeout() or Temporary() method which returned true.
+func Cause(cause error) (retriable bool, err error) {
+	errors.Walk(cause, func(c error) bool {
+		// Check for net error Timeout()
+		if x, ok := c.(interface {
+			Timeout() bool
+		}); ok && x.Timeout() {
+			retriable = true
+		}
+
+		// Check for net error Temporary()
+		if x, ok := c.(interface {
+			Temporary() bool
+		}); ok && x.Temporary() {
+			retriable = true
+		}
+		err = c
+		return false
+	})
+	return
+}
+
+// retriableErrorStrings is a list of phrases which when we find it
+// in an error, we know it is a networking error which should be
+// retried.
+//
+// This is incredibly ugly - if only errors.Cause worked for all
+// errors and all errors were exported from the stdlib.
+var retriableErrorStrings = []string{
+	"use of closed network connection", // internal/poll/fd.go
+	"unexpected EOF reading trailer",   // net/http/transfer.go
+	"transport connection broken",      // net/http/transport.go
+	"http: ContentLength=",             // net/http/transfer.go
+	"server closed idle connection",    // net/http/transport.go
+	"bad record MAC",                   // crypto/tls/alert.go
+	"stream error:",                    // net/http/h2_bundle.go
+	"tls: use of closed connection",    // crypto/tls/conn.go
+}
+
+// Errors which indicate networking errors which should be retried
+//
+// These are added to in retriable_errors*.go
+var retriableErrors = []error{
+	io.EOF,
+	io.ErrUnexpectedEOF,
+}
+
+// ShouldRetry looks at an error and tries to work out if retrying the
+// operation that caused it would be a good idea. It returns true if
+// the error implements Timeout() or Temporary() or if the error
+// indicates a premature closing of the connection.
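// A sketch of the usual low level retry loop built on ShouldRetry
// (operation and maxTries are assumptions, not part of this file):
//
//	var err error
//	for try := 1; try <= maxTries; try++ {
//		err = operation()
//		if err == nil || !ShouldRetry(err) {
//			break
//		}
//	}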
+func ShouldRetry(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	// If error has been marked to NoLowLevelRetry then don't retry
+	if IsNoLowLevelRetryError(err) {
+		return false
+	}
+
+	// Find root cause if available
+	retriable, err := Cause(err)
+	if retriable {
+		return true
+	}
+
+	// Check if it is a retriable error
+	for _, retriableErr := range retriableErrors {
+		if err == retriableErr {
+			return true
+		}
+	}
+
+	// Check error strings (yuch!) too
+	errString := err.Error()
+	for _, phrase := range retriableErrorStrings {
+		if strings.Contains(errString, phrase) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ShouldRetryHTTP returns a boolean as to whether this resp deserves
+// to be retried. It checks to see if the HTTP response code is in the
+// slice retryErrorCodes.
+func ShouldRetryHTTP(resp *http.Response, retryErrorCodes []int) bool {
+	if resp == nil {
+		return false
+	}
+	for _, e := range retryErrorCodes {
+		if resp.StatusCode == e {
+			return true
+		}
+	}
+	return false
+}
+
+type causer interface {
+	Cause() error
+}
+
+var (
+	_ causer = wrappedRetryError{}
+	_ causer = wrappedFatalError{}
+	_ causer = wrappedNoRetryError{}
+)
diff --git a/vendor/github.com/rclone/rclone/fs/fserrors/retriable_errors.go b/vendor/github.com/rclone/rclone/fs/fserrors/retriable_errors.go
new file mode 100644
index 00000000000..9ec0b5b67d9
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/fserrors/retriable_errors.go
@@ -0,0 +1,21 @@
+// +build !plan9
+
+package fserrors
+
+import (
+	"syscall"
+)
+
+func init() {
+	retriableErrors = append(retriableErrors,
+		syscall.EPIPE,
+		syscall.ETIMEDOUT,
+		syscall.ECONNREFUSED,
+		syscall.EHOSTDOWN,
+		syscall.EHOSTUNREACH,
+		syscall.ECONNABORTED,
+		syscall.EAGAIN,
+		syscall.EWOULDBLOCK,
+		syscall.ECONNRESET,
+	)
+}
diff --git a/vendor/github.com/rclone/rclone/fs/fserrors/retriable_errors_windows.go b/vendor/github.com/rclone/rclone/fs/fserrors/retriable_errors_windows.go
new file mode 100644
index 00000000000..c6bea32ae5d
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/fserrors/retriable_errors_windows.go
@@ -0,0 +1,55 @@
+// +build windows
+
+package fserrors
+
+import (
+	"syscall"
+)
+
+// Windows error code list
+// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
+const (
+	WSAENETDOWN       syscall.Errno = 10050
+	WSAENETUNREACH    syscall.Errno = 10051
+	WSAENETRESET      syscall.Errno = 10052
+	WSAECONNABORTED   syscall.Errno = 10053
+	WSAECONNRESET     syscall.Errno = 10054
+	WSAENOBUFS        syscall.Errno = 10055
+	WSAENOTCONN       syscall.Errno = 10057
+	WSAESHUTDOWN      syscall.Errno = 10058
+	WSAETIMEDOUT      syscall.Errno = 10060
+	WSAECONNREFUSED   syscall.Errno = 10061
+	WSAEHOSTDOWN      syscall.Errno = 10064
+	WSAEHOSTUNREACH   syscall.Errno = 10065
+	WSAEDISCON        syscall.Errno = 10101
+	WSAEREFUSED       syscall.Errno = 10112
+	WSAHOST_NOT_FOUND syscall.Errno = 11001
+	WSATRY_AGAIN      syscall.Errno = 11002
+)
+
+func init() {
+	// append some lower level errors since the standardized ones
+	// don't seem to happen
+	retriableErrors = append(retriableErrors,
+		syscall.WSAECONNRESET,
+		WSAENETDOWN,
+		WSAENETUNREACH,
+		WSAENETRESET,
+		WSAECONNABORTED,
+		WSAECONNRESET,
+		WSAENOBUFS,
+		WSAENOTCONN,
+		WSAESHUTDOWN,
+		WSAETIMEDOUT,
+		WSAECONNREFUSED,
+		WSAEHOSTDOWN,
+		WSAEHOSTUNREACH,
+		WSAEDISCON,
+		WSAEREFUSED,
+		WSAHOST_NOT_FOUND,
+		WSATRY_AGAIN,
+		syscall.ERROR_HANDLE_EOF,
+		syscall.ERROR_NETNAME_DELETED,
+		syscall.ERROR_BROKEN_PIPE,
+	)
+}
diff --git a/vendor/github.com/rclone/rclone/fs/fshttp/http.go
b/vendor/github.com/rclone/rclone/fs/fshttp/http.go new file mode 100644 index 00000000000..d8478536979 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/fshttp/http.go @@ -0,0 +1,368 @@ +// Package fshttp contains the common http parts of the config, Transport and Client +package fshttp + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/cookiejar" + "net/http/httputil" + "sync" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/lib/structs" + "golang.org/x/net/publicsuffix" +) + +const ( + separatorReq = ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>" + separatorResp = "<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<" +) + +var ( + transport http.RoundTripper + noTransport = new(sync.Once) + cookieJar, _ = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + logMutex sync.Mutex +) + +// A net.Conn that sets a deadline for every Read or Write operation +type timeoutConn struct { + net.Conn + timeout time.Duration +} + +// create a timeoutConn using the timeout +func newTimeoutConn(conn net.Conn, timeout time.Duration) (c *timeoutConn, err error) { + c = &timeoutConn{ + Conn: conn, + timeout: timeout, + } + err = c.nudgeDeadline() + return +} + +// Nudge the deadline for an idle timeout on by c.timeout if non-zero +func (c *timeoutConn) nudgeDeadline() (err error) { + if c.timeout == 0 { + return nil + } + when := time.Now().Add(c.timeout) + return c.Conn.SetDeadline(when) +} + +// Read bytes doing idle timeouts +func (c *timeoutConn) Read(b []byte) (n int, err error) { + // Ideally we would LimitBandwidth(len(b)) here and replace tokens we didn't use + n, err = c.Conn.Read(b) + accounting.TokenBucket.LimitBandwidth(accounting.TokenBucketSlotTransportRx, n) + // Don't nudge if no bytes or an error + if n == 0 || err != nil { + return + } + // Nudge the deadline on successful Read or Write + err = c.nudgeDeadline() + return n, err +} + +// Write bytes doing idle timeouts +func (c *timeoutConn) Write(b []byte) (n int, err error) { + accounting.TokenBucket.LimitBandwidth(accounting.TokenBucketSlotTransportTx, len(b)) + n, err = c.Conn.Write(b) + // Don't nudge if no bytes or an error + if n == 0 || err != nil { + return + } + // Nudge the deadline on successful Read or Write + err = c.nudgeDeadline() + return n, err +} + +// dial with context and timeouts +func dialContextTimeout(ctx context.Context, network, address string, ci *fs.ConfigInfo) (net.Conn, error) { + dialer := NewDialer(ctx) + c, err := dialer.DialContext(ctx, network, address) + if err != nil { + return c, err + } + return newTimeoutConn(c, ci.Timeout) +} + +// ResetTransport resets the existing transport, allowing it to take new settings. +// Should only be used for testing. +func ResetTransport() { + noTransport = new(sync.Once) +} + +// NewTransportCustom returns an http.RoundTripper with the correct timeouts. +// The customize function is called if set to give the caller an opportunity to +// customize any defaults in the Transport. +func NewTransportCustom(ctx context.Context, customize func(*http.Transport)) http.RoundTripper { + ci := fs.GetConfig(ctx) + // Start with a sensible set of defaults then override. 
+	// This also means we get new stuff when it gets added to go
+	t := new(http.Transport)
+	structs.SetDefaults(t, http.DefaultTransport.(*http.Transport))
+	t.Proxy = http.ProxyFromEnvironment
+	t.MaxIdleConnsPerHost = 2 * (ci.Checkers + ci.Transfers + 1)
+	t.MaxIdleConns = 2 * t.MaxIdleConnsPerHost
+	t.TLSHandshakeTimeout = ci.ConnectTimeout
+	t.ResponseHeaderTimeout = ci.Timeout
+
+	// TLS Config
+	t.TLSClientConfig = &tls.Config{
+		InsecureSkipVerify: ci.InsecureSkipVerify,
+	}
+
+	// Load client certs
+	if ci.ClientCert != "" || ci.ClientKey != "" {
+		if ci.ClientCert == "" || ci.ClientKey == "" {
+			log.Fatalf("Both --client-cert and --client-key must be set")
+		}
+		cert, err := tls.LoadX509KeyPair(ci.ClientCert, ci.ClientKey)
+		if err != nil {
+			log.Fatalf("Failed to load --client-cert/--client-key pair: %v", err)
+		}
+		t.TLSClientConfig.Certificates = []tls.Certificate{cert}
+		t.TLSClientConfig.BuildNameToCertificate()
+	}
+
+	// Load CA cert
+	if ci.CaCert != "" {
+		caCert, err := ioutil.ReadFile(ci.CaCert)
+		if err != nil {
+			log.Fatalf("Failed to read --ca-cert: %v", err)
+		}
+		caCertPool := x509.NewCertPool()
+		ok := caCertPool.AppendCertsFromPEM(caCert)
+		if !ok {
+			log.Fatalf("Failed to add certificates from --ca-cert")
+		}
+		t.TLSClientConfig.RootCAs = caCertPool
+	}
+
+	t.DisableCompression = ci.NoGzip
+	t.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+		return dialContextTimeout(ctx, network, addr, ci)
+	}
+	t.IdleConnTimeout = 60 * time.Second
+	t.ExpectContinueTimeout = ci.ExpectContinueTimeout
+
+	if ci.Dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 {
+		fs.Debugf(nil, "You have specified to dump information. Please note that the "+
+			"Accept-Encoding shown may not be correct in the request and the response may not show "+
+			"Content-Encoding if the go standard libraries auto gzip encoding was in effect. In this case"+
+			" the body of the request will be gunzipped before showing it.")
+	}
+
+	// customize the transport if required
+	if customize != nil {
+		customize(t)
+	}
+
+	// Wrap that http.Transport in our own transport
+	return newTransport(ci, t)
+}
+
+// NewTransport returns an http.RoundTripper with the correct timeouts
+func NewTransport(ctx context.Context) http.RoundTripper {
+	(*noTransport).Do(func() {
+		transport = NewTransportCustom(ctx, nil)
+	})
+	return transport
+}
+
+// NewClient returns an http.Client with the correct timeouts
+func NewClient(ctx context.Context) *http.Client {
+	ci := fs.GetConfig(ctx)
+	client := &http.Client{
+		Transport: NewTransport(ctx),
+	}
+	if ci.Cookie {
+		client.Jar = cookieJar
+	}
+	return client
+}
+
+// Transport is our http Transport which wraps an http.Transport
+// * Sets the User Agent
+// * Does logging
+// * Updates metrics
+type Transport struct {
+	*http.Transport
+	dump          fs.DumpFlags
+	filterRequest func(req *http.Request)
+	userAgent     string
+	headers       []*fs.HTTPOption
+	metrics       *Metrics
+}
+
+// newTransport wraps the http.Transport passed in and logs all
+// roundtrips including the body if required by the dump flags.
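// An illustrative caller of NewTransportCustom, seen from outside the
// package (the tweak shown is an assumption, not rclone policy):
//
//	client := &http.Client{
//		Transport: fshttp.NewTransportCustom(ctx, func(t *http.Transport) {
//			t.MaxConnsPerHost = 8 // example customization
//		}),
//	}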
+func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport { + return &Transport{ + Transport: transport, + dump: ci.Dump, + userAgent: ci.UserAgent, + headers: ci.Headers, + metrics: DefaultMetrics, + } +} + +// SetRequestFilter sets a filter to be used on each request +func (t *Transport) SetRequestFilter(f func(req *http.Request)) { + t.filterRequest = f +} + +// A mutex to protect this map +var checkedHostMu sync.RWMutex + +// A map of servers we have checked for time +var checkedHost = make(map[string]struct{}, 1) + +// Check the server time is the same as ours, once for each server +func checkServerTime(req *http.Request, resp *http.Response) { + host := req.URL.Host + if req.Host != "" { + host = req.Host + } + checkedHostMu.RLock() + _, ok := checkedHost[host] + checkedHostMu.RUnlock() + if ok { + return + } + dateString := resp.Header.Get("Date") + if dateString == "" { + return + } + date, err := http.ParseTime(dateString) + if err != nil { + fs.Debugf(nil, "Couldn't parse Date: from server %s: %q: %v", host, dateString, err) + return + } + dt := time.Since(date) + const window = 5 * 60 * time.Second + if dt > window || dt < -window { + fs.Logf(nil, "Time may be set wrong - time from %q is %v different from this computer", host, dt) + } + checkedHostMu.Lock() + checkedHost[host] = struct{}{} + checkedHostMu.Unlock() +} + +// cleanAuth gets rid of one authBuf header within the first 4k +func cleanAuth(buf, authBuf []byte) []byte { + // Find how much buffer to check + n := 4096 + if len(buf) < n { + n = len(buf) + } + // See if there is an Authorization: header + i := bytes.Index(buf[:n], authBuf) + if i < 0 { + return buf + } + i += len(authBuf) + // Overwrite the next 4 chars with 'X' + for j := 0; i < len(buf) && j < 4; j++ { + if buf[i] == '\n' { + break + } + buf[i] = 'X' + i++ + } + // Snip out to the next '\n' + j := bytes.IndexByte(buf[i:], '\n') + if j < 0 { + return buf[:i] + } + n = copy(buf[i:], buf[i+j:]) + return buf[:i+n] +} + +var authBufs = [][]byte{ + []byte("Authorization: "), + []byte("X-Auth-Token: "), +} + +// cleanAuths gets rid of all the possible Auth headers +func cleanAuths(buf []byte) []byte { + for _, authBuf := range authBufs { + buf = cleanAuth(buf, authBuf) + } + return buf +} + +// RoundTrip implements the RoundTripper interface. 
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + // Limit transactions per second if required + accounting.LimitTPS(req.Context()) + // Force user agent + req.Header.Set("User-Agent", t.userAgent) + // Set user defined headers + for _, option := range t.headers { + req.Header.Set(option.Key, option.Value) + } + // Filter the request if required + if t.filterRequest != nil { + t.filterRequest(req) + } + // Logf request + if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 { + buf, _ := httputil.DumpRequestOut(req, t.dump&(fs.DumpBodies|fs.DumpRequests) != 0) + if t.dump&fs.DumpAuth == 0 { + buf = cleanAuths(buf) + } + logMutex.Lock() + fs.Debugf(nil, "%s", separatorReq) + fs.Debugf(nil, "%s (req %p)", "HTTP REQUEST", req) + fs.Debugf(nil, "%s", string(buf)) + fs.Debugf(nil, "%s", separatorReq) + logMutex.Unlock() + } + // Do round trip + resp, err = t.Transport.RoundTrip(req) + // Logf response + if t.dump&(fs.DumpHeaders|fs.DumpBodies|fs.DumpAuth|fs.DumpRequests|fs.DumpResponses) != 0 { + logMutex.Lock() + fs.Debugf(nil, "%s", separatorResp) + fs.Debugf(nil, "%s (req %p)", "HTTP RESPONSE", req) + if err != nil { + fs.Debugf(nil, "Error: %v", err) + } else { + buf, _ := httputil.DumpResponse(resp, t.dump&(fs.DumpBodies|fs.DumpResponses) != 0) + fs.Debugf(nil, "%s", string(buf)) + } + fs.Debugf(nil, "%s", separatorResp) + logMutex.Unlock() + } + // Update metrics + t.metrics.onResponse(req, resp) + + if err == nil { + checkServerTime(req, resp) + } + return resp, err +} + +// NewDialer creates a net.Dialer structure with Timeout, Keepalive +// and LocalAddr set from rclone flags. +func NewDialer(ctx context.Context) *net.Dialer { + ci := fs.GetConfig(ctx) + dialer := &net.Dialer{ + Timeout: ci.ConnectTimeout, + KeepAlive: 30 * time.Second, + } + if ci.BindAddr != nil { + dialer.LocalAddr = &net.TCPAddr{IP: ci.BindAddr} + } + return dialer +} diff --git a/vendor/github.com/rclone/rclone/fs/fshttp/prometheus.go b/vendor/github.com/rclone/rclone/fs/fshttp/prometheus.go new file mode 100644 index 00000000000..aa7633d60c6 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/fshttp/prometheus.go @@ -0,0 +1,59 @@ +package fshttp + +import ( + "fmt" + "net/http" + + "github.com/prometheus/client_golang/prometheus" +) + +// Metrics provide Transport HTTP level metrics. +type Metrics struct { + StatusCode *prometheus.CounterVec +} + +// NewMetrics creates a new metrics instance, the instance shall be assigned to +// DefaultMetrics before any processing takes place. +func NewMetrics(namespace string) *Metrics { + return &Metrics{ + StatusCode: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: "http", + Name: "status_code", + }, []string{"host", "method", "code"}), + } +} + +// DefaultMetrics specifies metrics used for new Transports. +var DefaultMetrics = (*Metrics)(nil) + +// Collectors returns all prometheus metrics as collectors for registration. 
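// A hedged registration sketch: create the metrics, assign them before any
// transports are built, then register the collectors with Prometheus:
//
//	m := fshttp.NewMetrics("rclone")
//	fshttp.DefaultMetrics = m
//	prometheus.MustRegister(m.Collectors()...)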
+func (m *Metrics) Collectors() []prometheus.Collector { + if m == nil { + return nil + } + return []prometheus.Collector{ + m.StatusCode, + } +} + +func (m *Metrics) onResponse(req *http.Request, resp *http.Response) { + if m == nil { + return + } + + var statusCode = 0 + if resp != nil { + statusCode = resp.StatusCode + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + if host == "" { + host = req.Header.Get("Host") + } + + m.StatusCode.WithLabelValues(host, req.Method, fmt.Sprint(statusCode)).Inc() +} diff --git a/vendor/github.com/rclone/rclone/fs/fspath/path.go b/vendor/github.com/rclone/rclone/fs/fspath/path.go new file mode 100644 index 00000000000..a6a4843a484 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/fspath/path.go @@ -0,0 +1,148 @@ +// Package fspath contains routines for fspath manipulation +package fspath + +import ( + "errors" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/rclone/rclone/fs/driveletter" +) + +const ( + configNameRe = `[\w_ -]+` + remoteNameRe = `^(:?` + configNameRe + `):` +) + +var ( + errInvalidCharacters = errors.New("config name contains invalid characters - may only contain 0-9, A-Z ,a-z ,_ , - and space") + errCantBeEmpty = errors.New("can't use empty string as a path") + errCantStartWithDash = errors.New("config name starts with -") + + // urlMatcher is a pattern to match an rclone URL + // note that this matches invalid remoteNames + urlMatcher = regexp.MustCompile(`^(:?[^\\/:]*):(.*)$`) + + // configNameMatcher is a pattern to match an rclone config name + configNameMatcher = regexp.MustCompile(`^` + configNameRe + `$`) + + // remoteNameMatcher is a pattern to match an rclone remote name + remoteNameMatcher = regexp.MustCompile(remoteNameRe + `$`) +) + +// CheckConfigName returns an error if configName is invalid +func CheckConfigName(configName string) error { + if !configNameMatcher.MatchString(configName) { + return errInvalidCharacters + } + // Reject configName, if it starts with -, complicates usage. (#4261) + if strings.HasPrefix(configName, "-") { + return errCantStartWithDash + } + return nil +} + +// CheckRemoteName returns an error if remoteName is invalid +func CheckRemoteName(remoteName string) error { + if !remoteNameMatcher.MatchString(remoteName) { + return errInvalidCharacters + } + return nil +} + +// Parse deconstructs a remote path into configName and fsPath +// +// If the path is a local path then configName will be returned as "". +// +// So "remote:path/to/dir" will return "remote", "path/to/dir" +// and "/path/to/local" will return ("", "/path/to/local") +// +// Note that this will turn \ into / in the fsPath on Windows +// +// An error may be returned if the remote name has invalid characters +// in it or if the path is empty. 
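The contract just documented is easiest to see with a few concrete inputs before the implementation. A small sketch (paths are illustrative; "remote" is assumed to be a configured name, not a Windows drive letter):

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/fspath"
)

func main() {
	// A remote path splits into config name and fs path.
	name, p, _ := fspath.Parse("remote:path/to/dir")
	fmt.Println(name, p) // remote path/to/dir

	// A local path has an empty config name.
	name, p, _ = fspath.Parse("/path/to/local")
	fmt.Printf("%q %s\n", name, p) // "" /path/to/local

	// Split peels off the leaf so that parent+leaf == remote.
	parent, leaf, _ := fspath.Split("remote:path/to/dir/file.txt")
	fmt.Println(parent, leaf) // remote:path/to/dir/ file.txt
}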
+func Parse(path string) (configName, fsPath string, err error) { + if path == "" { + return "", "", errCantBeEmpty + } + parts := urlMatcher.FindStringSubmatch(path) + configName, fsPath = "", path + if parts != nil && !driveletter.IsDriveLetter(parts[1]) { + configName, fsPath = parts[1], parts[2] + err = CheckRemoteName(configName + ":") + if err != nil { + return configName, fsPath, errInvalidCharacters + } + } + // change native directory separators to / if there are any + fsPath = filepath.ToSlash(fsPath) + return configName, fsPath, nil +} + +// Split splits a remote into a parent and a leaf +// +// if it returns leaf as an empty string then remote is a directory +// +// if it returns parent as an empty string then that means the current directory +// +// The returned values have the property that parent + leaf == remote +// (except under Windows where \ will be translated into /) +func Split(remote string) (parent string, leaf string, err error) { + remoteName, remotePath, err := Parse(remote) + if err != nil { + return "", "", err + } + if remoteName != "" { + remoteName += ":" + } + // Construct new remote name without last segment + parent, leaf = path.Split(remotePath) + return remoteName + parent, leaf, nil +} + +// Make filePath absolute so it can't read above the root +func makeAbsolute(filePath string) string { + leadingSlash := strings.HasPrefix(filePath, "/") + filePath = path.Join("/", filePath) + if !leadingSlash && strings.HasPrefix(filePath, "/") { + filePath = filePath[1:] + } + return filePath +} + +// JoinRootPath joins filePath onto remote +// +// If the remote has a leading "//" this is preserved to allow Windows +// network paths to be used as remotes. +// +// If filePath is empty then remote will be returned. +// +// If the path contains \ these will be converted to / on Windows. +func JoinRootPath(remote, filePath string) string { + remote = filepath.ToSlash(remote) + if filePath == "" { + return remote + } + filePath = filepath.ToSlash(filePath) + filePath = makeAbsolute(filePath) + if strings.HasPrefix(remote, "//") { + return "/" + path.Join(remote, filePath) + } + remoteName, remotePath, err := Parse(remote) + if err != nil { + // Couldn't parse so assume it is a path + remoteName = "" + remotePath = remote + } + remotePath = path.Join(remotePath, filePath) + if remoteName != "" { + remoteName += ":" + // if have remote: then normalise the remotePath + if remotePath == "." 
{
+			remotePath = ""
+		}
+	}
+	return remoteName + remotePath
+}
diff --git a/vendor/github.com/rclone/rclone/fs/hash/hash.go b/vendor/github.com/rclone/rclone/fs/hash/hash.go
new file mode 100644
index 00000000000..8b49c2476da
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/hash/hash.go
@@ -0,0 +1,352 @@
+package hash
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"hash/crc32"
+	"io"
+	"strings"
+
+	"github.com/jzelinskie/whirlpool"
+	"github.com/pkg/errors"
+)
+
+// Type indicates a standard hashing algorithm
+type Type int
+
+type hashDefinition struct {
+	width    int
+	name     string
+	newFunc  func() hash.Hash
+	hashType Type
+}
+
+var hashes []*hashDefinition
+var highestType Type = 1
+
+// RegisterHash adds a new Hash to the list and returns its Type
+func RegisterHash(name string, width int, newFunc func() hash.Hash) Type {
+	definition := &hashDefinition{
+		name:     name,
+		width:    width,
+		newFunc:  newFunc,
+		hashType: highestType,
+	}
+	hashes = append(hashes, definition)
+	highestType = highestType << 1
+
+	return definition.hashType
+}
+
+// ErrUnsupported should be returned by a filesystem
+// if it is requested to deliver an unsupported hash type.
+var ErrUnsupported = errors.New("hash type not supported")
+
+var (
+	// None indicates no hashes are supported
+	None Type
+
+	// MD5 indicates MD5 support
+	MD5 Type
+
+	// SHA1 indicates SHA-1 support
+	SHA1 Type
+
+	// Whirlpool indicates Whirlpool support
+	Whirlpool Type
+
+	// CRC32 indicates CRC-32 support
+	CRC32 Type
+)
+
+func init() {
+	MD5 = RegisterHash("MD5", 32, md5.New)
+	SHA1 = RegisterHash("SHA-1", 40, sha1.New)
+	Whirlpool = RegisterHash("Whirlpool", 128, whirlpool.New)
+	CRC32 = RegisterHash("CRC-32", 8, func() hash.Hash { return crc32.NewIEEE() })
+}
+
+// Supported returns a set of all the hashes supported by
+// Stream and MultiHasher.
+func Supported() Set {
+	var types []Type
+	for _, v := range hashes {
+		types = append(types, v.hashType)
+	}
+
+	return NewHashSet(types...)
+}
+
+// Width returns the width in characters for any HashType
+func Width(hashType Type) int {
+	for _, v := range hashes {
+		if v.hashType == hashType {
+			return v.width
+		}
+	}
+
+	return 0
+}
+
+// Stream will calculate hashes of all supported hash types.
+func Stream(r io.Reader) (map[Type]string, error) {
+	return StreamTypes(r, Supported())
+}
+
+// StreamTypes will calculate hashes of the requested hash types.
+func StreamTypes(r io.Reader, set Set) (map[Type]string, error) {
+	hashers, err := fromTypes(set)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = io.Copy(toMultiWriter(hashers), r)
+	if err != nil {
+		return nil, err
+	}
+	var ret = make(map[Type]string)
+	for k, v := range hashers {
+		ret[k] = hex.EncodeToString(v.Sum(nil))
+	}
+	return ret, nil
+}
+
+// String returns a string representation of the hash type.
+// The function will panic if the hash type is unknown.
+func (h Type) String() string {
+	if h == None {
+		return "None"
+	}
+
+	for _, v := range hashes {
+		if v.hashType == h {
+			return v.name
+		}
+	}
+
+	err := fmt.Sprintf("internal error: unknown hash type: 0x%x", int(h))
+	panic(err)
+}
+
+// Set a Type from a flag
+func (h *Type) Set(s string) error {
+	if s == "None" {
+		*h = None
+		return nil // "None" is valid but deliberately not in the hashes list
+	}
+
+	for _, v := range hashes {
+		if v.name == s {
+			*h = v.hashType
+			return nil
+		}
+	}
+
+	return errors.Errorf("Unknown hash type %q", s)
+}
+
+// Type of the value
+func (h Type) Type() string {
+	return "string"
+}
+
+// fromTypes will return hashers for all the requested types.
+// The types must be a subset of SupportedHashes, +// and this function must support all types. +func fromTypes(set Set) (map[Type]hash.Hash, error) { + if !set.SubsetOf(Supported()) { + return nil, errors.Errorf("requested set %08x contains unknown hash types", int(set)) + } + var hashers = make(map[Type]hash.Hash) + + types := set.Array() + for _, t := range types { + for _, v := range hashes { + if t != v.hashType { + continue + } + + hashers[t] = v.newFunc() + break + } + + if hashers[t] == nil { + err := fmt.Sprintf("internal error: Unsupported hash type %v", t) + panic(err) + } + } + + return hashers, nil +} + +// toMultiWriter will return a set of hashers into a +// single multiwriter, where one write will update all +// the hashers. +func toMultiWriter(h map[Type]hash.Hash) io.Writer { + // Convert to to slice + var w = make([]io.Writer, 0, len(h)) + for _, v := range h { + w = append(w, v) + } + return io.MultiWriter(w...) +} + +// A MultiHasher will construct various hashes on +// all incoming writes. +type MultiHasher struct { + w io.Writer + size int64 + h map[Type]hash.Hash // Hashes +} + +// NewMultiHasher will return a hash writer that will write all +// supported hash types. +func NewMultiHasher() *MultiHasher { + h, err := NewMultiHasherTypes(Supported()) + if err != nil { + panic("internal error: could not create multihasher") + } + return h +} + +// NewMultiHasherTypes will return a hash writer that will write +// the requested hash types. +func NewMultiHasherTypes(set Set) (*MultiHasher, error) { + hashers, err := fromTypes(set) + if err != nil { + return nil, err + } + m := MultiHasher{h: hashers, w: toMultiWriter(hashers)} + return &m, nil +} + +func (m *MultiHasher) Write(p []byte) (n int, err error) { + n, err = m.w.Write(p) + m.size += int64(n) + return n, err +} + +// Sums returns the sums of all accumulated hashes as hex encoded +// strings. +func (m *MultiHasher) Sums() map[Type]string { + dst := make(map[Type]string) + for k, v := range m.h { + dst[k] = hex.EncodeToString(v.Sum(nil)) + } + return dst +} + +// Sum returns the specified hash from the multihasher +func (m *MultiHasher) Sum(hashType Type) ([]byte, error) { + h, ok := m.h[hashType] + if !ok { + return nil, ErrUnsupported + } + return h.Sum(nil), nil +} + +// Size returns the number of bytes written +func (m *MultiHasher) Size() int64 { + return m.size +} + +// A Set Indicates one or more hash types. +type Set int + +// NewHashSet will create a new hash set with the hash types supplied +func NewHashSet(t ...Type) Set { + h := Set(None) + return h.Add(t...) +} + +// Add one or more hash types to the set. +// Returns the modified hash set. +func (h *Set) Add(t ...Type) Set { + for _, v := range t { + *h |= Set(v) + } + return *h +} + +// Contains returns true if the +func (h Set) Contains(t Type) bool { + return int(h)&int(t) != 0 +} + +// Overlap returns the overlapping hash types +func (h Set) Overlap(t Set) Set { + return Set(int(h) & int(t)) +} + +// SubsetOf will return true if all types of h +// is present in the set c +func (h Set) SubsetOf(c Set) bool { + return int(h)|int(c) == int(c) +} + +// GetOne will return a hash type. +// Currently the first is returned, but it could be +// improved to return the strongest. 
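Before GetOne and the remaining Set helpers, a brief usage sketch ties the pieces above together: a MultiHasher accumulates the selected hashes over a single pass of the data, and Set provides the bit-set queries (the input string is illustrative):

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/rclone/rclone/fs/hash"
)

func main() {
	set := hash.NewHashSet(hash.MD5, hash.SHA1)

	hasher, err := hash.NewMultiHasherTypes(set)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(hasher, strings.NewReader("hello")); err != nil {
		panic(err)
	}
	fmt.Println(hasher.Size())           // 5
	fmt.Println(hasher.Sums()[hash.MD5]) // hex-encoded MD5 of "hello"

	// Set is a bit set over the registered types.
	fmt.Println(set.Contains(hash.CRC32))       // false
	fmt.Println(set.SubsetOf(hash.Supported())) // true
}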
+func (h Set) GetOne() Type {
+	v := int(h)
+	i := uint(0)
+	for v != 0 {
+		if v&1 != 0 {
+			return Type(1 << i)
+		}
+		i++
+		v >>= 1
+	}
+	return None
+}
+
+// Array returns an array of all hash types in the set
+func (h Set) Array() (ht []Type) {
+	v := int(h)
+	i := uint(0)
+	for v != 0 {
+		if v&1 != 0 {
+			ht = append(ht, Type(1<<i))
+		}
+		i++
+		v >>= 1
+	}
+	return ht
+}
+
+// Count returns the number of hash types in the set
+func (h Set) Count() int {
+	if int(h) == 0 {
+		return 0
+	}
+	// credit: https://code.google.com/u/arnehormann/
+	x := uint64(h)
+	x -= (x >> 1) & 0x5555555555555555
+	x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
+	x += x >> 4
+	x &= 0x0f0f0f0f0f0f0f0f
+	x *= 0x0101010101010101
+	return int(x >> 56)
+}
+
+// String returns a string representation of the hash set.
+// The function will panic if it contains an unknown type.
+func (h Set) String() string {
+	a := h.Array()
+	var r []string
+	for _, v := range a {
+		r = append(r, v.String())
+	}
+	return "[" + strings.Join(r, ", ") + "]"
+}
+
+// Equals checks to see if src == dst, but ignores empty strings
+// and returns true if either is empty.
+func Equals(src, dst string) bool {
+	if src == "" || dst == "" {
+		return true
+	}
+	return src == dst
+}
diff --git a/vendor/github.com/rclone/rclone/fs/list/list.go b/vendor/github.com/rclone/rclone/fs/list/list.go
new file mode 100644
index 00000000000..dfa8b688d47
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/list/list.go
@@ -0,0 +1,104 @@
+// Package list contains list functions
+package list
+
+import (
+	"context"
+	"sort"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/filter"
+)
+
+// DirSorted reads Object and *Dir into entries for the given Fs.
+//
+// dir is the start directory, "" for root
+//
+// If includeAll is specified all files will be added, otherwise only
+// files and directories passing the filter will be added.
+//
+// Files will be returned in sorted order
+func DirSorted(ctx context.Context, f fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error) {
+	// Get unfiltered entries from the fs
+	entries, err = f.List(ctx, dir)
+	if err != nil {
+		return nil, err
+	}
+	// This should happen only if exclude files lives in the
+	// starting directory, otherwise ListDirSorted should not be
+	// called.
+ fi := filter.GetConfig(ctx) + if !includeAll && fi.ListContainsExcludeFile(entries) { + fs.Debugf(dir, "Excluded") + return nil, nil + } + return filterAndSortDir(ctx, entries, includeAll, dir, fi.IncludeObject, fi.IncludeDirectory(ctx, f)) +} + +// filter (if required) and check the entries, then sort them +func filterAndSortDir(ctx context.Context, entries fs.DirEntries, includeAll bool, dir string, + IncludeObject func(ctx context.Context, o fs.Object) bool, + IncludeDirectory func(remote string) (bool, error)) (newEntries fs.DirEntries, err error) { + newEntries = entries[:0] // in place filter + prefix := "" + if dir != "" { + prefix = dir + "/" + } + for _, entry := range entries { + ok := true + // check includes and types + switch x := entry.(type) { + case fs.Object: + // Make sure we don't delete excluded files if not required + if !includeAll && !IncludeObject(ctx, x) { + ok = false + fs.Debugf(x, "Excluded") + } + case fs.Directory: + if !includeAll { + include, err := IncludeDirectory(x.Remote()) + if err != nil { + return nil, err + } + if !include { + ok = false + fs.Debugf(x, "Excluded") + } + } + default: + return nil, errors.Errorf("unknown object type %T", entry) + } + // check remote name belongs in this directory + remote := entry.Remote() + switch { + case !ok: + // ignore + case !strings.HasPrefix(remote, prefix): + ok = false + fs.Errorf(entry, "Entry doesn't belong in directory %q (too short) - ignoring", dir) + case remote == prefix: + ok = false + fs.Errorf(entry, "Entry doesn't belong in directory %q (same as directory) - ignoring", dir) + case strings.ContainsRune(remote[len(prefix):], '/'): + ok = false + fs.Errorf(entry, "Entry doesn't belong in directory %q (contains subdir) - ignoring", dir) + default: + // ok + } + if ok { + newEntries = append(newEntries, entry) + } + } + entries = newEntries + + // Sort the directory entries by Remote + // + // We use a stable sort here just in case there are + // duplicates. Assuming the remote delivers the entries in a + // consistent order, this will give the best user experience + // in syncing as it will use the first entry for the sync + // comparison. + sort.Stable(entries) + return entries, nil +} diff --git a/vendor/github.com/rclone/rclone/fs/log.go b/vendor/github.com/rclone/rclone/fs/log.go new file mode 100644 index 00000000000..1ded32694eb --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/log.go @@ -0,0 +1,206 @@ +package fs + +import ( + "context" + "fmt" + "log" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// LogLevel describes rclone's logs. These are a subset of the syslog log levels. +type LogLevel byte + +// Log levels. These are the syslog levels of which we only use a +// subset. 
+//
+// LOG_EMERG      system is unusable
+// LOG_ALERT      action must be taken immediately
+// LOG_CRIT       critical conditions
+// LOG_ERR        error conditions
+// LOG_WARNING    warning conditions
+// LOG_NOTICE     normal, but significant, condition
+// LOG_INFO       informational message
+// LOG_DEBUG      debug-level message
+const (
+	LogLevelEmergency LogLevel = iota
+	LogLevelAlert
+	LogLevelCritical
+	LogLevelError // Error - can't be suppressed
+	LogLevelWarning
+	LogLevelNotice // Normal logging, -q suppresses
+	LogLevelInfo   // Transfers, needs -v
+	LogLevelDebug  // Debug level, needs -vv
+)
+
+var logLevelToString = []string{
+	LogLevelEmergency: "EMERGENCY",
+	LogLevelAlert:     "ALERT",
+	LogLevelCritical:  "CRITICAL",
+	LogLevelError:     "ERROR",
+	LogLevelWarning:   "WARNING",
+	LogLevelNotice:    "NOTICE",
+	LogLevelInfo:      "INFO",
+	LogLevelDebug:     "DEBUG",
+}
+
+// String turns a LogLevel into a string
+func (l LogLevel) String() string {
+	if l >= LogLevel(len(logLevelToString)) {
+		return fmt.Sprintf("LogLevel(%d)", l)
+	}
+	return logLevelToString[l]
+}
+
+// Set a LogLevel
+func (l *LogLevel) Set(s string) error {
+	for n, name := range logLevelToString {
+		if s != "" && name == s {
+			*l = LogLevel(n)
+			return nil
+		}
+	}
+	return errors.Errorf("Unknown log level %q", s)
+}
+
+// Type of the value
+func (l *LogLevel) Type() string {
+	return "string"
+}
+
+// LogPrint sends the text to the logger at the given level
+var LogPrint = func(level LogLevel, text string) {
+	text = fmt.Sprintf("%-6s: %s", level, text)
+	_ = log.Output(4, text)
+}
+
+// LogValueItem describes a keyed item for a JSON log entry
+type LogValueItem struct {
+	key    string
+	value  interface{}
+	render bool
+}
+
+// LogValue should be used as an argument to any logging calls to
+// augment the JSON output with more structured information.
+//
+// key is the dictionary parameter used to store value.
+func LogValue(key string, value interface{}) LogValueItem {
+	return LogValueItem{key: key, value: value, render: true}
+}
+
+// LogValueHide should be used as an argument to any logging calls to
+// augment the JSON output with more structured information.
+//
+// key is the dictionary parameter used to store value.
+//
+// String() will return a blank string - this is useful to put items
+// in which don't print into the log.
+func LogValueHide(key string, value interface{}) LogValueItem {
+	return LogValueItem{key: key, value: value, render: false}
+}
+
+// String returns the representation of value. If render is false this
+// is an empty string, so LogValueItem entries won't show in the
+// textual representation of logs.
+func (j LogValueItem) String() string {
+	if !j.render {
+		return ""
+	}
+	if do, ok := j.value.(fmt.Stringer); ok {
+		return do.String()
+	}
+	return fmt.Sprint(j.value)
+}
+
+// LogPrintf produces a log string from the arguments passed in
+func LogPrintf(level LogLevel, o interface{}, text string, args ...interface{}) {
+	out := fmt.Sprintf(text, args...)
+ + if GetConfig(context.TODO()).UseJSONLog { + fields := logrus.Fields{} + if o != nil { + fields = logrus.Fields{ + "object": fmt.Sprintf("%+v", o), + "objectType": fmt.Sprintf("%T", o), + } + } + for _, arg := range args { + if item, ok := arg.(LogValueItem); ok { + fields[item.key] = item.value + } + } + switch level { + case LogLevelDebug: + logrus.WithFields(fields).Debug(out) + case LogLevelInfo: + logrus.WithFields(fields).Info(out) + case LogLevelNotice, LogLevelWarning: + logrus.WithFields(fields).Warn(out) + case LogLevelError: + logrus.WithFields(fields).Error(out) + case LogLevelCritical: + logrus.WithFields(fields).Fatal(out) + case LogLevelEmergency, LogLevelAlert: + logrus.WithFields(fields).Panic(out) + } + } else { + if o != nil { + out = fmt.Sprintf("%v: %s", o, out) + } + LogPrint(level, out) + } +} + +// LogLevelPrintf writes logs at the given level +func LogLevelPrintf(level LogLevel, o interface{}, text string, args ...interface{}) { + if GetConfig(context.TODO()).LogLevel >= level { + LogPrintf(level, o, text, args...) + } +} + +// Errorf writes error log output for this Object or Fs. It +// should always be seen by the user. +func Errorf(o interface{}, text string, args ...interface{}) { + if GetConfig(context.TODO()).LogLevel >= LogLevelError { + LogPrintf(LogLevelError, o, text, args...) + } +} + +// Logf writes log output for this Object or Fs. This should be +// considered to be Info level logging. It is the default level. By +// default rclone should not log very much so only use this for +// important things the user should see. The user can filter these +// out with the -q flag. +func Logf(o interface{}, text string, args ...interface{}) { + if GetConfig(context.TODO()).LogLevel >= LogLevelNotice { + LogPrintf(LogLevelNotice, o, text, args...) + } +} + +// Infof writes info on transfers for this Object or Fs. Use this +// level for logging transfers, deletions and things which should +// appear with the -v flag. +func Infof(o interface{}, text string, args ...interface{}) { + if GetConfig(context.TODO()).LogLevel >= LogLevelInfo { + LogPrintf(LogLevelInfo, o, text, args...) + } +} + +// Debugf writes debugging output for this Object or Fs. Use this for +// debug only. The user must have to specify -vv to see this. +func Debugf(o interface{}, text string, args ...interface{}) { + if GetConfig(context.TODO()).LogLevel >= LogLevelDebug { + LogPrintf(LogLevelDebug, o, text, args...) 
+	}
+}
+
+// LogDirName returns an object for the logger, logging a root
+// directory which would normally be "" as the Fs
+func LogDirName(f Fs, dir string) interface{} {
+	if dir != "" {
+		return dir
+	}
+	return f
+}
diff --git a/vendor/github.com/rclone/rclone/fs/march/march.go b/vendor/github.com/rclone/rclone/fs/march/march.go
new file mode 100644
index 00000000000..d4e65ce93e2
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/march/march.go
@@ -0,0 +1,508 @@
+// Package march traverses two directories in lock step
+package march
+
+import (
+	"context"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs/object"
+
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/dirtree"
+	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/list"
+	"github.com/rclone/rclone/fs/walk"
+	"golang.org/x/text/unicode/norm"
+)
+
+// March holds the data used to traverse two Fs simultaneously,
+// calling Callback for each match
+type March struct {
+	// parameters
+	Ctx                    context.Context // context for background goroutines
+	Fdst                   fs.Fs           // destination Fs
+	DstDir                 string          // destination directory
+	Fsrc                   fs.Fs           // source Fs
+	SrcDir                 string          // source directory
+	Paths                  []string        // List of specific paths in source to traverse
+	NoTraverse             bool            // don't traverse the destination
+	SrcIncludeAll          bool            // include all files in the src
+	DstIncludeAll          bool            // include all files in the destination
+	Callback               Marcher         // object to call with results
+	NoCheckDest            bool            // transfer all objects regardless without checking dst
+	NoUnicodeNormalization bool            // don't normalize unicode characters in filenames
+	// internal state
+	srcListDir listDirFn // function to call to list a directory in the src
+	dstListDir listDirFn // function to call to list a directory in the dst
+	transforms []matchTransformFn
+}
+
+// Marcher is called on each match
+type Marcher interface {
+	// SrcOnly is called for a DirEntry found only in the source
+	SrcOnly(src fs.DirEntry) (recurse bool)
+	// DstOnly is called for a DirEntry found only in the destination
+	DstOnly(dst fs.DirEntry) (recurse bool)
+	// Match is called for a DirEntry found both in the source and destination
+	Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool)
+}
+
+// init sets up a march over m.Fsrc and m.Fdst, calling back m.Callback for each match
+func (m *March) init(ctx context.Context) {
+	ci := fs.GetConfig(ctx)
+	if m.Paths != nil {
+		m.srcListDir = m.listPaths(ctx, m.Fsrc, m.SrcDir)
+	} else {
+		m.srcListDir = m.makeListDir(ctx, m.Fsrc, m.SrcDir, m.SrcIncludeAll)
+	}
+	if !m.NoTraverse {
+		m.dstListDir = m.makeListDir(ctx, m.Fdst, m.DstDir, m.DstIncludeAll)
+	}
+	// Now create the matching transform
+	// ..normalise the UTF8 first
+	if !m.NoUnicodeNormalization {
+		m.transforms = append(m.transforms, norm.NFC.String)
+	}
+	// ..if destination is caseInsensitive then make it lower case
+	// case Insensitive | src | dst | lower case compare |
+	//                  | No  | No  | No                 |
+	//                  | Yes | No  | No                 |
+	//                  | No  | Yes | Yes                |
+	//                  | Yes | Yes | Yes                |
+	if m.Fdst.Features().CaseInsensitive || ci.IgnoreCaseSync {
+		m.transforms = append(m.transforms, strings.ToLower)
+	}
+}
+
+// list a directory into entries, err
+type listDirFn func(dir string) (entries fs.DirEntries, err error)
+
+// makeListDir constructs a listing function for the given fs
+// and includeAll flags for marching through the file system.
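A Marcher implementation plus a March literal is all a caller needs to drive the listing machinery that follows. A hedged sketch (fsrc and fdst are assumed to be opened elsewhere, e.g. with fs.NewFs):

package main

import (
	"context"
	"fmt"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/march"
)

// printMarcher just reports each classification and recurses everywhere.
type printMarcher struct{}

func (printMarcher) SrcOnly(src fs.DirEntry) bool { fmt.Println("src only:", src.Remote()); return true }
func (printMarcher) DstOnly(dst fs.DirEntry) bool { fmt.Println("dst only:", dst.Remote()); return true }
func (printMarcher) Match(ctx context.Context, dst, src fs.DirEntry) bool {
	fmt.Println("match:", src.Remote())
	return true
}

func run(ctx context.Context, fsrc, fdst fs.Fs) error {
	m := &march.March{
		Ctx:      ctx,
		Fsrc:     fsrc,
		Fdst:     fdst,
		Callback: printMarcher{},
	}
	return m.Run(ctx)
}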
+func (m *March) makeListDir(ctx context.Context, f fs.Fs, remote string, includeAll bool) listDirFn { + ci := fs.GetConfig(ctx) + fi := filter.GetConfig(ctx) + + if !(ci.UseListR && f.Features().ListR != nil) && // !--fast-list active and + !(ci.NoTraverse && fi.HaveFilesFrom()) { // !(--files-from and --no-traverse) + return func(dir string) (entries fs.DirEntries, err error) { + return list.DirSorted(m.Ctx, f, includeAll, dir) + } + } + + // This returns a closure for use when --fast-list is active or for when + // --files-from and --no-traverse is set + var ( + mu sync.Mutex + started bool + dirs dirtree.DirTree + dirsErr error + ) + return func(dir string) (entries fs.DirEntries, err error) { + mu.Lock() + defer mu.Unlock() + if !started { + dirs, dirsErr = walk.NewDirTree(m.Ctx, f, remote, includeAll, ci.MaxDepth) + started = true + } + if dirsErr != nil { + return nil, dirsErr + } + entries, ok := dirs[dir] + if !ok { + err = fs.ErrorDirNotFound + } else { + delete(dirs, dir) + } + return entries, err + } +} + +func (m *March) listPaths(ctx context.Context, f fs.Fs, remote string) listDirFn { + return func(dir string) (fs.DirEntries, error) { + entries := make(fs.DirEntries, len(m.Paths)) + for i, p := range m.Paths { + entries[i] = object.NewLazyObject(ctx, f, path.Join(remote, p)) + } + return entries, nil + } +} + +// listDirJob describe a directory listing that needs to be done +type listDirJob struct { + srcRemote string + dstRemote string + srcDepth int + dstDepth int + noSrc bool + noDst bool +} + +// Run starts the matching process off +func (m *March) Run(ctx context.Context) error { + ci := fs.GetConfig(ctx) + fi := filter.GetConfig(ctx) + m.init(ctx) + + srcDepth := ci.MaxDepth + if srcDepth < 0 { + srcDepth = fs.MaxLevel + } + dstDepth := srcDepth + if fi.Opt.DeleteExcluded { + dstDepth = fs.MaxLevel + } + + var mu sync.Mutex // Protects vars below + var jobError error + var errCount int + + // Start some directory listing go routines + var wg sync.WaitGroup // sync closing of go routines + var traversing sync.WaitGroup // running directory traversals + checkers := ci.Checkers + in := make(chan listDirJob, checkers) + for i := 0; i < checkers; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-m.Ctx.Done(): + return + case job, ok := <-in: + if !ok { + return + } + jobs, err := m.processJob(job) + if err != nil { + mu.Lock() + // Keep reference only to the first encountered error + if jobError == nil { + jobError = err + } + errCount++ + mu.Unlock() + } + if len(jobs) > 0 { + traversing.Add(len(jobs)) + go func() { + // Now we have traversed this directory, send these + // jobs off for traversal in the background + for _, newJob := range jobs { + select { + case <-m.Ctx.Done(): + // discard job if finishing + traversing.Done() + case in <- newJob: + } + } + }() + } + traversing.Done() + } + } + }() + } + + // Start the process + traversing.Add(1) + in <- listDirJob{ + srcRemote: m.SrcDir, + srcDepth: srcDepth - 1, + dstRemote: m.DstDir, + dstDepth: dstDepth - 1, + noDst: m.NoCheckDest, + } + go func() { + // when the context is cancelled discard the remaining jobs + <-m.Ctx.Done() + for range in { + traversing.Done() + } + }() + traversing.Wait() + close(in) + wg.Wait() + + if errCount > 1 { + return errors.Wrapf(jobError, "march failed with %d error(s): first error", errCount) + } + return jobError +} + +// Check to see if the context has been cancelled +func (m *March) aborting() bool { + select { + case <-m.Ctx.Done(): + return true + 
default: + } + return false +} + +// matchEntry is an entry plus transformed name +type matchEntry struct { + entry fs.DirEntry + leaf string + name string +} + +// matchEntries contains many matchEntry~s +type matchEntries []matchEntry + +// Len is part of sort.Interface. +func (es matchEntries) Len() int { return len(es) } + +// Swap is part of sort.Interface. +func (es matchEntries) Swap(i, j int) { es[i], es[j] = es[j], es[i] } + +// Less is part of sort.Interface. +// +// Compare in order (name, leaf, remote) +func (es matchEntries) Less(i, j int) bool { + ei, ej := &es[i], &es[j] + if ei.name == ej.name { + if ei.leaf == ej.leaf { + return fs.CompareDirEntries(ei.entry, ej.entry) < 0 + } + return ei.leaf < ej.leaf + } + return ei.name < ej.name +} + +// Sort the directory entries by (name, leaf, remote) +// +// We use a stable sort here just in case there are +// duplicates. Assuming the remote delivers the entries in a +// consistent order, this will give the best user experience +// in syncing as it will use the first entry for the sync +// comparison. +func (es matchEntries) sort() { + sort.Stable(es) +} + +// make a matchEntries from a newMatch entries +func newMatchEntries(entries fs.DirEntries, transforms []matchTransformFn) matchEntries { + es := make(matchEntries, len(entries)) + for i := range es { + es[i].entry = entries[i] + name := path.Base(entries[i].Remote()) + es[i].leaf = name + for _, transform := range transforms { + name = transform(name) + } + es[i].name = name + } + es.sort() + return es +} + +// matchPair is a matched pair of direntries returned by matchListings +type matchPair struct { + src, dst fs.DirEntry +} + +// matchTransformFn converts a name into a form which is used for +// comparison in matchListings. +type matchTransformFn func(name string) string + +// Process the two listings, matching up the items in the two slices +// using the transform function on each name first. +// +// Into srcOnly go Entries which only exist in the srcList +// Into dstOnly go Entries which only exist in the dstList +// Into matches go matchPair's of src and dst which have the same name +// +// This checks for duplicates and checks the list is sorted. 
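Before the implementation, it helps to see what the transform pipeline built in init actually does to a name. A standalone sketch of the same two transforms (NFC normalisation, then lower-casing for a case-insensitive destination):

package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/unicode/norm"
)

func main() {
	transforms := []func(string) string{norm.NFC.String, strings.ToLower}
	name := "Cafe\u0301.TXT" // 'e' followed by a combining acute accent
	for _, t := range transforms {
		name = t(name)
	}
	fmt.Println(name) // café.txt - now comparable with a pre-composed, lower-case name
}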
+func matchListings(srcListEntries, dstListEntries fs.DirEntries, transforms []matchTransformFn) (srcOnly fs.DirEntries, dstOnly fs.DirEntries, matches []matchPair) { + srcList := newMatchEntries(srcListEntries, transforms) + dstList := newMatchEntries(dstListEntries, transforms) + + for iSrc, iDst := 0, 0; ; iSrc, iDst = iSrc+1, iDst+1 { + var src, dst fs.DirEntry + var srcName, dstName string + if iSrc < len(srcList) { + src = srcList[iSrc].entry + srcName = srcList[iSrc].name + } + if iDst < len(dstList) { + dst = dstList[iDst].entry + dstName = dstList[iDst].name + } + if src == nil && dst == nil { + break + } + if src != nil && iSrc > 0 { + prev := srcList[iSrc-1].entry + prevName := srcList[iSrc-1].name + if srcName == prevName && fs.DirEntryType(prev) == fs.DirEntryType(src) { + fs.Logf(src, "Duplicate %s found in source - ignoring", fs.DirEntryType(src)) + iDst-- // ignore the src and retry the dst + continue + } else if srcName < prevName { + // this should never happen since we sort the listings + panic("Out of order listing in source") + } + } + if dst != nil && iDst > 0 { + prev := dstList[iDst-1].entry + prevName := dstList[iDst-1].name + if dstName == prevName && fs.DirEntryType(dst) == fs.DirEntryType(prev) { + fs.Logf(dst, "Duplicate %s found in destination - ignoring", fs.DirEntryType(dst)) + iSrc-- // ignore the dst and retry the src + continue + } else if dstName < prevName { + // this should never happen since we sort the listings + panic("Out of order listing in destination") + } + } + if src != nil && dst != nil { + // we can't use CompareDirEntries because srcName, dstName could + // be different then src.Remote() or dst.Remote() + srcType := fs.DirEntryType(src) + dstType := fs.DirEntryType(dst) + if srcName > dstName || (srcName == dstName && srcType > dstType) { + src = nil + iSrc-- + } else if srcName < dstName || (srcName == dstName && srcType < dstType) { + dst = nil + iDst-- + } + } + // Debugf(nil, "src = %v, dst = %v", src, dst) + switch { + case src == nil && dst == nil: + // do nothing + case src == nil: + dstOnly = append(dstOnly, dst) + case dst == nil: + srcOnly = append(srcOnly, src) + default: + matches = append(matches, matchPair{src: src, dst: dst}) + } + } + return +} + +// processJob processes a listDirJob listing the source and +// destination directories, comparing them and returning a slice of +// more jobs +// +// returns errors using processError +func (m *March) processJob(job listDirJob) ([]listDirJob, error) { + var ( + jobs []listDirJob + srcList, dstList fs.DirEntries + srcListErr, dstListErr error + wg sync.WaitGroup + mu sync.Mutex + ) + + // List the src and dst directories + if !job.noSrc { + wg.Add(1) + go func() { + defer wg.Done() + srcList, srcListErr = m.srcListDir(job.srcRemote) + }() + } + if !m.NoTraverse && !job.noDst { + wg.Add(1) + go func() { + defer wg.Done() + dstList, dstListErr = m.dstListDir(job.dstRemote) + }() + } + + // Wait for listings to complete and report errors + wg.Wait() + if srcListErr != nil { + fs.Errorf(job.srcRemote, "error reading source directory: %v", srcListErr) + srcListErr = fs.CountError(srcListErr) + return nil, srcListErr + } + if dstListErr == fs.ErrorDirNotFound { + // Copy the stuff anyway + } else if dstListErr != nil { + fs.Errorf(job.dstRemote, "error reading destination directory: %v", dstListErr) + dstListErr = fs.CountError(dstListErr) + return nil, dstListErr + } + + // If NoTraverse is set, then try to find a matching object + // for each item in the srcList to head dst object + ci 
:= fs.GetConfig(m.Ctx) + limiter := make(chan struct{}, ci.Checkers) + if m.NoTraverse && !m.NoCheckDest { + for _, src := range srcList { + wg.Add(1) + limiter <- struct{}{} + go func(limiter chan struct{}, src fs.DirEntry) { + defer wg.Done() + if srcObj, ok := src.(fs.Object); ok { + leaf := path.Base(srcObj.Remote()) + dstObj, err := m.Fdst.NewObject(m.Ctx, path.Join(job.dstRemote, leaf)) + if err == nil { + mu.Lock() + dstList = append(dstList, dstObj) + mu.Unlock() + } + } + <-limiter + }(limiter, src) + } + wg.Wait() + } + + // Work out what to do and do it + srcOnly, dstOnly, matches := matchListings(srcList, dstList, m.transforms) + for _, src := range srcOnly { + if m.aborting() { + return nil, m.Ctx.Err() + } + recurse := m.Callback.SrcOnly(src) + if recurse && job.srcDepth > 0 { + jobs = append(jobs, listDirJob{ + srcRemote: src.Remote(), + dstRemote: src.Remote(), + srcDepth: job.srcDepth - 1, + noDst: true, + }) + } + + } + for _, dst := range dstOnly { + if m.aborting() { + return nil, m.Ctx.Err() + } + recurse := m.Callback.DstOnly(dst) + if recurse && job.dstDepth > 0 { + jobs = append(jobs, listDirJob{ + srcRemote: dst.Remote(), + dstRemote: dst.Remote(), + dstDepth: job.dstDepth - 1, + noSrc: true, + }) + } + } + for _, match := range matches { + if m.aborting() { + return nil, m.Ctx.Err() + } + recurse := m.Callback.Match(m.Ctx, match.dst, match.src) + if recurse && job.srcDepth > 0 && job.dstDepth > 0 { + jobs = append(jobs, listDirJob{ + srcRemote: match.src.Remote(), + dstRemote: match.dst.Remote(), + srcDepth: job.srcDepth - 1, + dstDepth: job.dstDepth - 1, + }) + } + } + return jobs, nil +} diff --git a/vendor/github.com/rclone/rclone/fs/mimetype.go b/vendor/github.com/rclone/rclone/fs/mimetype.go new file mode 100644 index 00000000000..50ea7dc59ab --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/mimetype.go @@ -0,0 +1,45 @@ +package fs + +import ( + "context" + "mime" + "path" + "strings" +) + +// MimeTypeFromName returns a guess at the mime type from the name +func MimeTypeFromName(remote string) (mimeType string) { + mimeType = mime.TypeByExtension(path.Ext(remote)) + if !strings.ContainsRune(mimeType, '/') { + mimeType = "application/octet-stream" + } + return mimeType +} + +// MimeType returns the MimeType from the object, either by calling +// the MimeTyper interface or using MimeTypeFromName +func MimeType(ctx context.Context, o ObjectInfo) (mimeType string) { + // Read the MimeType from the optional interface if available + if do, ok := o.(MimeTyper); ok { + mimeType = do.MimeType(ctx) + // Debugf(o, "Read MimeType as %q", mimeType) + if mimeType != "" { + return mimeType + } + } + return MimeTypeFromName(o.Remote()) +} + +// MimeTypeDirEntry returns the MimeType of a DirEntry +// +// It returns "inode/directory" for directories, or uses +// MimeType(Object) +func MimeTypeDirEntry(ctx context.Context, item DirEntry) string { + switch x := item.(type) { + case Object: + return MimeType(ctx, x) + case Directory: + return "inode/directory" + } + return "" +} diff --git a/vendor/github.com/rclone/rclone/fs/object/lazy.go b/vendor/github.com/rclone/rclone/fs/object/lazy.go new file mode 100644 index 00000000000..15eba487552 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/object/lazy.go @@ -0,0 +1,135 @@ +package object + +import ( + "context" + "io" + "sync" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/hash" +) + +// LazyObject defers underlying object creation. 
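The deferral just described means constructing a LazyObject is free; the single NewObject lookup happens on first use, guarded by sync.Once. A small sketch (the path is illustrative):

package main

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/object"
)

// lazySize does no I/O at construction time; the first accessor
// (Size here) triggers exactly one NewObject call on f.
func lazySize(ctx context.Context, f fs.Fs) int64 {
	o := object.NewLazyObject(ctx, f, "path/to/file")
	return o.Size()
}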
+type LazyObject struct { + ctx context.Context + f fs.Fs + remote string + once sync.Once + + o fs.Object + err error +} + +// NewLazyObject returns an object that defers the real object creation +// until the object is needed. +func NewLazyObject(ctx context.Context, f fs.Fs, remote string) *LazyObject { + return &LazyObject{ctx: ctx, f: f, remote: remote} +} + +var _ fs.Object = (*LazyObject)(nil) + +func (l *LazyObject) init() { + l.once.Do(func() { + fs.Debugf(l.f, "lazy init %s", l.remote) + l.o, l.err = l.f.NewObject(l.ctx, l.remote) + if l.err != nil { + fs.Errorf(l.f, "lazy init %s failed: %s", l.remote, l.err) + } + }) +} + +// String returns a description of the Object +func (l *LazyObject) String() string { + return l.remote +} + +// Remote returns the remote path +func (l *LazyObject) Remote() string { + return l.remote +} + +// ModTime returns the modification date of the file +func (l *LazyObject) ModTime(ctx context.Context) time.Time { + l.init() + + if l.err != nil { + return time.Time{} + } + return l.o.ModTime(ctx) +} + +// Size returns the size of the file +func (l *LazyObject) Size() int64 { + l.init() + + if l.err != nil { + return 0 + } + return l.o.Size() +} + +// Fs returns read only access to the Fs that this object is part of +func (l *LazyObject) Fs() fs.Info { + return l.f +} + +// Hash returns the requested hash of the contents +func (l *LazyObject) Hash(ctx context.Context, ty hash.Type) (string, error) { + l.init() + + if l.err != nil { + return "", l.err + } + return l.o.Hash(ctx, ty) +} + +// Storable says whether this object can be stored +func (l *LazyObject) Storable() bool { + l.init() + + if l.err != nil { + return false + } + return l.o.Storable() +} + +// SetModTime sets the metadata on the object to set the modification date +func (l *LazyObject) SetModTime(ctx context.Context, t time.Time) error { + l.init() + + if l.err != nil { + return l.err + } + return l.o.SetModTime(ctx, t) +} + +// Open opens the file for read. Call Close() on the returned io.ReadCloser +func (l *LazyObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { + l.init() + + if l.err != nil { + return nil, l.err + } + return l.o.Open(ctx, options...) +} + +// Update in to the object with the modTime given of the given size +func (l *LazyObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error { + l.init() + + if l.err != nil { + return l.err + } + return l.o.Update(ctx, in, src, options...) +} + +// Remove this object +func (l *LazyObject) Remove(ctx context.Context) error { + l.init() + + if l.err != nil { + return l.err + } + return l.o.Remove(ctx) +} diff --git a/vendor/github.com/rclone/rclone/fs/object/object.go b/vendor/github.com/rclone/rclone/fs/object/object.go new file mode 100644 index 00000000000..c88b9da0219 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/object/object.go @@ -0,0 +1,240 @@ +// Package object defines some useful Objects +package object + +import ( + "bytes" + "context" + "errors" + "io" + "io/ioutil" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/hash" +) + +// NewStaticObjectInfo returns a static ObjectInfo +// If hashes is nil and fs is not nil, the hash map will be replaced with +// empty hashes of the types supported by the fs. 
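NewStaticObjectInfo can be used to describe raw bytes to an Fs.Put implementation. A hedged sketch (the remote name is illustrative; passing nil hashes with a non-nil fs yields empty hashes of the supported types, per the comment above):

package main

import (
	"bytes"
	"context"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/object"
)

// upload wraps data in a static ObjectInfo and hands it to f.Put.
func upload(ctx context.Context, f fs.Fs, data []byte) (fs.Object, error) {
	info := object.NewStaticObjectInfo("backup/file.txt", time.Now(), int64(len(data)), true, nil, f)
	return f.Put(ctx, bytes.NewReader(data), info)
}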
+func NewStaticObjectInfo(remote string, modTime time.Time, size int64, storable bool, hashes map[hash.Type]string, fs fs.Info) fs.ObjectInfo { + info := &staticObjectInfo{ + remote: remote, + modTime: modTime, + size: size, + storable: storable, + hashes: hashes, + fs: fs, + } + if fs != nil && hashes == nil { + set := fs.Hashes().Array() + info.hashes = make(map[hash.Type]string) + for _, ht := range set { + info.hashes[ht] = "" + } + } + return info +} + +type staticObjectInfo struct { + remote string + modTime time.Time + size int64 + storable bool + hashes map[hash.Type]string + fs fs.Info +} + +func (i *staticObjectInfo) Fs() fs.Info { return i.fs } +func (i *staticObjectInfo) Remote() string { return i.remote } +func (i *staticObjectInfo) String() string { return i.remote } +func (i *staticObjectInfo) ModTime(ctx context.Context) time.Time { return i.modTime } +func (i *staticObjectInfo) Size() int64 { return i.size } +func (i *staticObjectInfo) Storable() bool { return i.storable } +func (i *staticObjectInfo) Hash(ctx context.Context, h hash.Type) (string, error) { + if len(i.hashes) == 0 { + return "", hash.ErrUnsupported + } + if hash, ok := i.hashes[h]; ok { + return hash, nil + } + return "", hash.ErrUnsupported +} + +// MemoryFs is an in memory Fs, it only supports FsInfo and Put +var MemoryFs memoryFs + +// memoryFs is an in memory fs +type memoryFs struct{} + +// Name of the remote (as passed into NewFs) +func (memoryFs) Name() string { return "memory" } + +// Root of the remote (as passed into NewFs) +func (memoryFs) Root() string { return "" } + +// String returns a description of the FS +func (memoryFs) String() string { return "memory" } + +// Precision of the ModTimes in this Fs +func (memoryFs) Precision() time.Duration { return time.Nanosecond } + +// Returns the supported hash types of the filesystem +func (memoryFs) Hashes() hash.Set { return hash.Supported() } + +// Features returns the optional features of this Fs +func (memoryFs) Features() *fs.Features { return &fs.Features{} } + +// List the objects and directories in dir into entries. The +// entries can be returned in any order but should be for a +// complete directory. +// +// dir should be "" to list the root, and should not have +// trailing slashes. +// +// This should return ErrDirNotFound if the directory isn't +// found. +func (memoryFs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) { + return nil, nil +} + +// NewObject finds the Object at remote. If it can't be found +// it returns the error ErrorObjectNotFound. +func (memoryFs) NewObject(ctx context.Context, remote string) (fs.Object, error) { + return nil, fs.ErrorObjectNotFound +} + +// Put in to the remote path with the modTime given of the given size +// +// May create the object even if it returns an error - if so +// will return the object and the error, otherwise will return +// nil and the error +func (memoryFs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) { + o := NewMemoryObject(src.Remote(), src.ModTime(ctx), nil) + return o, o.Update(ctx, in, src, options...) 
+} + +// Mkdir makes the directory (container, bucket) +// +// Shouldn't return an error if it already exists +func (memoryFs) Mkdir(ctx context.Context, dir string) error { + return errors.New("memoryFs: can't make directory") +} + +// Rmdir removes the directory (container, bucket) if empty +// +// Return an error if it doesn't exist or isn't empty +func (memoryFs) Rmdir(ctx context.Context, dir string) error { + return fs.ErrorDirNotFound +} + +var _ fs.Fs = MemoryFs + +// MemoryObject is an in memory object +type MemoryObject struct { + remote string + modTime time.Time + content []byte +} + +// NewMemoryObject returns an in memory Object with the modTime and content passed in +func NewMemoryObject(remote string, modTime time.Time, content []byte) *MemoryObject { + return &MemoryObject{ + remote: remote, + modTime: modTime, + content: content, + } +} + +// Content returns the underlying buffer +func (o *MemoryObject) Content() []byte { + return o.content +} + +// Fs returns read only access to the Fs that this object is part of +func (o *MemoryObject) Fs() fs.Info { + return MemoryFs +} + +// Remote returns the remote path +func (o *MemoryObject) Remote() string { + return o.remote +} + +// String returns a description of the Object +func (o *MemoryObject) String() string { + return o.remote +} + +// ModTime returns the modification date of the file +func (o *MemoryObject) ModTime(ctx context.Context) time.Time { + return o.modTime +} + +// Size returns the size of the file +func (o *MemoryObject) Size() int64 { + return int64(len(o.content)) +} + +// Storable says whether this object can be stored +func (o *MemoryObject) Storable() bool { + return true +} + +// Hash returns the requested hash of the contents +func (o *MemoryObject) Hash(ctx context.Context, h hash.Type) (string, error) { + hash, err := hash.NewMultiHasherTypes(hash.Set(h)) + if err != nil { + return "", err + } + _, err = hash.Write(o.content) + if err != nil { + return "", err + } + return hash.Sums()[h], nil +} + +// SetModTime sets the metadata on the object to set the modification date +func (o *MemoryObject) SetModTime(ctx context.Context, modTime time.Time) error { + o.modTime = modTime + return nil +} + +// Open opens the file for read. Call Close() on the returned io.ReadCloser +func (o *MemoryObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) { + content := o.content + for _, option := range options { + switch x := option.(type) { + case *fs.RangeOption: + content = o.content[x.Start:x.End] + case *fs.SeekOption: + content = o.content[x.Offset:] + default: + if option.Mandatory() { + fs.Logf(o, "Unsupported mandatory option: %v", option) + } + } + } + return ioutil.NopCloser(bytes.NewBuffer(content)), nil +} + +// Update in to the object with the modTime given of the given size +// +// This re-uses the internal buffer if at all possible. 
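MemoryObject honours range and seek options by slicing its buffer, so End behaves as an exclusive offset in this implementation. A short round-trip sketch:

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/object"
)

func main() {
	o := object.NewMemoryObject("file.txt", time.Now(), []byte("hello world"))

	// Read the first five bytes via a RangeOption.
	rc, err := o.Open(context.Background(), &fs.RangeOption{Start: 0, End: 5})
	if err != nil {
		panic(err)
	}
	defer func() { _ = rc.Close() }()
	b, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", b) // hello
}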
+func (o *MemoryObject) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) { + size := src.Size() + if size == 0 { + o.content = nil + } else if size < 0 || int64(cap(o.content)) < size { + o.content, err = ioutil.ReadAll(in) + } else { + o.content = o.content[:size] + _, err = io.ReadFull(in, o.content) + } + o.modTime = src.ModTime(ctx) + return err +} + +// Remove this object +func (o *MemoryObject) Remove(ctx context.Context) error { + return errors.New("memoryObject.Remove not supported") +} diff --git a/vendor/github.com/rclone/rclone/fs/operations/check.go b/vendor/github.com/rclone/rclone/fs/operations/check.go new file mode 100644 index 00000000000..450e6a608a8 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/operations/check.go @@ -0,0 +1,359 @@ +package operations + +import ( + "bytes" + "context" + "fmt" + "io" + "sync" + "sync/atomic" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/march" + "github.com/rclone/rclone/lib/readers" +) + +// checkFn is the type of the checking function used in CheckFn() +// +// It should check the two objects (a, b) and return if they differ +// and whether the hash was used. +// +// If there are differences then this should Errorf the difference and +// the reason but return with err = nil. It should not CountError in +// this case. +type checkFn func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) + +// CheckOpt contains options for the Check functions +type CheckOpt struct { + Fdst, Fsrc fs.Fs // fses to check + Check checkFn // function to use for checking + OneWay bool // one way only? 
+ Combined io.Writer // a file with file names with leading sigils + MissingOnSrc io.Writer // files only in the destination + MissingOnDst io.Writer // files only in the source + Match io.Writer // matching files + Differ io.Writer // differing files + Error io.Writer // files with errors of some kind +} + +// checkMarch is used to march over two Fses in the same way as +// sync/copy +type checkMarch struct { + ioMu sync.Mutex + wg sync.WaitGroup + tokens chan struct{} + differences int32 + noHashes int32 + srcFilesMissing int32 + dstFilesMissing int32 + matches int32 + opt CheckOpt +} + +// report outputs the fileName to out if required and to the combined log +func (c *checkMarch) report(o fs.DirEntry, out io.Writer, sigil rune) { + if out != nil { + c.ioMu.Lock() + _, _ = fmt.Fprintf(out, "%v\n", o) + c.ioMu.Unlock() + } + if c.opt.Combined != nil { + c.ioMu.Lock() + _, _ = fmt.Fprintf(c.opt.Combined, "%c %v\n", sigil, o) + c.ioMu.Unlock() + } +} + +// DstOnly have an object which is in the destination only +func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) { + switch dst.(type) { + case fs.Object: + if c.opt.OneWay { + return false + } + err := errors.Errorf("File not in %v", c.opt.Fsrc) + fs.Errorf(dst, "%v", err) + _ = fs.CountError(err) + atomic.AddInt32(&c.differences, 1) + atomic.AddInt32(&c.srcFilesMissing, 1) + c.report(dst, c.opt.MissingOnSrc, '-') + case fs.Directory: + // Do the same thing to the entire contents of the directory + if c.opt.OneWay { + return false + } + return true + default: + panic("Bad object in DirEntries") + } + return false +} + +// SrcOnly have an object which is in the source only +func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) { + switch src.(type) { + case fs.Object: + err := errors.Errorf("File not in %v", c.opt.Fdst) + fs.Errorf(src, "%v", err) + _ = fs.CountError(err) + atomic.AddInt32(&c.differences, 1) + atomic.AddInt32(&c.dstFilesMissing, 1) + c.report(src, c.opt.MissingOnDst, '+') + case fs.Directory: + // Do the same thing to the entire contents of the directory + return true + default: + panic("Bad object in DirEntries") + } + return false +} + +// check to see if two objects are identical using the check function +func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { + ci := fs.GetConfig(ctx) + tr := accounting.Stats(ctx).NewCheckingTransfer(src) + defer func() { + tr.Done(ctx, err) + }() + if sizeDiffers(ctx, src, dst) { + err = errors.Errorf("Sizes differ") + fs.Errorf(src, "%v", err) + return true, false, nil + } + if ci.SizeOnly { + return false, false, nil + } + return c.opt.Check(ctx, dst, src) +} + +// Match is called when src and dst are present, so sync src to dst +func (c *checkMarch) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) { + switch srcX := src.(type) { + case fs.Object: + dstX, ok := dst.(fs.Object) + if ok { + if SkipDestructive(ctx, src, "check") { + return false + } + c.wg.Add(1) + c.tokens <- struct{}{} // put a token to limit concurrency + go func() { + defer func() { + <-c.tokens // get the token back to free up a slot + c.wg.Done() + }() + differ, noHash, err := c.checkIdentical(ctx, dstX, srcX) + if err != nil { + fs.Errorf(src, "%v", err) + _ = fs.CountError(err) + c.report(src, c.opt.Error, '!') + } else if differ { + atomic.AddInt32(&c.differences, 1) + err := errors.New("files differ") + // the checkFn has already logged the reason + _ = fs.CountError(err) + c.report(src, c.opt.Differ, '*') + } else { + 
atomic.AddInt32(&c.matches, 1) + c.report(src, c.opt.Match, '=') + if noHash { + atomic.AddInt32(&c.noHashes, 1) + fs.Debugf(dstX, "OK - could not check hash") + } else { + fs.Debugf(dstX, "OK") + } + } + }() + } else { + err := errors.Errorf("is file on %v but directory on %v", c.opt.Fsrc, c.opt.Fdst) + fs.Errorf(src, "%v", err) + _ = fs.CountError(err) + atomic.AddInt32(&c.differences, 1) + atomic.AddInt32(&c.dstFilesMissing, 1) + c.report(src, c.opt.MissingOnDst, '+') + } + case fs.Directory: + // Do the same thing to the entire contents of the directory + _, ok := dst.(fs.Directory) + if ok { + return true + } + err := errors.Errorf("is file on %v but directory on %v", c.opt.Fdst, c.opt.Fsrc) + fs.Errorf(dst, "%v", err) + _ = fs.CountError(err) + atomic.AddInt32(&c.differences, 1) + atomic.AddInt32(&c.srcFilesMissing, 1) + c.report(dst, c.opt.MissingOnSrc, '-') + + default: + panic("Bad object in DirEntries") + } + return false +} + +// CheckFn checks the files in fsrc and fdst according to Size and +// hash using checkFunction on each file to check the hashes. +// +// checkFunction sees if dst and src are identical +// +// it returns true if differences were found +// it also returns whether it couldn't be hashed +func CheckFn(ctx context.Context, opt *CheckOpt) error { + ci := fs.GetConfig(ctx) + if opt.Check == nil { + return errors.New("internal error: nil check function") + } + c := &checkMarch{ + tokens: make(chan struct{}, ci.Checkers), + opt: *opt, + } + + // set up a march over fdst and fsrc + m := &march.March{ + Ctx: ctx, + Fdst: c.opt.Fdst, + Fsrc: c.opt.Fsrc, + Callback: c, + } + fs.Debugf(c.opt.Fdst, "Waiting for checks to finish") + err := m.Run(ctx) + c.wg.Wait() // wait for background go-routines + + if c.dstFilesMissing > 0 { + fs.Logf(c.opt.Fdst, "%d files missing", c.dstFilesMissing) + } + if c.srcFilesMissing > 0 { + fs.Logf(c.opt.Fsrc, "%d files missing", c.srcFilesMissing) + } + + fs.Logf(c.opt.Fdst, "%d differences found", accounting.Stats(ctx).GetErrors()) + if errs := accounting.Stats(ctx).GetErrors(); errs > 0 { + fs.Logf(c.opt.Fdst, "%d errors while checking", errs) + } + if c.noHashes > 0 { + fs.Logf(c.opt.Fdst, "%d hashes could not be checked", c.noHashes) + } + if c.matches > 0 { + fs.Logf(c.opt.Fdst, "%d matching files", c.matches) + } + if err != nil { + return err + } + if c.differences > 0 { + // Return an already counted error so we don't double count this error too + err = fserrors.FsError(errors.Errorf("%d differences found", c.differences)) + fserrors.Count(err) + return err + } + return nil +} + +// Check the files in fsrc and fdst according to Size and hash +func Check(ctx context.Context, opt *CheckOpt) error { + optCopy := *opt + optCopy.Check = func(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) { + same, ht, err := CheckHashes(ctx, src, dst) + if err != nil { + return true, false, err + } + if ht == hash.None { + return false, true, nil + } + if !same { + err = errors.Errorf("%v differ", ht) + fs.Errorf(src, "%v", err) + return true, false, nil + } + return false, false, nil + } + + return CheckFn(ctx, &optCopy) +} + +// CheckEqualReaders checks to see if in1 and in2 have the same +// content when read. 
+//
+// it returns true if differences were found
+func CheckEqualReaders(in1, in2 io.Reader) (differ bool, err error) {
+	const bufSize = 64 * 1024
+	buf1 := make([]byte, bufSize)
+	buf2 := make([]byte, bufSize)
+	for {
+		n1, err1 := readers.ReadFill(in1, buf1)
+		n2, err2 := readers.ReadFill(in2, buf2)
+		// check errors
+		if err1 != nil && err1 != io.EOF {
+			return true, err1
+		} else if err2 != nil && err2 != io.EOF {
+			return true, err2
+		}
+		// err1 && err2 are nil or io.EOF here
+		// process the data
+		if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) {
+			return true, nil
+		}
+		// if both streams finished then we have finished
+		if err1 == io.EOF && err2 == io.EOF {
+			break
+		}
+	}
+	return false, nil
+}
+
+// CheckIdenticalDownload checks to see if dst and src are identical
+// by reading all their bytes if necessary.
+//
+// it returns true if differences were found
+func CheckIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
+	ci := fs.GetConfig(ctx)
+	err = Retry(src, ci.LowLevelRetries, func() error {
+		differ, err = checkIdenticalDownload(ctx, dst, src)
+		return err
+	})
+	return differ, err
+}
+
+// Does the work for CheckIdenticalDownload
+func checkIdenticalDownload(ctx context.Context, dst, src fs.Object) (differ bool, err error) {
+	in1, err := dst.Open(ctx)
+	if err != nil {
+		return true, errors.Wrapf(err, "failed to open %q", dst)
+	}
+	tr1 := accounting.Stats(ctx).NewTransfer(dst)
+	defer func() {
+		tr1.Done(ctx, nil) // error handling is done by the caller
+	}()
+	in1 = tr1.Account(ctx, in1).WithBuffer() // account and buffer the transfer
+
+	in2, err := src.Open(ctx)
+	if err != nil {
+		return true, errors.Wrapf(err, "failed to open %q", src)
+	}
+	tr2 := accounting.Stats(ctx).NewTransfer(dst)
+	defer func() {
+		tr2.Done(ctx, nil) // error handling is done by the caller
+	}()
+	in2 = tr2.Account(ctx, in2).WithBuffer() // account and buffer the transfer
+
+	// Assign err before the defers run.
+	differ, err = CheckEqualReaders(in1, in2)
+	return
+}
+
+// CheckDownload checks the files in fsrc and fdst according to Size
+// and the actual contents of the files.
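CheckEqualReaders is usable on its own for byte-for-byte comparison of any two streams. A minimal sketch:

package main

import (
	"fmt"
	"strings"

	"github.com/rclone/rclone/fs/operations"
)

func main() {
	differ, err := operations.CheckEqualReaders(
		strings.NewReader("same bytes"),
		strings.NewReader("same bytes"),
	)
	fmt.Println(differ, err) // false <nil>

	differ, _ = operations.CheckEqualReaders(
		strings.NewReader("abc"),
		strings.NewReader("abd"),
	)
	fmt.Println(differ) // true
}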
+func CheckDownload(ctx context.Context, opt *CheckOpt) error {
+	optCopy := *opt
+	optCopy.Check = func(ctx context.Context, a, b fs.Object) (differ bool, noHash bool, err error) {
+		differ, err = CheckIdenticalDownload(ctx, a, b)
+		if err != nil {
+			return true, true, errors.Wrap(err, "failed to download")
+		}
+		return differ, false, nil
+	}
+	return CheckFn(ctx, &optCopy)
+}
diff --git a/vendor/github.com/rclone/rclone/fs/operations/dedupe.go b/vendor/github.com/rclone/rclone/fs/operations/dedupe.go
new file mode 100644
index 00000000000..df6d37f24f8
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/operations/dedupe.go
@@ -0,0 +1,416 @@
+// dedupe - gets rid of identical files on remotes which can have duplicate file names (drive, mega)
+
+package operations
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"path"
+	"sort"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/walk"
+)
+
+// dedupeRename renames the objs slice to different names
+func dedupeRename(ctx context.Context, f fs.Fs, remote string, objs []fs.Object) {
+	doMove := f.Features().Move
+	if doMove == nil {
+		log.Fatalf("Fs %v doesn't support Move", f)
+	}
+	ext := path.Ext(remote)
+	base := remote[:len(remote)-len(ext)]
+
+outer:
+	for i, o := range objs {
+		suffix := 1
+		newName := fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
+		_, err := f.NewObject(ctx, newName)
+		for ; err != fs.ErrorObjectNotFound; suffix++ {
+			if err != nil {
+				err = fs.CountError(err)
+				fs.Errorf(o, "Failed to check for existing object: %v", err)
+				continue outer
+			}
+			if suffix > 100 {
+				fs.Errorf(o, "Could not find an available new name")
+				continue outer
+			}
+			newName = fmt.Sprintf("%s-%d%s", base, i+suffix, ext)
+			_, err = f.NewObject(ctx, newName)
+		}
+		if !SkipDestructive(ctx, o, "rename") {
+			newObj, err := doMove(ctx, o, newName)
+			if err != nil {
+				err = fs.CountError(err)
+				fs.Errorf(o, "Failed to rename: %v", err)
+				continue
+			}
+			fs.Infof(newObj, "renamed from: %v", o)
+		}
+	}
+}
+
+// dedupeDeleteAllButOne deletes all but the one in keep
+func dedupeDeleteAllButOne(ctx context.Context, keep int, remote string, objs []fs.Object) {
+	count := 0
+	for i, o := range objs {
+		if i == keep {
+			continue
+		}
+		err := DeleteFile(ctx, o)
+		if err == nil {
+			count++
+		}
+	}
+	if count > 0 {
+		fs.Logf(remote, "Deleted %d extra copies", count)
+	}
+}
+
+// dedupeDeleteIdentical deletes all but one of identical (by hash) copies
+func dedupeDeleteIdentical(ctx context.Context, ht hash.Type, remote string, objs []fs.Object) (remainingObjs []fs.Object) {
+	ci := fs.GetConfig(ctx)
+
+	// Make map of IDs
+	IDs := make(map[string]int, len(objs))
+	for _, o := range objs {
+		if do, ok := o.(fs.IDer); ok {
+			if ID := do.ID(); ID != "" {
+				IDs[ID]++
+			}
+		}
+	}
+
+	// Remove duplicate IDs
+	newObjs := objs[:0]
+	for _, o := range objs {
+		if do, ok := o.(fs.IDer); ok {
+			if ID := do.ID(); ID != "" {
+				if IDs[ID] <= 1 {
+					newObjs = append(newObjs, o)
+				} else {
+					fs.Logf(o, "Ignoring as it appears %d times in the listing and deleting would lead to data loss", IDs[ID])
+				}
+			}
+		}
+	}
+	objs = newObjs
+
+	// See how many of these duplicates are identical
+	dupesByID := make(map[string][]fs.Object, len(objs))
+	for _, o := range objs {
+		ID := ""
+		if ci.SizeOnly && o.Size() >= 0 {
+			ID = fmt.Sprintf("size %d", o.Size())
+		} else if ht != hash.None {
+			hashValue, err := o.Hash(ctx, ht)
+			if err == nil && hashValue != "" {
+				ID =
fmt.Sprintf("%v %s", ht, hashValue) + } + } + if ID == "" { + remainingObjs = append(remainingObjs, o) + } else { + dupesByID[ID] = append(dupesByID[ID], o) + } + } + + // Delete identical duplicates, filling remainingObjs with the ones remaining + for ID, dupes := range dupesByID { + remainingObjs = append(remainingObjs, dupes[0]) + if len(dupes) > 1 { + fs.Logf(remote, "Deleting %d/%d identical duplicates (%s)", len(dupes)-1, len(dupes), ID) + for _, o := range dupes[1:] { + err := DeleteFile(ctx, o) + if err != nil { + remainingObjs = append(remainingObjs, o) + } + } + } + } + + return remainingObjs +} + +// dedupeList lists the duplicates and does nothing +func dedupeList(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) { + fmt.Printf("%s: %d duplicates\n", remote, len(objs)) + for i, o := range objs { + hashValue := "" + if ht != hash.None { + var err error + hashValue, err = o.Hash(ctx, ht) + if err != nil { + hashValue = err.Error() + } + } + if byHash { + fmt.Printf(" %d: %12d bytes, %s, %s\n", i+1, o.Size(), o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) + } else { + fmt.Printf(" %d: %12d bytes, %s, %v %32s\n", i+1, o.Size(), o.ModTime(ctx).Local().Format("2006-01-02 15:04:05.000000000"), ht, hashValue) + } + } +} + +// dedupeInteractive interactively dedupes the slice of objects +func dedupeInteractive(ctx context.Context, f fs.Fs, ht hash.Type, remote string, objs []fs.Object, byHash bool) { + dedupeList(ctx, f, ht, remote, objs, byHash) + commands := []string{"sSkip and do nothing", "kKeep just one (choose which in next step)"} + if !byHash { + commands = append(commands, "rRename all to be different (by changing file.jpg to file-1.jpg)") + } + switch config.Command(commands) { + case 's': + case 'k': + keep := config.ChooseNumber("Enter the number of the file to keep", 1, len(objs)) + dedupeDeleteAllButOne(ctx, keep-1, remote, objs) + case 'r': + dedupeRename(ctx, f, remote, objs) + } +} + +// DeduplicateMode is how the dedupe command chooses what to do +type DeduplicateMode int + +// Deduplicate modes +const ( + DeduplicateInteractive DeduplicateMode = iota // interactively ask the user + DeduplicateSkip // skip all conflicts + DeduplicateFirst // choose the first object + DeduplicateNewest // choose the newest object + DeduplicateOldest // choose the oldest object + DeduplicateRename // rename the objects + DeduplicateLargest // choose the largest object + DeduplicateSmallest // choose the smallest object + DeduplicateList // list duplicates only +) + +func (x DeduplicateMode) String() string { + switch x { + case DeduplicateInteractive: + return "interactive" + case DeduplicateSkip: + return "skip" + case DeduplicateFirst: + return "first" + case DeduplicateNewest: + return "newest" + case DeduplicateOldest: + return "oldest" + case DeduplicateRename: + return "rename" + case DeduplicateLargest: + return "largest" + case DeduplicateSmallest: + return "smallest" + case DeduplicateList: + return "list" + } + return "unknown" +} + +// Set a DeduplicateMode from a string +func (x *DeduplicateMode) Set(s string) error { + switch strings.ToLower(s) { + case "interactive": + *x = DeduplicateInteractive + case "skip": + *x = DeduplicateSkip + case "first": + *x = DeduplicateFirst + case "newest": + *x = DeduplicateNewest + case "oldest": + *x = DeduplicateOldest + case "rename": + *x = DeduplicateRename + case "largest": + *x = DeduplicateLargest + case "smallest": + *x = DeduplicateSmallest + case "list": + *x = 
DeduplicateList + default: + return errors.Errorf("Unknown mode for dedupe %q.", s) + } + return nil +} + +// Type of the value +func (x *DeduplicateMode) Type() string { + return "string" +} + +// dedupeFindDuplicateDirs scans f for duplicate directories +func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, error) { + ci := fs.GetConfig(ctx) + dirs := map[string][]fs.Directory{} + err := walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListDirs, func(entries fs.DirEntries) error { + entries.ForDir(func(d fs.Directory) { + dirs[d.Remote()] = append(dirs[d.Remote()], d) + }) + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "find duplicate dirs") + } + // make sure parents are before children + duplicateNames := []string{} + for name, ds := range dirs { + if len(ds) > 1 { + duplicateNames = append(duplicateNames, name) + } + } + sort.Strings(duplicateNames) + duplicateDirs := [][]fs.Directory{} + for _, name := range duplicateNames { + duplicateDirs = append(duplicateDirs, dirs[name]) + } + return duplicateDirs, nil +} + +// dedupeMergeDuplicateDirs merges all the duplicate directories found +func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs.Directory) error { + mergeDirs := f.Features().MergeDirs + if mergeDirs == nil { + return errors.Errorf("%v: can't merge directories", f) + } + dirCacheFlush := f.Features().DirCacheFlush + if dirCacheFlush == nil { + return errors.Errorf("%v: can't flush dir cache", f) + } + for _, dirs := range duplicateDirs { + if !SkipDestructive(ctx, dirs[0], "merge duplicate directories") { + fs.Infof(dirs[0], "Merging contents of duplicate directories") + err := mergeDirs(ctx, dirs) + if err != nil { + err = fs.CountError(err) + fs.Errorf(nil, "merge duplicate dirs: %v", err) + } + } + } + dirCacheFlush() + return nil +} + +// sort oldest first +func sortOldestFirst(objs []fs.Object) { + sort.Slice(objs, func(i, j int) bool { + return objs[i].ModTime(context.TODO()).Before(objs[j].ModTime(context.TODO())) + }) +} + +// sort smallest first +func sortSmallestFirst(objs []fs.Object) { + sort.Slice(objs, func(i, j int) bool { + return objs[i].Size() < objs[j].Size() + }) +} + +// Deduplicate interactively finds duplicate files and offers to +// delete all but one or rename them to be different. Only useful with +// Google Drive which can have duplicate file names. 
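+//
+// A minimal usage sketch (illustrative only; f is an fs.Fs opened
+// elsewhere):
+//
+//	// keep the newest copy of each duplicated name, matching by name
+//	err := Deduplicate(ctx, f, DeduplicateNewest, false)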
+func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode, byHash bool) error {
+	ci := fs.GetConfig(ctx)
+	// find a hash to use
+	ht := f.Hashes().GetOne()
+	what := "names"
+	if byHash {
+		if ht == hash.None {
+			return errors.Errorf("%v has no hashes", f)
+		}
+		what = ht.String() + " hashes"
+	}
+	fs.Infof(f, "Looking for duplicate %s using %v mode.", what, mode)
+
+	// Find duplicate directories first and fix them
+	if !byHash {
+		duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
+		if err != nil {
+			return err
+		}
+		if len(duplicateDirs) != 0 {
+			if mode != DeduplicateList {
+				err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
+				if err != nil {
+					return err
+				}
+			} else {
+				for _, dir := range duplicateDirs {
+					fmt.Printf("%s: %d duplicates of this directory\n", dir[0].Remote(), len(dir))
+				}
+			}
+		}
+	}
+
+	// Now find duplicate files
+	files := map[string][]fs.Object{}
+	err := walk.ListR(ctx, f, "", true, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
+		entries.ForObject(func(o fs.Object) {
+			var remote string
+			var err error
+			if byHash {
+				remote, err = o.Hash(ctx, ht)
+				if err != nil {
+					fs.Errorf(o, "Failed to hash: %v", err)
+					remote = ""
+				}
+			} else {
+				remote = o.Remote()
+			}
+			if remote != "" {
+				files[remote] = append(files[remote], o)
+			}
+		})
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	for remote, objs := range files {
+		if len(objs) > 1 {
+			fs.Logf(remote, "Found %d files with duplicate %s", len(objs), what)
+			if !byHash && mode != DeduplicateList {
+				objs = dedupeDeleteIdentical(ctx, ht, remote, objs)
+				if len(objs) <= 1 {
+					fs.Logf(remote, "All duplicates removed")
+					continue
+				}
+			}
+			switch mode {
+			case DeduplicateInteractive:
+				dedupeInteractive(ctx, f, ht, remote, objs, byHash)
+			case DeduplicateFirst:
+				dedupeDeleteAllButOne(ctx, 0, remote, objs)
+			case DeduplicateNewest:
+				sortOldestFirst(objs)
+				dedupeDeleteAllButOne(ctx, len(objs)-1, remote, objs)
+			case DeduplicateOldest:
+				sortOldestFirst(objs)
+				dedupeDeleteAllButOne(ctx, 0, remote, objs)
+			case DeduplicateRename:
+				dedupeRename(ctx, f, remote, objs)
+			case DeduplicateLargest:
+				sortSmallestFirst(objs)
+				dedupeDeleteAllButOne(ctx, len(objs)-1, remote, objs)
+			case DeduplicateSmallest:
+				sortSmallestFirst(objs)
+				dedupeDeleteAllButOne(ctx, 0, remote, objs)
+			case DeduplicateSkip:
+				fs.Logf(remote, "Skipping %d files with duplicate %s", len(objs), what)
+			case DeduplicateList:
+				dedupeList(ctx, f, ht, remote, objs, byHash)
+			default:
+				//skip
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/rclone/rclone/fs/operations/lsjson.go b/vendor/github.com/rclone/rclone/fs/operations/lsjson.go
new file mode 100644
index 00000000000..50ad050e106
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/operations/lsjson.go
@@ -0,0 +1,200 @@
+package operations
+
+import (
+	"context"
+	"path"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/crypt"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/fs/walk"
+)
+
+// ListJSONItem is the struct which gets marshalled for each line
+type ListJSONItem struct {
+	Path          string
+	Name          string
+	EncryptedPath string `json:",omitempty"`
+	Encrypted     string `json:",omitempty"`
+	Size          int64
+	MimeType      string `json:",omitempty"`
+	ModTime       Timestamp //`json:",omitempty"`
+	IsDir         bool
+	Hashes        map[string]string `json:",omitempty"`
+	ID            string `json:",omitempty"`
+	OrigID        string `json:",omitempty"`
+	Tier          string `json:",omitempty"`
+	IsBucket      bool
`json:",omitempty"` +} + +// Timestamp a time in the provided format +type Timestamp struct { + When time.Time + Format string +} + +// MarshalJSON turns a Timestamp into JSON +func (t Timestamp) MarshalJSON() (out []byte, err error) { + if t.When.IsZero() { + return []byte(`""`), nil + } + return []byte(`"` + t.When.Format(t.Format) + `"`), nil +} + +// Returns a time format for the given precision +func formatForPrecision(precision time.Duration) string { + switch { + case precision <= time.Nanosecond: + return "2006-01-02T15:04:05.000000000Z07:00" + case precision <= 10*time.Nanosecond: + return "2006-01-02T15:04:05.00000000Z07:00" + case precision <= 100*time.Nanosecond: + return "2006-01-02T15:04:05.0000000Z07:00" + case precision <= time.Microsecond: + return "2006-01-02T15:04:05.000000Z07:00" + case precision <= 10*time.Microsecond: + return "2006-01-02T15:04:05.00000Z07:00" + case precision <= 100*time.Microsecond: + return "2006-01-02T15:04:05.0000Z07:00" + case precision <= time.Millisecond: + return "2006-01-02T15:04:05.000Z07:00" + case precision <= 10*time.Millisecond: + return "2006-01-02T15:04:05.00Z07:00" + case precision <= 100*time.Millisecond: + return "2006-01-02T15:04:05.0Z07:00" + } + return time.RFC3339 +} + +// ListJSONOpt describes the options for ListJSON +type ListJSONOpt struct { + Recurse bool `json:"recurse"` + NoModTime bool `json:"noModTime"` + NoMimeType bool `json:"noMimeType"` + ShowEncrypted bool `json:"showEncrypted"` + ShowOrigIDs bool `json:"showOrigIDs"` + ShowHash bool `json:"showHash"` + DirsOnly bool `json:"dirsOnly"` + FilesOnly bool `json:"filesOnly"` + HashTypes []string `json:"hashTypes"` // hash types to show if ShowHash is set, e.g. "MD5", "SHA-1" +} + +// ListJSON lists fsrc using the options in opt calling callback for each item +func ListJSON(ctx context.Context, fsrc fs.Fs, remote string, opt *ListJSONOpt, callback func(*ListJSONItem) error) error { + var cipher *crypt.Cipher + if opt.ShowEncrypted { + fsInfo, _, _, config, err := fs.ConfigFs(fsrc.Name() + ":" + fsrc.Root()) + if err != nil { + return errors.Wrap(err, "ListJSON failed to load config for crypt remote") + } + if fsInfo.Name != "crypt" { + return errors.New("The remote needs to be of type \"crypt\"") + } + cipher, err = crypt.NewCipher(config) + if err != nil { + return errors.Wrap(err, "ListJSON failed to make new crypt remote") + } + } + features := fsrc.Features() + canGetTier := features.GetTier + format := formatForPrecision(fsrc.Precision()) + isBucket := features.BucketBased && remote == "" && fsrc.Root() == "" // if bucket based remote listing the root mark directories as buckets + showHash := opt.ShowHash + hashTypes := fsrc.Hashes().Array() + if len(opt.HashTypes) != 0 { + showHash = true + hashTypes = []hash.Type{} + for _, hashType := range opt.HashTypes { + var ht hash.Type + err := ht.Set(hashType) + if err != nil { + return err + } + hashTypes = append(hashTypes, ht) + } + } + err := walk.ListR(ctx, fsrc, remote, false, ConfigMaxDepth(ctx, opt.Recurse), walk.ListAll, func(entries fs.DirEntries) (err error) { + for _, entry := range entries { + switch entry.(type) { + case fs.Directory: + if opt.FilesOnly { + continue + } + case fs.Object: + if opt.DirsOnly { + continue + } + default: + fs.Errorf(nil, "Unknown type %T in listing", entry) + } + + item := ListJSONItem{ + Path: entry.Remote(), + Name: path.Base(entry.Remote()), + Size: entry.Size(), + } + if !opt.NoModTime { + item.ModTime = Timestamp{When: entry.ModTime(ctx), Format: format} + } + if 
!opt.NoMimeType { + item.MimeType = fs.MimeTypeDirEntry(ctx, entry) + } + if cipher != nil { + switch entry.(type) { + case fs.Directory: + item.EncryptedPath = cipher.EncryptDirName(entry.Remote()) + case fs.Object: + item.EncryptedPath = cipher.EncryptFileName(entry.Remote()) + default: + fs.Errorf(nil, "Unknown type %T in listing", entry) + } + item.Encrypted = path.Base(item.EncryptedPath) + } + if do, ok := entry.(fs.IDer); ok { + item.ID = do.ID() + } + if o, ok := entry.(fs.Object); opt.ShowOrigIDs && ok { + if do, ok := fs.UnWrapObject(o).(fs.IDer); ok { + item.OrigID = do.ID() + } + } + switch x := entry.(type) { + case fs.Directory: + item.IsDir = true + item.IsBucket = isBucket + case fs.Object: + item.IsDir = false + if showHash { + item.Hashes = make(map[string]string) + for _, hashType := range hashTypes { + hash, err := x.Hash(ctx, hashType) + if err != nil { + fs.Errorf(x, "Failed to read hash: %v", err) + } else if hash != "" { + item.Hashes[hashType.String()] = hash + } + } + } + if canGetTier { + if do, ok := x.(fs.GetTierer); ok { + item.Tier = do.GetTier() + } + } + default: + fs.Errorf(nil, "Unknown type %T in listing in ListJSON", entry) + } + err = callback(&item) + if err != nil { + return errors.Wrap(err, "callback failed in ListJSON") + } + + } + return nil + }) + if err != nil { + return errors.Wrap(err, "error in ListJSON") + } + return nil +} diff --git a/vendor/github.com/rclone/rclone/fs/operations/multithread.go b/vendor/github.com/rclone/rclone/fs/operations/multithread.go new file mode 100644 index 00000000000..db246b82a90 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/operations/multithread.go @@ -0,0 +1,202 @@ +package operations + +import ( + "context" + "io" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "golang.org/x/sync/errgroup" +) + +const ( + multithreadChunkSize = 64 << 10 + multithreadChunkSizeMask = multithreadChunkSize - 1 + multithreadBufferSize = 32 * 1024 +) + +// Return a boolean as to whether we should use multi thread copy for +// this transfer +func doMultiThreadCopy(ctx context.Context, f fs.Fs, src fs.Object) bool { + ci := fs.GetConfig(ctx) + + // Disable multi thread if... 
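+	// Note: any one of the checks below disables multi-thread copy and
+	// falls back to a normal single-stream copy.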
+
+	// ...it isn't configured
+	if ci.MultiThreadStreams <= 1 {
+		return false
+	}
+	// ...size of object is less than cutoff
+	if src.Size() < int64(ci.MultiThreadCutoff) {
+		return false
+	}
+	// ...destination doesn't support it
+	dstFeatures := f.Features()
+	if dstFeatures.OpenWriterAt == nil {
+		return false
+	}
+	// ...if --multi-thread-streams not in use and source and
+	// destination are both local
+	if !ci.MultiThreadSet && dstFeatures.IsLocal && src.Fs().Features().IsLocal {
+		return false
+	}
+	return true
+}
+
+// state for a multi-thread copy
+type multiThreadCopyState struct {
+	ctx      context.Context
+	partSize int64
+	size     int64
+	wc       fs.WriterAtCloser
+	src      fs.Object
+	acc      *accounting.Account
+	streams  int
+}
+
+// Copy a single stream into place
+func (mc *multiThreadCopyState) copyStream(ctx context.Context, stream int) (err error) {
+	ci := fs.GetConfig(ctx)
+	defer func() {
+		if err != nil {
+			fs.Debugf(mc.src, "multi-thread copy: stream %d/%d failed: %v", stream+1, mc.streams, err)
+		}
+	}()
+	start := int64(stream) * mc.partSize
+	if start >= mc.size {
+		return nil
+	}
+	end := start + mc.partSize
+	if end > mc.size {
+		end = mc.size
+	}
+
+	fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v starting", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start))
+
+	rc, err := NewReOpen(ctx, mc.src, ci.LowLevelRetries, &fs.RangeOption{Start: start, End: end - 1})
+	if err != nil {
+		return errors.Wrap(err, "multipart copy: failed to open source")
+	}
+	defer fs.CheckClose(rc, &err)
+
+	// Copy the data
+	buf := make([]byte, multithreadBufferSize)
+	offset := start
+	for {
+		// Check if context cancelled and exit if so
+		if mc.ctx.Err() != nil {
+			return mc.ctx.Err()
+		}
+		nr, er := rc.Read(buf)
+		if nr > 0 {
+			err = mc.acc.AccountRead(nr)
+			if err != nil {
+				return errors.Wrap(err, "multipart copy: accounting failed")
+			}
+			nw, ew := mc.wc.WriteAt(buf[0:nr], offset)
+			if nw > 0 {
+				offset += int64(nw)
+			}
+			if ew != nil {
+				return errors.Wrap(ew, "multipart copy: write failed")
+			}
+			if nr != nw {
+				return errors.Wrap(io.ErrShortWrite, "multipart copy")
+			}
+		}
+		if er != nil {
+			if er != io.EOF {
+				return errors.Wrap(er, "multipart copy: read failed")
+			}
+			break
+		}
+	}
+
+	if offset != end {
+		return errors.Errorf("multipart copy: wrote %d bytes but expected to write %d", offset-start, end-start)
+	}
+
+	fs.Debugf(mc.src, "multi-thread copy: stream %d/%d (%d-%d) size %v finished", stream+1, mc.streams, start, end, fs.SizeSuffix(end-start))
+	return nil
+}
+
+// Calculate the chunk sizes and update the number of streams
+func (mc *multiThreadCopyState) calculateChunks() {
+	partSize := mc.size / int64(mc.streams)
+	// Round partition size up so partSize * streams >= size
+	if (mc.size % int64(mc.streams)) != 0 {
+		partSize++
+	}
+	// round partSize up to nearest multithreadChunkSize boundary
+	mc.partSize = (partSize + multithreadChunkSizeMask) &^ multithreadChunkSizeMask
+	// recalculate number of streams
+	mc.streams = int(mc.size / mc.partSize)
+	// round streams up so partSize * streams >= size
+	if (mc.size % mc.partSize) != 0 {
+		mc.streams++
+	}
+}
+
+// Copy src to (f, remote) using streams download threads and the OpenWriterAt feature
+func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, streams int, tr *accounting.Transfer) (newDst fs.Object, err error) {
+	openWriterAt := f.Features().OpenWriterAt
+	if openWriterAt == nil {
+		return nil, errors.New("multi-thread copy: OpenWriterAt not supported")
+	}
+	if src.Size() < 0 {
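+		// A negative size means the source size is unknown, so the file
+		// cannot be split into byte ranges for parallel download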
+ return nil, errors.New("multi-thread copy: can't copy unknown sized file") + } + if src.Size() == 0 { + return nil, errors.New("multi-thread copy: can't copy zero sized file") + } + + g, gCtx := errgroup.WithContext(ctx) + mc := &multiThreadCopyState{ + ctx: gCtx, + size: src.Size(), + src: src, + streams: streams, + } + mc.calculateChunks() + + // Make accounting + mc.acc = tr.Account(ctx, nil) + + // create write file handle + mc.wc, err = openWriterAt(gCtx, remote, mc.size) + if err != nil { + return nil, errors.Wrap(err, "multipart copy: failed to open destination") + } + + fs.Debugf(src, "Starting multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize)) + for stream := 0; stream < mc.streams; stream++ { + stream := stream + g.Go(func() (err error) { + return mc.copyStream(gCtx, stream) + }) + } + err = g.Wait() + closeErr := mc.wc.Close() + if err != nil { + return nil, err + } + if closeErr != nil { + return nil, errors.Wrap(closeErr, "multi-thread copy: failed to close object after copy") + } + + obj, err := f.NewObject(ctx, remote) + if err != nil { + return nil, errors.Wrap(err, "multi-thread copy: failed to find object after copy") + } + + err = obj.SetModTime(ctx, src.ModTime(ctx)) + switch err { + case nil, fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete: + default: + return nil, errors.Wrap(err, "multi-thread copy: failed to set modification time") + } + + fs.Debugf(src, "Finished multi-thread copy with %d parts of size %v", mc.streams, fs.SizeSuffix(mc.partSize)) + return obj, nil +} diff --git a/vendor/github.com/rclone/rclone/fs/operations/operations.go b/vendor/github.com/rclone/rclone/fs/operations/operations.go new file mode 100644 index 00000000000..9ed4823380e --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/operations/operations.go @@ -0,0 +1,2148 @@ +// Package operations does generic operations on filesystems and objects +package operations + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/csv" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/cache" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/object" + "github.com/rclone/rclone/fs/walk" + "github.com/rclone/rclone/lib/atexit" + "github.com/rclone/rclone/lib/random" + "github.com/rclone/rclone/lib/readers" +) + +// CheckHashes checks the two files to see if they have common +// known hash types and compares them +// +// Returns +// +// equal - which is equality of the hashes +// +// hash - the HashType. This is HashNone if either of the hashes were +// unset or a compatible hash couldn't be found. 
+// +// err - may return an error which will already have been logged +// +// If an error is returned it will return equal as false +func CheckHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object) (equal bool, ht hash.Type, err error) { + common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) + // fs.Debugf(nil, "Shared hashes: %v", common) + if common.Count() == 0 { + return true, hash.None, nil + } + equal, ht, _, _, err = checkHashes(ctx, src, dst, common.GetOne()) + return equal, ht, err +} + +// checkHashes does the work of CheckHashes but takes a hash.Type and +// returns the effective hash type used. +func checkHashes(ctx context.Context, src fs.ObjectInfo, dst fs.Object, ht hash.Type) (equal bool, htOut hash.Type, srcHash, dstHash string, err error) { + // Calculate hashes in parallel + g, ctx := errgroup.WithContext(ctx) + g.Go(func() (err error) { + srcHash, err = src.Hash(ctx, ht) + if err != nil { + err = fs.CountError(err) + fs.Errorf(src, "Failed to calculate src hash: %v", err) + } + return err + }) + g.Go(func() (err error) { + dstHash, err = dst.Hash(ctx, ht) + if err != nil { + err = fs.CountError(err) + fs.Errorf(dst, "Failed to calculate dst hash: %v", err) + } + return err + }) + err = g.Wait() + if err != nil { + return false, ht, srcHash, dstHash, err + } + if srcHash == "" { + return true, hash.None, srcHash, dstHash, nil + } + if dstHash == "" { + return true, hash.None, srcHash, dstHash, nil + } + if srcHash != dstHash { + fs.Debugf(src, "%v = %s (%v)", ht, srcHash, src.Fs()) + fs.Debugf(dst, "%v = %s (%v)", ht, dstHash, dst.Fs()) + } else { + fs.Debugf(src, "%v = %s OK", ht, srcHash) + } + return srcHash == dstHash, ht, srcHash, dstHash, nil +} + +// Equal checks to see if the src and dst objects are equal by looking at +// size, mtime and hash +// +// If the src and dst size are different then it is considered to be +// not equal. If --size-only is in effect then this is the only check +// that is done. If --ignore-size is in effect then this check is +// skipped and the files are considered the same size. +// +// If the size is the same and the mtime is the same then it is +// considered to be equal. This check is skipped if using --checksum. +// +// If the size is the same and mtime is different, unreadable or +// --checksum is set and the hash is the same then the file is +// considered to be equal. In this case the mtime on the dst is +// updated if --checksum is not set. +// +// Otherwise the file is considered to be not equal including if there +// were errors reading info. 
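+//
+// A minimal usage sketch (illustrative only; src and dst are objects
+// looked up elsewhere):
+//
+//	if Equal(ctx, src, dst) {
+//		// dst is already up to date, no transfer is needed
+//	}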
+func Equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object) bool { + return equal(ctx, src, dst, defaultEqualOpt(ctx)) +} + +// sizeDiffers compare the size of src and dst taking into account the +// various ways of ignoring sizes +func sizeDiffers(ctx context.Context, src, dst fs.ObjectInfo) bool { + ci := fs.GetConfig(ctx) + if ci.IgnoreSize || src.Size() < 0 || dst.Size() < 0 { + return false + } + return src.Size() != dst.Size() +} + +var checksumWarning sync.Once + +// options for equal function() +type equalOpt struct { + sizeOnly bool // if set only check size + checkSum bool // if set check checksum+size instead of modtime+size + updateModTime bool // if set update the modtime if hashes identical and checking with modtime+size + forceModTimeMatch bool // if set assume modtimes match +} + +// default set of options for equal() +func defaultEqualOpt(ctx context.Context) equalOpt { + ci := fs.GetConfig(ctx) + return equalOpt{ + sizeOnly: ci.SizeOnly, + checkSum: ci.CheckSum, + updateModTime: !ci.NoUpdateModTime, + forceModTimeMatch: false, + } +} + +var modTimeUploadOnce sync.Once + +// emit a log if we are about to upload a file to set its modification time +func logModTimeUpload(dst fs.Object) { + modTimeUploadOnce.Do(func() { + fs.Logf(dst.Fs(), "Forced to upload files to set modification times on this backend.") + }) +} + +func equal(ctx context.Context, src fs.ObjectInfo, dst fs.Object, opt equalOpt) bool { + ci := fs.GetConfig(ctx) + if sizeDiffers(ctx, src, dst) { + fs.Debugf(src, "Sizes differ (src %d vs dst %d)", src.Size(), dst.Size()) + return false + } + if opt.sizeOnly { + fs.Debugf(src, "Sizes identical") + return true + } + + // Assert: Size is equal or being ignored + + // If checking checksum and not modtime + if opt.checkSum { + // Check the hash + same, ht, _ := CheckHashes(ctx, src, dst) + if !same { + fs.Debugf(src, "%v differ", ht) + return false + } + if ht == hash.None { + common := src.Fs().Hashes().Overlap(dst.Fs().Hashes()) + if common.Count() == 0 { + checksumWarning.Do(func() { + fs.Logf(dst.Fs(), "--checksum is in use but the source and destination have no hashes in common; falling back to --size-only") + }) + } + fs.Debugf(src, "Size of src and dst objects identical") + } else { + fs.Debugf(src, "Size and %v of src and dst objects identical", ht) + } + return true + } + + srcModTime := src.ModTime(ctx) + if !opt.forceModTimeMatch { + // Sizes the same so check the mtime + modifyWindow := fs.GetModifyWindow(ctx, src.Fs(), dst.Fs()) + if modifyWindow == fs.ModTimeNotSupported { + fs.Debugf(src, "Sizes identical") + return true + } + dstModTime := dst.ModTime(ctx) + dt := dstModTime.Sub(srcModTime) + if dt < modifyWindow && dt > -modifyWindow { + fs.Debugf(src, "Size and modification time the same (differ by %s, within tolerance %s)", dt, modifyWindow) + return true + } + + fs.Debugf(src, "Modification times differ by %s: %v, %v", dt, srcModTime, dstModTime) + } + + // Check if the hashes are the same + same, ht, _ := CheckHashes(ctx, src, dst) + if !same { + fs.Debugf(src, "%v differ", ht) + return false + } + if ht == hash.None && !ci.RefreshTimes { + // if couldn't check hash, return that they differ + return false + } + + // mod time differs but hash is the same to reset mod time if required + if opt.updateModTime { + if !SkipDestructive(ctx, src, "update modification time") { + // Size and hash the same but mtime different + // Error if objects are treated as immutable + if ci.Immutable { + fs.Errorf(dst, "Timestamp mismatch between immutable 
objects") + return false + } + // Update the mtime of the dst object here + err := dst.SetModTime(ctx, srcModTime) + if err == fs.ErrorCantSetModTime { + logModTimeUpload(dst) + fs.Infof(dst, "src and dst identical but can't set mod time without re-uploading") + return false + } else if err == fs.ErrorCantSetModTimeWithoutDelete { + logModTimeUpload(dst) + fs.Infof(dst, "src and dst identical but can't set mod time without deleting and re-uploading") + // Remove the file if BackupDir isn't set. If BackupDir is set we would rather have the old file + // put in the BackupDir than deleted which is what will happen if we don't delete it. + if ci.BackupDir == "" { + err = dst.Remove(ctx) + if err != nil { + fs.Errorf(dst, "failed to delete before re-upload: %v", err) + } + } + return false + } else if err != nil { + err = fs.CountError(err) + fs.Errorf(dst, "Failed to set modification time: %v", err) + } else { + fs.Infof(src, "Updated modification time in destination") + } + } + } + return true +} + +// Used to remove a failed copy +// +// Returns whether the file was successfully removed or not +func removeFailedCopy(ctx context.Context, dst fs.Object) bool { + if dst == nil { + return false + } + fs.Infof(dst, "Removing failed copy") + removeErr := dst.Remove(ctx) + if removeErr != nil { + fs.Infof(dst, "Failed to remove failed copy: %s", removeErr) + return false + } + return true +} + +// OverrideRemote is a wrapper to override the Remote for an +// ObjectInfo +type OverrideRemote struct { + fs.ObjectInfo + remote string +} + +// NewOverrideRemote returns an OverrideRemoteObject which will +// return the remote specified +func NewOverrideRemote(oi fs.ObjectInfo, remote string) *OverrideRemote { + return &OverrideRemote{ + ObjectInfo: oi, + remote: remote, + } +} + +// Remote returns the overridden remote name +func (o *OverrideRemote) Remote() string { + return o.remote +} + +// MimeType returns the mime type of the underlying object or "" if it +// can't be worked out +func (o *OverrideRemote) MimeType(ctx context.Context) string { + if do, ok := o.ObjectInfo.(fs.MimeTyper); ok { + return do.MimeType(ctx) + } + return "" +} + +// ID returns the ID of the Object if known, or "" if not +func (o *OverrideRemote) ID() string { + if do, ok := o.ObjectInfo.(fs.IDer); ok { + return do.ID() + } + return "" +} + +// UnWrap returns the Object that this Object is wrapping or nil if it +// isn't wrapping anything +func (o *OverrideRemote) UnWrap() fs.Object { + if o, ok := o.ObjectInfo.(fs.Object); ok { + return o + } + return nil +} + +// GetTier returns storage tier or class of the Object +func (o *OverrideRemote) GetTier() string { + if do, ok := o.ObjectInfo.(fs.GetTierer); ok { + return do.GetTier() + } + return "" +} + +// Check all optional interfaces satisfied +var _ fs.FullObjectInfo = (*OverrideRemote)(nil) + +// CommonHash returns a single hash.Type and a HashOption with that +// type which is in common between the two fs.Fs. +func CommonHash(ctx context.Context, fa, fb fs.Info) (hash.Type, *fs.HashesOption) { + ci := fs.GetConfig(ctx) + // work out which hash to use - limit to 1 hash in common + var common hash.Set + hashType := hash.None + if !ci.IgnoreChecksum { + common = fb.Hashes().Overlap(fa.Hashes()) + if common.Count() > 0 { + hashType = common.GetOne() + common = hash.Set(hashType) + } + } + return hashType, &fs.HashesOption{Hashes: common} +} + +// Copy src object to dst or f if nil. If dst is nil then it uses +// remote as the name of the new object. 
+// +// It returns the destination object if possible. Note that this may +// be nil. +func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) { + ci := fs.GetConfig(ctx) + tr := accounting.Stats(ctx).NewTransfer(src) + defer func() { + tr.Done(ctx, err) + }() + newDst = dst + if SkipDestructive(ctx, src, "copy") { + in := tr.Account(ctx, nil) + in.DryRun(src.Size()) + return newDst, nil + } + maxTries := ci.LowLevelRetries + tries := 0 + doUpdate := dst != nil + hashType, hashOption := CommonHash(ctx, f, src.Fs()) + + var actionTaken string + for { + // Try server-side copy first - if has optional interface and + // is same underlying remote + actionTaken = "Copied (server-side copy)" + if ci.MaxTransfer >= 0 { + var bytesSoFar int64 + if ci.CutoffMode == fs.CutoffModeCautious { + bytesSoFar = accounting.Stats(ctx).GetBytesWithPending() + src.Size() + } else { + bytesSoFar = accounting.Stats(ctx).GetBytes() + } + if bytesSoFar >= int64(ci.MaxTransfer) { + if ci.CutoffMode == fs.CutoffModeHard { + return nil, accounting.ErrorMaxTransferLimitReachedFatal + } + return nil, accounting.ErrorMaxTransferLimitReachedGraceful + } + } + if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) { + in := tr.Account(ctx, nil) // account the transfer + in.ServerSideCopyStart() + newDst, err = doCopy(ctx, src, remote) + if err == nil { + dst = newDst + in.ServerSideCopyEnd(dst.Size()) // account the bytes for the server-side transfer + err = in.Close() + } else { + _ = in.Close() + } + if err == fs.ErrorCantCopy { + tr.Reset(ctx) // skip incomplete accounting - will be overwritten by the manual copy below + } + } else { + err = fs.ErrorCantCopy + } + // If can't server-side copy, do it manually + if err == fs.ErrorCantCopy { + if doMultiThreadCopy(ctx, f, src) { + // Number of streams proportional to size + streams := src.Size() / int64(ci.MultiThreadCutoff) + // With maximum + if streams > int64(ci.MultiThreadStreams) { + streams = int64(ci.MultiThreadStreams) + } + if streams < 2 { + streams = 2 + } + dst, err = multiThreadCopy(ctx, f, remote, src, int(streams), tr) + if doUpdate { + actionTaken = "Multi-thread Copied (replaced existing)" + } else { + actionTaken = "Multi-thread Copied (new)" + } + } else { + var in0 io.ReadCloser + options := []fs.OpenOption{hashOption} + for _, option := range ci.DownloadHeaders { + options = append(options, option) + } + in0, err = NewReOpen(ctx, src, ci.LowLevelRetries, options...) + if err != nil { + err = errors.Wrap(err, "failed to open source object") + } else { + if src.Size() == -1 { + // -1 indicates unknown size. Use Rcat to handle both remotes supporting and not supporting PutStream. + if doUpdate { + actionTaken = "Copied (Rcat, replaced existing)" + } else { + actionTaken = "Copied (Rcat, new)" + } + // NB Rcat closes in0 + dst, err = Rcat(ctx, f, remote, in0, src.ModTime(ctx)) + newDst = dst + } else { + in := tr.Account(ctx, in0).WithBuffer() // account and buffer the transfer + var wrappedSrc fs.ObjectInfo = src + // We try to pass the original object if possible + if src.Remote() != remote { + wrappedSrc = NewOverrideRemote(src, remote) + } + options := []fs.OpenOption{hashOption} + for _, option := range ci.UploadHeaders { + options = append(options, option) + } + if doUpdate { + actionTaken = "Copied (replaced existing)" + err = dst.Update(ctx, in, wrappedSrc, options...) 
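+							// Note: Update overwrites the existing object in
+							// place, whereas the Put branch below creates a
+							// new object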
+ } else { + actionTaken = "Copied (new)" + dst, err = f.Put(ctx, in, wrappedSrc, options...) + } + closeErr := in.Close() + if err == nil { + newDst = dst + err = closeErr + } + } + } + } + } + tries++ + if tries >= maxTries { + break + } + // Retry if err returned a retry error + if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { + fs.Debugf(src, "Received error: %v - low level retry %d/%d", err, tries, maxTries) + tr.Reset(ctx) // skip incomplete accounting - will be overwritten by retry + continue + } + // otherwise finish + break + } + if err != nil { + err = fs.CountError(err) + fs.Errorf(src, "Failed to copy: %v", err) + return newDst, err + } + + // Verify sizes are the same after transfer + if sizeDiffers(ctx, src, dst) { + err = errors.Errorf("corrupted on transfer: sizes differ %d vs %d", src.Size(), dst.Size()) + fs.Errorf(dst, "%v", err) + err = fs.CountError(err) + removeFailedCopy(ctx, dst) + return newDst, err + } + + // Verify hashes are the same after transfer - ignoring blank hashes + if hashType != hash.None { + // checkHashes has logged and counted errors + equal, _, srcSum, dstSum, _ := checkHashes(ctx, src, dst, hashType) + if !equal { + err = errors.Errorf("corrupted on transfer: %v hash differ %q vs %q", hashType, srcSum, dstSum) + fs.Errorf(dst, "%v", err) + err = fs.CountError(err) + removeFailedCopy(ctx, dst) + return newDst, err + } + } + if newDst != nil && src.String() != newDst.String() { + fs.Debugf(src, "%s to: %s", actionTaken, newDst.String()) + } else { + fs.Debugf(src, actionTaken) + } + return newDst, err +} + +// SameObject returns true if src and dst could be pointing to the +// same object. +func SameObject(src, dst fs.Object) bool { + srcFs, dstFs := src.Fs(), dst.Fs() + if !SameConfig(srcFs, dstFs) { + // If same remote type then check ID of objects if available + doSrcID, srcIDOK := src.(fs.IDer) + doDstID, dstIDOK := dst.(fs.IDer) + if srcIDOK && dstIDOK && SameRemoteType(srcFs, dstFs) { + srcID, dstID := doSrcID.ID(), doDstID.ID() + if srcID != "" && srcID == dstID { + return true + } + } + return false + } + srcPath := path.Join(srcFs.Root(), src.Remote()) + dstPath := path.Join(dstFs.Root(), dst.Remote()) + if dst.Fs().Features().CaseInsensitive { + srcPath = strings.ToLower(srcPath) + dstPath = strings.ToLower(dstPath) + } + return srcPath == dstPath +} + +// Move src object to dst or fdst if nil. If dst is nil then it uses +// remote as the name of the new object. +// +// Note that you must check the destination does not exist before +// calling this and pass it as dst. If you pass dst=nil and the +// destination does exist then this may create duplicates or return +// errors. +// +// It returns the destination object if possible. Note that this may +// be nil. 
+func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
+	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
+	defer func() {
+		if err == nil {
+			accounting.Stats(ctx).Renames(1)
+		}
+		tr.Done(ctx, err)
+	}()
+	newDst = dst
+	if SkipDestructive(ctx, src, "move") {
+		in := tr.Account(ctx, nil)
+		in.DryRun(src.Size())
+		return newDst, nil
+	}
+	// See if we have Move available
+	if doMove := fdst.Features().Move; doMove != nil && (SameConfig(src.Fs(), fdst) || (SameRemoteType(src.Fs(), fdst) && fdst.Features().ServerSideAcrossConfigs)) {
+		// Delete destination if it exists and is not the same file as src (could be same file while seemingly different if the remote is case insensitive)
+		if dst != nil && !SameObject(src, dst) {
+			err = DeleteFile(ctx, dst)
+			if err != nil {
+				return newDst, err
+			}
+		}
+		// Move dst <- src
+		newDst, err = doMove(ctx, src, remote)
+		switch err {
+		case nil:
+			if newDst != nil && src.String() != newDst.String() {
+				fs.Infof(src, "Moved (server-side) to: %s", newDst.String())
+			} else {
+				fs.Infof(src, "Moved (server-side)")
+			}
+
+			return newDst, nil
+		case fs.ErrorCantMove:
+			fs.Debugf(src, "Can't move, switching to copy")
+		default:
+			err = fs.CountError(err)
+			fs.Errorf(src, "Couldn't move: %v", err)
+			return newDst, err
+		}
+	}
+	// Move not found or didn't work so copy dst <- src
+	newDst, err = Copy(ctx, fdst, dst, remote, src)
+	if err != nil {
+		fs.Errorf(src, "Not deleting source as copy failed: %v", err)
+		return newDst, err
+	}
+	// Delete src if no error on copy
+	return newDst, DeleteFile(ctx, src)
+}
+
+// CanServerSideMove returns true if fdst supports server-side moves or
+// server-side copies
+//
+// Some remotes simulate rename by server-side copy and delete, so include
+// remotes that implement either Mover or Copier.
+func CanServerSideMove(fdst fs.Fs) bool {
+	canMove := fdst.Features().Move != nil
+	canCopy := fdst.Features().Copy != nil
+	return canMove || canCopy
+}
+
+// SuffixName adds the current --suffix to the remote, obeying
+// --suffix-keep-extension if set
+func SuffixName(ctx context.Context, remote string) string {
+	ci := fs.GetConfig(ctx)
+	if ci.Suffix == "" {
+		return remote
+	}
+	if ci.SuffixKeepExtension {
+		ext := path.Ext(remote)
+		base := remote[:len(remote)-len(ext)]
+		return base + ci.Suffix + ext
+	}
+	return remote + ci.Suffix
+}
+
+// DeleteFileWithBackupDir deletes a single file respecting --dry-run
+// and accumulating stats and errors.
+//
+// If backupDir is set then it moves the file to there instead of
+// deleting it.
+func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
+	ci := fs.GetConfig(ctx)
+	tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
+	defer func() {
+		tr.Done(ctx, err)
+	}()
+	numDeletes := accounting.Stats(ctx).Deletes(1)
+	if ci.MaxDelete != -1 && numDeletes > ci.MaxDelete {
+		return fserrors.FatalError(errors.New("--max-delete threshold reached"))
+	}
+	action, actioned := "delete", "Deleted"
+	if backupDir != nil {
+		action, actioned = "move into backup dir", "Moved into backup dir"
+	}
+	skip := SkipDestructive(ctx, dst, action)
+	if skip {
+		// do nothing
+	} else if backupDir != nil {
+		err = MoveBackupDir(ctx, backupDir, dst)
+	} else {
+		err = dst.Remove(ctx)
+	}
+	if err != nil {
+		fs.Errorf(dst, "Couldn't %s: %v", action, err)
+		err = fs.CountError(err)
+	} else if !skip {
+		fs.Debugf(dst, actioned)
+	}
+	return err
+}
+
+// DeleteFile deletes a single file respecting --dry-run and accumulating stats and errors.
+//
+// It always deletes the file - use DeleteFileWithBackupDir to move it
+// into a backup directory instead.
+func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
+	return DeleteFileWithBackupDir(ctx, dst, nil)
+}
+
+// DeleteFilesWithBackupDir removes all the files passed in the
+// channel
+//
+// If backupDir is set the files will be placed into that directory
+// instead of being deleted.
+func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
+	var wg sync.WaitGroup
+	ci := fs.GetConfig(ctx)
+	wg.Add(ci.Transfers)
+	var errorCount int32
+	var fatalErrorCount int32
+
+	for i := 0; i < ci.Transfers; i++ {
+		go func() {
+			defer wg.Done()
+			for dst := range toBeDeleted {
+				err := DeleteFileWithBackupDir(ctx, dst, backupDir)
+				if err != nil {
+					atomic.AddInt32(&errorCount, 1)
+					if fserrors.IsFatalError(err) {
+						fs.Errorf(nil, "Got fatal error on delete: %s", err)
+						atomic.AddInt32(&fatalErrorCount, 1)
+						return
+					}
+				}
+			}
+		}()
+	}
+	fs.Debugf(nil, "Waiting for deletions to finish")
+	wg.Wait()
+	if errorCount > 0 {
+		err := errors.Errorf("failed to delete %d files", errorCount)
+		if fatalErrorCount > 0 {
+			return fserrors.FatalError(err)
+		}
+		return err
+	}
+	return nil
+}
+
+// DeleteFiles removes all the files passed in the channel
+func DeleteFiles(ctx context.Context, toBeDeleted fs.ObjectsChan) error {
+	return DeleteFilesWithBackupDir(ctx, toBeDeleted, nil)
+}
+
+// SameRemoteType returns true if fdst and fsrc are the same type
+func SameRemoteType(fdst, fsrc fs.Info) bool {
+	return fmt.Sprintf("%T", fdst) == fmt.Sprintf("%T", fsrc)
+}
+
+// SameConfig returns true if fdst and fsrc are using the same config
+// file entry
+func SameConfig(fdst, fsrc fs.Info) bool {
+	return fdst.Name() == fsrc.Name()
+}
+
+// Same returns true if fdst and fsrc point to the same underlying Fs
+func Same(fdst, fsrc fs.Info) bool {
+	return SameConfig(fdst, fsrc) && strings.Trim(fdst.Root(), "/") == strings.Trim(fsrc.Root(), "/")
+}
+
+// fixRoot returns the Root with a trailing / if not empty. It is
+// aware of case insensitive filesystems.
+func fixRoot(f fs.Info) string {
+	s := strings.Trim(filepath.ToSlash(f.Root()), "/")
+	if s != "" {
+		s += "/"
+	}
+	if f.Features().CaseInsensitive {
+		s = strings.ToLower(s)
+	}
+	return s
+}
+
+// Overlapping returns true if fdst and fsrc point to the same
+// underlying Fs and they overlap.
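+//
+// For example, "remote:dir" overlaps both "remote:dir/sub" and
+// "remote:", but not "remote:other" (illustrative remote names):
+//
+//	if Overlapping(fdst, fsrc) {
+//		// refuse to sync a directory into itself
+//	}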
+func Overlapping(fdst, fsrc fs.Info) bool { + if !SameConfig(fdst, fsrc) { + return false + } + fdstRoot := fixRoot(fdst) + fsrcRoot := fixRoot(fsrc) + return strings.HasPrefix(fdstRoot, fsrcRoot) || strings.HasPrefix(fsrcRoot, fdstRoot) +} + +// SameDir returns true if fdst and fsrc point to the same +// underlying Fs and they are the same directory. +func SameDir(fdst, fsrc fs.Info) bool { + if !SameConfig(fdst, fsrc) { + return false + } + fdstRoot := fixRoot(fdst) + fsrcRoot := fixRoot(fsrc) + return fdstRoot == fsrcRoot +} + +// Retry runs fn up to maxTries times if it returns a retriable error +func Retry(o interface{}, maxTries int, fn func() error) (err error) { + for tries := 1; tries <= maxTries; tries++ { + // Call the function which might error + err = fn() + if err == nil { + break + } + // Retry if err returned a retry error + if fserrors.IsRetryError(err) || fserrors.ShouldRetry(err) { + fs.Debugf(o, "Received error: %v - low level retry %d/%d", err, tries, maxTries) + continue + } + break + } + return err +} + +// ListFn lists the Fs to the supplied function +// +// Lists in parallel which may get them out of order +func ListFn(ctx context.Context, f fs.Fs, fn func(fs.Object)) error { + ci := fs.GetConfig(ctx) + return walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { + entries.ForObject(fn) + return nil + }) +} + +// mutex for synchronized output +var outMutex sync.Mutex + +// SyncPrintf is a global var holding the Printf function used in syncFprintf so that it can be overridden +// Note, despite name, does not provide sync and should not be called directly +// Call syncFprintf, which provides sync +var SyncPrintf = func(format string, a ...interface{}) { + fmt.Printf(format, a...) +} + +// Synchronized fmt.Fprintf +// +// Ignores errors from Fprintf +// +// Updated to print to terminal if no writer is defined +// This special behavior is used to allow easier replacement of the print to terminal code by progress +func syncFprintf(w io.Writer, format string, a ...interface{}) { + outMutex.Lock() + defer outMutex.Unlock() + if w == nil { + SyncPrintf(format, a...) + } else { + _, _ = fmt.Fprintf(w, format, a...) + } +} + +// List the Fs to the supplied writer +// +// Shows size and path - obeys includes and excludes +// +// Lists in parallel which may get them out of order +func List(ctx context.Context, f fs.Fs, w io.Writer) error { + return ListFn(ctx, f, func(o fs.Object) { + syncFprintf(w, "%9d %s\n", o.Size(), o.Remote()) + }) +} + +// ListLong lists the Fs to the supplied writer +// +// Shows size, mod time and path - obeys includes and excludes +// +// Lists in parallel which may get them out of order +func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error { + return ListFn(ctx, f, func(o fs.Object) { + tr := accounting.Stats(ctx).NewCheckingTransfer(o) + defer func() { + tr.Done(ctx, nil) + }() + modTime := o.ModTime(ctx) + syncFprintf(w, "%9d %s %s\n", o.Size(), modTime.Local().Format("2006-01-02 15:04:05.000000000"), o.Remote()) + }) +} + +// hashSum returns the human readable hash for ht passed in. This may +// be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will +// return an error. +func hashSum(ctx context.Context, ht hash.Type, downloadFlag bool, o fs.Object) (string, error) { + var sum string + var err error + + // If downloadFlag is true, download and hash the file. 
+
+	// If downloadFlag is false, call o.Hash asking the remote for the hash
+	if downloadFlag {
+		// Setup: Define accounting, open the file with NewReOpen to provide restarts, account for the transfer, and setup a multi-hasher with the appropriate type
+		// Execution: io.Copy file to hasher, get hash and encode in hex
+
+		tr := accounting.Stats(ctx).NewTransfer(o)
+		defer func() {
+			tr.Done(ctx, err)
+		}()
+
+		// Open with NewReOpen to provide restarts
+		var options []fs.OpenOption
+		for _, option := range fs.GetConfig(ctx).DownloadHeaders {
+			options = append(options, option)
+		}
+		in, err := NewReOpen(ctx, o, fs.GetConfig(ctx).LowLevelRetries, options...)
+		if err != nil {
+			return "ERROR", errors.Wrapf(err, "Failed to open file %v", o)
+		}
+
+		// Account and buffer the transfer
+		in = tr.Account(ctx, in).WithBuffer()
+
+		// Setup hasher
+		hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
+		if err != nil {
+			return "UNSUPPORTED", errors.Wrap(err, "Hash unsupported")
+		}
+
+		// Copy to hasher, downloading the file and passing directly to hash
+		_, err = io.Copy(hasher, in)
+		if err != nil {
+			return "ERROR", errors.Wrap(err, "Failed to copy file to hasher")
+		}
+
+		// Get hash and encode as hex
+		byteSum, err := hasher.Sum(ht)
+		if err != nil {
+			return "ERROR", errors.Wrap(err, "Hasher returned an error")
+		}
+		sum = hex.EncodeToString(byteSum)
+	} else {
+		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
+		defer func() {
+			tr.Done(ctx, err)
+		}()
+
+		sum, err = o.Hash(ctx, ht)
+		if err == hash.ErrUnsupported {
+			return "UNSUPPORTED", errors.Wrap(err, "Hash unsupported")
+		} else if err != nil {
+			return "ERROR", errors.Wrapf(err, "Failed to get hash %v from backend", ht)
+		}
+	}
+
+	return sum, nil
+}
+
+// HashLister does an md5sum equivalent for the hash type passed in
+// Updated to handle both standard hex encoding and base64
+// Updated to perform multiple hashes concurrently
+func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFlag bool, f fs.Fs, w io.Writer) error {
+	concurrencyControl := make(chan struct{}, fs.GetConfig(ctx).Transfers)
+	var wg sync.WaitGroup
+	err := ListFn(ctx, f, func(o fs.Object) {
+		wg.Add(1)
+		concurrencyControl <- struct{}{}
+		go func() {
+			defer func() {
+				<-concurrencyControl
+				wg.Done()
+			}()
+			sum, err := hashSum(ctx, ht, downloadFlag, o)
+			if outputBase64 && err == nil {
+				hexBytes, _ := hex.DecodeString(sum)
+				sum = base64.URLEncoding.EncodeToString(hexBytes)
+				width := base64.URLEncoding.EncodedLen(hash.Width(ht) / 2)
+				syncFprintf(w, "%*s %s\n", width, sum, o.Remote())
+			} else {
+				syncFprintf(w, "%*s %s\n", hash.Width(ht), sum, o.Remote())
+			}
+			if err != nil {
+				err = fs.CountError(err)
+				fs.Errorf(o, "%v", err)
+			}
+		}()
+	})
+	wg.Wait()
+	return err
+}
+
+// Count counts the objects and their sizes in the Fs
+//
+// Obeys includes and excludes
+func Count(ctx context.Context, f fs.Fs) (objects int64, size int64, err error) {
+	err = ListFn(ctx, f, func(o fs.Object) {
+		atomic.AddInt64(&objects, 1)
+		objectSize := o.Size()
+		if objectSize > 0 {
+			atomic.AddInt64(&size, objectSize)
+		}
+	})
+	return
+}
+
+// ConfigMaxDepth returns the depth to use for a recursive or non-recursive listing.
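+//
+// For example, with --max-depth unset (-1 in the config) a
+// non-recursive listing is clamped to depth 1 while a recursive one
+// stays unlimited:
+//
+//	depth := ConfigMaxDepth(ctx, false) // 1 unless --max-depth was set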
+func ConfigMaxDepth(ctx context.Context, recursive bool) int { + ci := fs.GetConfig(ctx) + depth := ci.MaxDepth + if !recursive && depth < 0 { + depth = 1 + } + return depth +} + +// ListDir lists the directories/buckets/containers in the Fs to the supplied writer +func ListDir(ctx context.Context, f fs.Fs, w io.Writer) error { + return walk.ListR(ctx, f, "", false, ConfigMaxDepth(ctx, false), walk.ListDirs, func(entries fs.DirEntries) error { + entries.ForDir(func(dir fs.Directory) { + if dir != nil { + syncFprintf(w, "%12d %13s %9d %s\n", dir.Size(), dir.ModTime(ctx).Local().Format("2006-01-02 15:04:05"), dir.Items(), dir.Remote()) + } + }) + return nil + }) +} + +// Mkdir makes a destination directory or container +func Mkdir(ctx context.Context, f fs.Fs, dir string) error { + if SkipDestructive(ctx, fs.LogDirName(f, dir), "make directory") { + return nil + } + fs.Debugf(fs.LogDirName(f, dir), "Making directory") + err := f.Mkdir(ctx, dir) + if err != nil { + err = fs.CountError(err) + return err + } + return nil +} + +// TryRmdir removes a container but not if not empty. It doesn't +// count errors but may return one. +func TryRmdir(ctx context.Context, f fs.Fs, dir string) error { + accounting.Stats(ctx).DeletedDirs(1) + if SkipDestructive(ctx, fs.LogDirName(f, dir), "remove directory") { + return nil + } + fs.Debugf(fs.LogDirName(f, dir), "Removing directory") + return f.Rmdir(ctx, dir) +} + +// Rmdir removes a container but not if not empty +func Rmdir(ctx context.Context, f fs.Fs, dir string) error { + err := TryRmdir(ctx, f, dir) + if err != nil { + err = fs.CountError(err) + return err + } + return err +} + +// Purge removes a directory and all of its contents +func Purge(ctx context.Context, f fs.Fs, dir string) (err error) { + doFallbackPurge := true + if doPurge := f.Features().Purge; doPurge != nil { + doFallbackPurge = false + accounting.Stats(ctx).DeletedDirs(1) + if SkipDestructive(ctx, fs.LogDirName(f, dir), "purge directory") { + return nil + } + err = doPurge(ctx, dir) + if err == fs.ErrorCantPurge { + doFallbackPurge = true + } + } + if doFallbackPurge { + // DeleteFiles and Rmdir observe --dry-run + err = DeleteFiles(ctx, listToChan(ctx, f, dir)) + if err != nil { + return err + } + err = Rmdirs(ctx, f, dir, false) + } + if err != nil { + err = fs.CountError(err) + return err + } + return nil +} + +// Delete removes all the contents of a container. Unlike Purge, it +// obeys includes and excludes. +func Delete(ctx context.Context, f fs.Fs) error { + ci := fs.GetConfig(ctx) + delChan := make(fs.ObjectsChan, ci.Transfers) + delErr := make(chan error, 1) + go func() { + delErr <- DeleteFiles(ctx, delChan) + }() + err := ListFn(ctx, f, func(o fs.Object) { + delChan <- o + }) + close(delChan) + delError := <-delErr + if err == nil { + err = delError + } + return err +} + +// listToChan will transfer all objects in the listing to the output +// +// If an error occurs, the error will be logged, and it will close the +// channel. 
+// +// If the error was ErrorDirNotFound then it will be ignored +func listToChan(ctx context.Context, f fs.Fs, dir string) fs.ObjectsChan { + ci := fs.GetConfig(ctx) + o := make(fs.ObjectsChan, ci.Checkers) + go func() { + defer close(o) + err := walk.ListR(ctx, f, dir, true, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error { + entries.ForObject(func(obj fs.Object) { + o <- obj + }) + return nil + }) + if err != nil && err != fs.ErrorDirNotFound { + err = errors.Wrap(err, "failed to list") + err = fs.CountError(err) + fs.Errorf(nil, "%v", err) + } + }() + return o +} + +// CleanUp removes the trash for the Fs +func CleanUp(ctx context.Context, f fs.Fs) error { + doCleanUp := f.Features().CleanUp + if doCleanUp == nil { + return errors.Errorf("%v doesn't support cleanup", f) + } + if SkipDestructive(ctx, f, "clean up old files") { + return nil + } + return doCleanUp(ctx) +} + +// wrap a Reader and a Closer together into a ReadCloser +type readCloser struct { + io.Reader + io.Closer +} + +// Cat any files to the io.Writer +// +// if offset == 0 it will be ignored +// if offset > 0 then the file will be seeked to that offset +// if offset < 0 then the file will be seeked that far from the end +// +// if count < 0 then it will be ignored +// if count >= 0 then only that many characters will be output +func Cat(ctx context.Context, f fs.Fs, w io.Writer, offset, count int64) error { + var mu sync.Mutex + ci := fs.GetConfig(ctx) + return ListFn(ctx, f, func(o fs.Object) { + var err error + tr := accounting.Stats(ctx).NewTransfer(o) + defer func() { + tr.Done(ctx, err) + }() + opt := fs.RangeOption{Start: offset, End: -1} + size := o.Size() + if opt.Start < 0 { + opt.Start += size + } + if count >= 0 { + opt.End = opt.Start + count - 1 + } + var options []fs.OpenOption + if opt.Start > 0 || opt.End >= 0 { + options = append(options, &opt) + } + for _, option := range ci.DownloadHeaders { + options = append(options, option) + } + in, err := o.Open(ctx, options...) 
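+		// To illustrate the range arithmetic above (example values, not from
+		// the original): with size = 1000, offset = -100 and count = -1 the
+		// request becomes RangeOption{Start: 900, End: -1} (the last 100
+		// bytes); with offset = 0 and count = 10 it becomes
+		// RangeOption{Start: 0, End: 9}.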
+ if err != nil { + err = fs.CountError(err) + fs.Errorf(o, "Failed to open: %v", err) + return + } + if count >= 0 { + in = &readCloser{Reader: &io.LimitedReader{R: in, N: count}, Closer: in} + } + in = tr.Account(ctx, in).WithBuffer() // account and buffer the transfer + // take the lock just before we output stuff, so at the last possible moment + mu.Lock() + defer mu.Unlock() + _, err = io.Copy(w, in) + if err != nil { + err = fs.CountError(err) + fs.Errorf(o, "Failed to send to output: %v", err) + } + }) +} + +// Rcat reads data from the Reader until EOF and uploads it to a file on remote +func Rcat(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, modTime time.Time) (dst fs.Object, err error) { + ci := fs.GetConfig(ctx) + tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, -1) + defer func() { + tr.Done(ctx, err) + }() + in = tr.Account(ctx, in).WithBuffer() + + readCounter := readers.NewCountingReader(in) + var trackingIn io.Reader + var hasher *hash.MultiHasher + var options []fs.OpenOption + if !ci.IgnoreChecksum { + hashes := hash.NewHashSet(fdst.Hashes().GetOne()) // just pick one hash + hashOption := &fs.HashesOption{Hashes: hashes} + options = append(options, hashOption) + hasher, err = hash.NewMultiHasherTypes(hashes) + if err != nil { + return nil, err + } + trackingIn = io.TeeReader(readCounter, hasher) + } else { + trackingIn = readCounter + } + for _, option := range ci.UploadHeaders { + options = append(options, option) + } + + compare := func(dst fs.Object) error { + var sums map[hash.Type]string + if hasher != nil { + sums = hasher.Sums() + } + src := object.NewStaticObjectInfo(dstFileName, modTime, int64(readCounter.BytesRead()), false, sums, fdst) + if !Equal(ctx, src, dst) { + err = errors.Errorf("corrupted on transfer") + err = fs.CountError(err) + fs.Errorf(dst, "%v", err) + return err + } + return nil + } + + // check if file small enough for direct upload + buf := make([]byte, ci.StreamingUploadCutoff) + if n, err := io.ReadFull(trackingIn, buf); err == io.EOF || err == io.ErrUnexpectedEOF { + fs.Debugf(fdst, "File to upload is small (%d bytes), uploading instead of streaming", n) + src := object.NewMemoryObject(dstFileName, modTime, buf[:n]) + return Copy(ctx, fdst, nil, dstFileName, src) + } + + // Make a new ReadCloser with the bits we've already read + in = &readCloser{ + Reader: io.MultiReader(bytes.NewReader(buf), trackingIn), + Closer: in, + } + + fStreamTo := fdst + canStream := fdst.Features().PutStream != nil + if !canStream { + fs.Debugf(fdst, "Target remote doesn't support streaming uploads, creating temporary local FS to spool file") + tmpLocalFs, err := fs.TemporaryLocalFs(ctx) + if err != nil { + return nil, errors.Wrap(err, "Failed to create temporary local FS to spool file") + } + defer func() { + err := Purge(ctx, tmpLocalFs, "") + if err != nil { + fs.Infof(tmpLocalFs, "Failed to cleanup temporary FS: %v", err) + } + }() + fStreamTo = tmpLocalFs + } + + if SkipDestructive(ctx, dstFileName, "upload from pipe") { + // prevents "broken pipe" errors + _, err = io.Copy(ioutil.Discard, in) + return nil, err + } + + objInfo := object.NewStaticObjectInfo(dstFileName, modTime, -1, false, nil, nil) + if dst, err = fStreamTo.Features().PutStream(ctx, in, objInfo, options...); err != nil { + return dst, err + } + if err = compare(dst); err != nil { + return dst, err + } + if !canStream { + // copy dst (which is the local object we have just streamed to) to the remote + return Copy(ctx, fdst, nil, dstFileName, dst) + } + 
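+	// Recap of the paths above (illustrative): input smaller than
+	// --streaming-upload-cutoff is buffered whole and uploaded as an
+	// in-memory object; larger input is streamed with PutStream, or spooled
+	// via a temporary local Fs first when the target can't stream.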
return dst, nil +} + +// PublicLink adds a "readable by anyone with link" permission on the given file or folder. +func PublicLink(ctx context.Context, f fs.Fs, remote string, expire fs.Duration, unlink bool) (string, error) { + doPublicLink := f.Features().PublicLink + if doPublicLink == nil { + return "", errors.Errorf("%v doesn't support public links", f) + } + return doPublicLink(ctx, remote, expire, unlink) +} + +// Rmdirs removes any empty directories (or directories only +// containing empty directories) under f, including f. +func Rmdirs(ctx context.Context, f fs.Fs, dir string, leaveRoot bool) error { + ci := fs.GetConfig(ctx) + dirEmpty := make(map[string]bool) + dirEmpty[dir] = !leaveRoot + err := walk.Walk(ctx, f, dir, true, ci.MaxDepth, func(dirPath string, entries fs.DirEntries, err error) error { + if err != nil { + err = fs.CountError(err) + fs.Errorf(f, "Failed to list %q: %v", dirPath, err) + return nil + } + for _, entry := range entries { + switch x := entry.(type) { + case fs.Directory: + // add a new directory as empty + dir := x.Remote() + _, found := dirEmpty[dir] + if !found { + dirEmpty[dir] = true + } + case fs.Object: + // mark the parents of the file as being non-empty + dir := x.Remote() + for dir != "" { + dir = path.Dir(dir) + if dir == "." || dir == "/" { + dir = "" + } + empty, found := dirEmpty[dir] + // End if we reach a directory which is non-empty + if found && !empty { + break + } + dirEmpty[dir] = false + } + } + } + return nil + }) + if err != nil { + return errors.Wrap(err, "failed to rmdirs") + } + // Now delete the empty directories, starting from the longest path + var toDelete []string + for dir, empty := range dirEmpty { + if empty { + toDelete = append(toDelete, dir) + } + } + sort.Strings(toDelete) + for i := len(toDelete) - 1; i >= 0; i-- { + dir := toDelete[i] + err := TryRmdir(ctx, f, dir) + if err != nil { + err = fs.CountError(err) + fs.Errorf(dir, "Failed to rmdir: %v", err) + return err + } + } + return nil +} + +// GetCompareDest sets up --compare-dest +func GetCompareDest(ctx context.Context) (CompareDest fs.Fs, err error) { + ci := fs.GetConfig(ctx) + CompareDest, err = cache.Get(ctx, ci.CompareDest) + if err != nil { + return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --compare-dest %q: %v", ci.CompareDest, err)) + } + return CompareDest, nil +} + +// compareDest checks --compare-dest to see if src needs to +// be copied +// +// Returns True if src is in --compare-dest +func compareDest(ctx context.Context, dst, src fs.Object, CompareDest fs.Fs) (NoNeedTransfer bool, err error) { + var remote string + if dst == nil { + remote = src.Remote() + } else { + remote = dst.Remote() + } + CompareDestFile, err := CompareDest.NewObject(ctx, remote) + switch err { + case fs.ErrorObjectNotFound: + return false, nil + case nil: + break + default: + return false, err + } + if Equal(ctx, src, CompareDestFile) { + fs.Debugf(src, "Destination found in --compare-dest, skipping") + return true, nil + } + return false, nil +} + +// GetCopyDest sets up --copy-dest +func GetCopyDest(ctx context.Context, fdst fs.Fs) (CopyDest fs.Fs, err error) { + ci := fs.GetConfig(ctx) + CopyDest, err = cache.Get(ctx, ci.CopyDest) + if err != nil { + return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --copy-dest %q: %v", ci.CopyDest, err)) + } + if !SameConfig(fdst, CopyDest) { + return nil, fserrors.FatalError(errors.New("parameter to --copy-dest has to be on the same remote as destination")) + } + if CopyDest.Features().Copy == 
nil {
+		return nil, fserrors.FatalError(errors.New("can't use --copy-dest on a remote which doesn't support server-side copy"))
+	}
+	return CopyDest, nil
+}
+
+// copyDest checks --copy-dest to see if src needs to
+// be copied
+//
+// Returns True if src was copied from --copy-dest
+func copyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
+	var remote string
+	if dst == nil {
+		remote = src.Remote()
+	} else {
+		remote = dst.Remote()
+	}
+	CopyDestFile, err := CopyDest.NewObject(ctx, remote)
+	switch err {
+	case fs.ErrorObjectNotFound:
+		return false, nil
+	case nil:
+		break
+	default:
+		return false, err
+	}
+	opt := defaultEqualOpt(ctx)
+	opt.updateModTime = false
+	if equal(ctx, src, CopyDestFile, opt) {
+		if dst == nil || !Equal(ctx, src, dst) {
+			if dst != nil && backupDir != nil {
+				err = MoveBackupDir(ctx, backupDir, dst)
+				if err != nil {
+					return false, errors.Wrap(err, "moving to --backup-dir failed")
+				}
+				// If successful zero out the dstObj as it is no longer there
+				dst = nil
+			}
+			_, err := Copy(ctx, fdst, dst, remote, CopyDestFile)
+			if err != nil {
+				fs.Errorf(src, "Destination found in --copy-dest, error copying")
+				return false, nil
+			}
+			fs.Debugf(src, "Destination found in --copy-dest, using server-side copy")
+			return true, nil
+		}
+		fs.Debugf(src, "Unchanged skipping")
+		return true, nil
+	}
+	fs.Debugf(src, "Destination not found in --copy-dest")
+	return false, nil
+}
+
+// CompareOrCopyDest checks --compare-dest and --copy-dest to see if src
+// does not need to be copied
+//
+// Returns True if src does not need to be copied
+func CompareOrCopyDest(ctx context.Context, fdst fs.Fs, dst, src fs.Object, CompareOrCopyDest, backupDir fs.Fs) (NoNeedTransfer bool, err error) {
+	ci := fs.GetConfig(ctx)
+	if ci.CompareDest != "" {
+		return compareDest(ctx, dst, src, CompareOrCopyDest)
+	} else if ci.CopyDest != "" {
+		return copyDest(ctx, fdst, dst, src, CompareOrCopyDest, backupDir)
+	}
+	return false, nil
+}
+
+// NeedTransfer checks to see if src needs to be copied to dst using
+// the current config.
+//
+// Returns a flag which indicates whether the file needs to be
+// transferred or not.
+func NeedTransfer(ctx context.Context, dst, src fs.Object) bool {
+	ci := fs.GetConfig(ctx)
+	if dst == nil {
+		fs.Debugf(src, "Need to transfer - File not found at Destination")
+		return true
+	}
+	// If we should ignore existing files, don't transfer
+	if ci.IgnoreExisting {
+		fs.Debugf(src, "Destination exists, skipping")
+		return false
+	}
+	// If we should upload unconditionally
+	if ci.IgnoreTimes {
+		fs.Debugf(src, "Transferring unconditionally as --ignore-times is in use")
+		return true
+	}
+	// If UpdateOlder is in effect, skip if dst is newer than src
+	if ci.UpdateOlder {
+		srcModTime := src.ModTime(ctx)
+		dstModTime := dst.ModTime(ctx)
+		dt := dstModTime.Sub(srcModTime)
+		// If we have a mutually agreed precision then use that
+		modifyWindow := fs.GetModifyWindow(ctx, dst.Fs(), src.Fs())
+		if modifyWindow == fs.ModTimeNotSupported {
+			// Otherwise use 1 second as a safe default as
+			// the resolution of the time a file was
+			// uploaded.
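+			// Illustration (not from the original): with a 1s window a src
+			// written at 12:00:00.4 and a dst at 12:00:00.9 give dt = 500ms,
+			// so neither case of the switch below fires and the files are
+			// compared size-only (or by checksum with --checksum).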
+ modifyWindow = time.Second + } + switch { + case dt >= modifyWindow: + fs.Debugf(src, "Destination is newer than source, skipping") + return false + case dt <= -modifyWindow: + // force --checksum on for the check and do update modtimes by default + opt := defaultEqualOpt(ctx) + opt.forceModTimeMatch = true + if equal(ctx, src, dst, opt) { + fs.Debugf(src, "Unchanged skipping") + return false + } + default: + // Do a size only compare unless --checksum is set + opt := defaultEqualOpt(ctx) + opt.sizeOnly = !ci.CheckSum + if equal(ctx, src, dst, opt) { + fs.Debugf(src, "Destination mod time is within %v of source and files identical, skipping", modifyWindow) + return false + } + fs.Debugf(src, "Destination mod time is within %v of source but files differ, transferring", modifyWindow) + } + } else { + // Check to see if changed or not + if Equal(ctx, src, dst) { + fs.Debugf(src, "Unchanged skipping") + return false + } + } + return true +} + +// RcatSize reads data from the Reader until EOF and uploads it to a file on remote. +// Pass in size >=0 if known, <0 if not known +func RcatSize(ctx context.Context, fdst fs.Fs, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (dst fs.Object, err error) { + var obj fs.Object + + if size >= 0 { + var err error + // Size known use Put + tr := accounting.Stats(ctx).NewTransferRemoteSize(dstFileName, size) + defer func() { + tr.Done(ctx, err) + }() + body := ioutil.NopCloser(in) // we let the server close the body + in := tr.Account(ctx, body) // account the transfer (no buffering) + + if SkipDestructive(ctx, dstFileName, "upload from pipe") { + // prevents "broken pipe" errors + _, err = io.Copy(ioutil.Discard, in) + return nil, err + } + + info := object.NewStaticObjectInfo(dstFileName, modTime, size, true, nil, fdst) + obj, err = fdst.Put(ctx, in, info) + if err != nil { + fs.Errorf(dstFileName, "Post request put error: %v", err) + + return nil, err + } + } else { + // Size unknown use Rcat + obj, err = Rcat(ctx, fdst, dstFileName, in, modTime) + if err != nil { + fs.Errorf(dstFileName, "Post request rcat error: %v", err) + + return nil, err + } + } + + return obj, nil +} + +// copyURLFunc is called from CopyURLFn +type copyURLFunc func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) + +// copyURLFn copies the data from the url to the function supplied +func copyURLFn(ctx context.Context, dstFileName string, url string, dstFileNameFromURL bool, fn copyURLFunc) (err error) { + client := fshttp.NewClient(ctx) + resp, err := client.Get(url) + if err != nil { + return err + } + defer fs.CheckClose(resp.Body, &err) + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return errors.Errorf("CopyURL failed: %s", resp.Status) + } + modTime, err := http.ParseTime(resp.Header.Get("Last-Modified")) + if err != nil { + modTime = time.Now() + } + if dstFileNameFromURL { + dstFileName = path.Base(resp.Request.URL.Path) + if dstFileName == "." 
|| dstFileName == "/" {
+			return errors.Errorf("CopyURL failed: file name wasn't found in url")
+		}
+	}
+	return fn(ctx, dstFileName, resp.Body, resp.ContentLength, modTime)
+}
+
+// CopyURL copies the data from the url to (fdst, dstFileName)
+func CopyURL(ctx context.Context, fdst fs.Fs, dstFileName string, url string, dstFileNameFromURL bool, noClobber bool) (dst fs.Object, err error) {
+
+	err = copyURLFn(ctx, dstFileName, url, dstFileNameFromURL, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) {
+		if noClobber {
+			_, err = fdst.NewObject(ctx, dstFileName)
+			if err == nil {
+				return errors.New("CopyURL failed: file already exists")
+			}
+		}
+		dst, err = RcatSize(ctx, fdst, dstFileName, in, size, modTime)
+		return err
+	})
+	return dst, err
+}
+
+// CopyURLToWriter copies the data from the url to the io.Writer supplied
+func CopyURLToWriter(ctx context.Context, url string, out io.Writer) (err error) {
+	return copyURLFn(ctx, "", url, false, func(ctx context.Context, dstFileName string, in io.ReadCloser, size int64, modTime time.Time) (err error) {
+		_, err = io.Copy(out, in)
+		return err
+	})
+}
+
+// BackupDir returns the correctly configured --backup-dir
+func BackupDir(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, srcFileName string) (backupDir fs.Fs, err error) {
+	ci := fs.GetConfig(ctx)
+	if ci.BackupDir != "" {
+		backupDir, err = cache.Get(ctx, ci.BackupDir)
+		if err != nil {
+			return nil, fserrors.FatalError(errors.Errorf("Failed to make fs for --backup-dir %q: %v", ci.BackupDir, err))
+		}
+		if !SameConfig(fdst, backupDir) {
+			return nil, fserrors.FatalError(errors.New("parameter to --backup-dir has to be on the same remote as destination"))
+		}
+		if srcFileName == "" {
+			if Overlapping(fdst, backupDir) {
+				return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't overlap"))
+			}
+			if Overlapping(fsrc, backupDir) {
+				return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't overlap"))
+			}
+		} else {
+			if ci.Suffix == "" {
+				if SameDir(fdst, backupDir) {
+					return nil, fserrors.FatalError(errors.New("destination and parameter to --backup-dir mustn't be the same"))
+				}
+				if SameDir(fsrc, backupDir) {
+					return nil, fserrors.FatalError(errors.New("source and parameter to --backup-dir mustn't be the same"))
+				}
+			}
+		}
+	} else if ci.Suffix != "" {
+		// --backup-dir is not set but --suffix is - use the destination as the backupDir
+		backupDir = fdst
+	} else {
+		return nil, fserrors.FatalError(errors.New("internal error: BackupDir called when --backup-dir and --suffix both empty"))
+	}
+	if !CanServerSideMove(backupDir) {
+		return nil, fserrors.FatalError(errors.New("can't use --backup-dir on a remote which doesn't support server-side move or copy"))
+	}
+	return backupDir, nil
+}
+
+// MoveBackupDir moves a file to the backup dir
+func MoveBackupDir(ctx context.Context, backupDir fs.Fs, dst fs.Object) (err error) {
+	remoteWithSuffix := SuffixName(ctx, dst.Remote())
+	overwritten, _ := backupDir.NewObject(ctx, remoteWithSuffix)
+	_, err = Move(ctx, backupDir, overwritten, remoteWithSuffix, dst)
+	return err
+}
+
+// moveOrCopyFile moves or copies a single file possibly to a new name
+func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string, cp bool) (err error) {
+	ci := fs.GetConfig(ctx)
+	dstFilePath := path.Join(fdst.Root(), dstFileName)
+	srcFilePath := path.Join(fsrc.Root(), srcFileName)
+	if fdst.Name() ==
fsrc.Name() && dstFilePath == srcFilePath {
+		fs.Debugf(fdst, "don't need to copy/move %s, it is already at target location", dstFileName)
+		return nil
+	}
+
+	// Choose operations
+	Op := Move
+	if cp {
+		Op = Copy
+	}
+
+	// Find src object
+	srcObj, err := fsrc.NewObject(ctx, srcFileName)
+	if err != nil {
+		return err
+	}
+
+	// Find dst object if it exists
+	var dstObj fs.Object
+	if !ci.NoCheckDest {
+		dstObj, err = fdst.NewObject(ctx, dstFileName)
+		if err == fs.ErrorObjectNotFound {
+			dstObj = nil
+		} else if err != nil {
+			return err
+		}
+	}
+
+	// Special case for changing case of a file on a case insensitive remote
+	// This will move the file to a temporary name then
+	// move it back to the intended destination. This is required
+	// to avoid issues with certain remotes and avoid file deletion.
+	if !cp && fdst.Name() == fsrc.Name() && fdst.Features().CaseInsensitive && dstFileName != srcFileName && strings.ToLower(dstFilePath) == strings.ToLower(srcFilePath) {
+		// Create random name to temporarily move file to
+		tmpObjName := dstFileName + "-rclone-move-" + random.String(8)
+		_, err := fdst.NewObject(ctx, tmpObjName)
+		if err != fs.ErrorObjectNotFound {
+			if err == nil {
+				return errors.New("found an already existing file with a randomly generated name. Try the operation again")
+			}
+			return errors.Wrap(err, "error while attempting to move file to a temporary location")
+		}
+		tr := accounting.Stats(ctx).NewTransfer(srcObj)
+		defer func() {
+			tr.Done(ctx, err)
+		}()
+		tmpObj, err := Op(ctx, fdst, nil, tmpObjName, srcObj)
+		if err != nil {
+			return errors.Wrap(err, "error while moving file to temporary location")
+		}
+		_, err = Op(ctx, fdst, nil, dstFileName, tmpObj)
+		return err
+	}
+
+	var backupDir, copyDestDir fs.Fs
+	if ci.BackupDir != "" || ci.Suffix != "" {
+		backupDir, err = BackupDir(ctx, fdst, fsrc, srcFileName)
+		if err != nil {
+			return errors.Wrap(err, "creating Fs for --backup-dir failed")
+		}
+	}
+	if ci.CompareDest != "" {
+		copyDestDir, err = GetCompareDest(ctx)
+		if err != nil {
+			return err
+		}
+	} else if ci.CopyDest != "" {
+		copyDestDir, err = GetCopyDest(ctx, fdst)
+		if err != nil {
+			return err
+		}
+	}
+	NoNeedTransfer, err := CompareOrCopyDest(ctx, fdst, dstObj, srcObj, copyDestDir, backupDir)
+	if err != nil {
+		return err
+	}
+	if !NoNeedTransfer && NeedTransfer(ctx, dstObj, srcObj) {
+		// If destination already exists, then we must move it into --backup-dir if required
+		if dstObj != nil && backupDir != nil {
+			err = MoveBackupDir(ctx, backupDir, dstObj)
+			if err != nil {
+				return errors.Wrap(err, "moving to --backup-dir failed")
+			}
+			// If successful zero out the dstObj as it is no longer there
+			dstObj = nil
+		}
+
+		_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
+	} else {
+		tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
+		if !cp {
+			err = DeleteFile(ctx, srcObj)
+		}
+		tr.Done(ctx, err)
+	}
+	return err
+}
+
+// MoveFile moves a single file possibly to a new name
+func MoveFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
+	return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, false)
+}
+
+// CopyFile copies a single file possibly to a new name
+func CopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName string, srcFileName string) (err error) {
+	return moveOrCopyFile(ctx, fdst, fsrc, dstFileName, srcFileName, true)
+}
+
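+// Illustrative use of the helpers above (a sketch, not from the original
+// source):
+//
+//	// rename a file within one remote, honouring --backup-dir etc.
+//	err := MoveFile(ctx, f, f, "new/name.txt", "old/name.txt")
+//	// copy a single file between remotes, possibly to a new name
+//	err = CopyFile(ctx, fdst, fsrc, "dir/file.txt", "file.txt")
+
+// SetTier changes tier of object in remote
+func SetTier(ctx context.Context, fsrc fs.Fs, tier string) error {
+	return ListFn(ctx, fsrc,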
func(o fs.Object) { + objImpl, ok := o.(fs.SetTierer) + if !ok { + fs.Errorf(fsrc, "Remote object does not implement SetTier") + return + } + err := objImpl.SetTier(tier) + if err != nil { + fs.Errorf(fsrc, "Failed to do SetTier, %v", err) + } + }) +} + +// ListFormat defines files information print format +type ListFormat struct { + separator string + dirSlash bool + absolute bool + output []func(entry *ListJSONItem) string + csv *csv.Writer + buf bytes.Buffer +} + +// SetSeparator changes separator in struct +func (l *ListFormat) SetSeparator(separator string) { + l.separator = separator +} + +// SetDirSlash defines if slash should be printed +func (l *ListFormat) SetDirSlash(dirSlash bool) { + l.dirSlash = dirSlash +} + +// SetAbsolute prints a leading slash in front of path names +func (l *ListFormat) SetAbsolute(absolute bool) { + l.absolute = absolute +} + +// SetCSV defines if the output should be csv +// +// Note that you should call SetSeparator before this if you want a +// custom separator +func (l *ListFormat) SetCSV(useCSV bool) { + if useCSV { + l.csv = csv.NewWriter(&l.buf) + if l.separator != "" { + l.csv.Comma = []rune(l.separator)[0] + } + } else { + l.csv = nil + } +} + +// SetOutput sets functions used to create files information +func (l *ListFormat) SetOutput(output []func(entry *ListJSONItem) string) { + l.output = output +} + +// AddModTime adds file's Mod Time to output +func (l *ListFormat) AddModTime() { + l.AppendOutput(func(entry *ListJSONItem) string { + return entry.ModTime.When.Local().Format("2006-01-02 15:04:05") + }) +} + +// AddSize adds file's size to output +func (l *ListFormat) AddSize() { + l.AppendOutput(func(entry *ListJSONItem) string { + return strconv.FormatInt(entry.Size, 10) + }) +} + +// normalisePath makes sure the path has the correct slashes for the current mode +func (l *ListFormat) normalisePath(entry *ListJSONItem, remote string) string { + if l.absolute && !strings.HasPrefix(remote, "/") { + remote = "/" + remote + } + if entry.IsDir && l.dirSlash { + remote += "/" + } + return remote +} + +// AddPath adds path to file to output +func (l *ListFormat) AddPath() { + l.AppendOutput(func(entry *ListJSONItem) string { + return l.normalisePath(entry, entry.Path) + }) +} + +// AddEncrypted adds the encrypted path to file to output +func (l *ListFormat) AddEncrypted() { + l.AppendOutput(func(entry *ListJSONItem) string { + return l.normalisePath(entry, entry.Encrypted) + }) +} + +// AddHash adds the hash of the type given to the output +func (l *ListFormat) AddHash(ht hash.Type) { + hashName := ht.String() + l.AppendOutput(func(entry *ListJSONItem) string { + if entry.IsDir { + return "" + } + return entry.Hashes[hashName] + }) +} + +// AddID adds file's ID to the output if known +func (l *ListFormat) AddID() { + l.AppendOutput(func(entry *ListJSONItem) string { + return entry.ID + }) +} + +// AddOrigID adds file's Original ID to the output if known +func (l *ListFormat) AddOrigID() { + l.AppendOutput(func(entry *ListJSONItem) string { + return entry.OrigID + }) +} + +// AddTier adds file's Tier to the output if known +func (l *ListFormat) AddTier() { + l.AppendOutput(func(entry *ListJSONItem) string { + return entry.Tier + }) +} + +// AddMimeType adds file's MimeType to the output if known +func (l *ListFormat) AddMimeType() { + l.AppendOutput(func(entry *ListJSONItem) string { + return entry.MimeType + }) +} + +// AppendOutput adds string generated by specific function to printed output +func (l *ListFormat) AppendOutput(functionToAppend 
func(item *ListJSONItem) string) { + l.output = append(l.output, functionToAppend) +} + +// Format prints information about the DirEntry in the format defined +func (l *ListFormat) Format(entry *ListJSONItem) (result string) { + var out []string + for _, fun := range l.output { + out = append(out, fun(entry)) + } + if l.csv != nil { + l.buf.Reset() + _ = l.csv.Write(out) // can't fail writing to bytes.Buffer + l.csv.Flush() + result = strings.TrimRight(l.buf.String(), "\n") + } else { + result = strings.Join(out, l.separator) + } + return result +} + +// DirMove renames srcRemote to dstRemote +// +// It does this by loading the directory tree into memory (using ListR +// if available) and doing renames in parallel. +func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err error) { + ci := fs.GetConfig(ctx) + + if SkipDestructive(ctx, srcRemote, "dirMove") { + accounting.Stats(ctx).Renames(1) + return nil + } + + // Use DirMove if possible + if doDirMove := f.Features().DirMove; doDirMove != nil { + err = doDirMove(ctx, f, srcRemote, dstRemote) + if err == nil { + accounting.Stats(ctx).Renames(1) + } + return err + } + + // Load the directory tree into memory + tree, err := walk.NewDirTree(ctx, f, srcRemote, true, -1) + if err != nil { + return errors.Wrap(err, "RenameDir tree walk") + } + + // Get the directories in sorted order + dirs := tree.Dirs() + + // Make the destination directories - must be done in order not in parallel + for _, dir := range dirs { + dstPath := dstRemote + dir[len(srcRemote):] + err := f.Mkdir(ctx, dstPath) + if err != nil { + return errors.Wrap(err, "RenameDir mkdir") + } + } + + // Rename the files in parallel + type rename struct { + o fs.Object + newPath string + } + renames := make(chan rename, ci.Transfers) + g, gCtx := errgroup.WithContext(context.Background()) + for i := 0; i < ci.Transfers; i++ { + g.Go(func() error { + for job := range renames { + dstOverwritten, _ := f.NewObject(gCtx, job.newPath) + _, err := Move(gCtx, f, dstOverwritten, job.newPath, job.o) + if err != nil { + return err + } + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + } + + } + return nil + }) + } + for dir, entries := range tree { + dstPath := dstRemote + dir[len(srcRemote):] + for _, entry := range entries { + if o, ok := entry.(fs.Object); ok { + renames <- rename{o, path.Join(dstPath, path.Base(o.Remote()))} + } + } + } + close(renames) + err = g.Wait() + if err != nil { + return errors.Wrap(err, "RenameDir renames") + } + + // Remove the source directories in reverse order + for i := len(dirs) - 1; i >= 0; i-- { + err := f.Rmdir(ctx, dirs[i]) + if err != nil { + return errors.Wrap(err, "RenameDir rmdir") + } + } + + return nil +} + +// FsInfo provides information about a remote +type FsInfo struct { + // Name of the remote (as passed into NewFs) + Name string + + // Root of the remote (as passed into NewFs) + Root string + + // String returns a description of the FS + String string + + // Precision of the ModTimes in this Fs in Nanoseconds + Precision time.Duration + + // Returns the supported hash types of the filesystem + Hashes []string + + // Features returns the optional features of this Fs + Features map[string]bool +} + +// GetFsInfo gets the information (FsInfo) about a given Fs +func GetFsInfo(f fs.Fs) *FsInfo { + info := &FsInfo{ + Name: f.Name(), + Root: f.Root(), + String: f.String(), + Precision: f.Precision(), + Hashes: make([]string, 0, 4), + Features: f.Features().Enabled(), + } + for _, hashType := range f.Hashes().Array() 
{
+		info.Hashes = append(info.Hashes, hashType.String())
+	}
+	return info
+}
+
+var (
+	interactiveMu sync.Mutex
+	skipped       = map[string]bool{}
+)
+
+// skipDestructiveChoose asks the user which action to take
+//
+// Call with interactiveMu held
+func skipDestructiveChoose(ctx context.Context, subject interface{}, action string) (skip bool) {
+	fmt.Printf("rclone: %s \"%v\"?\n", action, subject)
+	switch i := config.CommandDefault([]string{
+		"yYes, this is OK",
+		"nNo, skip this",
+		fmt.Sprintf("sSkip all %s operations with no more questions", action),
+		fmt.Sprintf("!Do all %s operations with no more questions", action),
+		"qExit rclone now.",
+	}, 0); i {
+	case 'y':
+		skip = false
+	case 'n':
+		skip = true
+	case 's':
+		skip = true
+		skipped[action] = true
+		fs.Logf(nil, "Skipping all %s operations from now on without asking", action)
+	case '!':
+		skip = false
+		skipped[action] = false
+		fs.Logf(nil, "Doing all %s operations from now on without asking", action)
+	case 'q':
+		fs.Logf(nil, "Quitting rclone now")
+		atexit.Run()
+		os.Exit(0)
+	default:
+		skip = true
+		fs.Errorf(nil, "Bad choice %c", i)
+	}
+	return skip
+}
+
+// SkipDestructive should be called whenever rclone is about to do a destructive operation.
+//
+// It will check the --dry-run flag and it will ask the user if the --interactive flag is set.
+//
+// subject should be the object or directory in use
+//
+// action should be a descriptive word or short phrase
+//
+// Together they should make sense in this sentence: "Rclone is about
+// to action subject".
+func SkipDestructive(ctx context.Context, subject interface{}, action string) (skip bool) {
+	var flag string
+	ci := fs.GetConfig(ctx)
+	switch {
+	case ci.DryRun:
+		flag = "--dry-run"
+		skip = true
+	case ci.Interactive:
+		flag = "--interactive"
+		interactiveMu.Lock()
+		defer interactiveMu.Unlock()
+		var found bool
+		skip, found = skipped[action]
+		if !found {
+			skip = skipDestructiveChoose(ctx, subject, action)
+		}
+	default:
+		return false
+	}
+	if skip {
+		size := int64(-1)
+		if do, ok := subject.(interface{ Size() int64 }); ok {
+			size = do.Size()
+		}
+		if size >= 0 {
+			fs.Logf(subject, "Skipped %s as %s is set (size %v)", fs.LogValue("skipped", action), flag, fs.LogValue("size", fs.SizeSuffix(size)))
+		} else {
+			fs.Logf(subject, "Skipped %s as %s is set", fs.LogValue("skipped", action), flag)
+		}
+	}
+	return skip
+}
diff --git a/vendor/github.com/rclone/rclone/fs/operations/rc.go b/vendor/github.com/rclone/rclone/fs/operations/rc.go
new file mode 100644
index 00000000000..38060884440
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/operations/rc.go
@@ -0,0 +1,511 @@
+package operations
+
+import (
+	"context"
+	"io"
+	"mime"
+	"mime/multipart"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/rc"
+)
+
+func init() {
+	rc.Add(rc.Call{
+		Path:         "operations/list",
+		AuthRequired: true,
+		Fn:           rcList,
+		Title:        "List the given remote and path in JSON format",
+		Help: `This takes the following parameters
+
+- fs - a remote name string e.g. "drive:"
+- remote - a path within that remote e.g.
"dir" +- opt - a dictionary of options to control the listing (optional) + - recurse - If set recurse directories + - noModTime - If set return modification time + - showEncrypted - If set show decrypted names + - showOrigIDs - If set show the IDs for each item if known + - showHash - If set return a dictionary of hashes + +The result is + +- list + - This is an array of objects as described in the lsjson command + +See the [lsjson command](/commands/rclone_lsjson/) for more information on the above and examples. +`, + }) +} + +// List the directory +func rcList(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, remote, err := rc.GetFsAndRemote(ctx, in) + if err != nil { + return nil, err + } + var opt ListJSONOpt + err = in.GetStruct("opt", &opt) + if rc.NotErrParamNotFound(err) { + return nil, err + } + var list = []*ListJSONItem{} + err = ListJSON(ctx, f, remote, &opt, func(item *ListJSONItem) error { + list = append(list, item) + return nil + }) + if err != nil { + return nil, err + } + out = make(rc.Params) + out["list"] = list + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/about", + AuthRequired: true, + Fn: rcAbout, + Title: "Return the space used on the remote", + Help: `This takes the following parameters + +- fs - a remote name string e.g. "drive:" + +The result is as returned from rclone about --json + +See the [about command](/commands/rclone_size/) command for more information on the above. +`, + }) +} + +// About the remote +func rcAbout(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, err := rc.GetFs(ctx, in) + if err != nil { + return nil, err + } + doAbout := f.Features().About + if doAbout == nil { + return nil, errors.Errorf("%v doesn't support about", f) + } + u, err := doAbout(ctx) + if err != nil { + return nil, errors.Wrap(err, "about call failed") + } + err = rc.Reshape(&out, u) + if err != nil { + return nil, errors.Wrap(err, "about Reshape failed") + } + return out, nil +} + +func init() { + for _, copy := range []bool{false, true} { + copy := copy + name := "Move" + if copy { + name = "Copy" + } + rc.Add(rc.Call{ + Path: "operations/" + strings.ToLower(name) + "file", + AuthRequired: true, + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcMoveOrCopyFile(ctx, in, copy) + }, + Title: name + " a file from source remote to destination remote", + Help: `This takes the following parameters + +- srcFs - a remote name string e.g. "drive:" for the source +- srcRemote - a path within that remote e.g. "file.txt" for the source +- dstFs - a remote name string e.g. "drive2:" for the destination +- dstRemote - a path within that remote e.g. 
"file2.txt" for the destination +`, + }) + } +} + +// Copy a file +func rcMoveOrCopyFile(ctx context.Context, in rc.Params, cp bool) (out rc.Params, err error) { + srcFs, srcRemote, err := rc.GetFsAndRemoteNamed(ctx, in, "srcFs", "srcRemote") + if err != nil { + return nil, err + } + dstFs, dstRemote, err := rc.GetFsAndRemoteNamed(ctx, in, "dstFs", "dstRemote") + if err != nil { + return nil, err + } + return nil, moveOrCopyFile(ctx, dstFs, srcFs, dstRemote, srcRemote, cp) +} + +func init() { + for _, op := range []struct { + name string + title string + help string + noRemote bool + needsRequest bool + }{ + {name: "mkdir", title: "Make a destination directory or container"}, + {name: "rmdir", title: "Remove an empty directory or container"}, + {name: "purge", title: "Remove a directory or container and all of its contents"}, + {name: "rmdirs", title: "Remove all the empty directories in the path", help: "- leaveRoot - boolean, set to true not to delete the root\n"}, + {name: "delete", title: "Remove files in the path", noRemote: true}, + {name: "deletefile", title: "Remove the single file pointed to"}, + {name: "copyurl", title: "Copy the URL to the object", help: "- url - string, URL to read from\n - autoFilename - boolean, set to true to retrieve destination file name from url"}, + {name: "uploadfile", title: "Upload file using multiform/form-data", help: "- each part in body represents a file to be uploaded", needsRequest: true}, + {name: "cleanup", title: "Remove trashed files in the remote or path", noRemote: true}, + } { + op := op + remote := "- remote - a path within that remote e.g. \"dir\"\n" + if op.noRemote { + remote = "" + } + rc.Add(rc.Call{ + Path: "operations/" + op.name, + AuthRequired: true, + NeedsRequest: op.needsRequest, + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcSingleCommand(ctx, in, op.name, op.noRemote) + }, + Title: op.title, + Help: `This takes the following parameters + +- fs - a remote name string e.g. "drive:" +` + remote + op.help + ` +See the [` + op.name + ` command](/commands/rclone_` + op.name + `/) command for more information on the above. +`, + }) + } +} + +// Run a single command, e.g. 
Mkdir +func rcSingleCommand(ctx context.Context, in rc.Params, name string, noRemote bool) (out rc.Params, err error) { + var ( + f fs.Fs + remote string + ) + if noRemote { + f, err = rc.GetFs(ctx, in) + } else { + f, remote, err = rc.GetFsAndRemote(ctx, in) + } + if err != nil { + return nil, err + } + switch name { + case "mkdir": + return nil, Mkdir(ctx, f, remote) + case "rmdir": + return nil, Rmdir(ctx, f, remote) + case "purge": + return nil, Purge(ctx, f, remote) + case "rmdirs": + leaveRoot, err := in.GetBool("leaveRoot") + if rc.NotErrParamNotFound(err) { + return nil, err + } + return nil, Rmdirs(ctx, f, remote, leaveRoot) + case "delete": + return nil, Delete(ctx, f) + case "deletefile": + o, err := f.NewObject(ctx, remote) + if err != nil { + return nil, err + } + return nil, DeleteFile(ctx, o) + case "copyurl": + url, err := in.GetString("url") + if err != nil { + return nil, err + } + autoFilename, _ := in.GetBool("autoFilename") + noClobber, _ := in.GetBool("noClobber") + + _, err = CopyURL(ctx, f, remote, url, autoFilename, noClobber) + return nil, err + case "uploadfile": + + var request *http.Request + request, err := in.GetHTTPRequest() + + if err != nil { + return nil, err + } + + contentType := request.Header.Get("Content-Type") + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, err + } + + if strings.HasPrefix(mediaType, "multipart/") { + mr := multipart.NewReader(request.Body, params["boundary"]) + for { + p, err := mr.NextPart() + if err == io.EOF { + return nil, nil + } + if err != nil { + return nil, err + } + if p.FileName() != "" { + obj, err := Rcat(ctx, f, path.Join(remote, p.FileName()), p, time.Now()) + if err != nil { + return nil, err + } + fs.Debugf(obj, "Upload Succeeded") + } + } + } + return nil, nil + case "cleanup": + return nil, CleanUp(ctx, f) + } + panic("unknown rcSingleCommand type") +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/size", + AuthRequired: true, + Fn: rcSize, + Title: "Count the number of bytes and files in remote", + Help: `This takes the following parameters + +- fs - a remote name string e.g. "drive:path/to/dir" + +Returns + +- count - number of files +- bytes - number of bytes in those files + +See the [size command](/commands/rclone_size/) command for more information on the above. +`, + }) +} + +// Size a directory +func rcSize(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, err := rc.GetFs(ctx, in) + if err != nil { + return nil, err + } + count, bytes, err := Count(ctx, f) + if err != nil { + return nil, err + } + out = make(rc.Params) + out["count"] = count + out["bytes"] = bytes + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/publiclink", + AuthRequired: true, + Fn: rcPublicLink, + Title: "Create or retrieve a public link to the given file or folder.", + Help: `This takes the following parameters + +- fs - a remote name string e.g. "drive:" +- remote - a path within that remote e.g. "dir" +- unlink - boolean - if set removes the link rather than adding it (optional) +- expire - string - the expiry time of the link e.g. "1d" (optional) + +Returns + +- url - URL of the resource + +See the [link command](/commands/rclone_link/) command for more information on the above. 
+`, + }) +} + +// Make a public link +func rcPublicLink(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, remote, err := rc.GetFsAndRemote(ctx, in) + if err != nil { + return nil, err + } + unlink, _ := in.GetBool("unlink") + expire, err := in.GetDuration("expire") + if err != nil && !rc.IsErrParamNotFound(err) { + return nil, err + } + url, err := PublicLink(ctx, f, remote, fs.Duration(expire), unlink) + if err != nil { + return nil, err + } + out = make(rc.Params) + out["url"] = url + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/fsinfo", + Fn: rcFsInfo, + Title: "Return information about the remote", + Help: `This takes the following parameters + +- fs - a remote name string e.g. "drive:" + +This returns info about the remote passed in; + +` + "```" + ` +{ + // optional features and whether they are available or not + "Features": { + "About": true, + "BucketBased": false, + "CanHaveEmptyDirectories": true, + "CaseInsensitive": false, + "ChangeNotify": false, + "CleanUp": false, + "Copy": false, + "DirCacheFlush": false, + "DirMove": true, + "DuplicateFiles": false, + "GetTier": false, + "ListR": false, + "MergeDirs": false, + "Move": true, + "OpenWriterAt": true, + "PublicLink": false, + "Purge": true, + "PutStream": true, + "PutUnchecked": false, + "ReadMimeType": false, + "ServerSideAcrossConfigs": false, + "SetTier": false, + "SetWrapper": false, + "UnWrap": false, + "WrapFs": false, + "WriteMimeType": false + }, + // Names of hashes available + "Hashes": [ + "MD5", + "SHA-1", + "DropboxHash", + "QuickXorHash" + ], + "Name": "local", // Name as created + "Precision": 1, // Precision of timestamps in ns + "Root": "/", // Path as created + "String": "Local file system at /" // how the remote will appear in logs +} +` + "```" + ` + +This command does not have a command line equivalent so use this instead: + + rclone rc --loopback operations/fsinfo fs=remote: + +`, + }) +} + +// Fsinfo the remote +func rcFsInfo(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, err := rc.GetFs(ctx, in) + if err != nil { + return nil, err + } + info := GetFsInfo(f) + err = rc.Reshape(&out, info) + if err != nil { + return nil, errors.Wrap(err, "fsinfo Reshape failed") + } + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "backend/command", + AuthRequired: true, + Fn: rcBackend, + Title: "Runs a backend command.", + Help: `This takes the following parameters + +- command - a string with the command name +- fs - a remote name string e.g. "drive:" +- arg - a list of arguments for the backend command +- opt - a map of string to string of options + +Returns + +- result - result from the backend command + +For example + + rclone rc backend/command command=noop fs=. -o echo=yes -o blue -a path1 -a path2 + +Returns + +` + "```" + ` +{ + "result": { + "arg": [ + "path1", + "path2" + ], + "name": "noop", + "opt": { + "blue": "", + "echo": "yes" + } + } +} +` + "```" + ` + +Note that this is the direct equivalent of using this "backend" +command: + + rclone backend noop . -o echo=yes -o blue path1 path2 + +Note that arguments must be preceded by the "-a" flag + +See the [backend](/commands/rclone_backend/) command for more information. 
+`,
+	})
+}
+
+// Run a backend command
+func rcBackend(ctx context.Context, in rc.Params) (out rc.Params, err error) {
+	f, err := rc.GetFs(ctx, in)
+	if err != nil {
+		return nil, err
+	}
+	doCommand := f.Features().Command
+	if doCommand == nil {
+		return nil, errors.Errorf("%v: doesn't support backend commands", f)
+	}
+	command, err := in.GetString("command")
+	if err != nil {
+		return nil, err
+	}
+	var opt = map[string]string{}
+	err = in.GetStructMissingOK("opt", &opt)
+	if err != nil {
+		return nil, err
+	}
+	var arg = []string{}
+	err = in.GetStructMissingOK("arg", &arg)
+	if err != nil {
+		return nil, err
+	}
+	result, err := doCommand(ctx, command, arg, opt)
+	if err != nil {
+		return nil, errors.Wrapf(err, "command %q failed", command)
+	}
+	out = make(rc.Params)
+	out["result"] = result
+	return out, nil
+}
diff --git a/vendor/github.com/rclone/rclone/fs/operations/reopen.go b/vendor/github.com/rclone/rclone/fs/operations/reopen.go
new file mode 100644
index 00000000000..688c5d05768
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/operations/reopen.go
@@ -0,0 +1,144 @@
+package operations
+
+import (
+	"context"
+	"io"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/fserrors"
+)
+
+// ReOpen is a wrapper for an object reader which reopens the stream on error
+type ReOpen struct {
+	ctx      context.Context
+	mu       sync.Mutex      // mutex to protect the below
+	src      fs.Object       // object to open
+	options  []fs.OpenOption // option to pass to initial open
+	rc       io.ReadCloser   // underlying stream
+	read     int64           // number of bytes read from this stream
+	maxTries int             // maximum number of retries
+	tries    int             // number of retries we've had so far in this stream
+	err      error           // if this is set then Read/Close calls will return it
+	opened   bool            // if set then rc is valid and needs closing
+}
+
+var (
+	errorFileClosed   = errors.New("file already closed")
+	errorTooManyTries = errors.New("failed to reopen: too many retries")
+)
+
+// NewReOpen makes a handle which will reopen itself and seek to where it was on errors
+//
+// If hashOption is set this will be applied when reading from the start
+//
+// If rangeOption is set then this will be applied when reading from the
+// start, and updated on retries.
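+//
+// Illustrative use (a sketch, not from the original source), reading an
+// object with up to 10 retries:
+//
+//	in, err := NewReOpen(ctx, o, 10, &fs.RangeOption{Start: 0, End: -1})
+//	if err != nil {
+//		return err
+//	}
+//	defer fs.CheckClose(in, &err)
+//	_, err = io.Copy(dst, in)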
+func NewReOpen(ctx context.Context, src fs.Object, maxTries int, options ...fs.OpenOption) (rc io.ReadCloser, err error) { + h := &ReOpen{ + ctx: ctx, + src: src, + maxTries: maxTries, + options: options, + } + h.mu.Lock() + defer h.mu.Unlock() + err = h.open() + if err != nil { + return nil, err + } + return h, nil +} + +// open the underlying handle - call with lock held +// +// we don't retry here as the Open() call will itself have low level retries +func (h *ReOpen) open() error { + opts := []fs.OpenOption{} + var hashOption *fs.HashesOption + var rangeOption *fs.RangeOption + for _, option := range h.options { + switch option.(type) { + case *fs.HashesOption: + hashOption = option.(*fs.HashesOption) + case *fs.RangeOption: + rangeOption = option.(*fs.RangeOption) + case *fs.HTTPOption: + opts = append(opts, option) + default: + if option.Mandatory() { + fs.Logf(h.src, "Unsupported mandatory option: %v", option) + } + } + } + if h.read == 0 { + if rangeOption != nil { + opts = append(opts, rangeOption) + } + if hashOption != nil { + // put hashOption on if reading from the start, ditch otherwise + opts = append(opts, hashOption) + } + } else { + if rangeOption != nil { + // range to the read point + opts = append(opts, &fs.RangeOption{Start: rangeOption.Start + h.read, End: rangeOption.End}) + } else { + // seek to the read point + opts = append(opts, &fs.SeekOption{Offset: h.read}) + } + } + h.tries++ + if h.tries > h.maxTries { + h.err = errorTooManyTries + } else { + h.rc, h.err = h.src.Open(h.ctx, opts...) + } + if h.err != nil { + if h.tries > 1 { + fs.Debugf(h.src, "Reopen failed after %d bytes read: %v", h.read, h.err) + } + return h.err + } + h.opened = true + return nil +} + +// Read bytes retrying as necessary +func (h *ReOpen) Read(p []byte) (n int, err error) { + h.mu.Lock() + defer h.mu.Unlock() + if h.err != nil { + // return a previous error if there is one + return n, h.err + } + n, err = h.rc.Read(p) + if err != nil { + h.err = err + } + h.read += int64(n) + if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) { + // close underlying stream + h.opened = false + _ = h.rc.Close() + // reopen stream, clearing error if successful + fs.Debugf(h.src, "Reopening on read failure after %d bytes: retry %d/%d: %v", h.read, h.tries, h.maxTries, err) + if h.open() == nil { + err = nil + } + } + return n, err +} + +// Close the stream +func (h *ReOpen) Close() error { + h.mu.Lock() + defer h.mu.Unlock() + if !h.opened { + return errorFileClosed + } + h.opened = false + h.err = errorFileClosed + return h.rc.Close() +} diff --git a/vendor/github.com/rclone/rclone/fs/options.go b/vendor/github.com/rclone/rclone/fs/options.go new file mode 100644 index 00000000000..a85cd6f00cc --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/options.go @@ -0,0 +1,285 @@ +// Define the options for Open + +package fs + +import ( + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs/hash" +) + +// OpenOption is an interface describing options for Open +type OpenOption interface { + fmt.Stringer + + // Header returns the option as an HTTP header + Header() (key string, value string) + + // Mandatory returns whether this option can be ignored or not + Mandatory() bool +} + +// RangeOption defines an HTTP Range option with start and end. If +// either start or end are < 0 then they will be omitted. +// +// End may be bigger than the Size of the object in which case it will +// be capped to the size of the object. 
+// +// Note that the End is inclusive, so to fetch 100 bytes you would use +// RangeOption{Start: 0, End: 99} +// +// If Start is specified but End is not then it will fetch from Start +// to the end of the file. +// +// If End is specified, but Start is not then it will fetch the last +// End bytes. +// +// Examples: +// +// RangeOption{Start: 0, End: 99} - fetch the first 100 bytes +// RangeOption{Start: 100, End: 199} - fetch the second 100 bytes +// RangeOption{Start: 100, End: -1} - fetch bytes from offset 100 to the end +// RangeOption{Start: -1, End: 100} - fetch the last 100 bytes +// +// A RangeOption implements a single byte-range-spec from +// https://tools.ietf.org/html/rfc7233#section-2.1 +type RangeOption struct { + Start int64 + End int64 +} + +// Header formats the option as an http header +func (o *RangeOption) Header() (key string, value string) { + key = "Range" + value = "bytes=" + if o.Start >= 0 { + value += strconv.FormatInt(o.Start, 10) + + } + value += "-" + if o.End >= 0 { + value += strconv.FormatInt(o.End, 10) + } + return key, value +} + +// ParseRangeOption parses a RangeOption from a Range: header. +// It only accepts single ranges. +func ParseRangeOption(s string) (po *RangeOption, err error) { + const preamble = "bytes=" + if !strings.HasPrefix(s, preamble) { + return nil, errors.New("Range: header invalid: doesn't start with " + preamble) + } + s = s[len(preamble):] + if strings.IndexRune(s, ',') >= 0 { + return nil, errors.New("Range: header invalid: contains multiple ranges which isn't supported") + } + dash := strings.IndexRune(s, '-') + if dash < 0 { + return nil, errors.New("Range: header invalid: contains no '-'") + } + start, end := strings.TrimSpace(s[:dash]), strings.TrimSpace(s[dash+1:]) + o := RangeOption{Start: -1, End: -1} + if start != "" { + o.Start, err = strconv.ParseInt(start, 10, 64) + if err != nil || o.Start < 0 { + return nil, errors.New("Range: header invalid: bad start") + } + } + if end != "" { + o.End, err = strconv.ParseInt(end, 10, 64) + if err != nil || o.End < 0 { + return nil, errors.New("Range: header invalid: bad end") + } + } + return &o, nil +} + +// String formats the option into human readable form +func (o *RangeOption) String() string { + return fmt.Sprintf("RangeOption(%d,%d)", o.Start, o.End) +} + +// Mandatory returns whether the option must be parsed or can be ignored +func (o *RangeOption) Mandatory() bool { + return true +} + +// Decode interprets the RangeOption into an offset and a limit +// +// The offset is the start of the stream and the limit is how many +// bytes should be read from it. If the limit is -1 then the stream +// should be read to the end. +func (o *RangeOption) Decode(size int64) (offset, limit int64) { + if o.Start >= 0 { + offset = o.Start + if o.End >= 0 { + limit = o.End - o.Start + 1 + } else { + limit = -1 + } + } else { + if o.End >= 0 { + offset = size - o.End + } else { + offset = 0 + } + limit = -1 + } + return offset, limit +} + +// FixRangeOption looks through the slice of options and adjusts any +// RangeOption~s found that request a fetch from the end into an +// absolute fetch using the size passed in and makes sure the range does +// not exceed filesize. Some remotes (e.g. Onedrive, Box) don't support +// range requests which index from the end. 
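+//
+// For example (illustrative): with size = 1000, RangeOption{Start: -1, End: 100}
+// (the last 100 bytes) becomes RangeOption{Start: 900, End: -1}, and an End
+// beyond the file is capped to size - 1.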
+func FixRangeOption(options []OpenOption, size int64) { + if size == 0 { + // if size 0 then remove RangeOption~s + // replacing with a NullOptions~s which won't be rendered + for i := range options { + if _, ok := options[i].(*RangeOption); ok { + options[i] = NullOption{} + + } + } + return + } + for i := range options { + option := options[i] + if x, ok := option.(*RangeOption); ok { + // If start is < 0 then fetch from the end + if x.Start < 0 { + x = &RangeOption{Start: size - x.End, End: -1} + options[i] = x + } + if x.End > size { + x = &RangeOption{Start: x.Start, End: size - 1} + options[i] = x + } + } + } +} + +// SeekOption defines an HTTP Range option with start only. +type SeekOption struct { + Offset int64 +} + +// Header formats the option as an http header +func (o *SeekOption) Header() (key string, value string) { + key = "Range" + value = fmt.Sprintf("bytes=%d-", o.Offset) + return key, value +} + +// String formats the option into human readable form +func (o *SeekOption) String() string { + return fmt.Sprintf("SeekOption(%d)", o.Offset) +} + +// Mandatory returns whether the option must be parsed or can be ignored +func (o *SeekOption) Mandatory() bool { + return true +} + +// HTTPOption defines a general purpose HTTP option +type HTTPOption struct { + Key string + Value string +} + +// Header formats the option as an http header +func (o *HTTPOption) Header() (key string, value string) { + return o.Key, o.Value +} + +// String formats the option into human readable form +func (o *HTTPOption) String() string { + return fmt.Sprintf("HTTPOption(%q,%q)", o.Key, o.Value) +} + +// Mandatory returns whether the option must be parsed or can be ignored +func (o *HTTPOption) Mandatory() bool { + return false +} + +// HashesOption defines an option used to tell the local fs to limit +// the number of hashes it calculates. +type HashesOption struct { + Hashes hash.Set +} + +// Header formats the option as an http header +func (o *HashesOption) Header() (key string, value string) { + return "", "" +} + +// String formats the option into human readable form +func (o *HashesOption) String() string { + return fmt.Sprintf("HashesOption(%v)", o.Hashes) +} + +// Mandatory returns whether the option must be parsed or can be ignored +func (o *HashesOption) Mandatory() bool { + return false +} + +// NullOption defines an Option which does nothing +type NullOption struct { +} + +// Header formats the option as an http header +func (o NullOption) Header() (key string, value string) { + return "", "" +} + +// String formats the option into human readable form +func (o NullOption) String() string { + return fmt.Sprintf("NullOption()") +} + +// Mandatory returns whether the option must be parsed or can be ignored +func (o NullOption) Mandatory() bool { + return false +} + +// OpenOptionAddHeaders adds each header found in options to the +// headers map provided the key was non empty. +func OpenOptionAddHeaders(options []OpenOption, headers map[string]string) { + for _, option := range options { + key, value := option.Header() + if key != "" && value != "" { + headers[key] = value + } + } +} + +// OpenOptionHeaders adds each header found in options to the +// headers map provided the key was non empty. 
+// +// It returns a nil map if options was empty +func OpenOptionHeaders(options []OpenOption) (headers map[string]string) { + if len(options) == 0 { + return nil + } + headers = make(map[string]string, len(options)) + OpenOptionAddHeaders(options, headers) + return headers +} + +// OpenOptionAddHTTPHeaders Sets each header found in options to the +// http.Header map provided the key was non empty. +func OpenOptionAddHTTPHeaders(headers http.Header, options []OpenOption) { + for _, option := range options { + key, value := option.Header() + if key != "" && value != "" { + headers.Set(key, value) + } + } +} diff --git a/vendor/github.com/rclone/rclone/fs/parseduration.go b/vendor/github.com/rclone/rclone/fs/parseduration.go new file mode 100644 index 00000000000..b5104cc6c64 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/parseduration.go @@ -0,0 +1,206 @@ +package fs + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration is a time.Duration with some more parsing options +type Duration time.Duration + +// DurationOff is the default value for flags which can be turned off +const DurationOff = Duration((1 << 63) - 1) + +// Turn Duration into a string +func (d Duration) String() string { + if d == DurationOff { + return "off" + } + for i := len(ageSuffixes) - 2; i >= 0; i-- { + ageSuffix := &ageSuffixes[i] + if math.Abs(float64(d)) >= float64(ageSuffix.Multiplier) { + timeUnits := float64(d) / float64(ageSuffix.Multiplier) + return strconv.FormatFloat(timeUnits, 'f', -1, 64) + ageSuffix.Suffix + } + } + return time.Duration(d).String() +} + +// IsSet returns if the duration is != DurationOff +func (d Duration) IsSet() bool { + return d != DurationOff +} + +// We use time conventions +var ageSuffixes = []struct { + Suffix string + Multiplier time.Duration +}{ + {Suffix: "d", Multiplier: time.Hour * 24}, + {Suffix: "w", Multiplier: time.Hour * 24 * 7}, + {Suffix: "M", Multiplier: time.Hour * 24 * 30}, + {Suffix: "y", Multiplier: time.Hour * 24 * 365}, + + // Default to second + {Suffix: "", Multiplier: time.Second}, +} + +// parse the age as suffixed ages +func parseDurationSuffixes(age string) (time.Duration, error) { + var period float64 + + for _, ageSuffix := range ageSuffixes { + if strings.HasSuffix(age, ageSuffix.Suffix) { + numberString := age[:len(age)-len(ageSuffix.Suffix)] + var err error + period, err = strconv.ParseFloat(numberString, 64) + if err != nil { + return time.Duration(0), err + } + period *= float64(ageSuffix.Multiplier) + break + } + } + + return time.Duration(period), nil +} + +// time formats to try parsing ages as - in order +var timeFormats = []string{ + time.RFC3339, + "2006-01-02T15:04:05", + "2006-01-02 15:04:05", + "2006-01-02", +} + +// parse the age as time before the epoch in various date formats +func parseDurationDates(age string, epoch time.Time) (t time.Duration, err error) { + var instant time.Time + for _, timeFormat := range timeFormats { + instant, err = time.Parse(timeFormat, age) + if err == nil { + return epoch.Sub(instant), nil + } + } + return t, err +} + +// parseDurationFromNow parses a duration string. Allows ParseDuration to match the time +// package and easier testing within the fs package. 
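+//
+// Illustrative inputs (not from the original): "90m" parses via
+// time.ParseDuration, "3d" via the age suffixes (72h0m0s), "2006-01-02" as
+// the time elapsed since that date, and "off" as DurationOff.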
+func parseDurationFromNow(age string, getNow func() time.Time) (d time.Duration, err error) { + if age == "off" { + return time.Duration(DurationOff), nil + } + + // Attempt to parse as a time.Duration first + d, err = time.ParseDuration(age) + if err == nil { + return d, nil + } + + d, err = parseDurationSuffixes(age) + if err == nil { + return d, nil + } + + d, err = parseDurationDates(age, getNow()) + if err == nil { + return d, nil + } + + return d, err +} + +// ParseDuration parses a duration string. Accept ms|s|m|h|d|w|M|y suffixes. Defaults to second if not provided +func ParseDuration(age string) (time.Duration, error) { + return parseDurationFromNow(age, time.Now) +} + +// ReadableString parses d into a human readable duration. +// Based on https://github.com/hako/durafmt +func (d Duration) ReadableString() string { + switch d { + case DurationOff: + return "off" + case 0: + return "0s" + } + + readableString := "" + + // Check for minus durations. + if d < 0 { + readableString += "-" + } + + duration := time.Duration(math.Abs(float64(d))) + + // Convert duration. + seconds := int64(duration.Seconds()) % 60 + minutes := int64(duration.Minutes()) % 60 + hours := int64(duration.Hours()) % 24 + days := int64(duration/(24*time.Hour)) % 365 % 7 + + // Edge case between 364 and 365 days. + // We need to calculate weeks from what is left from years + leftYearDays := int64(duration/(24*time.Hour)) % 365 + weeks := leftYearDays / 7 + if leftYearDays >= 364 && leftYearDays < 365 { + weeks = 52 + } + + years := int64(duration/(24*time.Hour)) / 365 + milliseconds := int64(duration/time.Millisecond) - + (seconds * 1000) - (minutes * 60000) - (hours * 3600000) - + (days * 86400000) - (weeks * 604800000) - (years * 31536000000) + + // Create a map of the converted duration time. + durationMap := map[string]int64{ + "ms": milliseconds, + "s": seconds, + "m": minutes, + "h": hours, + "d": days, + "w": weeks, + "y": years, + } + + // Construct duration string. 
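+	// For example 90*time.Minute renders as "1h30m" and 25*time.Hour
+	// as "1d1h".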
+ for _, u := range [...]string{"y", "w", "d", "h", "m", "s", "ms"} { + v := durationMap[u] + strval := strconv.FormatInt(v, 10) + if v == 0 { + continue + } + readableString += strval + u + } + + return readableString +} + +// Set a Duration +func (d *Duration) Set(s string) error { + duration, err := ParseDuration(s) + if err != nil { + return err + } + *d = Duration(duration) + return nil +} + +// Type of the value +func (d Duration) Type() string { + return "Duration" +} + +// Scan implements the fmt.Scanner interface +func (d *Duration) Scan(s fmt.ScanState, ch rune) error { + token, err := s.Token(true, nil) + if err != nil { + return err + } + return d.Set(string(token)) +} diff --git a/vendor/github.com/rclone/rclone/fs/rc/cache.go b/vendor/github.com/rclone/rclone/fs/rc/cache.go new file mode 100644 index 00000000000..958a1db3f4b --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/rc/cache.go @@ -0,0 +1,45 @@ +// Utilities for accessing the Fs cache + +package rc + +import ( + "context" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/cache" +) + +// GetFsNamed gets an fs.Fs named fsName either from the cache or creates it afresh +func GetFsNamed(ctx context.Context, in Params, fsName string) (f fs.Fs, err error) { + fsString, err := in.GetString(fsName) + if err != nil { + return nil, err + } + + return cache.Get(ctx, fsString) +} + +// GetFs gets an fs.Fs named "fs" either from the cache or creates it afresh +func GetFs(ctx context.Context, in Params) (f fs.Fs, err error) { + return GetFsNamed(ctx, in, "fs") +} + +// GetFsAndRemoteNamed gets the fsName parameter from in, makes a +// remote or fetches it from the cache then gets the remoteName +// parameter from in too. +func GetFsAndRemoteNamed(ctx context.Context, in Params, fsName, remoteName string) (f fs.Fs, remote string, err error) { + remote, err = in.GetString(remoteName) + if err != nil { + return + } + f, err = GetFsNamed(ctx, in, fsName) + return + +} + +// GetFsAndRemote gets the `fs` parameter from in, makes a remote or +// fetches it from the cache then gets the `remote` parameter from in +// too. 
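+//
+// For example, given Params{"fs": "remote:path", "remote": "file.txt"}
+// it returns the Fs for "remote:path" and the string "file.txt".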
+func GetFsAndRemote(ctx context.Context, in Params) (f fs.Fs, remote string, err error) { + return GetFsAndRemoteNamed(ctx, in, "fs", "remote") +} diff --git a/vendor/github.com/rclone/rclone/fs/rc/config.go b/vendor/github.com/rclone/rclone/fs/rc/config.go new file mode 100644 index 00000000000..5add5b21218 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/rc/config.go @@ -0,0 +1,126 @@ +// Implement config options reading and writing +// +// This is done here rather than in fs/fs.go so we don't cause a circular dependency + +package rc + +import ( + "context" + + "github.com/pkg/errors" +) + +var ( + optionBlock = map[string]interface{}{} + optionReload = map[string]func(context.Context) error{} +) + +// AddOption adds an option set +func AddOption(name string, option interface{}) { + optionBlock[name] = option +} + +// AddOptionReload adds an option set with a reload function to be +// called when options are changed +func AddOptionReload(name string, option interface{}, reload func(context.Context) error) { + optionBlock[name] = option + optionReload[name] = reload +} + +func init() { + Add(Call{ + Path: "options/blocks", + Fn: rcOptionsBlocks, + Title: "List all the option blocks", + Help: `Returns +- options - a list of the options block names`, + }) +} + +// Show the list of all the option blocks +func rcOptionsBlocks(ctx context.Context, in Params) (out Params, err error) { + options := []string{} + for name := range optionBlock { + options = append(options, name) + } + out = make(Params) + out["options"] = options + return out, nil +} + +func init() { + Add(Call{ + Path: "options/get", + Fn: rcOptionsGet, + Title: "Get all the options", + Help: `Returns an object where keys are option block names and values are an +object with the current option values in. + +This shows the internal names of the option within rclone which should +map to the external options very easily with a few exceptions. +`, + }) +} + +// Show the list of all the option blocks +func rcOptionsGet(ctx context.Context, in Params) (out Params, err error) { + out = make(Params) + for name, options := range optionBlock { + out[name] = options + } + return out, nil +} + +func init() { + Add(Call{ + Path: "options/set", + Fn: rcOptionsSet, + Title: "Set an option", + Help: `Parameters + +- option block name containing an object with + - key: value + +Repeated as often as required. + +Only supply the options you wish to change. If an option is unknown +it will be silently ignored. Not all options will have an effect when +changed like this. 
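+
+The values are written into the option blocks with a JSON round-trip,
+so they must have the right type for the option being set.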
+ +For example: + +This sets DEBUG level logs (-vv) + + rclone rc options/set --json '{"main": {"LogLevel": 8}}' + +And this sets INFO level logs (-v) + + rclone rc options/set --json '{"main": {"LogLevel": 7}}' + +And this sets NOTICE level logs (normal without -v) + + rclone rc options/set --json '{"main": {"LogLevel": 6}}' +`, + }) +} + +// Set an option in an option block +func rcOptionsSet(ctx context.Context, in Params) (out Params, err error) { + for name, options := range in { + current := optionBlock[name] + if current == nil { + return nil, errors.Errorf("unknown option block %q", name) + } + err := Reshape(current, options) + if err != nil { + return nil, errors.Wrapf(err, "failed to write options from block %q", name) + } + if reload := optionReload[name]; reload != nil { + err = reload(ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to reload options from block %q", name) + } + } + } + return out, nil +} diff --git a/vendor/github.com/rclone/rclone/fs/rc/internal.go b/vendor/github.com/rclone/rclone/fs/rc/internal.go new file mode 100644 index 00000000000..a3018bc4a07 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/rc/internal.go @@ -0,0 +1,479 @@ +// Define the internal rc functions + +package rc + +import ( + "context" + "net/http" + "os" + "os/exec" + "runtime" + "strings" + "time" + + "github.com/coreos/go-semver/semver" + "github.com/pkg/errors" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config/obscure" + "github.com/rclone/rclone/lib/atexit" +) + +func init() { + Add(Call{ + Path: "rc/noopauth", + AuthRequired: true, + Fn: rcNoop, + Title: "Echo the input to the output parameters requiring auth", + Help: ` +This echoes the input parameters to the output parameters for testing +purposes. It can be used to check that rclone is still alive and to +check that parameter passing is working properly.`, + }) + Add(Call{ + Path: "rc/noop", + Fn: rcNoop, + Title: "Echo the input to the output parameters", + Help: ` +This echoes the input parameters to the output parameters for testing +purposes. It can be used to check that rclone is still alive and to +check that parameter passing is working properly.`, + }) +} + +// Echo the input to the output parameters +func rcNoop(ctx context.Context, in Params) (out Params, err error) { + return in, nil +} + +func init() { + Add(Call{ + Path: "rc/error", + Fn: rcError, + Title: "This returns an error", + Help: ` +This returns an error with the input as part of its error string. +Useful for testing error handling.`, + }) +} + +// Return an error regardless +func rcError(ctx context.Context, in Params) (out Params, err error) { + return nil, errors.Errorf("arbitrary error on input %+v", in) +} + +func init() { + Add(Call{ + Path: "rc/list", + Fn: rcList, + Title: "List all the registered remote control commands", + Help: ` +This lists all the registered remote control commands as a JSON map in +the commands response.`, + }) +} + +// List the registered commands +func rcList(ctx context.Context, in Params) (out Params, err error) { + out = make(Params) + out["commands"] = Calls.List() + return out, nil +} + +func init() { + Add(Call{ + Path: "core/pid", + Fn: rcPid, + Title: "Return PID of current process", + Help: ` +This returns PID of current process. 
+Useful for stopping the rclone process.`,
+	})
+}
+
+// Return PID of current process
+func rcPid(ctx context.Context, in Params) (out Params, err error) {
+	out = make(Params)
+	out["pid"] = os.Getpid()
+	return out, nil
+}
+
+func init() {
+	Add(Call{
+		Path:  "core/memstats",
+		Fn:    rcMemStats,
+		Title: "Returns the memory statistics",
+		Help: `
+This returns the memory statistics of the running program. What the values mean
+is explained in the go docs: https://golang.org/pkg/runtime/#MemStats
+
+The most interesting values for most people are:
+
+* HeapAlloc: This is the amount of memory rclone is actually using
+* HeapSys: This is the amount of memory rclone has obtained from the OS
+* Sys: this is the total amount of memory requested from the OS
+  * It is virtual memory so may include unused memory
+`,
+	})
+}
+
+// Return the memory statistics
+func rcMemStats(ctx context.Context, in Params) (out Params, err error) {
+	out = make(Params)
+	var m runtime.MemStats
+	runtime.ReadMemStats(&m)
+	out["Alloc"] = m.Alloc
+	out["TotalAlloc"] = m.TotalAlloc
+	out["Sys"] = m.Sys
+	out["Mallocs"] = m.Mallocs
+	out["Frees"] = m.Frees
+	out["HeapAlloc"] = m.HeapAlloc
+	out["HeapSys"] = m.HeapSys
+	out["HeapIdle"] = m.HeapIdle
+	out["HeapInuse"] = m.HeapInuse
+	out["HeapReleased"] = m.HeapReleased
+	out["HeapObjects"] = m.HeapObjects
+	out["StackInuse"] = m.StackInuse
+	out["StackSys"] = m.StackSys
+	out["MSpanInuse"] = m.MSpanInuse
+	out["MSpanSys"] = m.MSpanSys
+	out["MCacheInuse"] = m.MCacheInuse
+	out["MCacheSys"] = m.MCacheSys
+	out["BuckHashSys"] = m.BuckHashSys
+	out["GCSys"] = m.GCSys
+	out["OtherSys"] = m.OtherSys
+	return out, nil
+}
+
+func init() {
+	Add(Call{
+		Path:  "core/gc",
+		Fn:    rcGc,
+		Title: "Runs a garbage collection.",
+		Help: `
+This tells the go runtime to do a garbage collection run. It isn't
+necessary to call this normally, but it can be useful for debugging
+memory problems.
+`,
+	})
+}
+
+// Do a garbage collection run
+func rcGc(ctx context.Context, in Params) (out Params, err error) {
+	runtime.GC()
+	return nil, nil
+}
+
+func init() {
+	Add(Call{
+		Path:  "core/version",
+		Fn:    rcVersion,
+		Title: "Shows the current version of rclone and the go runtime.",
+		Help: `
+This shows the current version of rclone and the go runtime
+
+- version - rclone version, e.g. "v1.53.0"
"v1.53.0" +- decomposed - version number as [major, minor, patch] +- isGit - boolean - true if this was compiled from the git version +- isBeta - boolean - true if this is a beta version +- os - OS in use as according to Go +- arch - cpu architecture in use according to Go +- goVersion - version of Go runtime in use + +`, + }) +} + +// Return version info +func rcVersion(ctx context.Context, in Params) (out Params, err error) { + version, err := semver.NewVersion(fs.Version[1:]) + if err != nil { + return nil, err + } + out = Params{ + "version": fs.Version, + "decomposed": version.Slice(), + "isGit": strings.HasSuffix(fs.Version, "-DEV"), + "isBeta": version.PreRelease != "", + "os": runtime.GOOS, + "arch": runtime.GOARCH, + "goVersion": runtime.Version(), + } + return out, nil +} + +func init() { + Add(Call{ + Path: "core/obscure", + Fn: rcObscure, + Title: "Obscures a string passed in.", + Help: ` +Pass a clear string and rclone will obscure it for the config file: +- clear - string + +Returns +- obscured - string +`, + }) +} + +// Return obscured string +func rcObscure(ctx context.Context, in Params) (out Params, err error) { + clear, err := in.GetString("clear") + if err != nil { + return nil, err + } + obscured, err := obscure.Obscure(clear) + if err != nil { + return nil, err + } + out = Params{ + "obscured": obscured, + } + return out, nil +} + +func init() { + Add(Call{ + Path: "core/quit", + Fn: rcQuit, + Title: "Terminates the app.", + Help: ` +(optional) Pass an exit code to be used for terminating the app: +- exitCode - int +`, + }) +} + +// Terminates app +func rcQuit(ctx context.Context, in Params) (out Params, err error) { + code, err := in.GetInt64("exitCode") + + if IsErrParamInvalid(err) { + return nil, err + } + if IsErrParamNotFound(err) { + code = 0 + } + exitCode := int(code) + + go func(exitCode int) { + time.Sleep(time.Millisecond * 1500) + atexit.Run() + os.Exit(exitCode) + }(exitCode) + + return nil, nil +} + +func init() { + Add(Call{ + Path: "debug/set-mutex-profile-fraction", + Fn: rcSetMutexProfileFraction, + Title: "Set runtime.SetMutexProfileFraction for mutex profiling.", + Help: ` +SetMutexProfileFraction controls the fraction of mutex contention +events that are reported in the mutex profile. On average 1/rate +events are reported. The previous rate is returned. + +To turn off profiling entirely, pass rate 0. To just read the current +rate, pass rate < 0. (For n>1 the details of sampling may change.) + +Once this is set you can look use this to profile the mutex contention: + + go tool pprof http://localhost:5572/debug/pprof/mutex + +Parameters + +- rate - int + +Results + +- previousRate - int +`, + }) +} + +// Terminates app +func rcSetMutexProfileFraction(ctx context.Context, in Params) (out Params, err error) { + rate, err := in.GetInt64("rate") + if err != nil { + return nil, err + } + previousRate := runtime.SetMutexProfileFraction(int(rate)) + out = make(Params) + out["previousRate"] = previousRate + return out, nil +} + +func init() { + Add(Call{ + Path: "debug/set-block-profile-rate", + Fn: rcSetBlockProfileRate, + Title: "Set runtime.SetBlockProfileRate for blocking profiling.", + Help: ` +SetBlockProfileRate controls the fraction of goroutine blocking events +that are reported in the blocking profile. The profiler aims to sample +an average of one blocking event per rate nanoseconds spent blocked. + +To include every blocking event in the profile, pass rate = 1. To turn +off profiling entirely, pass rate <= 0. 
+
+After calling this you can use this to see the blocking profile:
+
+    go tool pprof http://localhost:5572/debug/pprof/block
+
+Parameters
+
+- rate - int
+`,
+	})
+}
+
+// Set the block profile rate
+func rcSetBlockProfileRate(ctx context.Context, in Params) (out Params, err error) {
+	rate, err := in.GetInt64("rate")
+	if err != nil {
+		return nil, err
+	}
+	runtime.SetBlockProfileRate(int(rate))
+	return nil, nil
+}
+
+func init() {
+	Add(Call{
+		Path:          "core/command",
+		AuthRequired:  true,
+		Fn:            rcRunCommand,
+		NeedsRequest:  true,
+		NeedsResponse: true,
+		Title:         "Run a rclone terminal command over rc.",
+		Help: `This takes the following parameters
+
+- command - a string with the command name
+- arg - a list of arguments for the backend command
+- opt - a map of string to string of options
+- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR")
+    - defaults to "COMBINED_OUTPUT" if not set
+    - the STREAM returnTypes will write the output to the body of the HTTP message
+    - the COMBINED_OUTPUT will write the output to the "result" parameter
+
+Returns
+
+- result - result from the backend command
+    - only set when using returnType "COMBINED_OUTPUT"
+- error - set if rclone exits with an error code
+- returnType - one of ("COMBINED_OUTPUT", "STREAM", "STREAM_ONLY_STDOUT", "STREAM_ONLY_STDERR")
+
+For example
+
+    rclone rc core/command command=ls -a mydrive:/ -o max-depth=1
+    rclone rc core/command -a ls -a mydrive:/ -o max-depth=1
+
+Returns
+
+` + "```" + `
+{
+	"error": false,
+	"result": "<Raw command line output>"
+}
+
+OR
+{
+	"error": true,
+	"result": "<Raw command line output>"
+}
+
+` + "```" + `
+`,
+	})
+}
+
+// rcRunCommand runs an rclone command with the given args and flags
+func rcRunCommand(ctx context.Context, in Params) (out Params, err error) {
+	command, err := in.GetString("command")
+	if err != nil {
+		command = ""
+	}
+
+	var opt = map[string]string{}
+	err = in.GetStructMissingOK("opt", &opt)
+	if err != nil {
+		return nil, err
+	}
+
+	var arg = []string{}
+	err = in.GetStructMissingOK("arg", &arg)
+	if err != nil {
+		return nil, err
+	}
+
+	returnType, err := in.GetString("returnType")
+	if err != nil {
+		returnType = "COMBINED_OUTPUT"
+	}
+
+	var httpResponse http.ResponseWriter
+	httpResponse, err = in.GetHTTPResponseWriter()
+	if err != nil {
+		return nil, errors.Errorf("response object is required\n" + err.Error())
+	}
+
+	var allArgs = []string{}
+	if command != "" {
+		// Add the command e.g.: ls to the args
+		allArgs = append(allArgs, command)
+	}
+	// Add all from arg
+	for _, cur := range arg {
+		allArgs = append(allArgs, cur)
+	}
+
+	// Add flags to args for e.g. --max-depth 1 comes in as { max-depth 1 }.
+	// Convert it to [ max-depth, 1 ] and append to args list
+	for key, value := range opt {
+		if len(key) == 1 {
+			allArgs = append(allArgs, "-"+key)
+		} else {
+			allArgs = append(allArgs, "--"+key)
+		}
+		allArgs = append(allArgs, value)
+	}
+
+	// Get the path for the current executable which was used to run rclone.
+	ex, err := os.Executable()
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := exec.CommandContext(ctx, ex, allArgs...)
+
+	if returnType == "COMBINED_OUTPUT" {
+		// Run the command and get the output for error and stdout combined.
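+		//
+		// Note the process's exit status is reported in the "error" field
+		// of the result rather than as an rc-level error, so callers always
+		// receive the combined output.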
+ + out, err := cmd.CombinedOutput() + + if err != nil { + return Params{ + "result": string(out), + "error": true, + }, nil + } + return Params{ + "result": string(out), + "error": false, + }, nil + } else if returnType == "STREAM_ONLY_STDOUT" { + cmd.Stdout = httpResponse + } else if returnType == "STREAM_ONLY_STDERR" { + cmd.Stderr = httpResponse + } else if returnType == "STREAM" { + cmd.Stdout = httpResponse + cmd.Stderr = httpResponse + } else { + return nil, errors.Errorf("Unknown returnType %q", returnType) + } + + err = cmd.Run() + return nil, err +} diff --git a/vendor/github.com/rclone/rclone/fs/rc/jobs/job.go b/vendor/github.com/rclone/rclone/fs/rc/jobs/job.go new file mode 100644 index 00000000000..5296bfe705b --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/rc/jobs/job.go @@ -0,0 +1,366 @@ +// Manage background jobs that the rc is running + +package jobs + +import ( + "context" + "fmt" + "runtime/debug" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/rc" +) + +// Job describes an asynchronous task started via the rc package +type Job struct { + mu sync.Mutex + ID int64 `json:"id"` + Group string `json:"group"` + StartTime time.Time `json:"startTime"` + EndTime time.Time `json:"endTime"` + Error string `json:"error"` + Finished bool `json:"finished"` + Success bool `json:"success"` + Duration float64 `json:"duration"` + Output rc.Params `json:"output"` + Stop func() `json:"-"` + listeners []*func() + + // realErr is the Error before printing it as a string, it's used to return + // the real error to the upper application layers while still printing the + // string error message. + realErr error +} + +// mark the job as finished +func (job *Job) finish(out rc.Params, err error) { + job.mu.Lock() + job.EndTime = time.Now() + if out == nil { + out = make(rc.Params) + } + job.Output = out + job.Duration = job.EndTime.Sub(job.StartTime).Seconds() + if err != nil { + job.realErr = err + job.Error = err.Error() + job.Success = false + } else { + job.realErr = nil + job.Error = "" + job.Success = true + } + job.Finished = true + + // Notify listeners that the job is finished + for i := range job.listeners { + go (*job.listeners[i])() + } + + job.mu.Unlock() + running.kickExpire() // make sure this job gets expired +} + +func (job *Job) addListener(fn *func()) { + job.mu.Lock() + defer job.mu.Unlock() + job.listeners = append(job.listeners, fn) +} + +func (job *Job) removeListener(fn *func()) { + job.mu.Lock() + defer job.mu.Unlock() + for i, ln := range job.listeners { + if ln == fn { + job.listeners = append(job.listeners[:i], job.listeners[i+1:]...) 
+ return + } + } +} + +// run the job until completion writing the return status +func (job *Job) run(ctx context.Context, fn rc.Func, in rc.Params) { + defer func() { + if r := recover(); r != nil { + job.finish(nil, errors.Errorf("panic received: %v \n%s", r, string(debug.Stack()))) + } + }() + job.finish(fn(ctx, in)) +} + +// Jobs describes a collection of running tasks +type Jobs struct { + mu sync.RWMutex + jobs map[int64]*Job + opt *rc.Options + expireRunning bool +} + +var ( + running = newJobs() + jobID = int64(0) +) + +// newJobs makes a new Jobs structure +func newJobs() *Jobs { + return &Jobs{ + jobs: map[int64]*Job{}, + opt: &rc.DefaultOpt, + } +} + +// SetOpt sets the options when they are known +func SetOpt(opt *rc.Options) { + running.opt = opt +} + +// SetInitialJobID allows for setting jobID before starting any jobs. +func SetInitialJobID(id int64) { + if !atomic.CompareAndSwapInt64(&jobID, 0, id) { + panic("Setting jobID is only possible before starting any jobs") + } +} + +// kickExpire makes sure Expire is running +func (jobs *Jobs) kickExpire() { + jobs.mu.Lock() + defer jobs.mu.Unlock() + if !jobs.expireRunning { + time.AfterFunc(jobs.opt.JobExpireInterval, jobs.Expire) + jobs.expireRunning = true + } +} + +// Expire expires any jobs that haven't been collected +func (jobs *Jobs) Expire() { + jobs.mu.Lock() + defer jobs.mu.Unlock() + now := time.Now() + for ID, job := range jobs.jobs { + job.mu.Lock() + if job.Finished && now.Sub(job.EndTime) > jobs.opt.JobExpireDuration { + delete(jobs.jobs, ID) + } + job.mu.Unlock() + } + if len(jobs.jobs) != 0 { + time.AfterFunc(jobs.opt.JobExpireInterval, jobs.Expire) + jobs.expireRunning = true + } else { + jobs.expireRunning = false + } +} + +// IDs returns the IDs of the running jobs +func (jobs *Jobs) IDs() (IDs []int64) { + jobs.mu.RLock() + defer jobs.mu.RUnlock() + IDs = []int64{} + for ID := range jobs.jobs { + IDs = append(IDs, ID) + } + return IDs +} + +// Get a job with a given ID or nil if it doesn't exist +func (jobs *Jobs) Get(ID int64) *Job { + jobs.mu.RLock() + defer jobs.mu.RUnlock() + return jobs.jobs[ID] +} + +func getGroup(in rc.Params) string { + // Check to see if the group is set + group, err := in.GetString("_group") + if rc.NotErrParamNotFound(err) { + fs.Errorf(nil, "Can't get _group param %+v", err) + } + delete(in, "_group") + return group +} + +// NewAsyncJob start a new asynchronous Job off +func (jobs *Jobs) NewAsyncJob(fn rc.Func, in rc.Params) *Job { + id := atomic.AddInt64(&jobID, 1) + + group := getGroup(in) + if group == "" { + group = fmt.Sprintf("job/%d", id) + } + ctx := accounting.WithStatsGroup(context.Background(), group) + ctx, cancel := context.WithCancel(ctx) + stop := func() { + cancel() + // Wait for cancel to propagate before returning. + <-ctx.Done() + } + job := &Job{ + ID: id, + Group: group, + StartTime: time.Now(), + Stop: stop, + } + jobs.mu.Lock() + jobs.jobs[job.ID] = job + jobs.mu.Unlock() + go job.run(ctx, fn, in) + return job +} + +// NewSyncJob start a new synchronous Job off +func (jobs *Jobs) NewSyncJob(ctx context.Context, in rc.Params) (*Job, context.Context) { + id := atomic.AddInt64(&jobID, 1) + group := getGroup(in) + if group == "" { + group = fmt.Sprintf("job/%d", id) + } + ctxG := accounting.WithStatsGroup(ctx, fmt.Sprintf("job/%d", id)) + ctx, cancel := context.WithCancel(ctxG) + stop := func() { + cancel() + // Wait for cancel to propagate before returning. 
+		<-ctx.Done()
+	}
+	job := &Job{
+		ID:        id,
+		Group:     group,
+		StartTime: time.Now(),
+		Stop:      stop,
+	}
+	jobs.mu.Lock()
+	jobs.jobs[job.ID] = job
+	jobs.mu.Unlock()
+	return job, ctx
+}
+
+// StartAsyncJob starts a new job asynchronously and returns a Param suitable
+// for output.
+func StartAsyncJob(fn rc.Func, in rc.Params) (rc.Params, error) {
+	job := running.NewAsyncJob(fn, in)
+	out := make(rc.Params)
+	out["jobid"] = job.ID
+	return out, nil
+}
+
+// ExecuteJob executes a new job synchronously and returns a Param suitable for
+// output.
+func ExecuteJob(ctx context.Context, fn rc.Func, in rc.Params) (rc.Params, int64, error) {
+	job, ctx := running.NewSyncJob(ctx, in)
+	job.run(ctx, fn, in)
+	return job.Output, job.ID, job.realErr
+}
+
+// OnFinish adds a listener to jobid that will be triggered when the job is finished.
+// It returns a function to cancel listening.
+func OnFinish(jobID int64, fn func()) (func(), error) {
+	job := running.Get(jobID)
+	if job == nil {
+		return func() {}, errors.New("job not found")
+	}
+	if job.Finished {
+		fn()
+	} else {
+		job.addListener(&fn)
+	}
+	return func() { job.removeListener(&fn) }, nil
+}
+
+func init() {
+	rc.Add(rc.Call{
+		Path:  "job/status",
+		Fn:    rcJobStatus,
+		Title: "Reads the status of the job ID",
+		Help: `Parameters
+
+- jobid - id of the job (integer)
+
+Results
+
+- duration - time in seconds that the job ran for
+- endTime - time the job finished (e.g. "2018-10-26T18:50:20.528746884+01:00")
+- error - error from the job or empty string for no error
+- finished - boolean whether the job has finished or not
+- id - as passed in above
+- startTime - time the job started (e.g. "2018-10-26T18:50:20.528336039+01:00")
+- success - boolean - true for success false otherwise
+- output - output of the job as would have been returned if called synchronously
+- progress - output of the progress related to the underlying job
+`,
+	})
+}
+
+// Returns the status of a job
+func rcJobStatus(ctx context.Context, in rc.Params) (out rc.Params, err error) {
+	jobID, err := in.GetInt64("jobid")
+	if err != nil {
+		return nil, err
+	}
+	job := running.Get(jobID)
+	if job == nil {
+		return nil, errors.New("job not found")
+	}
+	job.mu.Lock()
+	defer job.mu.Unlock()
+	out = make(rc.Params)
+	err = rc.Reshape(&out, job)
+	if err != nil {
+		return nil, errors.Wrap(err, "reshape failed in job status")
+	}
+	return out, nil
+}
+
+func init() {
+	rc.Add(rc.Call{
+		Path:  "job/list",
+		Fn:    rcJobList,
+		Title: "Lists the IDs of the running jobs",
+		Help: `Parameters - None
+
+Results
+
+- jobids - array of integer job ids
+`,
+	})
+}
+
+// Returns list of job ids.
+func rcJobList(ctx context.Context, in rc.Params) (out rc.Params, err error) {
+	out = make(rc.Params)
+	out["jobids"] = running.IDs()
+	return out, nil
+}
+
+func init() {
+	rc.Add(rc.Call{
+		Path:  "job/stop",
+		Fn:    rcJobStop,
+		Title: "Stop the running job",
+		Help: `Parameters
+
+- jobid - id of the job (integer)
+`,
+	})
+}
+
+// Stops the running job.
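+//
+// Note this cancels the job's context; the job only stops once the
+// function running under that context notices the cancellation.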
+func rcJobStop(ctx context.Context, in rc.Params) (out rc.Params, err error) { + jobID, err := in.GetInt64("jobid") + if err != nil { + return nil, err + } + job := running.Get(jobID) + if job == nil { + return nil, errors.New("job not found") + } + job.mu.Lock() + defer job.mu.Unlock() + out = make(rc.Params) + job.Stop() + return out, nil +} diff --git a/vendor/github.com/rclone/rclone/fs/rc/params.go b/vendor/github.com/rclone/rclone/fs/rc/params.go new file mode 100644 index 00000000000..f9353de523c --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/rc/params.go @@ -0,0 +1,281 @@ +// Parameter parsing + +package rc + +import ( + "encoding/json" + "fmt" + "math" + "net/http" + "strconv" + "time" + + "github.com/pkg/errors" + + "github.com/rclone/rclone/fs" +) + +// Params is the input and output type for the Func +type Params map[string]interface{} + +// ErrParamNotFound - this is returned from the Get* functions if the +// parameter isn't found along with a zero value of the requested +// item. +// +// Returning an error of this type from an rc.Func will cause the http +// method to return http.StatusBadRequest +type ErrParamNotFound string + +// Error turns this error into a string +func (e ErrParamNotFound) Error() string { + return fmt.Sprintf("Didn't find key %q in input", string(e)) +} + +// IsErrParamNotFound returns whether err is ErrParamNotFound +func IsErrParamNotFound(err error) bool { + _, isNotFound := err.(ErrParamNotFound) + return isNotFound +} + +// NotErrParamNotFound returns true if err != nil and +// !IsErrParamNotFound(err) +// +// This is for checking error returns of the Get* functions to ignore +// error not found returns and take the default value. +func NotErrParamNotFound(err error) bool { + return err != nil && !IsErrParamNotFound(err) +} + +// ErrParamInvalid - this is returned from the Get* functions if the +// parameter is invalid. +// +// +// Returning an error of this type from an rc.Func will cause the http +// method to return http.StatusBadRequest +type ErrParamInvalid struct { + error +} + +// IsErrParamInvalid returns whether err is ErrParamInvalid +func IsErrParamInvalid(err error) bool { + _, isInvalid := err.(ErrParamInvalid) + return isInvalid +} + +// Reshape reshapes one blob of data into another via json serialization +// +// out should be a pointer type +// +// This isn't a very efficient way of dealing with this! +func Reshape(out interface{}, in interface{}) error { + b, err := json.Marshal(in) + if err != nil { + return errors.Wrapf(err, "Reshape failed to Marshal") + } + err = json.Unmarshal(b, out) + if err != nil { + return errors.Wrapf(err, "Reshape failed to Unmarshal") + } + return nil +} + +// Copy shallow copies the Params +func (p Params) Copy() (out Params) { + out = make(Params, len(p)) + for k, v := range p { + out[k] = v + } + return out +} + +// Get gets a parameter from the input +// +// If the parameter isn't found then error will be of type +// ErrParamNotFound and the returned value will be nil. +func (p Params) Get(key string) (interface{}, error) { + value, ok := p[key] + if !ok { + return nil, ErrParamNotFound(key) + } + return value, nil +} + +// GetHTTPRequest gets a http.Request parameter associated with the request with the key "_request" +// +// If the parameter isn't found then error will be of type +// ErrParamNotFound and the returned value will be nil. 
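+//
+// The request is only injected for calls registered with NeedsRequest
+// set (see rc.Call).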
+func (p Params) GetHTTPRequest() (*http.Request, error) { + key := "_request" + value, err := p.Get(key) + if err != nil { + return nil, err + } + request, ok := value.(*http.Request) + if !ok { + return nil, ErrParamInvalid{errors.Errorf("expecting http.request value for key %q (was %T)", key, value)} + } + return request, nil +} + +// GetHTTPResponseWriter gets a http.ResponseWriter parameter associated with the request with the key "_response" +// +// If the parameter isn't found then error will be of type +// ErrParamNotFound and the returned value will be nil. +func (p Params) GetHTTPResponseWriter() (http.ResponseWriter, error) { + key := "_response" + value, err := p.Get(key) + if err != nil { + return nil, err + } + request, ok := value.(http.ResponseWriter) + if !ok { + return nil, ErrParamInvalid{errors.Errorf("expecting http.ResponseWriter value for key %q (was %T)", key, value)} + } + return request, nil +} + +// GetString gets a string parameter from the input +// +// If the parameter isn't found then error will be of type +// ErrParamNotFound and the returned value will be "". +func (p Params) GetString(key string) (string, error) { + value, err := p.Get(key) + if err != nil { + return "", err + } + str, ok := value.(string) + if !ok { + return "", ErrParamInvalid{errors.Errorf("expecting string value for key %q (was %T)", key, value)} + } + return str, nil +} + +// GetInt64 gets an int64 parameter from the input +// +// If the parameter isn't found then error will be of type +// ErrParamNotFound and the returned value will be 0. +func (p Params) GetInt64(key string) (int64, error) { + value, err := p.Get(key) + if err != nil { + return 0, err + } + switch x := value.(type) { + case int: + return int64(x), nil + case int64: + return x, nil + case float64: + if x > math.MaxInt64 || x < math.MinInt64 { + return 0, ErrParamInvalid{errors.Errorf("key %q (%v) overflows int64 ", key, value)} + } + return int64(x), nil + case string: + i, err := strconv.ParseInt(x, 10, 0) + if err != nil { + return 0, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as int64", key, value)} + } + return i, nil + } + return 0, ErrParamInvalid{errors.Errorf("expecting int64 value for key %q (was %T)", key, value)} +} + +// GetFloat64 gets a float64 parameter from the input +// +// If the parameter isn't found then error will be of type +// ErrParamNotFound and the returned value will be 0. +func (p Params) GetFloat64(key string) (float64, error) { + value, err := p.Get(key) + if err != nil { + return 0, err + } + switch x := value.(type) { + case float64: + return x, nil + case int: + return float64(x), nil + case int64: + return float64(x), nil + case string: + f, err := strconv.ParseFloat(x, 64) + if err != nil { + return 0, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as float64", key, value)} + } + return f, nil + } + return 0, ErrParamInvalid{errors.Errorf("expecting float64 value for key %q (was %T)", key, value)} +} + +// GetBool gets a boolean parameter from the input +// +// If the parameter isn't found then error will be of type +// ErrParamNotFound and the returned value will be false. 
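+//
+// Accepts bools, numbers (non-zero is true), and strings parsed with
+// strconv.ParseBool.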
+func (p Params) GetBool(key string) (bool, error) {
+	value, err := p.Get(key)
+	if err != nil {
+		return false, err
+	}
+	switch x := value.(type) {
+	case int:
+		return x != 0, nil
+	case int64:
+		return x != 0, nil
+	case float64:
+		return x != 0, nil
+	case bool:
+		return x, nil
+	case string:
+		b, err := strconv.ParseBool(x)
+		if err != nil {
+			return false, ErrParamInvalid{errors.Wrapf(err, "couldn't parse key %q (%v) as bool", key, value)}
+		}
+		return b, nil
+	}
+	return false, ErrParamInvalid{errors.Errorf("expecting bool value for key %q (was %T)", key, value)}
+}
+
+// GetStruct gets a struct from key from the input into the struct
+// pointed to by out. out must be a pointer type.
+//
+// If the parameter isn't found then error will be of type
+// ErrParamNotFound and out will be unchanged.
+func (p Params) GetStruct(key string, out interface{}) error {
+	value, err := p.Get(key)
+	if err != nil {
+		return err
+	}
+	err = Reshape(out, value)
+	if err != nil {
+		if valueStr, ok := value.(string); ok {
+			// try to unmarshal as JSON if string
+			err = json.Unmarshal([]byte(valueStr), out)
+			if err == nil {
+				return nil
+			}
+		}
+		return ErrParamInvalid{errors.Wrapf(err, "key %q", key)}
+	}
+	return nil
+}
+
+// GetStructMissingOK works like GetStruct but doesn't return an error
+// if the key is missing
+func (p Params) GetStructMissingOK(key string, out interface{}) error {
+	_, ok := p[key]
+	if !ok {
+		return nil
+	}
+	return p.GetStruct(key, out)
+}
+
+// GetDuration gets the duration parameter from in
+func (p Params) GetDuration(key string) (time.Duration, error) {
+	s, err := p.GetString(key)
+	if err != nil {
+		return 0, err
+	}
+	duration, err := fs.ParseDuration(s)
+	if err != nil {
+		return 0, ErrParamInvalid{errors.Wrap(err, "parse duration")}
+	}
+	return duration, nil
+}
diff --git a/vendor/github.com/rclone/rclone/fs/rc/rc.go b/vendor/github.com/rclone/rclone/fs/rc/rc.go
new file mode 100644
index 00000000000..57d9f18dac0
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/rc/rc.go
@@ -0,0 +1,54 @@
+// Package rc implements a remote control server and registry for rclone
+//
+// To register your internal calls, call rc.Add(path, function). Your
+// function should take and return a Param. It can also return an
+// error. Use rc.NewError to wrap an existing error along with an
+// http response type if another response other than 500 internal
+// error is required on error.
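+//
+// Params is a map[string]interface{}, so inputs and outputs marshal
+// naturally to and from JSON.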
+package rc
+
+import (
+	"encoding/json"
+	"io"
+	_ "net/http/pprof" // install the pprof http handlers
+	"time"
+
+	"github.com/rclone/rclone/cmd/serve/httplib"
+)
+
+// Options contains options for the remote control server
+type Options struct {
+	HTTPOptions              httplib.Options
+	Enabled                  bool   // set to enable the server
+	Serve                    bool   // set to serve files from remotes
+	Files                    string // set to enable serving files locally
+	NoAuth                   bool   // set to disable auth checks on AuthRequired methods
+	WebUI                    bool   // set to launch the web ui
+	WebGUIUpdate             bool   // set to check for a new update
+	WebGUIForceUpdate        bool   // set to force downloading a new update
+	WebGUINoOpenBrowser      bool   // set to disable auto opening browser
+	WebGUIFetchURL           string // set the default url for fetching webgui
+	AccessControlAllowOrigin string // set the access control for CORS configuration
+	EnableMetrics            bool   // set to enable prometheus metrics on /metrics
+	JobExpireDuration        time.Duration
+	JobExpireInterval        time.Duration
+}
+
+// DefaultOpt is the default values used for Options
+var DefaultOpt = Options{
+	HTTPOptions:       httplib.DefaultOpt,
+	Enabled:           false,
+	JobExpireDuration: 60 * time.Second,
+	JobExpireInterval: 10 * time.Second,
+}
+
+func init() {
+	DefaultOpt.HTTPOptions.ListenAddr = "localhost:5572"
+}
+
+// WriteJSON writes JSON in out to w
+func WriteJSON(w io.Writer, out Params) error {
+	enc := json.NewEncoder(w)
+	enc.SetIndent("", "\t")
+	return enc.Encode(out)
+}
diff --git a/vendor/github.com/rclone/rclone/fs/rc/registry.go b/vendor/github.com/rclone/rclone/fs/rc/registry.go
new file mode 100644
index 00000000000..4cc80541ac4
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/rc/registry.go
@@ -0,0 +1,80 @@
+// Define the registry
+
+package rc
+
+import (
+	"context"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/rclone/rclone/fs"
+)
+
+// Func defines a type for a remote control function
+type Func func(ctx context.Context, in Params) (out Params, err error)
+
+// Call defines info about a remote control function and is used in
+// the Add function to create new entry points.
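+//
+// A minimal sketch of registering a call from another package (the
+// path, title and help are illustrative):
+//
+//	rc.Add(rc.Call{
+//		Path:  "app/echo",
+//		Fn:    func(ctx context.Context, in rc.Params) (rc.Params, error) { return in, nil },
+//		Title: "Echo the input",
+//		Help:  "Returns the input parameters unchanged.",
+//	})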
+type Call struct { + Path string // path to activate this RC + Fn Func `json:"-"` // function to call + Title string // help for the function + AuthRequired bool // if set then this call requires authorisation to be set + Help string // multi-line markdown formatted help + NeedsRequest bool // if set then this call will be passed the original request object as _request + NeedsResponse bool // if set then this call will be passed the original response object as _response +} + +// Registry holds the list of all the registered remote control functions +type Registry struct { + mu sync.RWMutex + call map[string]*Call +} + +// NewRegistry makes a new registry for remote control functions +func NewRegistry() *Registry { + return &Registry{ + call: make(map[string]*Call), + } +} + +// Add a call to the registry +func (r *Registry) Add(call Call) { + r.mu.Lock() + defer r.mu.Unlock() + call.Path = strings.Trim(call.Path, "/") + call.Help = strings.TrimSpace(call.Help) + fs.Debugf(nil, "Adding path %q to remote control registry", call.Path) + r.call[call.Path] = &call +} + +// Get a Call from a path or nil +func (r *Registry) Get(path string) *Call { + r.mu.RLock() + defer r.mu.RUnlock() + return r.call[path] +} + +// List of all calls in alphabetical order +func (r *Registry) List() (out []*Call) { + r.mu.RLock() + defer r.mu.RUnlock() + var keys []string + for key := range r.call { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + out = append(out, r.call[key]) + } + return out +} + +// Calls is the global registry of Call objects +var Calls = NewRegistry() + +// Add a function to the global registry +func Add(call Call) { + Calls.Add(call) +} diff --git a/vendor/github.com/rclone/rclone/fs/sizesuffix.go b/vendor/github.com/rclone/rclone/fs/sizesuffix.go new file mode 100644 index 00000000000..c1d1b79c954 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/sizesuffix.go @@ -0,0 +1,145 @@ +package fs + +// SizeSuffix is parsed by flag with k/M/G suffixes +import ( + "fmt" + "math" + "sort" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +// SizeSuffix is an int64 with a friendly way of printing setting +type SizeSuffix int64 + +// Common multipliers for SizeSuffix +const ( + Byte SizeSuffix = 1 << (iota * 10) + KibiByte + MebiByte + GibiByte + TebiByte + PebiByte + ExbiByte +) + +// Turn SizeSuffix into a string and a suffix +func (x SizeSuffix) string() (string, string) { + scaled := float64(0) + suffix := "" + switch { + case x < 0: + return "off", "" + case x == 0: + return "0", "" + case x < 1<<10: + scaled = float64(x) + suffix = "" + case x < 1<<20: + scaled = float64(x) / (1 << 10) + suffix = "k" + case x < 1<<30: + scaled = float64(x) / (1 << 20) + suffix = "M" + case x < 1<<40: + scaled = float64(x) / (1 << 30) + suffix = "G" + case x < 1<<50: + scaled = float64(x) / (1 << 40) + suffix = "T" + default: + scaled = float64(x) / (1 << 50) + suffix = "P" + } + if math.Floor(scaled) == scaled { + return fmt.Sprintf("%.0f", scaled), suffix + } + return fmt.Sprintf("%.3f", scaled), suffix +} + +// String turns SizeSuffix into a string +func (x SizeSuffix) String() string { + val, suffix := x.string() + return val + suffix +} + +// Unit turns SizeSuffix into a string with a unit +func (x SizeSuffix) Unit(unit string) string { + val, suffix := x.string() + if val == "off" { + return val + } + return val + " " + suffix + unit +} + +// Set a SizeSuffix +func (x *SizeSuffix) Set(s string) error { + if len(s) == 0 { + return errors.New("empty string") + } 
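+	// Examples: "10G" is 10 GiB, "0.5M" is 512 KiB, a bare number is
+	// read as KiB and "off" stores -1.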
+ if strings.ToLower(s) == "off" { + *x = -1 + return nil + } + suffix := s[len(s)-1] + suffixLen := 1 + var multiplier float64 + switch suffix { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.': + suffixLen = 0 + multiplier = 1 << 10 + case 'b', 'B': + multiplier = 1 + case 'k', 'K': + multiplier = 1 << 10 + case 'm', 'M': + multiplier = 1 << 20 + case 'g', 'G': + multiplier = 1 << 30 + case 't', 'T': + multiplier = 1 << 40 + case 'p', 'P': + multiplier = 1 << 50 + default: + return errors.Errorf("bad suffix %q", suffix) + } + s = s[:len(s)-suffixLen] + value, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + if value < 0 { + return errors.Errorf("size can't be negative %q", s) + } + value *= multiplier + *x = SizeSuffix(value) + return nil +} + +// Type of the value +func (x *SizeSuffix) Type() string { + return "SizeSuffix" +} + +// Scan implements the fmt.Scanner interface +func (x *SizeSuffix) Scan(s fmt.ScanState, ch rune) error { + token, err := s.Token(true, nil) + if err != nil { + return err + } + return x.Set(string(token)) +} + +// SizeSuffixList is a slice SizeSuffix values +type SizeSuffixList []SizeSuffix + +func (l SizeSuffixList) Len() int { return len(l) } +func (l SizeSuffixList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l SizeSuffixList) Less(i, j int) bool { return l[i] < l[j] } + +// Sort sorts the list +func (l SizeSuffixList) Sort() { + sort.Sort(l) +} diff --git a/vendor/github.com/rclone/rclone/fs/sync/pipe.go b/vendor/github.com/rclone/rclone/fs/sync/pipe.go new file mode 100644 index 00000000000..d28527e7b6e --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/sync/pipe.go @@ -0,0 +1,235 @@ +package sync + +import ( + "context" + "math/bits" + "strconv" + "strings" + "sync" + + "github.com/aalpar/deheap" + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/fserrors" +) + +// compare two items for order by +type lessFn func(a, b fs.ObjectPair) bool + +// pipe provides an unbounded channel like experience +// +// Note unlike channels these aren't strictly ordered. 
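+//
+// When an order-by is configured the backlog is kept as a double-ended
+// heap (deheap), so entries can be popped from either end of the
+// ordering.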
+type pipe struct {
+	mu        sync.Mutex
+	c         chan struct{}
+	queue     []fs.ObjectPair
+	closed    bool
+	totalSize int64
+	stats     func(items int, totalSize int64)
+	less      lessFn
+	fraction  int
+}
+
+func newPipe(orderBy string, stats func(items int, totalSize int64), maxBacklog int) (*pipe, error) {
+	if maxBacklog < 0 {
+		maxBacklog = (1 << (bits.UintSize - 1)) - 1 // largest positive int
+	}
+	less, fraction, err := newLess(orderBy)
+	if err != nil {
+		return nil, fserrors.FatalError(err)
+	}
+	p := &pipe{
+		c:        make(chan struct{}, maxBacklog),
+		stats:    stats,
+		less:     less,
+		fraction: fraction,
+	}
+	if p.less != nil {
+		deheap.Init(p)
+	}
+	return p, nil
+}
+
+// Len satisfies heap.Interface - must be called with lock held
+func (p *pipe) Len() int {
+	return len(p.queue)
+}
+
+// Less satisfies heap.Interface - must be called with lock held
+func (p *pipe) Less(i, j int) bool {
+	return p.less(p.queue[i], p.queue[j])
+}
+
+// Swap satisfies heap.Interface - must be called with lock held
+func (p *pipe) Swap(i, j int) {
+	p.queue[i], p.queue[j] = p.queue[j], p.queue[i]
+}
+
+// Push satisfies heap.Interface - must be called with lock held
+func (p *pipe) Push(item interface{}) {
+	p.queue = append(p.queue, item.(fs.ObjectPair))
+}
+
+// Pop satisfies heap.Interface - must be called with lock held
+func (p *pipe) Pop() interface{} {
+	old := p.queue
+	n := len(old)
+	item := old[n-1]
+	old[n-1] = fs.ObjectPair{} // avoid memory leak
+	p.queue = old[0 : n-1]
+	return item
+}
+
+// Put a pair into the pipe
+//
+// It returns ok = false if the context was cancelled
+//
+// It will panic if you call it after Close()
+func (p *pipe) Put(ctx context.Context, pair fs.ObjectPair) (ok bool) {
+	if ctx.Err() != nil {
+		return false
+	}
+	p.mu.Lock()
+	if p.less == nil {
+		// no order-by
+		p.queue = append(p.queue, pair)
+	} else {
+		deheap.Push(p, pair)
+	}
+	size := pair.Src.Size()
+	if size > 0 {
+		p.totalSize += size
+	}
+	p.stats(len(p.queue), p.totalSize)
+	p.mu.Unlock()
+	select {
+	case <-ctx.Done():
+		return false
+	case p.c <- struct{}{}:
+	}
+	return true
+}
+
+// Get a pair from the pipe
+//
+// If fraction is > the mixed fraction set in the pipe then it gets it
+// from the other end of the heap if order-by is in effect
+//
+// It returns ok = false if the context was cancelled or Close() has
+// been called.
+func (p *pipe) GetMax(ctx context.Context, fraction int) (pair fs.ObjectPair, ok bool) {
+	if ctx.Err() != nil {
+		return
+	}
+	select {
+	case <-ctx.Done():
+		return
+	case _, ok = <-p.c:
+		if !ok {
+			return
+		}
+	}
+	p.mu.Lock()
+	if p.less == nil {
+		// no order-by
+		pair = p.queue[0]
+		p.queue[0] = fs.ObjectPair{} // avoid memory leak
+		p.queue = p.queue[1:]
+	} else if p.fraction < 0 || fraction < p.fraction {
+		pair = deheap.Pop(p).(fs.ObjectPair)
+	} else {
+		pair = deheap.PopMax(p).(fs.ObjectPair)
+	}
+	size := pair.Src.Size()
+	if size > 0 {
+		p.totalSize -= size
+	}
+	if p.totalSize < 0 {
+		p.totalSize = 0
+	}
+	p.stats(len(p.queue), p.totalSize)
+	p.mu.Unlock()
+	return pair, true
+}
+
+// Get a pair from the pipe
+//
+// It returns ok = false if the context was cancelled or Close() has
+// been called.
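+//
+// Get is GetMax with fraction -1, which always pops from the front of
+// the ordering when an order-by is in effect.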
+func (p *pipe) Get(ctx context.Context) (pair fs.ObjectPair, ok bool) { + return p.GetMax(ctx, -1) +} + +// Stats reads the number of items in the queue and the totalSize +func (p *pipe) Stats() (items int, totalSize int64) { + p.mu.Lock() + items, totalSize = len(p.queue), p.totalSize + p.mu.Unlock() + return items, totalSize +} + +// Close the pipe +// +// Writes to a closed pipe will panic as will double closing a pipe +func (p *pipe) Close() { + p.mu.Lock() + close(p.c) + p.closed = true + p.mu.Unlock() +} + +// newLess returns a less function for the heap comparison or nil if +// one is not required +func newLess(orderBy string) (less lessFn, fraction int, err error) { + fraction = -1 + if orderBy == "" { + return nil, fraction, nil + } + parts := strings.Split(strings.ToLower(orderBy), ",") + switch parts[0] { + case "name": + less = func(a, b fs.ObjectPair) bool { + return a.Src.Remote() < b.Src.Remote() + } + case "size": + less = func(a, b fs.ObjectPair) bool { + return a.Src.Size() < b.Src.Size() + } + case "modtime": + less = func(a, b fs.ObjectPair) bool { + ctx := context.Background() + return a.Src.ModTime(ctx).Before(b.Src.ModTime(ctx)) + } + default: + return nil, fraction, errors.Errorf("unknown --order-by comparison %q", parts[0]) + } + descending := false + if len(parts) > 1 { + switch parts[1] { + case "ascending", "asc": + case "descending", "desc": + descending = true + case "mixed": + fraction = 50 + if len(parts) > 2 { + fraction, err = strconv.Atoi(parts[2]) + if err != nil { + return nil, fraction, errors.Errorf("bad mixed fraction --order-by %q", parts[2]) + } + } + + default: + return nil, fraction, errors.Errorf("unknown --order-by sort direction %q", parts[1]) + } + } + if (fraction >= 0 && len(parts) > 3) || (fraction < 0 && len(parts) > 2) { + return nil, fraction, errors.Errorf("bad --order-by string %q", orderBy) + } + if descending { + oldLess := less + less = func(a, b fs.ObjectPair) bool { + return !oldLess(a, b) + } + } + return less, fraction, nil +} diff --git a/vendor/github.com/rclone/rclone/fs/sync/rc.go b/vendor/github.com/rclone/rclone/fs/sync/rc.go new file mode 100644 index 00000000000..b00d2adb1e2 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/sync/rc.go @@ -0,0 +1,61 @@ +package sync + +import ( + "context" + + "github.com/rclone/rclone/fs/rc" +) + +func init() { + for _, name := range []string{"sync", "copy", "move"} { + name := name + moveHelp := "" + if name == "move" { + moveHelp = "- deleteEmptySrcDirs - delete empty src directories if set\n" + } + rc.Add(rc.Call{ + Path: "sync/" + name, + AuthRequired: true, + Fn: func(ctx context.Context, in rc.Params) (rc.Params, error) { + return rcSyncCopyMove(ctx, in, name) + }, + Title: name + " a directory from source remote to destination remote", + Help: `This takes the following parameters + +- srcFs - a remote name string e.g. "drive:src" for the source +- dstFs - a remote name string e.g. 
"drive:dst" for the destination +` + moveHelp + ` + +See the [` + name + ` command](/commands/rclone_` + name + `/) command for more information on the above.`, + }) + } +} + +// Sync/Copy/Move a file +func rcSyncCopyMove(ctx context.Context, in rc.Params, name string) (out rc.Params, err error) { + srcFs, err := rc.GetFsNamed(ctx, in, "srcFs") + if err != nil { + return nil, err + } + dstFs, err := rc.GetFsNamed(ctx, in, "dstFs") + if err != nil { + return nil, err + } + createEmptySrcDirs, err := in.GetBool("createEmptySrcDirs") + if rc.NotErrParamNotFound(err) { + return nil, err + } + switch name { + case "sync": + return nil, Sync(ctx, dstFs, srcFs, createEmptySrcDirs) + case "copy": + return nil, CopyDir(ctx, dstFs, srcFs, createEmptySrcDirs) + case "move": + deleteEmptySrcDirs, err := in.GetBool("deleteEmptySrcDirs") + if rc.NotErrParamNotFound(err) { + return nil, err + } + return nil, MoveDir(ctx, dstFs, srcFs, deleteEmptySrcDirs, createEmptySrcDirs) + } + panic("unknown rcSyncCopyMove type") +} diff --git a/vendor/github.com/rclone/rclone/fs/sync/sync.go b/vendor/github.com/rclone/rclone/fs/sync/sync.go new file mode 100644 index 00000000000..a6238d57286 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/sync/sync.go @@ -0,0 +1,1182 @@ +// Package sync is the implementation of sync/copy/move +package sync + +import ( + "context" + "fmt" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/filter" + "github.com/rclone/rclone/fs/fserrors" + "github.com/rclone/rclone/fs/hash" + "github.com/rclone/rclone/fs/march" + "github.com/rclone/rclone/fs/operations" +) + +type syncCopyMove struct { + // parameters + fdst fs.Fs + fsrc fs.Fs + deleteMode fs.DeleteMode // how we are doing deletions + DoMove bool + copyEmptySrcDirs bool + deleteEmptySrcDirs bool + dstDir string + srcDir string + paths []string + // internal state + ci *fs.ConfigInfo // global config + fi *filter.Filter // filter config + ctx context.Context // internal context for controlling go-routines + cancel func() // cancel the context + inCtx context.Context // internal context for controlling march + inCancel func() // cancel the march context + noTraverse bool // if set don't traverse the dst + noCheckDest bool // if set transfer all objects regardless without checking dst + noUnicodeNormalization bool // don't normalize unicode characters in filenames + deletersWg sync.WaitGroup // for delete before go routine + deleteFilesCh chan fs.Object // channel to receive deletes if delete before + trackRenames bool // set if we should do server-side renames + trackRenamesStrategy trackRenamesStrategy // strategies used for tracking renames + dstFilesMu sync.Mutex // protect dstFiles + dstFiles map[string]fs.Object // dst files, always filled + srcFiles map[string]fs.Object // src files, only used if deleteBefore + srcFilesChan chan fs.Object // passes src objects + srcFilesResult chan error // error result of src listing + dstFilesResult chan error // error result of dst listing + dstEmptyDirsMu sync.Mutex // protect dstEmptyDirs + dstEmptyDirs map[string]fs.DirEntry // potentially empty directories + srcEmptyDirsMu sync.Mutex // protect srcEmptyDirs + srcEmptyDirs map[string]fs.DirEntry // potentially empty directories + checkerWg sync.WaitGroup // wait for checkers + toBeChecked *pipe // checkers channel + transfersWg sync.WaitGroup // wait for transfers + toBeUploaded *pipe // copiers channel + 
errorMu sync.Mutex // Mutex covering the errors variables
+	err                    error                  // normal error from copy process
+	noRetryErr             error                  // error with NoRetry set
+	fatalErr               error                  // fatal error
+	commonHash             hash.Type              // common hash type between src and dst
+	modifyWindow           time.Duration          // modify window between fsrc, fdst
+	renameMapMu            sync.Mutex             // mutex to protect the below
+	renameMap              map[string][]fs.Object // dst files by hash - only used by trackRenames
+	renamerWg              sync.WaitGroup         // wait for renamers
+	toBeRenamed            *pipe                  // renamers channel
+	trackRenamesWg         sync.WaitGroup         // wg for background track renames
+	trackRenamesCh         chan fs.Object         // objects are pumped in here
+	renameCheck            []fs.Object            // accumulate files to check for rename here
+	compareCopyDest        fs.Fs                  // place to check for files to server-side copy
+	backupDir              fs.Fs                  // place to store overwrites/deletes
+	checkFirst             bool                   // if set run all the checkers before starting transfers
+	useSrcBaseName         bool                   // use only the source basename when copying or moving to the destination directory
+}
+
+type trackRenamesStrategy byte
+
+const (
+	trackRenamesStrategyHash trackRenamesStrategy = 1 << iota
+	trackRenamesStrategyModtime
+	trackRenamesStrategyLeaf
+)
+
+func (strategy trackRenamesStrategy) hash() bool {
+	return (strategy & trackRenamesStrategyHash) != 0
+}
+
+func (strategy trackRenamesStrategy) modTime() bool {
+	return (strategy & trackRenamesStrategyModtime) != 0
+}
+
+func (strategy trackRenamesStrategy) leaf() bool {
+	return (strategy & trackRenamesStrategyLeaf) != 0
+}
+
+func newSyncCopyMove(ctx context.Context, fdst fs.Fs, remoteDst string, fsrc fs.Fs, remoteSrc string, paths []string, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool, useSrcBaseName bool) (*syncCopyMove, error) {
+	if (deleteMode != fs.DeleteModeOff || DoMove) && operations.Overlapping(fdst, fsrc) {
+		return nil, fserrors.FatalError(fs.ErrorOverlapping)
+	}
+	ci := fs.GetConfig(ctx)
+	fi := filter.GetConfig(ctx)
+	s := &syncCopyMove{
+		ci:                     ci,
+		fi:                     fi,
+		fdst:                   fdst,
+		dstDir:                 remoteDst,
+		fsrc:                   fsrc,
+		srcDir:                 remoteSrc,
+		paths:                  paths,
+		deleteMode:             deleteMode,
+		DoMove:                 DoMove,
+		copyEmptySrcDirs:       copyEmptySrcDirs,
+		deleteEmptySrcDirs:     deleteEmptySrcDirs,
+		srcFilesChan:           make(chan fs.Object, ci.Checkers+ci.Transfers),
+		srcFilesResult:         make(chan error, 1),
+		dstFilesResult:         make(chan error, 1),
+		dstEmptyDirs:           make(map[string]fs.DirEntry),
+		srcEmptyDirs:           make(map[string]fs.DirEntry),
+		noTraverse:             ci.NoTraverse,
+		noCheckDest:            ci.NoCheckDest,
+		noUnicodeNormalization: ci.NoUnicodeNormalization,
+		deleteFilesCh:          make(chan fs.Object, ci.Checkers),
+		trackRenames:           ci.TrackRenames,
+		commonHash:             fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
+		modifyWindow:           fs.GetModifyWindow(ctx, fsrc, fdst),
+		trackRenamesCh:         make(chan fs.Object, ci.Checkers),
+		checkFirst:             ci.CheckFirst,
+		useSrcBaseName:         useSrcBaseName,
+	}
+	backlog := ci.MaxBacklog
+	if s.checkFirst {
+		fs.Infof(s.fdst, "Running all checks before starting transfers")
+		backlog = -1
+	}
+	var err error
+	s.toBeChecked, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetCheckQueue, backlog)
+	if err != nil {
+		return nil, err
+	}
+	s.toBeUploaded, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetTransferQueue, backlog)
+	if err != nil {
+		return nil, err
+	}
+	s.toBeRenamed, err = newPipe(ci.OrderBy, accounting.Stats(ctx).SetRenameQueue, backlog)
+	if err != nil {
+		return nil, err
+	}
+	// If a max session duration has been defined add a deadline to the context
+	if ci.MaxDuration > 0
{ + endTime := time.Now().Add(ci.MaxDuration) + fs.Infof(s.fdst, "Transfer session deadline: %s", endTime.Format("2006/01/02 15:04:05")) + s.ctx, s.cancel = context.WithDeadline(ctx, endTime) + } else { + s.ctx, s.cancel = context.WithCancel(ctx) + } + // Input context - cancel this for graceful stop + s.inCtx, s.inCancel = context.WithCancel(s.ctx) + if s.noTraverse && s.deleteMode != fs.DeleteModeOff { + fs.Errorf(nil, "Ignoring --no-traverse with sync") + s.noTraverse = false + } + s.trackRenamesStrategy, err = parseTrackRenamesStrategy(ci.TrackRenamesStrategy) + if err != nil { + return nil, err + } + if s.noCheckDest { + if s.deleteMode != fs.DeleteModeOff { + return nil, errors.New("can't use --no-check-dest with sync: use copy instead") + } + if ci.Immutable { + return nil, errors.New("can't use --no-check-dest with --immutable") + } + if s.backupDir != nil { + return nil, errors.New("can't use --no-check-dest with --backup-dir") + } + } + if s.trackRenames { + // Don't track renames for remotes without server-side move support. + if !operations.CanServerSideMove(fdst) { + fs.Errorf(fdst, "Ignoring --track-renames as the destination does not support server-side move or copy") + s.trackRenames = false + } + if s.trackRenamesStrategy.hash() && s.commonHash == hash.None { + fs.Errorf(fdst, "Ignoring --track-renames as the source and destination do not have a common hash") + s.trackRenames = false + } + + if s.trackRenamesStrategy.modTime() && s.modifyWindow == fs.ModTimeNotSupported { + fs.Errorf(fdst, "Ignoring --track-renames as either the source or destination do not support modtime") + s.trackRenames = false + } + + if s.deleteMode == fs.DeleteModeOff { + fs.Errorf(fdst, "Ignoring --track-renames as it doesn't work with copy or move, only sync") + s.trackRenames = false + } + } + if s.trackRenames { + // track renames needs delete after + if s.deleteMode != fs.DeleteModeOff { + s.deleteMode = fs.DeleteModeAfter + } + if s.noTraverse { + fs.Errorf(nil, "Ignoring --no-traverse with --track-renames") + s.noTraverse = false + } + } + // Make Fs for --backup-dir if required + if ci.BackupDir != "" || ci.Suffix != "" { + var err error + s.backupDir, err = operations.BackupDir(ctx, fdst, fsrc, "") + if err != nil { + return nil, err + } + } + if ci.CompareDest != "" { + var err error + s.compareCopyDest, err = operations.GetCompareDest(ctx) + if err != nil { + return nil, err + } + } else if ci.CopyDest != "" { + var err error + s.compareCopyDest, err = operations.GetCopyDest(ctx, fdst) + if err != nil { + return nil, err + } + } + return s, nil +} + +// Check to see if the context has been cancelled +func (s *syncCopyMove) aborting() bool { + return s.ctx.Err() != nil +} + +// This reads the map and pumps it into the channel passed in, closing +// the channel at the end +func (s *syncCopyMove) pumpMapToChan(files map[string]fs.Object, out chan<- fs.Object) { +outer: + for _, o := range files { + if s.aborting() { + break outer + } + select { + case out <- o: + case <-s.ctx.Done(): + break outer + } + } + close(out) + s.srcFilesResult <- nil +} + +// This checks the types of errors returned while copying files +func (s *syncCopyMove) processError(err error) { + if err == nil { + return + } + if err == context.DeadlineExceeded { + err = fserrors.NoRetryError(err) + } else if err == accounting.ErrorMaxTransferLimitReachedGraceful { + if s.inCtx.Err() == nil { + fs.Logf(nil, "%v - stopping transfers", err) + // Cancel the march and stop the pipes + s.inCancel() + } + } + s.errorMu.Lock() + 
defer s.errorMu.Unlock()
+	switch {
+	case fserrors.IsFatalError(err):
+		if !s.aborting() {
+			fs.Errorf(nil, "Cancelling sync due to fatal error: %v", err)
+			s.cancel()
+		}
+		s.fatalErr = err
+	case fserrors.IsNoRetryError(err):
+		s.noRetryErr = err
+	default:
+		s.err = err
+	}
+}
+
+// Returns the current error (if any) in the order of precedence
+//
+//	fatalErr
+//	normal error
+//	noRetryErr
+func (s *syncCopyMove) currentError() error {
+	s.errorMu.Lock()
+	defer s.errorMu.Unlock()
+	if s.fatalErr != nil {
+		return s.fatalErr
+	}
+	if s.err != nil {
+		return s.err
+	}
+	return s.noRetryErr
+}
+
+// pairChecker reads Objects on in and sends them to out if they need transferring.
+//
+// FIXME potentially doing lots of hashes at once
+func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	for {
+		pair, ok := in.GetMax(s.inCtx, fraction)
+		if !ok {
+			return
+		}
+		src := pair.Src
+		var err error
+		tr := accounting.Stats(s.ctx).NewCheckingTransfer(src)
+		// Check to see if we can store this
+		ok = false
+		if src.Storable() {
+			NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, pair.Dst, pair.Src, s.compareCopyDest, s.backupDir)
+			if err != nil {
+				s.processError(err)
+			}
+			if !NoNeedTransfer && operations.NeedTransfer(s.ctx, pair.Dst, pair.Src) {
+				// If files are treated as immutable, fail if destination exists and does not match
+				if s.ci.Immutable && pair.Dst != nil {
+					err := fs.CountError(fserrors.NoRetryError(fs.ErrorImmutableModified))
+					fs.Errorf(pair.Dst, "Source and destination exist but do not match: %v", err)
+					s.processError(err)
+				} else {
+					// If destination already exists, then we must move it into --backup-dir if required
+					if pair.Dst != nil && s.backupDir != nil {
+						err := operations.MoveBackupDir(s.ctx, s.backupDir, pair.Dst)
+						if err != nil {
+							s.processError(err)
+						} else {
+							// If successful zero out the dst as it is no longer there and copy the file
+							pair.Dst = nil
+							ok = out.Put(s.ctx, pair)
+							if !ok {
+								return
+							}
+						}
+					} else {
+						ok = out.Put(s.ctx, pair)
+						if !ok {
+							return
+						}
+					}
+				}
+			} else {
+				// If moving need to delete the files we don't need to copy
+				if s.DoMove {
+					// Delete src if no error on copy
+					if operations.SameObject(src, pair.Dst) {
+						fs.Logf(src, "Not removing source file as it is the same file as the destination")
+					} else {
+						s.processError(operations.DeleteFile(s.ctx, src))
+					}
+				}
+			}
+		}
+		if !ok {
+			accounting.Stats(s.ctx).UpdateSkipped(tr.Snapshot().Size)
+		}
+		tr.Done(s.ctx, err)
+	}
+}
+
+// pairRenamer reads Objects on in and attempts to rename them,
+// otherwise it sends them out if they need transferring.
+func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	for {
+		pair, ok := in.GetMax(s.inCtx, fraction)
+		if !ok {
+			return
+		}
+		src := pair.Src
+		if !s.tryRename(src) {
+			// pass on if not renamed
+			ok = out.Put(s.ctx, pair)
+			if !ok {
+				return
+			}
+		} else {
+			accounting.Stats(s.ctx).UpdateSkipped(src.Size())
+		}
+	}
+}
+
+// pairCopyOrMove reads Objects on in and moves or copies them.
+func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs, fraction int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	var err error
+	for {
+		pair, ok := in.GetMax(s.inCtx, fraction)
+		if !ok {
+			return
+		}
+		src := pair.Src
+		name := s.dstObjectName(pair)
+		if s.DoMove {
+			_, err = operations.Move(ctx, fdst, pair.Dst, name, src)
+		} else {
+			_, err = operations.Copy(ctx, fdst, pair.Dst, name, src)
+		}
+		s.processError(err)
+	}
+}
+
+// dstObjectName returns the full name of the object as it should be on the
+// destination after copy or move.
+func (s *syncCopyMove) dstObjectName(pair fs.ObjectPair) string {
+	if !s.useSrcBaseName {
+		return pair.Src.Remote()
+	}
+
+	// Example when copying:
+	//	data:test_keyspace/snapshots/123/bla/somefile.file
+	// to
+	//	s3:bucket/backup/long/path/to/123
+	// this should return backup/long/path/to/123/bla/somefile.file
+	// so we get the full path on the destination fs.
+	return path.Join(s.dstDir, strings.TrimPrefix(pair.Src.Remote(), s.srcDir))
+}
+
+// This starts the background checkers.
+func (s *syncCopyMove) startCheckers() {
+	s.checkerWg.Add(s.ci.Checkers)
+	for i := 0; i < s.ci.Checkers; i++ {
+		fraction := (100 * i) / s.ci.Checkers
+		go s.pairChecker(s.toBeChecked, s.toBeUploaded, fraction, &s.checkerWg)
+	}
+}
+
+// This stops the background checkers
+func (s *syncCopyMove) stopCheckers() {
+	s.toBeChecked.Close()
+	fs.Debugf(s.fdst, "Waiting for checks to finish")
+	s.checkerWg.Wait()
+}
+
+// This starts the background transfers
+func (s *syncCopyMove) startTransfers() {
+	s.transfersWg.Add(s.ci.Transfers)
+	for i := 0; i < s.ci.Transfers; i++ {
+		fraction := (100 * i) / s.ci.Transfers
+		go s.pairCopyOrMove(s.ctx, s.toBeUploaded, s.fdst, fraction, &s.transfersWg)
+	}
+}
+
+// This stops the background transfers
+func (s *syncCopyMove) stopTransfers() {
+	s.toBeUploaded.Close()
+	fs.Debugf(s.fdst, "Waiting for transfers to finish")
+	s.transfersWg.Wait()
+}
+
+// This starts the background renamers.
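+// Each worker is handed a fraction, a percentile spread evenly across the
+// workers, which pipe.GetMax can use to pick work from different parts of
+// the ordered backlog (exactly how it is used depends on the --order-by
+// settings). A worked sketch of the arithmetic for 4 checkers:
+//
+//	for i := 0; i < 4; i++ {
+//		fmt.Println((100 * i) / 4) // prints 0, 25, 50, 75
+//	}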
+func (s *syncCopyMove) startRenamers() {
+	if !s.trackRenames {
+		return
+	}
+	s.renamerWg.Add(s.ci.Checkers)
+	for i := 0; i < s.ci.Checkers; i++ {
+		fraction := (100 * i) / s.ci.Checkers
+		go s.pairRenamer(s.toBeRenamed, s.toBeUploaded, fraction, &s.renamerWg)
+	}
+}
+
+// This stops the background renamers
+func (s *syncCopyMove) stopRenamers() {
+	if !s.trackRenames {
+		return
+	}
+	s.toBeRenamed.Close()
+	fs.Debugf(s.fdst, "Waiting for renames to finish")
+	s.renamerWg.Wait()
+}
+
+// This starts the collection of possible renames
+func (s *syncCopyMove) startTrackRenames() {
+	if !s.trackRenames {
+		return
+	}
+	s.trackRenamesWg.Add(1)
+	go func() {
+		defer s.trackRenamesWg.Done()
+		for o := range s.trackRenamesCh {
+			s.renameCheck = append(s.renameCheck, o)
+		}
+	}()
+}
+
+// This stops the background rename collection
+func (s *syncCopyMove) stopTrackRenames() {
+	if !s.trackRenames {
+		return
+	}
+	close(s.trackRenamesCh)
+	s.trackRenamesWg.Wait()
+}
+
+// This starts the background deletion of files for --delete-during
+func (s *syncCopyMove) startDeleters() {
+	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
+		return
+	}
+	s.deletersWg.Add(1)
+	go func() {
+		defer s.deletersWg.Done()
+		err := operations.DeleteFilesWithBackupDir(s.ctx, s.deleteFilesCh, s.backupDir)
+		s.processError(err)
+	}()
+}
+
+// This stops the background deleters
+func (s *syncCopyMove) stopDeleters() {
+	if s.deleteMode != fs.DeleteModeDuring && s.deleteMode != fs.DeleteModeOnly {
+		return
+	}
+	close(s.deleteFilesCh)
+	s.deletersWg.Wait()
+}
+
+// This deletes the files in the dstFiles map. If checkSrcMap is set
+// then it first checks to see if they exist in srcFiles, the source
+// file map, otherwise it unconditionally deletes them. If
+// checkSrcMap is clear then it assumes that any source files that
+// have been found have been removed from dstFiles already.
+func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
+	if accounting.Stats(s.ctx).Errored() && !s.ci.IgnoreErrors {
+		fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
+		return fs.ErrorNotDeleting
+	}
+
+	// Delete the spare files
+	toDelete := make(fs.ObjectsChan, s.ci.Transfers)
+	go func() {
+	outer:
+		for remote, o := range s.dstFiles {
+			if checkSrcMap {
+				_, exists := s.srcFiles[remote]
+				if exists {
+					continue
+				}
+			}
+			if s.aborting() {
+				break
+			}
+			select {
+			case <-s.ctx.Done():
+				break outer
+			case toDelete <- o:
+			}
+		}
+		close(toDelete)
+	}()
+	return operations.DeleteFilesWithBackupDir(s.ctx, toDelete, s.backupDir)
+}
+
+// This deletes the empty directories in the map passed in. It
+// ignores any errors deleting directories
+func (s *syncCopyMove) deleteEmptyDirectories(ctx context.Context, f fs.Fs, entriesMap map[string]fs.DirEntry) error {
+	if len(entriesMap) == 0 {
+		return nil
+	}
+	if accounting.Stats(ctx).Errored() && !s.ci.IgnoreErrors {
+		fs.Errorf(f, "%v", fs.ErrorNotDeletingDirs)
+		return fs.ErrorNotDeletingDirs
+	}
+
+	var entries fs.DirEntries
+	for _, entry := range entriesMap {
+		entries = append(entries, entry)
+	}
+	// Now delete the empty directories starting from the longest path
+	sort.Sort(entries)
+	var errorCount int
+	var okCount int
+	for i := len(entries) - 1; i >= 0; i-- {
+		entry := entries[i]
+		dir, ok := entry.(fs.Directory)
+		if ok {
+			// TryRmdir only deletes empty directories
+			err := operations.TryRmdir(ctx, f, dir.Remote())
+			if err != nil {
+				fs.Debugf(fs.LogDirName(f, dir.Remote()), "Failed to Rmdir: %v", err)
+				errorCount++
+			} else {
+				okCount++
+			}
+		} else {
+			fs.Errorf(f, "Not a directory: %v", entry)
+		}
+	}
+	if errorCount > 0 {
+		fs.Debugf(f, "failed to delete %d directories", errorCount)
+	}
+	if okCount > 0 {
+		fs.Debugf(f, "deleted %d directories", okCount)
+	}
+	return nil
+}
+
+// This copies the empty directories in the map passed in and logs
+// any errors copying the directories
+func copyEmptyDirectories(ctx context.Context, f fs.Fs, entries map[string]fs.DirEntry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+
+	var okCount int
+	for _, entry := range entries {
+		dir, ok := entry.(fs.Directory)
+		if ok {
+			err := operations.Mkdir(ctx, f, dir.Remote())
+			if err != nil {
+				fs.Errorf(fs.LogDirName(f, dir.Remote()), "Failed to Mkdir: %v", err)
+			} else {
+				okCount++
+			}
+		} else {
+			fs.Errorf(f, "Not a directory: %v", entry)
+		}
+	}
+
+	if accounting.Stats(ctx).Errored() {
+		fs.Debugf(f, "failed to copy %d directories", accounting.Stats(ctx).GetErrors())
+	}
+
+	if okCount > 0 {
+		fs.Debugf(f, "copied %d directories", okCount)
+	}
+	return nil
+}
+
+func (s *syncCopyMove) srcParentDirCheck(entry fs.DirEntry) {
+	// If we are moving files then we don't want to remove directories with files in them
+	// from the srcEmptyDirs as we are about to move them making the directory empty.
+	if s.DoMove {
+		return
+	}
+	parentDir := path.Dir(entry.Remote())
+	if parentDir == "." {
+		parentDir = ""
+	}
+	if _, ok := s.srcEmptyDirs[parentDir]; ok {
+		delete(s.srcEmptyDirs, parentDir)
+	}
+}
+
+// parseTrackRenamesStrategy turns a config string into a trackRenamesStrategy
+func parseTrackRenamesStrategy(strategies string) (strategy trackRenamesStrategy, err error) {
+	if len(strategies) == 0 {
+		return strategy, nil
+	}
+	for _, s := range strings.Split(strategies, ",") {
+		switch s {
+		case "hash":
+			strategy |= trackRenamesStrategyHash
+		case "modtime":
+			strategy |= trackRenamesStrategyModtime
+		case "leaf":
+			strategy |= trackRenamesStrategyLeaf
+		case "size":
+			// ignore
+		default:
+			return strategy, errors.Errorf("unknown track renames strategy %q", s)
+		}
+	}
+	return strategy, nil
+}
+
+// renameID makes a string with the size and the other identifiers of the requested rename strategies
+//
+// it may return an empty string in which case no hash could be made
+func (s *syncCopyMove) renameID(obj fs.Object, renamesStrategy trackRenamesStrategy, precision time.Duration) string {
+	var builder strings.Builder
+
+	fmt.Fprintf(&builder, "%d", obj.Size())
+
+	if renamesStrategy.hash() {
+		var err error
+		hash, err := obj.Hash(s.ctx, s.commonHash)
+
+		if err != nil {
+			fs.Debugf(obj, "Hash failed: %v", err)
+			return ""
+		}
+		if hash == "" {
+			return ""
+		}
+
+		builder.WriteRune(',')
+		builder.WriteString(hash)
+	}
+
+	// for renamesStrategy.modTime() we don't add to the hash but we check the times in
+	// popRenameMap
+
+	if renamesStrategy.leaf() {
+		builder.WriteRune(',')
+		builder.WriteString(path.Base(obj.Remote()))
+	}
+
+	return builder.String()
+}
+
+// pushRenameMap adds the object with hash to the rename map
+func (s *syncCopyMove) pushRenameMap(hash string, obj fs.Object) {
+	s.renameMapMu.Lock()
+	s.renameMap[hash] = append(s.renameMap[hash], obj)
+	s.renameMapMu.Unlock()
+}
+
+// popRenameMap finds the object with hash and pops the first match from
+// renameMap or returns nil if not found.
+func (s *syncCopyMove) popRenameMap(hash string, src fs.Object) (dst fs.Object) {
+	s.renameMapMu.Lock()
+	defer s.renameMapMu.Unlock()
+	dsts, ok := s.renameMap[hash]
+	if ok && len(dsts) > 0 {
+		// Element to remove
+		i := 0
+
+		// If using track renames strategy modtime then we need to check the modtimes here
+		if s.trackRenamesStrategy.modTime() {
+			i = -1
+			srcModTime := src.ModTime(s.ctx)
+			for j, dst := range dsts {
+				dstModTime := dst.ModTime(s.ctx)
+				dt := dstModTime.Sub(srcModTime)
+				if dt < s.modifyWindow && dt > -s.modifyWindow {
+					i = j
+					break
+				}
+			}
+			// If nothing matched then return nil
+			if i < 0 {
+				return nil
+			}
+		}
+
+		// Remove the entry and return it
+		dst = dsts[i]
+		dsts = append(dsts[:i], dsts[i+1:]...)
+		if len(dsts) > 0 {
+			s.renameMap[hash] = dsts
+		} else {
+			delete(s.renameMap, hash)
+		}
+	}
+	return dst
+}
+
+// makeRenameMap builds a map of the destination files by hash that
+// match sizes in the slice of objects in s.renameCheck
+func (s *syncCopyMove) makeRenameMap() {
+	fs.Infof(s.fdst, "Making map for --track-renames")
+
+	// first make a map of possible sizes we need to check
+	possibleSizes := map[int64]struct{}{}
+	for _, obj := range s.renameCheck {
+		possibleSizes[obj.Size()] = struct{}{}
+	}
+
+	// pump all the dstFiles into in
+	in := make(chan fs.Object, s.ci.Checkers)
+	go s.pumpMapToChan(s.dstFiles, in)
+
+	// now make a map of size,hash for all dstFiles
+	s.renameMap = make(map[string][]fs.Object)
+	var wg sync.WaitGroup
+	wg.Add(s.ci.Transfers)
+	for i := 0; i < s.ci.Transfers; i++ {
+		go func() {
+			defer wg.Done()
+			for obj := range in {
+				// only create hash for dst fs.Object if its size could match
+				if _, found := possibleSizes[obj.Size()]; found {
+					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj)
+					hash := s.renameID(obj, s.trackRenamesStrategy, s.modifyWindow)
+
+					if hash != "" {
+						s.pushRenameMap(hash, obj)
+					}
+
+					tr.Done(s.ctx, nil)
+				}
+			}
+		}()
+	}
+	wg.Wait()
+	fs.Infof(s.fdst, "Finished making map for --track-renames")
+}
+
+// tryRename renames a src object when doing track renames if
+// possible, it returns true if the object was renamed.
+func (s *syncCopyMove) tryRename(src fs.Object) bool {
+	// Calculate the hash of the src object
+	hash := s.renameID(src, s.trackRenamesStrategy, fs.GetModifyWindow(s.ctx, s.fsrc, s.fdst))
+
+	if hash == "" {
+		return false
+	}
+
+	// Get a match on fdst
+	dst := s.popRenameMap(hash, src)
+	if dst == nil {
+		return false
+	}
+
+	// Find dst object we are about to overwrite if it exists
+	dstOverwritten, _ := s.fdst.NewObject(s.ctx, src.Remote())
+
+	// Rename dst to have name src.Remote()
+	_, err := operations.Move(s.ctx, s.fdst, dstOverwritten, src.Remote(), dst)
+	if err != nil {
+		fs.Debugf(src, "Failed to rename to %q: %v", dst.Remote(), err)
+		return false
+	}
+
+	// remove file from dstFiles if present
+	s.dstFilesMu.Lock()
+	delete(s.dstFiles, dst.Remote())
+	s.dstFilesMu.Unlock()
+
+	fs.Infof(src, "Renamed from %q", dst.Remote())
+	return true
+}
+
+// Syncs fsrc into fdst
+//
+// If paths is provided then only those paths will be synced
+//
+// If Delete is true then it deletes any files in fdst that aren't in fsrc
+//
+// If DoMove is true then files will be moved instead of copied
+//
+// dir is the start directory, "" for root
+func (s *syncCopyMove) run() error {
+	if operations.Same(s.fdst, s.fsrc) {
+		fs.Errorf(s.fdst, "Nothing to do as source and destination are the same")
+		return nil
+	}
+
+	// Start background checking and transferring pipeline
+	s.startCheckers()
+	s.startRenamers()
+	if !s.checkFirst {
+		s.startTransfers()
+	}
+	s.startDeleters()
+	s.dstFiles = make(map[string]fs.Object)
+
+	s.startTrackRenames()
+
+	// set up a march over fdst and fsrc
+	m := &march.March{
+		Ctx:                    s.inCtx,
+		Fdst:                   s.fdst,
+		DstDir:                 s.dstDir,
+		Fsrc:                   s.fsrc,
+		SrcDir:                 s.srcDir,
+		Paths:                  s.paths,
+		NoTraverse:             s.noTraverse,
+		Callback:               s,
+		DstIncludeAll:          s.fi.Opt.DeleteExcluded,
+		NoCheckDest:            s.noCheckDest,
+		NoUnicodeNormalization: s.noUnicodeNormalization,
+	}
+	s.processError(m.Run(s.ctx))
+
+	s.stopTrackRenames()
+	if s.trackRenames {
+		// Build the map of the remaining dstFiles by hash
+		s.makeRenameMap()
+		// Attempt renames for all the files which don't have a matching dst
+		for _, src := range s.renameCheck {
+			ok := s.toBeRenamed.Put(s.ctx, fs.ObjectPair{Src: src, Dst: nil})
+			if !ok {
+				break
+			}
+		}
+	}
+
+	// Stop background checking and transferring pipeline
+	s.stopCheckers()
+	if s.checkFirst {
+		fs.Infof(s.fdst, "Checks finished, now starting transfers")
+		s.startTransfers()
+	}
+	s.stopRenamers()
+	s.stopTransfers()
+	s.stopDeleters()
+
+	if s.copyEmptySrcDirs {
+		s.processError(copyEmptyDirectories(s.ctx, s.fdst, s.srcEmptyDirs))
+	}
+
+	// Delete files after
+	if s.deleteMode == fs.DeleteModeAfter {
+		if s.currentError() != nil && !s.ci.IgnoreErrors {
+			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeleting)
+		} else {
+			s.processError(s.deleteFiles(false))
+		}
+	}
+
+	// Prune empty directories
+	if s.deleteMode != fs.DeleteModeOff {
+		if s.currentError() != nil && !s.ci.IgnoreErrors {
+			fs.Errorf(s.fdst, "%v", fs.ErrorNotDeletingDirs)
+		} else {
+			s.processError(s.deleteEmptyDirectories(s.ctx, s.fdst, s.dstEmptyDirs))
+		}
+	}
+
+	// Delete empty fsrc subdirectories
+	// if DoMove and --delete-empty-src-dirs flag is set
+	if s.DoMove && s.deleteEmptySrcDirs {
+		// delete empty subdirectories that were part of the move
+		s.processError(s.deleteEmptyDirectories(s.ctx, s.fsrc, s.srcEmptyDirs))
+	}
+
+	// Read the error out of the context if there is one
+	s.processError(s.ctx.Err())
+
+	// Print a "nothing to transfer" message if there were no transfers and no errors
+	if s.deleteMode != fs.DeleteModeOnly && accounting.Stats(s.ctx).GetTransfers() == 0 && s.currentError() == nil {
+		fs.Infof(nil, "There was nothing to transfer")
+	}
+
+	// cancel the context to free resources
+	s.cancel()
+	return s.currentError()
+}
+
+// DstOnly have an object which is in the destination only
+func (s *syncCopyMove) DstOnly(dst fs.DirEntry) (recurse bool) {
+	if s.deleteMode == fs.DeleteModeOff {
+		return false
+	}
+	switch x := dst.(type) {
+	case fs.Object:
+		switch s.deleteMode {
+		case fs.DeleteModeAfter:
+			// record object as needs deleting
+			s.dstFilesMu.Lock()
+			s.dstFiles[x.Remote()] = x
+			s.dstFilesMu.Unlock()
+		case fs.DeleteModeDuring, fs.DeleteModeOnly:
+			select {
+			case <-s.ctx.Done():
+				return
+			case s.deleteFilesCh <- x:
+			}
+		default:
+			panic(fmt.Sprintf("unexpected delete mode %d", s.deleteMode))
+		}
+	case fs.Directory:
+		// Do the same thing to the entire contents of the directory
+		// Record directory as it is potentially empty and needs deleting
+		if s.fdst.Features().CanHaveEmptyDirectories {
+			s.dstEmptyDirsMu.Lock()
+			s.dstEmptyDirs[dst.Remote()] = dst
+			s.dstEmptyDirsMu.Unlock()
+		}
+		return true
+	default:
+		panic("Bad object in DirEntries")
+	}
+	return false
+}
+
+// SrcOnly have an object which is in the source only
+func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
+	if s.deleteMode == fs.DeleteModeOnly {
+		return false
+	}
+	switch x := src.(type) {
+	case fs.Object:
+		// If it's a copy operation,
+		// remove parent directory from srcEmptyDirs
+		// since it's not really empty
+		s.srcEmptyDirsMu.Lock()
+		s.srcParentDirCheck(src)
+		s.srcEmptyDirsMu.Unlock()
+
+		if s.trackRenames {
+			// Save object to check for a rename later
+			select {
+			case <-s.ctx.Done():
+				return
+			case s.trackRenamesCh <- x:
+			}
+		} else {
+			// Check CompareDest && CopyDest
+			NoNeedTransfer, err := operations.CompareOrCopyDest(s.ctx, s.fdst, nil, x, s.compareCopyDest, s.backupDir)
+			if err != nil {
+				s.processError(err)
+			}
+			if !NoNeedTransfer {
+				// No need to check since doesn't exist
+				ok := s.toBeUploaded.Put(s.ctx, fs.ObjectPair{Src: x, Dst: nil})
+				if !ok {
+					return
+				}
+			} else {
+				accounting.Stats(s.ctx).UpdateSkipped(x.Size())
+			}
+		}
+	case fs.Directory:
+		// Do the same thing to the entire contents of the directory
+		// Record the directory for deletion
+		s.srcEmptyDirsMu.Lock()
+		s.srcParentDirCheck(src)
+		s.srcEmptyDirs[src.Remote()] = src
+		s.srcEmptyDirsMu.Unlock()
+		return true
+	default:
+		panic("Bad object in DirEntries")
+	}
+	return false
+}
+
+// Match is called when src and dst are present, so sync src to dst
+func (s *syncCopyMove) Match(ctx context.Context, dst, src fs.DirEntry) (recurse bool) {
+	switch srcX := src.(type) {
+	case fs.Object:
+		s.srcEmptyDirsMu.Lock()
+		s.srcParentDirCheck(src)
+		s.srcEmptyDirsMu.Unlock()
+
+		if s.deleteMode == fs.DeleteModeOnly {
+			return false
+		}
+		dstX, ok := dst.(fs.Object)
+		if ok {
+			ok = s.toBeChecked.Put(s.ctx, fs.ObjectPair{Src: srcX, Dst: dstX})
+			if !ok {
+				return false
+			}
+		} else {
+			// FIXME src is file, dst is directory
+			err := errors.New("can't overwrite directory with file")
+			fs.Errorf(dst, "%v", err)
+			s.processError(err)
+		}
+	case fs.Directory:
+		// Do the same thing to the entire contents of the directory
+		_, ok := dst.(fs.Directory)
+		if ok {
+			// Only record matched (src & dst) empty dirs when performing move
+			if s.DoMove {
+				// Record the src directory for deletion
+				s.srcEmptyDirsMu.Lock()
+				s.srcParentDirCheck(src)
+				s.srcEmptyDirs[src.Remote()] = src
+				s.srcEmptyDirsMu.Unlock()
+			}
+
+			return true
+		}
+		// FIXME src is dir, dst is file
+		err := errors.New("can't overwrite file with directory")
+		fs.Errorf(dst, "%v", err)
+		s.processError(err)
+	default:
+		panic("Bad object in DirEntries")
+	}
+	return false
+}
+
+// Syncs fsrc into fdst
+//
+// If Delete is true then it deletes any files in fdst that aren't in fsrc
+//
+// If DoMove is true then files will be moved instead of copied
+//
+// dir is the start directory, "" for root
+func runSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error {
+	ci := fs.GetConfig(ctx)
+	if deleteMode != fs.DeleteModeOff && DoMove {
+		return fserrors.FatalError(errors.New("can't delete and move at the same time"))
+	}
+	// Run an extra pass to delete only
+	if deleteMode == fs.DeleteModeBefore {
+		if ci.TrackRenames {
+			return fserrors.FatalError(errors.New("can't use --delete-before with --track-renames"))
+		}
+		// only delete stuff in this pass
+		do, err := newSyncCopyMove(ctx, fdst, "", fsrc, "", nil, fs.DeleteModeOnly, false, deleteEmptySrcDirs, copyEmptySrcDirs, false)
+		if err != nil {
+			return err
+		}
+		err = do.run()
+		if err != nil {
+			return err
+		}
+		// Next pass does a copy only
+		deleteMode = fs.DeleteModeOff
+	}
+	do, err := newSyncCopyMove(ctx, fdst, "", fsrc, "", nil, deleteMode, DoMove, deleteEmptySrcDirs, copyEmptySrcDirs, false)
+	if err != nil {
+		return err
+	}
+	return do.run()
+}
+
+// Sync fsrc into fdst
+func Sync(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
+	ci := fs.GetConfig(ctx)
+	return runSyncCopyMove(ctx, fdst, fsrc, ci.DeleteMode, false, false, copyEmptySrcDirs)
+}
+
+// CopyDir copies fsrc into fdst
+func CopyDir(ctx context.Context, fdst, fsrc fs.Fs, copyEmptySrcDirs bool) error {
+	return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, false, false, copyEmptySrcDirs)
+}
+
+// CopyDir2 copies files from fsrc/remoteSrc into fdst/remoteDst.
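+//
+// A usage sketch (the Fs values and paths are illustrative; this mirrors
+// the dstObjectName example above):
+//
+//	// place files from data:test_keyspace/snapshots/123 under backup/123 on fdst
+//	err := CopyDir2(ctx, fdst, "backup/123", fsrc, "test_keyspace/snapshots/123", false)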
+func CopyDir2(ctx context.Context, fdst fs.Fs, remoteDst string, fsrc fs.Fs, remoteSrc string, doMove bool) error { + do, err := newSyncCopyMove(ctx, fdst, remoteDst, fsrc, remoteSrc, nil, fs.DeleteModeOff, doMove, false, false, true) + if err != nil { + return err + } + return do.run() +} + +// CopyPaths copies provided list of paths from fsrc/remoteSrc/path into fdst/remoteDst/path. +func CopyPaths(ctx context.Context, fdst fs.Fs, remoteDst string, fsrc fs.Fs, remoteSrc string, paths []string, copyEmptySrcDirs bool) error { + do, err := newSyncCopyMove(ctx, fdst, remoteDst, fsrc, remoteSrc, paths, fs.DeleteModeOff, false, false, copyEmptySrcDirs, true) + if err != nil { + return err + } + return do.run() +} + +// moveDir moves fsrc into fdst +func moveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { + return runSyncCopyMove(ctx, fdst, fsrc, fs.DeleteModeOff, true, deleteEmptySrcDirs, copyEmptySrcDirs) +} + +// MoveDir moves fsrc into fdst +func MoveDir(ctx context.Context, fdst, fsrc fs.Fs, deleteEmptySrcDirs bool, copyEmptySrcDirs bool) error { + fi := filter.GetConfig(ctx) + if operations.Same(fdst, fsrc) { + fs.Errorf(fdst, "Nothing to do as source and destination are the same") + return nil + } + + // First attempt to use DirMover if exists, same Fs and no filters are active + if fdstDirMove := fdst.Features().DirMove; fdstDirMove != nil && operations.SameConfig(fsrc, fdst) && fi.InActive() { + if operations.SkipDestructive(ctx, fdst, "server-side directory move") { + return nil + } + fs.Debugf(fdst, "Using server-side directory move") + err := fdstDirMove(ctx, fsrc, "", "") + switch err { + case fs.ErrorCantDirMove, fs.ErrorDirExists: + fs.Infof(fdst, "Server side directory move failed - fallback to file moves: %v", err) + case nil: + fs.Infof(fdst, "Server side directory move succeeded") + return nil + default: + err = fs.CountError(err) + fs.Errorf(fdst, "Server side directory move failed: %v", err) + return err + } + } + + // Otherwise move the files one by one + return moveDir(ctx, fdst, fsrc, deleteEmptySrcDirs, copyEmptySrcDirs) +} diff --git a/vendor/github.com/rclone/rclone/fs/version.go b/vendor/github.com/rclone/rclone/fs/version.go new file mode 100644 index 00000000000..ff8c7f68e29 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/version.go @@ -0,0 +1,4 @@ +package fs + +// Version of rclone +var Version = "v1.54.0-DEV" diff --git a/vendor/github.com/rclone/rclone/fs/versioncheck.go b/vendor/github.com/rclone/rclone/fs/versioncheck.go new file mode 100644 index 00000000000..6116dd99c93 --- /dev/null +++ b/vendor/github.com/rclone/rclone/fs/versioncheck.go @@ -0,0 +1,7 @@ +//+build !go1.12 + +package fs + +// Upgrade to Go version 1.12 to compile rclone - latest stable go +// compiler recommended. 
+func init() { Go_version_1_12_required_for_compilation() }
diff --git a/vendor/github.com/rclone/rclone/fs/walk/walk.go b/vendor/github.com/rclone/rclone/fs/walk/walk.go
new file mode 100644
index 00000000000..b731fcf5d77
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/fs/walk/walk.go
@@ -0,0 +1,662 @@
+// Package walk walks directories
+package walk
+
+import (
+	"context"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/dirtree"
+	"github.com/rclone/rclone/fs/filter"
+	"github.com/rclone/rclone/fs/list"
+)
+
+// ErrorSkipDir is used as a return value from Walk to indicate that the
+// directory named in the call is to be skipped. It is not returned as
+// an error by any function.
+var ErrorSkipDir = errors.New("skip this directory")
+
+// ErrorCantListR is returned by WalkR if the underlying Fs isn't
+// capable of doing a recursive listing.
+var ErrorCantListR = errors.New("recursive directory listing not available")
+
+// Func is the type of the function called for each directory
+// visited by Walk. The path argument contains the remote path to the directory.
+//
+// If there was a problem walking to the directory named by path, the
+// incoming error will describe the problem and the function can
+// decide how to handle that error (and Walk will not descend into
+// that directory). If an error is returned, processing stops. The
+// sole exception is when the function returns the special value
+// ErrorSkipDir. If the function returns ErrorSkipDir, Walk skips the
+// directory's contents entirely.
+type Func func(path string, entries fs.DirEntries, err error) error
+
+// Walk lists the directory.
+//
+// If includeAll is not set it will use the filters defined.
+//
+// If maxLevel is < 0 then it will recurse indefinitely, else it will
+// only do maxLevel levels.
+//
+// It calls fn for each tranche of DirEntries read.
+//
+// Note that fn will not be called concurrently whereas the directory
+// listing will proceed concurrently.
+//
+// Parent directories are always listed before their children
+//
+// This is implemented by WalkR if Config.UseListR is true
+// and f supports it and level > 1, or WalkN otherwise.
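+//
+// A minimal usage sketch (the construction of f is assumed):
+//
+//	err := Walk(ctx, f, "", false, -1, func(path string, entries fs.DirEntries, err error) error {
+//		if err != nil {
+//			return err
+//		}
+//		fs.Logf(nil, "%q has %d entries", path, len(entries))
+//		return nil
+//	})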
+//
+// If --files-from and --no-traverse is set then a DirTree will be
+// constructed with just those files in and then walked with WalkR
+//
+// NB (f, path) to be replaced by fs.Dir at some point
+func Walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
+	ci := fs.GetConfig(ctx)
+	fi := filter.GetConfig(ctx)
+	if ci.NoTraverse && fi.HaveFilesFrom() {
+		return walkR(ctx, f, path, includeAll, maxLevel, fn, fi.MakeListR(ctx, f.NewObject))
+	}
+	// FIXME should this just be maxLevel < 0 - why the maxLevel > 1
+	if (maxLevel < 0 || maxLevel > 1) && ci.UseListR && f.Features().ListR != nil {
+		return walkListR(ctx, f, path, includeAll, maxLevel, fn)
+	}
+	return walkListDirSorted(ctx, f, path, includeAll, maxLevel, fn)
+}
+
+// ListType is used to choose which combination of files or directories is required
+type ListType byte
+
+// Types of listing for ListR
+const (
+	ListObjects ListType = 1 << iota // list objects only
+	ListDirs                         // list dirs only
+	ListAll = ListObjects | ListDirs // list files and dirs
+)
+
+// Objects returns true if the list type specifies objects
+func (l ListType) Objects() bool {
+	return (l & ListObjects) != 0
+}
+
+// Dirs returns true if the list type specifies dirs
+func (l ListType) Dirs() bool {
+	return (l & ListDirs) != 0
+}
+
+// Filter in (in place) to only contain the type of list entry required
+func (l ListType) Filter(in *fs.DirEntries) {
+	if l == ListAll {
+		return
+	}
+	out := (*in)[:0]
+	for _, entry := range *in {
+		switch entry.(type) {
+		case fs.Object:
+			if l.Objects() {
+				out = append(out, entry)
+			}
+		case fs.Directory:
+			if l.Dirs() {
+				out = append(out, entry)
+			}
+		default:
+			fs.Errorf(nil, "Unknown object type %T", entry)
+		}
+	}
+	*in = out
+}
+
+// ListR lists the directory recursively.
+//
+// If includeAll is not set it will use the filters defined.
+//
+// If maxLevel is < 0 then it will recurse indefinitely, else it will
+// only do maxLevel levels.
+//
+// If synthesizeDirs is set then for bucket based remotes it will
+// synthesize directories from the file structure. This uses extra
+// memory so don't set this if you don't need directories; likewise do
+// set this if you are interested in directories.
+//
+// It calls fn for each tranche of DirEntries read. Note that these
+// don't necessarily represent a directory
+//
+// Note that fn will not be called concurrently whereas the directory
+// listing will proceed concurrently.
+//
+// Directories are not listed in any particular order so you can't
+// rely on parents coming before children or alphabetical ordering
+//
+// This is implemented by using ListR on the backend if possible and
+// efficient, otherwise by Walk.
+//
+// NB (f, path) to be replaced by fs.Dir at some point
+func ListR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error {
+	fi := filter.GetConfig(ctx)
+	// FIXME disable this with --no-fast-list ??? `--disable ListR` will do it...
+	doListR := f.Features().ListR
+
+	// Can't use ListR if...
+ if doListR == nil || // ...no ListR + fi.HaveFilesFrom() || // ...using --files-from + maxLevel >= 0 || // ...using bounded recursion + len(fi.Opt.ExcludeFile) > 0 || // ...using --exclude-file + fi.UsesDirectoryFilters() { // ...using any directory filters + return listRwalk(ctx, f, path, includeAll, maxLevel, listType, fn) + } + return listR(ctx, f, path, includeAll, listType, fn, doListR, listType.Dirs() && f.Features().BucketBased) +} + +// listRwalk walks the file tree for ListR using Walk +func listRwalk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listType ListType, fn fs.ListRCallback) error { + var listErr error + walkErr := Walk(ctx, f, path, includeAll, maxLevel, func(path string, entries fs.DirEntries, err error) error { + // Carry on listing but return the error at the end + if err != nil { + listErr = err + err = fs.CountError(err) + fs.Errorf(path, "error listing: %v", err) + return nil + } + listType.Filter(&entries) + return fn(entries) + }) + if listErr != nil { + return listErr + } + return walkErr +} + +// dirMap keeps track of directories made for bucket based remotes. +// true => directory has been sent +// false => directory has been seen but not sent +type dirMap struct { + mu sync.Mutex + m map[string]bool + root string +} + +// make a new dirMap +func newDirMap(root string) *dirMap { + return &dirMap{ + m: make(map[string]bool), + root: root, + } +} + +// add adds a directory and parents with sent +func (dm *dirMap) add(dir string, sent bool) { + for { + if dir == dm.root || dir == "" { + return + } + currentSent, found := dm.m[dir] + if found { + // If it has been sent already then nothing more to do + if currentSent { + return + } + // If not sent already don't override + if !sent { + return + } + // currentSent == false && sent == true so needs overriding + } + dm.m[dir] = sent + // Add parents in as unsent + dir = parentDir(dir) + sent = false + } +} + +// parentDir finds the parent directory of path +func parentDir(entryPath string) string { + dirPath := path.Dir(entryPath) + if dirPath == "." 
{ + dirPath = "" + } + return dirPath +} + +// add all the directories in entries and their parents to the dirMap +func (dm *dirMap) addEntries(entries fs.DirEntries) error { + dm.mu.Lock() + defer dm.mu.Unlock() + for _, entry := range entries { + switch x := entry.(type) { + case fs.Object: + dm.add(parentDir(x.Remote()), false) + case fs.Directory: + dm.add(x.Remote(), true) + default: + return errors.Errorf("unknown object type %T", entry) + } + } + return nil +} + +// send any missing parents to fn +func (dm *dirMap) sendEntries(fn fs.ListRCallback) (err error) { + // Count the strings first so we allocate the minimum memory + n := 0 + for _, sent := range dm.m { + if !sent { + n++ + } + } + if n == 0 { + return nil + } + dirs := make([]string, 0, n) + // Fill the dirs up then sort it + for dir, sent := range dm.m { + if !sent { + dirs = append(dirs, dir) + } + } + sort.Strings(dirs) + // Now convert to bulkier Dir in batches and send + now := time.Now() + list := NewListRHelper(fn) + for _, dir := range dirs { + err = list.Add(fs.NewDir(dir, now)) + if err != nil { + return err + } + } + return list.Flush() +} + +// listR walks the file tree using ListR +func listR(ctx context.Context, f fs.Fs, path string, includeAll bool, listType ListType, fn fs.ListRCallback, doListR fs.ListRFn, synthesizeDirs bool) error { + fi := filter.GetConfig(ctx) + includeDirectory := fi.IncludeDirectory(ctx, f) + if !includeAll { + includeAll = fi.InActive() + } + var dm *dirMap + if synthesizeDirs { + dm = newDirMap(path) + } + var mu sync.Mutex + err := doListR(ctx, path, func(entries fs.DirEntries) (err error) { + if synthesizeDirs { + err = dm.addEntries(entries) + if err != nil { + return err + } + } + listType.Filter(&entries) + if !includeAll { + filteredEntries := entries[:0] + for _, entry := range entries { + var include bool + switch x := entry.(type) { + case fs.Object: + include = fi.IncludeObject(ctx, x) + case fs.Directory: + include, err = includeDirectory(x.Remote()) + if err != nil { + return err + } + default: + return errors.Errorf("unknown object type %T", entry) + } + if include { + filteredEntries = append(filteredEntries, entry) + } else { + fs.Debugf(entry, "Excluded from sync (and deletion)") + } + } + entries = filteredEntries + } + mu.Lock() + defer mu.Unlock() + return fn(entries) + }) + if err != nil { + return err + } + if synthesizeDirs { + err = dm.sendEntries(fn) + if err != nil { + return err + } + } + return nil +} + +// walkListDirSorted lists the directory. +// +// It implements Walk using non recursive directory listing. +func walkListDirSorted(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error { + return walk(ctx, f, path, includeAll, maxLevel, fn, list.DirSorted) +} + +// walkListR lists the directory. +// +// It implements Walk using recursive directory listing if +// available, or returns ErrorCantListR if not. 
+func walkListR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func) error {
+	listR := f.Features().ListR
+	if listR == nil {
+		return ErrorCantListR
+	}
+	return walkR(ctx, f, path, includeAll, maxLevel, fn, listR)
+}
+
+type listDirFunc func(ctx context.Context, fs fs.Fs, includeAll bool, dir string) (entries fs.DirEntries, err error)
+
+func walk(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listDir listDirFunc) error {
+	var (
+		wg         sync.WaitGroup      // sync closing of go routines
+		traversing sync.WaitGroup      // running directory traversals
+		doClose    sync.Once           // close the channel once
+		mu         sync.Mutex          // stop fn being called concurrently
+		ci         = fs.GetConfig(ctx) // current config
+	)
+	// listJob describes a directory listing that needs to be done
+	type listJob struct {
+		remote string
+		depth  int
+	}
+
+	in := make(chan listJob, ci.Checkers)
+	errs := make(chan error, 1)
+	quit := make(chan struct{})
+	closeQuit := func() {
+		doClose.Do(func() {
+			close(quit)
+			go func() {
+				for range in {
+					traversing.Done()
+				}
+			}()
+		})
+	}
+	for i := 0; i < ci.Checkers; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for {
+				select {
+				case job, ok := <-in:
+					if !ok {
+						return
+					}
+					entries, err := listDir(ctx, f, includeAll, job.remote)
+					var jobs []listJob
+					if err == nil && job.depth != 0 {
+						entries.ForDir(func(dir fs.Directory) {
+							// Recurse for the directory
+							jobs = append(jobs, listJob{
+								remote: dir.Remote(),
+								depth:  job.depth - 1,
+							})
+						})
+					}
+					mu.Lock()
+					err = fn(job.remote, entries, err)
+					mu.Unlock()
+					// NB once we have passed entries to fn we mustn't touch it again
+					if err != nil && err != ErrorSkipDir {
+						traversing.Done()
+						err = fs.CountError(err)
+						fs.Errorf(job.remote, "error listing: %v", err)
+						closeQuit()
+						// Send error to error channel if space
+						select {
+						case errs <- err:
+						default:
+						}
+						continue
+					}
+					if err == nil && len(jobs) > 0 {
+						traversing.Add(len(jobs))
+						go func() {
+							// Now we have traversed this directory, send these
+							// jobs off for traversal in the background
+							for _, newJob := range jobs {
+								in <- newJob
+							}
+						}()
+					}
+					traversing.Done()
+				case <-quit:
+					return
+				}
+			}
+		}()
+	}
+	// Start the process
+	traversing.Add(1)
+	in <- listJob{
+		remote: path,
+		depth:  maxLevel - 1,
+	}
+	traversing.Wait()
+	close(in)
+	wg.Wait()
+	close(errs)
+	// return the first error returned or nil
+	return <-errs
+}
+
+func walkRDirTree(ctx context.Context, f fs.Fs, startPath string, includeAll bool, maxLevel int, listR fs.ListRFn) (dirtree.DirTree, error) {
+	fi := filter.GetConfig(ctx)
+	dirs := dirtree.New()
+	// Entries can come in arbitrary order. We use toPrune to keep
+	// all directories to exclude later.
+ toPrune := make(map[string]bool) + includeDirectory := fi.IncludeDirectory(ctx, f) + var mu sync.Mutex + err := listR(ctx, startPath, func(entries fs.DirEntries) error { + mu.Lock() + defer mu.Unlock() + for _, entry := range entries { + slashes := strings.Count(entry.Remote(), "/") + switch x := entry.(type) { + case fs.Object: + // Make sure we don't delete excluded files if not required + if includeAll || fi.IncludeObject(ctx, x) { + if maxLevel < 0 || slashes <= maxLevel-1 { + dirs.Add(x) + } else { + // Make sure we include any parent directories of excluded objects + dirPath := x.Remote() + for ; slashes > maxLevel-1; slashes-- { + dirPath = parentDir(dirPath) + } + dirs.CheckParent(startPath, dirPath) + } + } else { + fs.Debugf(x, "Excluded from sync (and deletion)") + } + // Check if we need to prune a directory later. + if !includeAll && len(fi.Opt.ExcludeFile) > 0 { + basename := path.Base(x.Remote()) + if basename == fi.Opt.ExcludeFile { + excludeDir := parentDir(x.Remote()) + toPrune[excludeDir] = true + fs.Debugf(basename, "Excluded from sync (and deletion) based on exclude file") + } + } + case fs.Directory: + inc, err := includeDirectory(x.Remote()) + if err != nil { + return err + } + if includeAll || inc { + if maxLevel < 0 || slashes <= maxLevel-1 { + if slashes == maxLevel-1 { + // Just add the object if at maxLevel + dirs.Add(x) + } else { + dirs.AddDir(x) + } + } + } else { + fs.Debugf(x, "Excluded from sync (and deletion)") + } + default: + return errors.Errorf("unknown object type %T", entry) + } + } + return nil + }) + if err != nil { + return nil, err + } + dirs.CheckParents(startPath) + if len(dirs) == 0 { + dirs[startPath] = nil + } + err = dirs.Prune(toPrune) + if err != nil { + return nil, err + } + dirs.Sort() + return dirs, nil +} + +// Create a DirTree using List +func walkNDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, listDir listDirFunc) (dirtree.DirTree, error) { + dirs := make(dirtree.DirTree) + fn := func(dirPath string, entries fs.DirEntries, err error) error { + if err == nil { + dirs[dirPath] = entries + } + return err + } + err := walk(ctx, f, path, includeAll, maxLevel, fn, listDir) + if err != nil { + return nil, err + } + return dirs, nil +} + +// NewDirTree returns a DirTree filled with the directory listing +// using the parameters supplied. +// +// If includeAll is not set it will use the filters defined. +// +// If maxLevel is < 0 then it will recurse indefinitely, else it will +// only do maxLevel levels. +// +// This is implemented by WalkR if f supports ListR and level > 1, or +// WalkN otherwise. +// +// If --files-from and --no-traverse is set then a DirTree will be +// constructed with just those files in. 
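+//
+// A usage sketch (f is an assumed fs.Fs; maxLevel -1 means unlimited depth):
+//
+//	tree, err := NewDirTree(ctx, f, "", false, -1)
+//	if err == nil {
+//		for dir, entries := range tree {
+//			fs.Logf(nil, "%q: %d entries", dir, len(entries))
+//		}
+//	}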
+// +// NB (f, path) to be replaced by fs.Dir at some point +func NewDirTree(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (dirtree.DirTree, error) { + ci := fs.GetConfig(ctx) + fi := filter.GetConfig(ctx) + // if --no-traverse and --files-from build DirTree just from files + if ci.NoTraverse && fi.HaveFilesFrom() { + return walkRDirTree(ctx, f, path, includeAll, maxLevel, fi.MakeListR(ctx, f.NewObject)) + } + // if have ListR; and recursing; and not using --files-from; then build a DirTree with ListR + if ListR := f.Features().ListR; (maxLevel < 0 || maxLevel > 1) && ListR != nil && !fi.HaveFilesFrom() { + return walkRDirTree(ctx, f, path, includeAll, maxLevel, ListR) + } + // otherwise just use List + return walkNDirTree(ctx, f, path, includeAll, maxLevel, list.DirSorted) +} + +func walkR(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int, fn Func, listR fs.ListRFn) error { + dirs, err := walkRDirTree(ctx, f, path, includeAll, maxLevel, listR) + if err != nil { + return err + } + skipping := false + skipPrefix := "" + emptyDir := fs.DirEntries{} + for _, dirPath := range dirs.Dirs() { + if skipping { + // Skip over directories as required + if strings.HasPrefix(dirPath, skipPrefix) { + continue + } + skipping = false + } + entries := dirs[dirPath] + if entries == nil { + entries = emptyDir + } + err = fn(dirPath, entries, nil) + if err == ErrorSkipDir { + skipping = true + skipPrefix = dirPath + if skipPrefix != "" { + skipPrefix += "/" + } + } else if err != nil { + return err + } + } + return nil +} + +// GetAll runs ListR getting all the results +func GetAll(ctx context.Context, f fs.Fs, path string, includeAll bool, maxLevel int) (objs []fs.Object, dirs []fs.Directory, err error) { + err = ListR(ctx, f, path, includeAll, maxLevel, ListAll, func(entries fs.DirEntries) error { + for _, entry := range entries { + switch x := entry.(type) { + case fs.Object: + objs = append(objs, x) + case fs.Directory: + dirs = append(dirs, x) + } + } + return nil + }) + return +} + +// ListRHelper is used in the implementation of ListR to accumulate DirEntries +type ListRHelper struct { + callback fs.ListRCallback + entries fs.DirEntries +} + +// NewListRHelper should be called from ListR with the callback passed in +func NewListRHelper(callback fs.ListRCallback) *ListRHelper { + return &ListRHelper{ + callback: callback, + } +} + +// send sends the stored entries to the callback if there are >= max +// entries. +func (lh *ListRHelper) send(max int) (err error) { + if len(lh.entries) >= max { + err = lh.callback(lh.entries) + lh.entries = lh.entries[:0] + } + return err +} + +// Add an entry to the stored entries and send them if there are more +// than a certain amount +func (lh *ListRHelper) Add(entry fs.DirEntry) error { + if entry == nil { + return nil + } + lh.entries = append(lh.entries, entry) + return lh.send(100) +} + +// Flush the stored entries (if any) sending them to the callback +func (lh *ListRHelper) Flush() error { + return lh.send(1) +} diff --git a/vendor/github.com/rclone/rclone/lib/atexit/atexit.go b/vendor/github.com/rclone/rclone/lib/atexit/atexit.go new file mode 100644 index 00000000000..d0c3b08ad1e --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/atexit/atexit.go @@ -0,0 +1,107 @@ +// Package atexit provides handling for functions you want called when +// the program exits unexpectedly due to a signal. +// +// You should also make sure you call Run in the normal exit path. 
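+//
+// A minimal usage sketch (the handler body is illustrative):
+//
+//	handle := atexit.Register(func() { cleanup() })
+//	defer atexit.Unregister(handle)
+//	defer atexit.Run() // also run handlers on the normal exit path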
+package atexit + +import ( + "os" + "os/signal" + "sync" + "sync/atomic" + + "github.com/rclone/rclone/fs" +) + +var ( + fns = make(map[FnHandle]bool) + fnsMutex sync.Mutex + exitChan chan os.Signal + exitOnce sync.Once + registerOnce sync.Once + signalled int32 +) + +// FnHandle is the type of the handle returned by function `Register` +// that can be used to unregister an at-exit function +type FnHandle *func() + +// Register a function to be called on exit. +// Returns a handle which can be used to unregister the function with `Unregister`. +func Register(fn func()) FnHandle { + fnsMutex.Lock() + fns[&fn] = true + fnsMutex.Unlock() + + // Run AtExit handlers on exitSignals so everything gets tidied up properly + registerOnce.Do(func() { + exitChan = make(chan os.Signal, 1) + signal.Notify(exitChan, exitSignals...) + go func() { + sig := <-exitChan + if sig == nil { + return + } + atomic.StoreInt32(&signalled, 1) + fs.Infof(nil, "Signal received: %s", sig) + Run() + fs.Infof(nil, "Exiting...") + os.Exit(0) + }() + }) + + return &fn +} + +// Signalled returns true if an exit signal has been received +func Signalled() bool { + return atomic.LoadInt32(&signalled) != 0 +} + +// Unregister a function using the handle returned by `Register` +func Unregister(handle FnHandle) { + fnsMutex.Lock() + defer fnsMutex.Unlock() + delete(fns, handle) +} + +// IgnoreSignals disables the signal handler and prevents Run from being executed automatically +func IgnoreSignals() { + registerOnce.Do(func() {}) + if exitChan != nil { + signal.Stop(exitChan) + close(exitChan) + exitChan = nil + } +} + +// Run all the at exit functions if they haven't been run already +func Run() { + exitOnce.Do(func() { + fnsMutex.Lock() + defer fnsMutex.Unlock() + for fnHandle := range fns { + (*fnHandle)() + } + }) +} + +// OnError registers fn with atexit and returns a function which +// runs fn() if *perr != nil and deregisters fn +// +// It should be used in a defer statement normally so +// +// defer OnError(&err, cancelFunc)() +// +// So cancelFunc will be run if the function exits with an error or +// at exit. 
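+//
+// For example (copyFile is an illustrative caller):
+//
+//	func copyFile(ctx context.Context) (err error) {
+//		ctx, cancel := context.WithCancel(ctx)
+//		defer OnError(&err, cancel)()
+//		// ... do the work using ctx ...
+//		return err
+//	}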
+func OnError(perr *error, fn func()) func() {
+	handle := Register(fn)
+	return func() {
+		defer Unregister(handle)
+		if *perr != nil {
+			fn()
+		}
+	}
+}
diff --git a/vendor/github.com/rclone/rclone/lib/atexit/atexit_other.go b/vendor/github.com/rclone/rclone/lib/atexit/atexit_other.go
new file mode 100644
index 00000000000..15faa7448b4
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/atexit/atexit_other.go
@@ -0,0 +1,9 @@
+//+build windows plan9
+
+package atexit
+
+import (
+	"os"
+)
+
+var exitSignals = []os.Signal{os.Interrupt}
diff --git a/vendor/github.com/rclone/rclone/lib/atexit/atexit_unix.go b/vendor/github.com/rclone/rclone/lib/atexit/atexit_unix.go
new file mode 100644
index 00000000000..acebfaf1c34
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/atexit/atexit_unix.go
@@ -0,0 +1,10 @@
+//+build !windows,!plan9
+
+package atexit
+
+import (
+	"os"
+	"syscall"
+)
+
+var exitSignals = []os.Signal{syscall.SIGINT, syscall.SIGTERM} // Not syscall.SIGQUIT as we want the default behaviour
diff --git a/vendor/github.com/rclone/rclone/lib/bucket/bucket.go b/vendor/github.com/rclone/rclone/lib/bucket/bucket.go
new file mode 100644
index 00000000000..a9c63b83dcf
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/bucket/bucket.go
@@ -0,0 +1,166 @@
+// Package bucket contains utilities for managing bucket based backends
+package bucket
+
+import (
+	"errors"
+	"strings"
+	"sync"
+)
+
+var (
+	// ErrAlreadyDeleted is returned when an already deleted
+	// bucket is passed to Remove
+	ErrAlreadyDeleted = errors.New("bucket already deleted")
+)
+
+// Split takes an absolute path which includes the bucket and
+// splits it into a bucket and a path in that bucket (bucketPath)
+func Split(absPath string) (bucket, bucketPath string) {
+	// No bucket
+	if absPath == "" {
+		return "", ""
+	}
+	slash := strings.IndexRune(absPath, '/')
+	// Bucket but no path
+	if slash < 0 {
+		return absPath, ""
+	}
+	return absPath[:slash], absPath[slash+1:]
+}
+
+// Cache stores whether buckets are available and their IDs
+type Cache struct {
+	mu       sync.Mutex      // mutex to protect created and deleted
+	status   map[string]bool // true if we have created the container, false if deleted
+	createMu sync.Mutex      // mutex to protect against simultaneous Create
+	removeMu sync.Mutex      // mutex to protect against simultaneous Remove
+}
+
+// NewCache creates an empty Cache
+func NewCache() *Cache {
+	return &Cache{
+		status: make(map[string]bool, 1),
+	}
+}
+
+// MarkOK marks the bucket as being present
+func (c *Cache) MarkOK(bucket string) {
+	if bucket != "" {
+		c.mu.Lock()
+		c.status[bucket] = true
+		c.mu.Unlock()
+	}
+}
+
+// MarkDeleted marks the bucket as being deleted
+func (c *Cache) MarkDeleted(bucket string) {
+	if bucket != "" {
+		c.mu.Lock()
+		c.status[bucket] = false
+		c.mu.Unlock()
+	}
+}
+
+type (
+	// ExistsFn should be passed to Create to see if a bucket
+	// exists or not
+	ExistsFn func() (found bool, err error)
+
+	// CreateFn should be passed to Create to make a bucket
+	CreateFn func() error
+)
+
+// Create the bucket with create() if it doesn't exist
+//
+// If exists is set then if the bucket has been deleted it will call
+// exists() to see if it still exists.
+//
+// If create returns an error we assume the bucket was not created
+func (c *Cache) Create(bucket string, create CreateFn, exists ExistsFn) (err error) {
+	// if we are at the root, then it is OK
+	if bucket == "" {
+		return nil
+	}
+
+	c.createMu.Lock()
+	defer c.createMu.Unlock()
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// if have exists function and bucket has been deleted, check
+	// it still exists
+	if created, ok := c.status[bucket]; ok && !created && exists != nil {
+		found, err := exists()
+		if err == nil {
+			c.status[bucket] = found
+		}
+		if err != nil || found {
+			return err
+		}
+	}
+
+	// If bucket already exists then it is OK
+	if created, ok := c.status[bucket]; ok && created {
+		return nil
+	}
+
+	// Create the bucket
+	c.mu.Unlock()
+	err = create()
+	c.mu.Lock()
+	if err != nil {
+		return err
+	}
+
+	// Mark OK if successful
+	c.status[bucket] = true
+	return nil
+}
+
+// Remove the bucket with f if it exists
+//
+// If f returns an error we assume the bucket was not removed
+//
+// If the bucket has already been deleted it returns ErrAlreadyDeleted
+func (c *Cache) Remove(bucket string, f func() error) error {
+	// if we are at the root, then it is OK
+	if bucket == "" {
+		return nil
+	}
+
+	c.removeMu.Lock()
+	defer c.removeMu.Unlock()
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// If bucket already deleted then it is OK
+	if created, ok := c.status[bucket]; ok && !created {
+		return ErrAlreadyDeleted
+	}
+
+	// Remove the bucket
+	c.mu.Unlock()
+	err := f()
+	c.mu.Lock()
+	if err != nil {
+		return err
+	}
+
+	// Mark removed if successful
+	c.status[bucket] = false
+	return err
+}
+
+// IsDeleted returns true if the bucket has definitely been deleted by
+// us, false otherwise.
+func (c *Cache) IsDeleted(bucket string) bool {
+	c.mu.Lock()
+	created, ok := c.status[bucket]
+	c.mu.Unlock()
+	// if status unknown then return false
+	if !ok {
+		return false
+	}
+	return !created
+}
diff --git a/vendor/github.com/rclone/rclone/lib/cache/cache.go b/vendor/github.com/rclone/rclone/lib/cache/cache.go
new file mode 100644
index 00000000000..590a14d7f22
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/cache/cache.go
@@ -0,0 +1,185 @@
+// Package cache implements a simple cache where the entries are
+// expired after a given time (5 minutes of disuse by default).
+package cache
+
+import (
+	"sync"
+	"time"
+)
+
+// Cache holds values indexed by string, but expired after a given time (5
+// minutes by default).
+type Cache struct {
+	mu             sync.Mutex
+	cache          map[string]*cacheEntry
+	expireRunning  bool
+	expireDuration time.Duration // expire the cache entry when it is older than this
+	expireInterval time.Duration // interval to run the cache expire
+}
+
+// New creates a new cache with the default expire duration and interval
+func New() *Cache {
+	return &Cache{
+		cache:          map[string]*cacheEntry{},
+		expireRunning:  false,
+		expireDuration: 300 * time.Second,
+		expireInterval: 60 * time.Second,
+	}
+}
+
+// NewPermanentCache creates a new cache that never expires
+func NewPermanentCache() *Cache {
+	return &Cache{
+		cache:         map[string]*cacheEntry{},
+		expireRunning: true,
+	}
+}
+
+// cacheEntry is stored in the cache
+type cacheEntry struct {
+	value    interface{} // cached item
+	err      error       // creation error
+	key      string      // key
+	lastUsed time.Time   // time used for expiry
+	pinCount int         // non zero if the entry should not be removed
+}
+
+// CreateFunc is called to create new values. If the create function
If the create function +// returns an error it will be cached if ok is true, otherwise the +// error will just be returned, allowing negative caching if required. +type CreateFunc func(key string) (value interface{}, ok bool, error error) + +// used marks an entry as accessed now and kicks the expire timer off +// should be called with the lock held +func (c *Cache) used(entry *cacheEntry) { + entry.lastUsed = time.Now() + if !c.expireRunning { + time.AfterFunc(c.expireInterval, c.cacheExpire) + c.expireRunning = true + } +} + +// Get gets a value named key either from the cache or creates it +// afresh with the create function. +func (c *Cache) Get(key string, create CreateFunc) (value interface{}, err error) { + c.mu.Lock() + entry, ok := c.cache[key] + if !ok { + c.mu.Unlock() // Unlock in case Get is called recursively + value, ok, err = create(key) + if err != nil && !ok { + return value, err + } + entry = &cacheEntry{ + value: value, + key: key, + err: err, + } + c.mu.Lock() + c.cache[key] = entry + } + defer c.mu.Unlock() + c.used(entry) + return entry.value, entry.err +} + +func (c *Cache) addPin(key string, count int) { + c.mu.Lock() + entry, ok := c.cache[key] + if ok { + entry.pinCount += count + c.used(entry) + } + c.mu.Unlock() +} + +// Pin a value in the cache if it exists +func (c *Cache) Pin(key string) { + c.addPin(key, 1) +} + +// Unpin a value in the cache if it exists +func (c *Cache) Unpin(key string) { + c.addPin(key, -1) +} + +// Put puts a value named key into the cache +func (c *Cache) Put(key string, value interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + entry := &cacheEntry{ + value: value, + key: key, + } + c.used(entry) + c.cache[key] = entry +} + +// GetMaybe returns the key and true if found, nil and false if not +func (c *Cache) GetMaybe(key string) (value interface{}, found bool) { + c.mu.Lock() + defer c.mu.Unlock() + entry, found := c.cache[key] + if !found { + return nil, found + } + c.used(entry) + return entry.value, found +} + +// Rename renames the item at oldKey to newKey. +// +// If there was an existing item at newKey then it takes precedence +// and is returned otherwise the item (if any) at oldKey is returned. 
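[Aside, not part of the vendored diff: a sketch of the Get/CreateFunc contract described above. newConn is a hypothetical constructor; returning ok=false means a failed create is returned but not cached, i.e. no negative caching.]

	c := cache.New()
	conn, err := c.Get("remote:path", func(key string) (interface{}, bool, error) {
		conn, err := newConn(key) // hypothetical expensive constructor
		return conn, false, err   // ok=false: do not cache the error
	})
	c.Pin("remote:path")       // protect the entry from expiry while in use
	defer c.Unpin("remote:path")
	_, _ = conn, err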
+func (c *Cache) Rename(oldKey, newKey string) (value interface{}, found bool) { + c.mu.Lock() + if newEntry, newFound := c.cache[newKey]; newFound { + // If new entry is found use that + delete(c.cache, oldKey) + value, found = newEntry.value, newFound + c.used(newEntry) + } else if oldEntry, oldFound := c.cache[oldKey]; oldFound { + // If old entry is found rename it to new and use that + c.cache[newKey] = oldEntry + delete(c.cache, oldKey) + c.used(oldEntry) + value, found = oldEntry.value, oldFound + } + c.mu.Unlock() + return value, found +} + +// cacheExpire expires any entries that haven't been used recently +func (c *Cache) cacheExpire() { + c.mu.Lock() + defer c.mu.Unlock() + now := time.Now() + for key, entry := range c.cache { + if entry.pinCount <= 0 && now.Sub(entry.lastUsed) > c.expireDuration { + delete(c.cache, key) + } + } + if len(c.cache) != 0 { + time.AfterFunc(c.expireInterval, c.cacheExpire) + c.expireRunning = true + } else { + c.expireRunning = false + } +} + +// Clear removes everything from the cache +func (c *Cache) Clear() { + c.mu.Lock() + for k := range c.cache { + delete(c.cache, k) + } + c.mu.Unlock() +} + +// Entries returns the number of entries in the cache +func (c *Cache) Entries() int { + c.mu.Lock() + entries := len(c.cache) + c.mu.Unlock() + return entries +} diff --git a/vendor/github.com/rclone/rclone/lib/encoder/encoder.go b/vendor/github.com/rclone/rclone/lib/encoder/encoder.go new file mode 100644 index 00000000000..2724af73b3d --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/encoder/encoder.go @@ -0,0 +1,1159 @@ +/* +Translate file names for usage on restrictive storage systems + +The restricted set of characters are mapped to a unicode equivalent version +(most to their FULLWIDTH variant) to increase compatibility with other +storage systems. +See: http://unicode-search.net/unicode-namesearch.pl?term=FULLWIDTH + +Encoders will also quote reserved characters to differentiate between +the raw and encoded forms. +*/ + +package encoder + +import ( + "bytes" + "fmt" + "io" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +const ( + // adding this to any printable ASCII character turns it into the + // FULLWIDTH variant + fullOffset = 0xFEE0 + // the first rune of the SYMBOL FOR block for control characters + symbolOffset = '␀' // SYMBOL FOR NULL + // QuoteRune is the rune used for quoting reserved characters + QuoteRune = '‛' // SINGLE HIGH-REVERSED-9 QUOTATION MARK +) + +// NB keep the tests in fstests/fstests/fstests.go FsEncoding up to date with this +// NB keep the aliases up to date below also + +// Possible flags for the MultiEncoder +const ( + EncodeZero MultiEncoder = 0 // NUL(0x00) + EncodeSlash MultiEncoder = 1 << iota // / + EncodeLtGt // <> + EncodeDoubleQuote // " + EncodeSingleQuote // ' + EncodeBackQuote // ` + EncodeDollar // $ + EncodeColon // : + EncodeQuestion // ? + EncodeAsterisk // * + EncodePipe // | + EncodeHash // # + EncodePercent // % + EncodeBackSlash // \ + EncodeCrLf // CR(0x0D), LF(0x0A) + EncodeDel // DEL(0x7F) + EncodeCtl // CTRL(0x01-0x1F) + EncodeLeftSpace // Leading SPACE + EncodeLeftPeriod // Leading . + EncodeLeftTilde // Leading ~ + EncodeLeftCrLfHtVt // Leading CR LF HT VT + EncodeRightSpace // Trailing SPACE + EncodeRightPeriod // Trailing . + EncodeRightCrLfHtVt // Trailing CR LF HT VT + EncodeInvalidUtf8 // Invalid UTF-8 bytes + EncodeDot // . and .. 
names + + // Synthetic + EncodeWin = EncodeColon | EncodeQuestion | EncodeDoubleQuote | EncodeAsterisk | EncodeLtGt | EncodePipe // :?"*<>| + EncodeHashPercent = EncodeHash | EncodePercent // #% +) + +// Has returns true if flag is contained in mask +func (mask MultiEncoder) Has(flag MultiEncoder) bool { + return mask&flag != 0 +} + +// Encoder can transform names to and from the original and translated version. +type Encoder interface { + // Encode takes a raw name and substitutes any reserved characters and + // patterns in it + Encode(string) string + // Decode takes a name and undoes any substitutions made by Encode + Decode(string) string + + // FromStandardPath takes a / separated path in Standard encoding + // and converts it to a / separated path in this encoding. + FromStandardPath(string) string + // FromStandardName takes name in Standard encoding and converts + // it in this encoding. + FromStandardName(string) string + // ToStandardPath takes a / separated path in this encoding + // and converts it to a / separated path in Standard encoding. + ToStandardPath(string) string + // ToStandardName takes name in this encoding and converts + // it in Standard encoding. + ToStandardName(string) string +} + +// MultiEncoder is a configurable Encoder. The Encode* constants in this +// package can be combined using bitwise or (|) to enable handling of multiple +// character classes +type MultiEncoder uint + +// Aliases maps encodings to names and vice versa +var ( + encodingToName = map[MultiEncoder]string{} + nameToEncoding = map[string]MultiEncoder{} +) + +// alias adds an alias for MultiEncoder.String() and MultiEncoder.Set() +func alias(name string, mask MultiEncoder) { + nameToEncoding[name] = mask + // don't overwrite existing reverse translations + if _, ok := encodingToName[mask]; !ok { + encodingToName[mask] = name + } +} + +func init() { + alias("None", EncodeZero) + alias("Slash", EncodeSlash) + alias("LtGt", EncodeLtGt) + alias("DoubleQuote", EncodeDoubleQuote) + alias("SingleQuote", EncodeSingleQuote) + alias("BackQuote", EncodeBackQuote) + alias("Dollar", EncodeDollar) + alias("Colon", EncodeColon) + alias("Question", EncodeQuestion) + alias("Asterisk", EncodeAsterisk) + alias("Pipe", EncodePipe) + alias("Hash", EncodeHash) + alias("Percent", EncodePercent) + alias("BackSlash", EncodeBackSlash) + alias("CrLf", EncodeCrLf) + alias("Del", EncodeDel) + alias("Ctl", EncodeCtl) + alias("LeftSpace", EncodeLeftSpace) + alias("LeftPeriod", EncodeLeftPeriod) + alias("LeftTilde", EncodeLeftTilde) + alias("LeftCrLfHtVt", EncodeLeftCrLfHtVt) + alias("RightSpace", EncodeRightSpace) + alias("RightPeriod", EncodeRightPeriod) + alias("RightCrLfHtVt", EncodeRightCrLfHtVt) + alias("InvalidUtf8", EncodeInvalidUtf8) + alias("Dot", EncodeDot) +} + +// validStrings returns all the valid MultiEncoder strings +func validStrings() string { + var out []string + for k := range nameToEncoding { + out = append(out, k) + } + sort.Strings(out) + return strings.Join(out, ", ") +} + +// String converts the MultiEncoder into text +func (mask MultiEncoder) String() string { + // See if there is an exact translation - if so return that + if name, ok := encodingToName[mask]; ok { + return name + } + var out []string + // Otherwise decompose bit by bit + for bit := MultiEncoder(1); bit != 0; bit *= 2 { + if (mask & bit) != 0 { + if name, ok := encodingToName[bit]; ok { + out = append(out, name) + } else { + out = append(out, fmt.Sprintf("0x%X", uint(bit))) + } + } + } + return strings.Join(out, ",") +} + +// 
Set converts a string into a MultiEncoder
+func (mask *MultiEncoder) Set(in string) error {
+	var out MultiEncoder
+	parts := strings.Split(in, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if bits, ok := nameToEncoding[part]; ok {
+			out |= bits
+		} else {
+			i, err := strconv.ParseInt(part, 0, 64)
+			if err != nil {
+				return fmt.Errorf("bad encoding %q: possible values are: %s", part, validStrings())
+			}
+			out |= MultiEncoder(i)
+		}
+	}
+	*mask = out
+	return nil
+}
+
+// Type returns a textual type of the MultiEncoder to satisfy the pflag.Value interface
+func (mask MultiEncoder) Type() string {
+	return "Encoding"
+}
+
+// Scan implements the fmt.Scanner interface
+func (mask *MultiEncoder) Scan(s fmt.ScanState, ch rune) error {
+	token, err := s.Token(true, nil)
+	if err != nil {
+		return err
+	}
+	return mask.Set(string(token))
+}
+
+// Encode takes a raw name and substitutes any reserved characters and
+// patterns in it
+func (mask MultiEncoder) Encode(in string) string {
+	if in == "" {
+		return ""
+	}
+
+	if mask.Has(EncodeDot) {
+		switch in {
+		case ".":
+			return "．"
+		case "..":
+			return "．．"
+		case "．":
+			return string(QuoteRune) + "．"
+		case "．．":
+			return string(QuoteRune) + "．" + string(QuoteRune) + "．"
+		}
+	}
+
+	// handle prefix only replacements
+	prefix := ""
+	if mask.Has(EncodeLeftSpace) { // Leading SPACE
+		if in[0] == ' ' {
+			prefix, in = "␠", in[1:] // SYMBOL FOR SPACE
+		} else if r, l := utf8.DecodeRuneInString(in); r == '␠' { // SYMBOL FOR SPACE
+			prefix, in = string(QuoteRune)+"␠", in[l:] // SYMBOL FOR SPACE
+		}
+	}
+	if mask.Has(EncodeLeftPeriod) && prefix == "" { // Leading PERIOD
+		if in[0] == '.' {
+			prefix, in = "．", in[1:] // FULLWIDTH FULL STOP
+		} else if r, l := utf8.DecodeRuneInString(in); r == '．' { // FULLWIDTH FULL STOP
+			prefix, in = string(QuoteRune)+"．", in[l:] // FULLWIDTH FULL STOP
+		}
+	}
+	if mask.Has(EncodeLeftTilde) && prefix == "" { // Leading ~
+		if in[0] == '~' {
+			prefix, in = string('~'+fullOffset), in[1:] // FULLWIDTH TILDE
+		} else if r, l := utf8.DecodeRuneInString(in); r == '~'+fullOffset {
+			prefix, in = string(QuoteRune)+string('~'+fullOffset), in[l:] // FULLWIDTH TILDE
+		}
+	}
+	if mask.Has(EncodeLeftCrLfHtVt) && prefix == "" { // Leading CR LF HT VT
+		switch c := in[0]; c {
+		case '\t', '\n', '\v', '\r':
+			prefix, in = string('␀'+rune(c)), in[1:] // SYMBOL FOR NULL
+		default:
+			switch r, l := utf8.DecodeRuneInString(in); r {
+			case '␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r':
+				prefix, in = string(QuoteRune)+string(r), in[l:]
+			}
+		}
+	}
+	// handle suffix only replacements
+	suffix := ""
+	if in != "" {
+		if mask.Has(EncodeRightSpace) { // Trailing SPACE
+			if in[len(in)-1] == ' ' {
+				suffix, in = "␠", in[:len(in)-1] // SYMBOL FOR SPACE
+			} else if r, l := utf8.DecodeLastRuneInString(in); r == '␠' {
+				suffix, in = string(QuoteRune)+"␠", in[:len(in)-l] // SYMBOL FOR SPACE
+			}
+		}
+		if mask.Has(EncodeRightPeriod) && suffix == "" { // Trailing .
+			if in[len(in)-1] == '.' {
+				suffix, in = "．", in[:len(in)-1] // FULLWIDTH FULL STOP
+			} else if r, l := utf8.DecodeLastRuneInString(in); r == '．' {
+				suffix, in = string(QuoteRune)+"．", in[:len(in)-l] // FULLWIDTH FULL STOP
+			}
+		}
+		if mask.Has(EncodeRightCrLfHtVt) && suffix == "" { // Trailing CR LF HT VT
+			switch c := in[len(in)-1]; c {
+			case '\t', '\n', '\v', '\r':
+				suffix, in = string('␀'+rune(c)), in[:len(in)-1] // SYMBOL FOR NULL
+			default:
+				switch r, l := utf8.DecodeLastRuneInString(in); r {
+				case '␀' + '\t', '␀' + '\n', '␀' + '\v', '␀' + '\r':
+					suffix, in = string(QuoteRune)+string(r), in[:len(in)-l]
+				}
+			}
+		}
+	}
+
+	index := 0
+	if prefix == "" && suffix == "" {
+		// find the first rune which (most likely) needs to be replaced
+		index = strings.IndexFunc(in, func(r rune) bool {
+			switch r {
+			case 0, '␀', QuoteRune, utf8.RuneError:
+				return true
+			}
+			if mask.Has(EncodeAsterisk) { // *
+				switch r {
+				case '*', '＊':
+					return true
+				}
+			}
+			if mask.Has(EncodeLtGt) { // <>
+				switch r {
+				case '<', '>', '＜', '＞':
+					return true
+				}
+			}
+			if mask.Has(EncodeQuestion) { // ?
+				switch r {
+				case '?', '？':
+					return true
+				}
+			}
+			if mask.Has(EncodeColon) { // :
+				switch r {
+				case ':', '：':
+					return true
+				}
+			}
+			if mask.Has(EncodePipe) { // |
+				switch r {
+				case '|', '｜':
+					return true
+				}
+			}
+			if mask.Has(EncodeDoubleQuote) { // "
+				switch r {
+				case '"', '＂':
+					return true
+				}
+			}
+			if mask.Has(EncodeSingleQuote) { // '
+				switch r {
+				case '\'', '＇':
+					return true
+				}
+			}
+			if mask.Has(EncodeBackQuote) { // `
+				switch r {
+				case '`', '｀':
+					return true
+				}
+			}
+			if mask.Has(EncodeDollar) { // $
+				switch r {
+				case '$', '＄':
+					return true
+				}
+			}
+			if mask.Has(EncodeSlash) { // /
+				switch r {
+				case '/', '／':
+					return true
+				}
+			}
+			if mask.Has(EncodeBackSlash) { // \
+				switch r {
+				case '\\', '＼':
+					return true
+				}
+			}
+			if mask.Has(EncodeCrLf) { // CR LF
+				switch r {
+				case rune(0x0D), rune(0x0A), '␍', '␊':
+					return true
+				}
+			}
+			if mask.Has(EncodeHash) { // #
+				switch r {
+				case '#', '＃':
+					return true
+				}
+			}
+			if mask.Has(EncodePercent) { // %
+				switch r {
+				case '%', '％':
+					return true
+				}
+			}
+			if mask.Has(EncodeDel) { // DEL(0x7F)
+				switch r {
+				case rune(0x7F), '␡':
+					return true
+				}
+			}
+			if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
+				if r >= 1 && r <= 0x1F {
+					return true
+				} else if r > symbolOffset && r <= symbolOffset+0x1F {
+					return true
+				}
+			}
+			return false
+		})
+	}
+	// nothing to replace, return input
+	if index == -1 {
+		return in
+	}
+
+	var out bytes.Buffer
+	out.Grow(len(in) + len(prefix) + len(suffix))
+	out.WriteString(prefix)
+	// copy the clean part of the input and skip it
+	out.WriteString(in[:index])
+	in = in[index:]
+
+	for i, r := range in {
+		switch r {
+		case 0:
+			out.WriteRune(symbolOffset)
+			continue
+		case '␀', QuoteRune:
+			out.WriteRune(QuoteRune)
+			out.WriteRune(r)
+			continue
+		case utf8.RuneError:
+			if mask.Has(EncodeInvalidUtf8) {
+				// only encode invalid sequences and not utf8.RuneError
+				if i+3 > len(in) || in[i:i+3] != string(utf8.RuneError) {
+					_, l := utf8.DecodeRuneInString(in[i:])
+					appendQuotedBytes(&out, in[i:i+l])
+					continue
+				}
+			} else {
+				// append the real bytes instead of utf8.RuneError
+				_, l := utf8.DecodeRuneInString(in[i:])
+				out.WriteString(in[i : i+l])
+				continue
+			}
+		}
+		if mask.Has(EncodeAsterisk) { // *
+			switch r {
+			case '*':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '＊':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeLtGt) { // <>
+			switch r {
+			case '<', '>':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '＜', '＞':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeQuestion) { // ?
+			switch r {
+			case '?':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '？':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeColon) { // :
+			switch r {
+			case ':':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '：':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodePipe) { // |
+			switch r {
+			case '|':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '｜':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeDoubleQuote) { // "
+			switch r {
+			case '"':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '＂':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeSingleQuote) { // '
+			switch r {
+			case '\'':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '＇':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeBackQuote) { // `
+			switch r {
+			case '`':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '｀':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeDollar) { // $
+			switch r {
+			case '$':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '＄':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeSlash) { // /
+			switch r {
+			case '/':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '／':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeBackSlash) { // \
+			switch r {
+			case '\\':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '＼':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeCrLf) { // CR LF
+			switch r {
+			case rune(0x0D), rune(0x0A):
+				out.WriteRune(r + symbolOffset)
+				continue
+			case '␍', '␊':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeHash) { // #
+			switch r {
+			case '#':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '＃':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodePercent) { // %
+			switch r {
+			case '%':
+				out.WriteRune(r + fullOffset)
+				continue
+			case '％':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeDel) { // DEL(0x7F)
+			switch r {
+			case rune(0x7F):
+				out.WriteRune('␡') // SYMBOL FOR DELETE
+				continue
+			case '␡':
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
+			if r >= 1 && r <= 0x1F {
+				out.WriteRune('␀' + r) // SYMBOL FOR NULL
+				continue
+			} else if r > symbolOffset && r <= symbolOffset+0x1F {
+				out.WriteRune(QuoteRune)
+				out.WriteRune(r)
+				continue
+			}
+		}
+		out.WriteRune(r)
+	}
+	out.WriteString(suffix)
+	return out.String()
+}
+
+// Decode takes a name and undoes any substitutions made by Encode
+func (mask MultiEncoder) Decode(in string) string {
+	if mask.Has(EncodeDot) {
+		switch in {
+		case "．":
+			return "."
+		case "．．":
+			return ".."
+		case string(QuoteRune) + "．":
+			return "．"
+		case string(QuoteRune) + "．" + string(QuoteRune) + "．":
+			return "．．"
+		}
+	}
+
+	// handle prefix only replacements
+	prefix := ""
+	if r, l1 := utf8.DecodeRuneInString(in); mask.Has(EncodeLeftSpace) && r == '␠' { // SYMBOL FOR SPACE
+		prefix, in = " ", in[l1:]
+	} else if mask.Has(EncodeLeftPeriod) && r == '．' { // FULLWIDTH FULL STOP
+		prefix, in = ".", in[l1:]
+	} else if mask.Has(EncodeLeftTilde) && r == '～' { // FULLWIDTH TILDE
+		prefix, in = "~", in[l1:]
+	} else if mask.Has(EncodeLeftCrLfHtVt) && (r == '␀'+'\t' || r == '␀'+'\n' || r == '␀'+'\v' || r == '␀'+'\r') {
+		prefix, in = string(r-'␀'), in[l1:]
+	} else if r == QuoteRune {
+		if r, l2 := utf8.DecodeRuneInString(in[l1:]); mask.Has(EncodeLeftSpace) && r == '␠' { // SYMBOL FOR SPACE
+			prefix, in = "␠", in[l1+l2:]
+		} else if mask.Has(EncodeLeftPeriod) && r == '．' { // FULLWIDTH FULL STOP
+			prefix, in = "．", in[l1+l2:]
+		} else if mask.Has(EncodeLeftTilde) && r == '～' { // FULLWIDTH TILDE
+			prefix, in = "～", in[l1+l2:]
+		} else if mask.Has(EncodeLeftCrLfHtVt) && (r == '␀'+'\t' || r == '␀'+'\n' || r == '␀'+'\v' || r == '␀'+'\r') {
+			prefix, in = string(r), in[l1+l2:]
+		}
+	}
+
+	// handle suffix only replacements
+	suffix := ""
+	if r, l := utf8.DecodeLastRuneInString(in); mask.Has(EncodeRightSpace) && r == '␠' { // SYMBOL FOR SPACE
+		in = in[:len(in)-l]
+		if q, l2 := utf8.DecodeLastRuneInString(in); q == QuoteRune {
+			suffix, in = "␠", in[:len(in)-l2]
+		} else {
+			suffix = " "
+		}
+	} else if mask.Has(EncodeRightPeriod) && r == '．' { // FULLWIDTH FULL STOP
+		in = in[:len(in)-l]
+		if q, l2 := utf8.DecodeLastRuneInString(in); q == QuoteRune {
+			suffix, in = "．", in[:len(in)-l2]
+		} else {
+			suffix = "."
+		}
+	} else if mask.Has(EncodeRightCrLfHtVt) && (r == '␀'+'\t' || r == '␀'+'\n' || r == '␀'+'\v' || r == '␀'+'\r') {
+		in = in[:len(in)-l]
+		if q, l2 := utf8.DecodeLastRuneInString(in); q == QuoteRune {
+			suffix, in = string(r), in[:len(in)-l2]
+		} else {
+			suffix = string(r - '␀')
+		}
+	}
+	index := 0
+	if prefix == "" && suffix == "" {
+		// find the first rune which (most likely) needs to be replaced
+		index = strings.IndexFunc(in, func(r rune) bool {
+			switch r {
+			case '␀', QuoteRune:
+				return true
+			}
+			if mask.Has(EncodeAsterisk) { // *
+				switch r {
+				case '＊':
+					return true
+				}
+			}
+			if mask.Has(EncodeLtGt) { // <>
+				switch r {
+				case '＜', '＞':
+					return true
+				}
+			}
+			if mask.Has(EncodeQuestion) { // ?
+				switch r {
+				case '？':
+					return true
+				}
+			}
+			if mask.Has(EncodeColon) { // :
+				switch r {
+				case '：':
+					return true
+				}
+			}
+			if mask.Has(EncodePipe) { // |
+				switch r {
+				case '｜':
+					return true
+				}
+			}
+			if mask.Has(EncodeDoubleQuote) { // "
+				switch r {
+				case '＂':
+					return true
+				}
+			}
+			if mask.Has(EncodeSingleQuote) { // '
+				switch r {
+				case '＇':
+					return true
+				}
+			}
+			if mask.Has(EncodeBackQuote) { // `
+				switch r {
+				case '｀':
+					return true
+				}
+			}
+			if mask.Has(EncodeDollar) { // $
+				switch r {
+				case '＄':
+					return true
+				}
+			}
+			if mask.Has(EncodeSlash) { // /
+				switch r {
+				case '／':
+					return true
+				}
+			}
+			if mask.Has(EncodeBackSlash) { // \
+				switch r {
+				case '＼':
+					return true
+				}
+			}
+			if mask.Has(EncodeCrLf) { // CR LF
+				switch r {
+				case '␍', '␊':
+					return true
+				}
+			}
+			if mask.Has(EncodeHash) { // #
+				switch r {
+				case '＃':
+					return true
+				}
+			}
+			if mask.Has(EncodePercent) { // %
+				switch r {
+				case '％':
+					return true
+				}
+			}
+			if mask.Has(EncodeDel) { // DEL(0x7F)
+				switch r {
+				case '␡':
+					return true
+				}
+			}
+			if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
+				if r > symbolOffset && r <= symbolOffset+0x1F {
+					return true
+				}
+			}
+
+			return false
+		})
+	}
+	// nothing to replace, return input
+	if index == -1 {
+		return in
+	}
+
+	var out bytes.Buffer
+	out.Grow(len(in))
+	out.WriteString(prefix)
+	// copy the clean part of the input and skip it
+	out.WriteString(in[:index])
+	in = in[index:]
+	var unquote, unquoteNext, skipNext bool
+
+	for i, r := range in {
+		if skipNext {
+			skipNext = false
+			continue
+		}
+		unquote, unquoteNext = unquoteNext, false
+		switch r {
+		case '␀': // SYMBOL FOR NULL
+			if unquote {
+				out.WriteRune(r)
+			} else {
+				out.WriteRune(0)
+			}
+			continue
+		case QuoteRune:
+			if unquote {
+				out.WriteRune(r)
+			} else {
+				unquoteNext = true
+			}
+			continue
+		}
+		if mask.Has(EncodeAsterisk) { // *
+			switch r {
+			case '＊':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeLtGt) { // <>
+			switch r {
+			case '＜', '＞':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeQuestion) { // ?
+			switch r {
+			case '？':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeColon) { // :
+			switch r {
+			case '：':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodePipe) { // |
+			switch r {
+			case '｜':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeDoubleQuote) { // "
+			switch r {
+			case '＂':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeSingleQuote) { // '
+			switch r {
+			case '＇':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeBackQuote) { // `
+			switch r {
+			case '｀':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeDollar) { // $
+			switch r {
+			case '＄':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeSlash) { // /
+			switch r {
+			case '／': // FULLWIDTH SOLIDUS
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeBackSlash) { // \
+			switch r {
+			case '＼': // FULLWIDTH REVERSE SOLIDUS
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeCrLf) { // CR LF
+			switch r {
+			case '␍', '␊':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - symbolOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeHash) { // #
+			switch r {
+			case '＃':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodePercent) { // %
+			switch r {
+			case '％':
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - fullOffset)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeDel) { // DEL(0x7F)
+			switch r {
+			case '␡': // SYMBOL FOR DELETE
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(0x7F)
+				}
+				continue
+			}
+		}
+		if mask.Has(EncodeCtl) { // CTRL(0x01-0x1F)
+			if r > symbolOffset && r <= symbolOffset+0x1F {
+				if unquote {
+					out.WriteRune(r)
+				} else {
+					out.WriteRune(r - symbolOffset)
+				}
+				continue
+			}
+		}
+		if unquote {
+			if mask.Has(EncodeInvalidUtf8) {
+				skipNext = appendUnquotedByte(&out, in[i:])
+				if skipNext {
+					continue
+				}
+			}
+			out.WriteRune(QuoteRune)
+		}
+		switch r {
+		case utf8.RuneError:
+			// append the real bytes instead of utf8.RuneError
+			_, l := utf8.DecodeRuneInString(in[i:])
+			out.WriteString(in[i : i+l])
+			continue
+		}
+
+		out.WriteRune(r)
+	}
+	if unquoteNext {
+		out.WriteRune(QuoteRune)
+	}
+	out.WriteString(suffix)
+	return out.String()
+}
+
+// FromStandardPath takes a / separated path in Standard encoding
+// and converts it to a / separated path in this encoding.
+func (mask MultiEncoder) FromStandardPath(s string) string {
+	return FromStandardPath(mask, s)
+}
+
+// FromStandardName takes name in Standard encoding and converts
+// it in this encoding.
+func (mask MultiEncoder) FromStandardName(s string) string {
+	return FromStandardName(mask, s)
+}
+
+// ToStandardPath takes a / separated path in this encoding
+// and converts it to a / separated path in Standard encoding.
+func (mask MultiEncoder) ToStandardPath(s string) string {
+	return ToStandardPath(mask, s)
+}
+
+// ToStandardName takes name in this encoding and converts
+// it in Standard encoding.
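[Aside, not part of the vendored diff: a round-trip through the MultiEncoder, assuming the flags shown.]

	enc := encoder.EncodeSlash | encoder.EncodeCtl
	fmt.Println(enc.Encode("a/b"))  // "a／b" - slash becomes FULLWIDTH SOLIDUS
	fmt.Println(enc.Decode("a／b")) // "a/b" - and back again
	fmt.Println(enc.Encode("a／b")) // "a‛／b" - a literal fullwidth solidus is quoted so Decode can tell it apart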
+func (mask MultiEncoder) ToStandardName(s string) string { + return ToStandardName(mask, s) +} + +func appendQuotedBytes(w io.Writer, s string) { + for _, b := range []byte(s) { + _, _ = fmt.Fprintf(w, string(QuoteRune)+"%02X", b) + } +} +func appendUnquotedByte(w io.Writer, s string) bool { + if len(s) < 2 { + return false + } + u, err := strconv.ParseUint(s[:2], 16, 8) + if err != nil { + return false + } + n, _ := w.Write([]byte{byte(u)}) + return n == 1 +} + +type identity struct{} + +func (identity) Encode(in string) string { return in } +func (identity) Decode(in string) string { return in } + +func (i identity) FromStandardPath(s string) string { + return FromStandardPath(i, s) +} +func (i identity) FromStandardName(s string) string { + return FromStandardName(i, s) +} +func (i identity) ToStandardPath(s string) string { + return ToStandardPath(i, s) +} +func (i identity) ToStandardName(s string) string { + return ToStandardName(i, s) +} + +// Identity returns an Encoder that always returns the input value +func Identity() Encoder { + return identity{} +} + +// FromStandardPath takes a / separated path in Standard encoding +// and converts it to a / separated path in the given encoding. +func FromStandardPath(e Encoder, s string) string { + if e == Standard { + return s + } + parts := strings.Split(s, "/") + encoded := make([]string, len(parts)) + changed := false + for i, p := range parts { + enc := FromStandardName(e, p) + changed = changed || enc != p + encoded[i] = enc + } + if !changed { + return s + } + return strings.Join(encoded, "/") +} + +// FromStandardName takes name in Standard encoding and converts +// it in the given encoding. +func FromStandardName(e Encoder, s string) string { + if e == Standard { + return s + } + return e.Encode(Standard.Decode(s)) +} + +// ToStandardPath takes a / separated path in the given encoding +// and converts it to a / separated path in Standard encoding. +func ToStandardPath(e Encoder, s string) string { + if e == Standard { + return s + } + parts := strings.Split(s, "/") + encoded := make([]string, len(parts)) + changed := false + for i, p := range parts { + dec := ToStandardName(e, p) + changed = changed || dec != p + encoded[i] = dec + } + if !changed { + return s + } + return strings.Join(encoded, "/") +} + +// ToStandardName takes name in the given encoding and converts +// it in Standard encoding. +func ToStandardName(e Encoder, s string) string { + if e == Standard { + return s + } + return Standard.Encode(e.Decode(s)) +} diff --git a/vendor/github.com/rclone/rclone/lib/encoder/standard.go b/vendor/github.com/rclone/rclone/lib/encoder/standard.go new file mode 100644 index 00000000000..439437c7932 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/encoder/standard.go @@ -0,0 +1,20 @@ +package encoder + +// Standard defines the encoding that is used for paths in- and output by rclone. 
+//
+// List of replaced characters:
+//	(0x00)    -> '␀' // SYMBOL FOR NULL
+//	/ (slash) -> '／' // FULLWIDTH SOLIDUS
+const Standard = (EncodeZero |
+	EncodeSlash |
+	EncodeCtl |
+	EncodeDel |
+	EncodeDot)
+
+// Base only encodes the zero byte and slash
+const Base = (EncodeZero |
+	EncodeSlash |
+	EncodeDot)
+
+// Display is the internal encoding for logging and output
+const Display = Standard
diff --git a/vendor/github.com/rclone/rclone/lib/env/env.go b/vendor/github.com/rclone/rclone/lib/env/env.go
new file mode 100644
index 00000000000..8e503aeed88
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/env/env.go
@@ -0,0 +1,46 @@
+// Package env contains functions for dealing with environment variables
+package env
+
+import (
+	"os"
+	"os/user"
+
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+// ShellExpandHelp describes what ShellExpand does for inclusion into help
+const ShellExpandHelp = "\n\nLeading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`.\n"
+
+// ShellExpand replaces a leading "~" with the home directory and
+// expands all environment variables afterwards.
+func ShellExpand(s string) string {
+	if s != "" {
+		if s[0] == '~' {
+			newS, err := homedir.Expand(s)
+			if err == nil {
+				s = newS
+			}
+		}
+		s = os.ExpandEnv(s)
+	}
+	return s
+}
+
+// CurrentUser finds the current user name or "" if not found
+func CurrentUser() (userName string) {
+	userName = os.Getenv("USER")
+	// If we are making docs just use $USER
+	if userName == "$USER" {
+		return userName
+	}
+	// Try reading using the OS
+	usr, err := user.Current()
+	if err == nil {
+		return usr.Username
+	}
+	// Fall back to the $USER read above, then $LOGNAME
+	if userName != "" {
+		return userName
+	}
+	return os.Getenv("LOGNAME")
+}
diff --git a/vendor/github.com/rclone/rclone/lib/errors/errors.go b/vendor/github.com/rclone/rclone/lib/errors/errors.go
new file mode 100644
index 00000000000..6338440c4b7
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/errors/errors.go
@@ -0,0 +1,76 @@
+package errors
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+	return errors.New(text)
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+func Errorf(format string, a ...interface{}) error {
+	return fmt.Errorf(format, a...)
+}
+
+// WalkFunc is the signature of the Walk callback function. The function gets the
+// current error in the chain and should return true if the chain processing
+// should be aborted.
+type WalkFunc func(error) bool
+
+// Walk invokes the given function for each error in the chain. If the
+// provided function returns true or no further cause can be found, the process
+// is stopped and no further calls will be made.
+//
+// The next error in the chain is determined by the following rules:
+// - If the current error has a `Cause() error` method (github.com/pkg/errors),
+//   the return value of this method is used.
+// - If the current error has an `Unwrap() error` method (golang.org/x/xerrors),
+//   the return value of this method is used.
+// - Common errors in the Go runtime that contain an Err field will use this value.
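[Aside, not part of the vendored diff: using Walk to find a specific cause in a wrapped chain; liberrors is an assumed import alias for this package.]

	var opErr *net.OpError
	liberrors.Walk(err, func(e error) bool {
		if oe, ok := e.(*net.OpError); ok {
			opErr = oe
			return true // abort the walk, cause found
		}
		return false // keep following Cause/Unwrap/Err
	})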
+func Walk(err error, f WalkFunc) { + for prev := err; err != nil; prev = err { + if f(err) { + return + } + + switch e := err.(type) { + case causer: + err = e.Cause() + case wrapper: + err = e.Unwrap() + default: + // Unpack any struct or *struct with a field of name Err which satisfies + // the error interface. This includes *url.Error, *net.OpError, + // *os.SyscallError and many others in the stdlib. + errType := reflect.TypeOf(err) + errValue := reflect.ValueOf(err) + if errValue.IsValid() && errType.Kind() == reflect.Ptr { + errType = errType.Elem() + errValue = errValue.Elem() + } + if errValue.IsValid() && errType.Kind() == reflect.Struct { + if errField := errValue.FieldByName("Err"); errField.IsValid() { + errFieldValue := errField.Interface() + if newErr, ok := errFieldValue.(error); ok { + err = newErr + } + } + } + } + if reflect.DeepEqual(err, prev) { + break + } + } +} + +type causer interface { + Cause() error +} +type wrapper interface { + Unwrap() error +} diff --git a/vendor/github.com/rclone/rclone/lib/file/driveletter_other.go b/vendor/github.com/rclone/rclone/lib/file/driveletter_other.go new file mode 100644 index 00000000000..16e6e641fd7 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/driveletter_other.go @@ -0,0 +1,8 @@ +//+build !windows + +package file + +// FindUnusedDriveLetter does nothing except on Windows. +func FindUnusedDriveLetter() (driveLetter uint8) { + return 0 +} diff --git a/vendor/github.com/rclone/rclone/lib/file/driveletter_windows.go b/vendor/github.com/rclone/rclone/lib/file/driveletter_windows.go new file mode 100644 index 00000000000..ca080fc9c02 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/driveletter_windows.go @@ -0,0 +1,22 @@ +//+build windows + +package file + +import ( + "os" +) + +// FindUnusedDriveLetter searches mounted drive list on the system +// (starting from Z: and ending at D:) for unused drive letter. +// Returns the letter found (like 'Z') or zero value. +func FindUnusedDriveLetter() (driveLetter uint8) { + // Do not use A: and B:, because they are reserved for floppy drive. + // Do not use C:, because it is normally used for main drive. + for l := uint8('Z'); l >= uint8('D'); l-- { + _, err := os.Stat(string(l) + ":" + string(os.PathSeparator)) + if os.IsNotExist(err) { + return l + } + } + return 0 +} diff --git a/vendor/github.com/rclone/rclone/lib/file/file.go b/vendor/github.com/rclone/rclone/lib/file/file.go new file mode 100644 index 00000000000..a82c144e8e0 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/file.go @@ -0,0 +1,22 @@ +// Package file provides a version of os.OpenFile, the handles of +// which can be renamed and deleted under Windows. +package file + +import "os" + +// Open opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func Open(name string) (*os.File, error) { + return OpenFile(name, os.O_RDONLY, 0) +} + +// Create creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. 
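[Aside, not part of the vendored diff: the point of this package is that handles opened through it remain renameable and deletable on Windows. A sketch, assuming error handling by the caller:]

	out, err := file.Create("download.partial")
	if err != nil {
		return err
	}
	// ... write to out ...
	// Because the handle was opened with FILE_SHARE_DELETE on Windows,
	// the file can be renamed into place while still open.
	err = os.Rename("download.partial", "download")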
+func Create(name string) (*os.File, error) { + return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} diff --git a/vendor/github.com/rclone/rclone/lib/file/file_other.go b/vendor/github.com/rclone/rclone/lib/file/file_other.go new file mode 100644 index 00000000000..16de1584792 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/file_other.go @@ -0,0 +1,15 @@ +//+build !windows + +package file + +import "os" + +// OpenFile is the generalized open call; most users will use Open or Create +// instead. It opens the named file with specified flag (O_RDONLY etc.) and +// perm (before umask), if applicable. If successful, methods on the returned +// File can be used for I/O. If there is an error, it will be of type +// *PathError. +// +// Under both Unix and Windows this will allow open files to be +// renamed and or deleted. +var OpenFile = os.OpenFile diff --git a/vendor/github.com/rclone/rclone/lib/file/file_windows.go b/vendor/github.com/rclone/rclone/lib/file/file_windows.go new file mode 100644 index 00000000000..29a6fad0fc9 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/file_windows.go @@ -0,0 +1,66 @@ +//+build windows + +package file + +import ( + "os" + "syscall" +) + +// OpenFile is the generalized open call; most users will use Open or Create +// instead. It opens the named file with specified flag (O_RDONLY etc.) and +// perm (before umask), if applicable. If successful, methods on the returned +// File can be used for I/O. If there is an error, it will be of type +// *PathError. +// +// Under both Unix and Windows this will allow open files to be +// renamed and or deleted. +func OpenFile(path string, mode int, perm os.FileMode) (*os.File, error) { + // This code copied from syscall_windows.go in the go source and then + // modified to support renaming and deleting open files by adding + // FILE_SHARE_DELETE. 
+ // + // https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-createfilea#file_share_delete + if len(path) == 0 { + return nil, syscall.ERROR_FILE_NOT_FOUND + } + pathp, err := syscall.UTF16PtrFromString(path) + if err != nil { + return nil, err + } + var access uint32 + switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) { + case syscall.O_RDONLY: + access = syscall.GENERIC_READ + case syscall.O_WRONLY: + access = syscall.GENERIC_WRITE + case syscall.O_RDWR: + access = syscall.GENERIC_READ | syscall.GENERIC_WRITE + } + if mode&syscall.O_CREAT != 0 { + access |= syscall.GENERIC_WRITE + } + if mode&syscall.O_APPEND != 0 { + access &^= syscall.GENERIC_WRITE + access |= syscall.FILE_APPEND_DATA + } + sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) + var createmode uint32 + switch { + case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL): + createmode = syscall.CREATE_NEW + case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC): + createmode = syscall.CREATE_ALWAYS + case mode&syscall.O_CREAT == syscall.O_CREAT: + createmode = syscall.OPEN_ALWAYS + case mode&syscall.O_TRUNC == syscall.O_TRUNC: + createmode = syscall.TRUNCATE_EXISTING + default: + createmode = syscall.OPEN_EXISTING + } + h, e := syscall.CreateFile(pathp, access, sharemode, nil, createmode, syscall.FILE_ATTRIBUTE_NORMAL, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/rclone/rclone/lib/file/preallocate_other.go b/vendor/github.com/rclone/rclone/lib/file/preallocate_other.go new file mode 100644 index 00000000000..87b01799eae --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/preallocate_other.go @@ -0,0 +1,23 @@ +//+build !windows,!linux + +package file + +import "os" + +// PreallocateImplemented is a constant indicating whether the +// implementation of Preallocate actually does anything. +const PreallocateImplemented = false + +// PreAllocate the file for performance reasons +func PreAllocate(size int64, out *os.File) error { + return nil +} + +// SetSparseImplemented is a constant indicating whether the +// implementation of SetSparse actually does anything. +const SetSparseImplemented = false + +// SetSparse makes the file be a sparse file +func SetSparse(out *os.File) error { + return nil +} diff --git a/vendor/github.com/rclone/rclone/lib/file/preallocate_unix.go b/vendor/github.com/rclone/rclone/lib/file/preallocate_unix.go new file mode 100644 index 00000000000..8c3df5238a8 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/preallocate_unix.go @@ -0,0 +1,59 @@ +//+build linux + +package file + +import ( + "os" + "sync/atomic" + + "github.com/rclone/rclone/fs" + "golang.org/x/sys/unix" +) + +var ( + fallocFlags = [...]uint32{ + unix.FALLOC_FL_KEEP_SIZE, // Default + unix.FALLOC_FL_KEEP_SIZE | unix.FALLOC_FL_PUNCH_HOLE, // for ZFS #3066 + } + fallocFlagsIndex int32 +) + +// PreallocateImplemented is a constant indicating whether the +// implementation of Preallocate actually does anything. 
+const PreallocateImplemented = true + +// PreAllocate the file for performance reasons +func PreAllocate(size int64, out *os.File) error { + if size <= 0 { + return nil + } + index := atomic.LoadInt32(&fallocFlagsIndex) +again: + if index >= int32(len(fallocFlags)) { + return nil // Fallocate is disabled + } + flags := fallocFlags[index] + err := unix.Fallocate(int(out.Fd()), flags, 0, size) + if err == unix.ENOTSUP { + // Try the next flags combination + index++ + atomic.StoreInt32(&fallocFlagsIndex, index) + fs.Debugf(nil, "preAllocate: got error on fallocate, trying combination %d/%d: %v", index, len(fallocFlags), err) + goto again + + } + // FIXME could be doing something here + // if err == unix.ENOSPC { + // log.Printf("No space") + // } + return err +} + +// SetSparseImplemented is a constant indicating whether the +// implementation of SetSparse actually does anything. +const SetSparseImplemented = false + +// SetSparse makes the file be a sparse file +func SetSparse(out *os.File) error { + return nil +} diff --git a/vendor/github.com/rclone/rclone/lib/file/preallocate_windows.go b/vendor/github.com/rclone/rclone/lib/file/preallocate_windows.go new file mode 100644 index 00000000000..f92f9f7f79a --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/preallocate_windows.go @@ -0,0 +1,101 @@ +//+build windows + +package file + +import ( + "os" + "syscall" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var ( + ntdll = windows.NewLazySystemDLL("ntdll.dll") + ntQueryVolumeInformationFile = ntdll.NewProc("NtQueryVolumeInformationFile") + ntSetInformationFile = ntdll.NewProc("NtSetInformationFile") +) + +type fileAllocationInformation struct { + AllocationSize uint64 +} + +type fileFsSizeInformation struct { + TotalAllocationUnits uint64 + AvailableAllocationUnits uint64 + SectorsPerAllocationUnit uint32 + BytesPerSector uint32 +} + +type ioStatusBlock struct { + Status, Information uintptr +} + +// PreallocateImplemented is a constant indicating whether the +// implementation of Preallocate actually does anything. 
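[Aside, not part of the vendored diff: typical use when writing a file of known size; dstPath and expectedSize are assumed inputs, and the preallocation is best effort.]

	out, err := file.Create(dstPath)
	if err != nil {
		return err
	}
	if file.PreallocateImplemented {
		_ = file.PreAllocate(expectedSize, out) // reserve blocks up front to reduce fragmentation
	}
	if file.SetSparseImplemented {
		_ = file.SetSparse(out) // Windows only: avoid zero-filling on out-of-order writes
	}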
+const PreallocateImplemented = true + +// PreAllocate the file for performance reasons +func PreAllocate(size int64, out *os.File) error { + if size <= 0 { + return nil + } + + var ( + iosb ioStatusBlock + fsSizeInfo fileFsSizeInformation + allocInfo fileAllocationInformation + ) + + // Query info about the block sizes on the file system + _, _, e1 := ntQueryVolumeInformationFile.Call( + uintptr(out.Fd()), + uintptr(unsafe.Pointer(&iosb)), + uintptr(unsafe.Pointer(&fsSizeInfo)), + uintptr(unsafe.Sizeof(fsSizeInfo)), + uintptr(3), // FileFsSizeInformation + ) + if e1 != nil && e1 != syscall.Errno(0) { + return errors.Wrap(e1, "preAllocate NtQueryVolumeInformationFile failed") + } + + // Calculate the allocation size + clusterSize := uint64(fsSizeInfo.BytesPerSector) * uint64(fsSizeInfo.SectorsPerAllocationUnit) + if clusterSize <= 0 { + return errors.Errorf("preAllocate clusterSize %d <= 0", clusterSize) + } + allocInfo.AllocationSize = (1 + uint64(size-1)/clusterSize) * clusterSize + + // Ask for the allocation + _, _, e1 = ntSetInformationFile.Call( + uintptr(out.Fd()), + uintptr(unsafe.Pointer(&iosb)), + uintptr(unsafe.Pointer(&allocInfo)), + uintptr(unsafe.Sizeof(allocInfo)), + uintptr(19), // FileAllocationInformation + ) + if e1 != nil && e1 != syscall.Errno(0) { + return errors.Wrap(e1, "preAllocate NtSetInformationFile failed") + } + + return nil +} + +const ( + FSCTL_SET_SPARSE = 0x000900c4 +) + +// SetSparseImplemented is a constant indicating whether the +// implementation of SetSparse actually does anything. +const SetSparseImplemented = true + +// SetSparse makes the file be a sparse file +func SetSparse(out *os.File) error { + var bytesReturned uint32 + err := syscall.DeviceIoControl(syscall.Handle(out.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, &bytesReturned, nil) + if err != nil { + return errors.Wrap(err, "DeviceIoControl FSCTL_SET_SPARSE") + } + return nil +} diff --git a/vendor/github.com/rclone/rclone/lib/file/unc.go b/vendor/github.com/rclone/rclone/lib/file/unc.go new file mode 100644 index 00000000000..41661336d83 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/unc.go @@ -0,0 +1,10 @@ +//+build !windows + +package file + +// UNCPath converts an absolute Windows path to a UNC long path. +// +// It does nothing on non windows platforms +func UNCPath(l string) string { + return l +} diff --git a/vendor/github.com/rclone/rclone/lib/file/unc_windows.go b/vendor/github.com/rclone/rclone/lib/file/unc_windows.go new file mode 100644 index 00000000000..5881befedb0 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/file/unc_windows.go @@ -0,0 +1,31 @@ +//+build windows + +package file + +import ( + "regexp" + "strings" +) + +// Pattern to match a windows absolute path: "c:\" and similar +var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`) + +// UNCPath converts an absolute Windows path to a UNC long path. +// +// It does nothing on non windows platforms +func UNCPath(l string) string { + // If prefix is "\\", we already have a UNC path or server. + if strings.HasPrefix(l, `\\`) { + // If already long path, just keep it + if strings.HasPrefix(l, `\\?\`) { + return l + } + + // Trim "\\" from path and add UNC prefix. 
+ return `\\?\UNC\` + strings.TrimPrefix(l, `\\`) + } + if isAbsWinDrive.MatchString(l) { + return `\\?\` + l + } + return l +} diff --git a/vendor/github.com/rclone/rclone/lib/mmap/mmap.go b/vendor/github.com/rclone/rclone/lib/mmap/mmap.go new file mode 100644 index 00000000000..719bcf816d1 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/mmap/mmap.go @@ -0,0 +1,29 @@ +package mmap + +import "os" + +// PageSize is the minimum allocation size. Allocations will use at +// least this size and are likely to be multiplied up to a multiple of +// this size. +var PageSize = os.Getpagesize() + +// MustAlloc allocates size bytes and returns a slice containing them. If +// the allocation fails it will panic. This is best used for +// allocations which are a multiple of the PageSize. +func MustAlloc(size int) []byte { + mem, err := Alloc(size) + if err != nil { + panic(err) + } + return mem +} + +// MustFree frees buffers allocated by Alloc. Note it should be passed +// the same slice (not a derived slice) that Alloc returned. If the +// free fails it will panic. +func MustFree(mem []byte) { + err := Free(mem) + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/rclone/rclone/lib/mmap/mmap_unix.go b/vendor/github.com/rclone/rclone/lib/mmap/mmap_unix.go new file mode 100644 index 00000000000..a5129261fbd --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/mmap/mmap_unix.go @@ -0,0 +1,33 @@ +// Package mmap implements a large block memory allocator using +// anonymous memory maps. + +// +build !plan9,!windows,!js + +package mmap + +import ( + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// Alloc allocates size bytes and returns a slice containing them. If +// the allocation fails it will return with an error. This is best +// used for allocations which are a multiple of the PageSize. +func Alloc(size int) ([]byte, error) { + mem, err := unix.Mmap(-1, 0, size, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_PRIVATE|unix.MAP_ANON) + if err != nil { + return nil, errors.Wrap(err, "mmap: failed to allocate memory for buffer") + } + return mem, nil +} + +// Free frees buffers allocated by Alloc. Note it should be passed +// the same slice (not a derived slice) that Alloc returned. If the +// free fails it will return with an error. +func Free(mem []byte) error { + err := unix.Munmap(mem) + if err != nil { + return errors.Wrap(err, "mmap: failed to unmap memory") + } + return nil +} diff --git a/vendor/github.com/rclone/rclone/lib/mmap/mmap_unsupported.go b/vendor/github.com/rclone/rclone/lib/mmap/mmap_unsupported.go new file mode 100644 index 00000000000..7f956c7b860 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/mmap/mmap_unsupported.go @@ -0,0 +1,19 @@ +// Fallback Alloc and Free for unsupported OSes + +// +build plan9 js + +package mmap + +// Alloc allocates size bytes and returns a slice containing them. If +// the allocation fails it will return with an error. This is best +// used for allocations which are a multiple of the Pagesize. +func Alloc(size int) ([]byte, error) { + return make([]byte, size), nil +} + +// Free frees buffers allocated by Alloc. Note it should be passed +// the same slice (not a derived slice) that Alloc returned. If the +// free fails it will return with an error. 
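[Aside, not part of the vendored diff: these allocators work best on whole pages, so round the request up first.]

	n := 1000000
	size := (n + mmap.PageSize - 1) / mmap.PageSize * mmap.PageSize
	buf := mmap.MustAlloc(size)
	defer mmap.MustFree(buf) // must be the slice Alloc returned, not a sub-slice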
+func Free(mem []byte) error { + return nil +} diff --git a/vendor/github.com/rclone/rclone/lib/mmap/mmap_windows.go b/vendor/github.com/rclone/rclone/lib/mmap/mmap_windows.go new file mode 100644 index 00000000000..43608552745 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/mmap/mmap_windows.go @@ -0,0 +1,42 @@ +// Package mmap implements a large block memory allocator using +// anonymous memory maps. + +// +build windows + +package mmap + +import ( + "reflect" + "unsafe" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +// Alloc allocates size bytes and returns a slice containing them. If +// the allocation fails it will return with an error. This is best +// used for allocations which are a multiple of the PageSize. +func Alloc(size int) ([]byte, error) { + p, err := windows.VirtualAlloc(0, uintptr(size), windows.MEM_COMMIT, windows.PAGE_READWRITE) + if err != nil { + return nil, errors.Wrap(err, "mmap: failed to allocate memory for buffer") + } + var mem []byte + sh := (*reflect.SliceHeader)(unsafe.Pointer(&mem)) + sh.Data = p + sh.Len = size + sh.Cap = size + return mem, nil +} + +// Free frees buffers allocated by Alloc. Note it should be passed +// the same slice (not a derived slice) that Alloc returned. If the +// free fails it will return with an error. +func Free(mem []byte) error { + sh := (*reflect.SliceHeader)(unsafe.Pointer(&mem)) + err := windows.VirtualFree(sh.Data, 0, windows.MEM_RELEASE) + if err != nil { + return errors.Wrap(err, "mmap: failed to unmap memory") + } + return nil +} diff --git a/vendor/github.com/rclone/rclone/lib/oauthutil/oauthutil.go b/vendor/github.com/rclone/rclone/lib/oauthutil/oauthutil.go new file mode 100644 index 00000000000..501a36d3dce --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/oauthutil/oauthutil.go @@ -0,0 +1,669 @@ +package oauthutil + +import ( + "context" + "encoding/json" + "fmt" + "html/template" + "net" + "net/http" + "net/url" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/config" + "github.com/rclone/rclone/fs/config/configmap" + "github.com/rclone/rclone/fs/fshttp" + "github.com/rclone/rclone/lib/random" + "github.com/skratchdot/open-golang/open" + "golang.org/x/oauth2" +) + +const ( + // TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization + // code should be returned in the title bar of the browser, with the page text + // prompting the user to copy the code and paste it in the application. + TitleBarRedirectURL = "urn:ietf:wg:oauth:2.0:oob" + + // bindPort is the port that we bind the local webserver to + bindPort = "53682" + + // bindAddress is binding for local webserver when active + bindAddress = "127.0.0.1:" + bindPort + + // RedirectURL is redirect to local webserver when active + RedirectURL = "http://" + bindAddress + "/" + + // RedirectPublicURL is redirect to local webserver when active with public name + RedirectPublicURL = "http://localhost.rclone.org:" + bindPort + "/" + + // RedirectLocalhostURL is redirect to local webserver when active with localhost + RedirectLocalhostURL = "http://localhost:" + bindPort + "/" + + // RedirectPublicSecureURL is a public https URL which + // redirects to the local webserver + RedirectPublicSecureURL = "https://oauth.rclone.org/" + + // AuthResponseTemplate is a template to handle the redirect URL for oauth requests + AuthResponseTemplate = ` + + + +{{ if .OK }}Success!{{ else }}Failure!{{ end }} + + +

+<h1>{{ if .OK }}Success!{{ else }}Failure!{{ end }}</h1>
+<hr>
+<pre style="width: 750px; white-space: pre-wrap;">
+{{ if eq .OK false }}
+Error: {{ .Name }}
+{{ if .Description }}Description: {{ .Description }}
+{{ end }}
+{{ if .Code }}Code: {{ .Code }}
+{{ end }}
+{{ if .HelpURL }}Look here for help: <a href="{{ .HelpURL }}">{{ .HelpURL }}</a>
+{{ end }}
+{{ else }}
+All done. Please go back to rclone.
+{{ end }}
+</pre>
+</body>
+</html>
+`
+)
+
+// SharedOptions are shared between backends that utilize an OAuth flow
+var SharedOptions = []fs.Option{{
+	Name: config.ConfigClientID,
+	Help: "OAuth Client Id\nLeave blank normally.",
+}, {
+	Name: config.ConfigClientSecret,
+	Help: "OAuth Client Secret\nLeave blank normally.",
+}, {
+	Name:     config.ConfigToken,
+	Help:     "OAuth Access Token as a JSON blob.",
+	Advanced: true,
+}, {
+	Name:     config.ConfigAuthURL,
+	Help:     "Auth server URL.\nLeave blank to use the provider defaults.",
+	Advanced: true,
+}, {
+	Name:     config.ConfigTokenURL,
+	Help:     "Token server url.\nLeave blank to use the provider defaults.",
+	Advanced: true,
+}}
+
+// oldToken contains an end-user's tokens.
+// This is the data you must store to persist authentication.
+//
+// From the original code.google.com/p/goauth2/oauth package - used
+// for backwards compatibility in the rclone config file
+type oldToken struct {
+	AccessToken  string
+	RefreshToken string
+	Expiry       time.Time
+}
+
+// GetToken returns the token saved in the config file under
+// section name.
+func GetToken(name string, m configmap.Mapper) (*oauth2.Token, error) {
+	tokenString, ok := m.Get(config.ConfigToken)
+	if !ok || tokenString == "" {
+		return nil, errors.Errorf("empty token found - please run \"rclone config reconnect %s:\"", name)
+	}
+	token := new(oauth2.Token)
+	err := json.Unmarshal([]byte(tokenString), token)
+	if err != nil {
+		return nil, err
+	}
+	// if has data then return it
+	if token.AccessToken != "" {
+		return token, nil
+	}
+	// otherwise try parsing as oldToken
+	oldtoken := new(oldToken)
+	err = json.Unmarshal([]byte(tokenString), oldtoken)
+	if err != nil {
+		return nil, err
+	}
+	// Fill in result into new token
+	token.AccessToken = oldtoken.AccessToken
+	token.RefreshToken = oldtoken.RefreshToken
+	token.Expiry = oldtoken.Expiry
+	// Save new format in config file
+	err = PutToken(name, m, token, false)
+	if err != nil {
+		return nil, err
+	}
+	return token, nil
+}
+
+// PutToken stores the token in the config file
+//
+// This saves the config file if it changes
+func PutToken(name string, m configmap.Mapper, token *oauth2.Token, newSection bool) error {
+	tokenBytes, err := json.Marshal(token)
+	if err != nil {
+		return err
+	}
+	tokenString := string(tokenBytes)
+	old, ok := m.Get(config.ConfigToken)
+	if !ok || tokenString != old {
+		err = config.SetValueAndSave(name, config.ConfigToken, tokenString)
+		if newSection && err != nil {
+			fs.Debugf(name, "Added new token to config, still needs to be saved")
+		} else if err != nil {
+			fs.Errorf(nil, "Failed to save new token in config file: %v", err)
+		} else {
+			fs.Debugf(name, "Saved new token in config file")
+		}
+	}
+	return nil
+}
+
+// TokenSource stores updated tokens in the config file
+type TokenSource struct {
+	mu          sync.Mutex
+	name        string
+	m           configmap.Mapper
+	tokenSource oauth2.TokenSource
+	token       *oauth2.Token
+	config      *oauth2.Config
+	ctx         context.Context
+	expiryTimer *time.Timer // signals whenever the token expires
+}
+
+// If token has expired then first try re-reading it from the config
+// file in case a concurrently running rclone has updated it already
+func (ts *TokenSource) reReadToken() bool {
+	tokenString, err := config.FileGetFresh(ts.name, config.ConfigToken)
+	if err != nil {
+		fs.Debugf(ts.name, "Failed to read token out of config file: %v", err)
+		return false
+	}
+	newToken := new(oauth2.Token)
+	err = json.Unmarshal([]byte(tokenString), newToken)
+	if err != nil {
+		fs.Debugf(ts.name, "Failed to parse token out of config file: 
%v", err) + return false + } + if !newToken.Valid() { + fs.Debugf(ts.name, "Loaded invalid token from config file - ignoring") + return false + } + fs.Debugf(ts.name, "Loaded fresh token from config file") + ts.token = newToken + ts.tokenSource = nil // invalidate since we changed the token + return true +} + +// Token returns a token or an error. +// Token must be safe for concurrent use by multiple goroutines. +// The returned Token must not be modified. +// +// This saves the token in the config file if it has changed +func (ts *TokenSource) Token() (*oauth2.Token, error) { + ts.mu.Lock() + defer ts.mu.Unlock() + var ( + token *oauth2.Token + err error + changed = false + ) + const maxTries = 5 + + // Try getting the token a few times + for i := 1; i <= maxTries; i++ { + // Try reading the token from the config file in case it has + // been updated by a concurrent rclone process + if !ts.token.Valid() { + if ts.reReadToken() { + changed = true + } + } + + // Make a new token source if required + if ts.tokenSource == nil { + ts.tokenSource = ts.config.TokenSource(ts.ctx, ts.token) + } + + token, err = ts.tokenSource.Token() + if err == nil { + break + } + fs.Debugf(ts.name, "Token refresh failed try %d/%d: %v", i, maxTries, err) + time.Sleep(1 * time.Second) + } + if err != nil { + return nil, errors.Wrapf(err, "couldn't fetch token - maybe it has expired? - refresh with \"rclone config reconnect %s:\"", ts.name) + } + changed = changed || (*token != *ts.token) + ts.token = token + if changed { + // Bump on the expiry timer if it is set + if ts.expiryTimer != nil { + ts.expiryTimer.Reset(ts.timeToExpiry()) + } + err = PutToken(ts.name, ts.m, token, false) + if err != nil { + return nil, errors.Wrap(err, "couldn't store token") + } + } + return token, nil +} + +// Invalidate invalidates the token +func (ts *TokenSource) Invalidate() { + ts.mu.Lock() + ts.token.AccessToken = "" + ts.mu.Unlock() +} + +// timeToExpiry returns how long until the token expires +// +// Call with the lock held +func (ts *TokenSource) timeToExpiry() time.Duration { + t := ts.token + if t == nil { + return 0 + } + if t.Expiry.IsZero() { + return 3e9 * time.Second // ~95 years + } + return t.Expiry.Sub(time.Now()) +} + +// OnExpiry returns a channel which has the time written to it when +// the token expires. Note that there is only one channel so if +// attaching multiple go routines it will only signal to one of them. +func (ts *TokenSource) OnExpiry() <-chan time.Time { + ts.mu.Lock() + defer ts.mu.Unlock() + if ts.expiryTimer == nil { + ts.expiryTimer = time.NewTimer(ts.timeToExpiry()) + } + return ts.expiryTimer.C +} + +// Check interface satisfied +var _ oauth2.TokenSource = (*TokenSource)(nil) + +// Context returns a context with our HTTP Client baked in for oauth2 +func Context(ctx context.Context, client *http.Client) context.Context { + return context.WithValue(ctx, oauth2.HTTPClient, client) +} + +// overrideCredentials sets the ClientID and ClientSecret from the +// config file if they are not blank. +// If any value is overridden, true is returned. 
+// The origConfig is copied.
+func overrideCredentials(name string, m configmap.Mapper, origConfig *oauth2.Config) (newConfig *oauth2.Config, changed bool) {
+	newConfig = new(oauth2.Config)
+	*newConfig = *origConfig
+	changed = false
+	ClientID, ok := m.Get(config.ConfigClientID)
+	if ok && ClientID != "" {
+		newConfig.ClientID = ClientID
+		changed = true
+	}
+	ClientSecret, ok := m.Get(config.ConfigClientSecret)
+	if ok && ClientSecret != "" {
+		newConfig.ClientSecret = ClientSecret
+		changed = true
+	}
+	AuthURL, ok := m.Get(config.ConfigAuthURL)
+	if ok && AuthURL != "" {
+		newConfig.Endpoint.AuthURL = AuthURL
+		changed = true
+	}
+	TokenURL, ok := m.Get(config.ConfigTokenURL)
+	if ok && TokenURL != "" {
+		newConfig.Endpoint.TokenURL = TokenURL
+		changed = true
+	}
+	return newConfig, changed
+}
+
+// NewClientWithBaseClient gets a token from the config file and
+// configures a Client with it. It returns the client and a
+// TokenSource which Invalidate may need to be called on. It uses the
+// httpClient passed in as the base client.
+func NewClientWithBaseClient(ctx context.Context, name string, m configmap.Mapper, config *oauth2.Config, baseClient *http.Client) (*http.Client, *TokenSource, error) {
+	config, _ = overrideCredentials(name, m, config)
+	token, err := GetToken(name, m)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Set our own http client in the context
+	ctx = Context(ctx, baseClient)
+
+	// Wrap the TokenSource in our TokenSource which saves changed
+	// tokens in the config file
+	ts := &TokenSource{
+		name:   name,
+		m:      m,
+		token:  token,
+		config: config,
+		ctx:    ctx,
+	}
+	return oauth2.NewClient(ctx, ts), ts, nil
+}
+
+// NewClient gets a token from the config file and configures a Client
+// with it. It returns the client and a TokenSource which Invalidate may need to be called on
+func NewClient(ctx context.Context, name string, m configmap.Mapper, oauthConfig *oauth2.Config) (*http.Client, *TokenSource, error) {
+	return NewClientWithBaseClient(ctx, name, m, oauthConfig, fshttp.NewClient(ctx))
+}
+
+// AuthResult is returned from the web server after authorization
+// success or failure
+type AuthResult struct {
+	OK bool // Failure or Success?
+ Name string + Description string + Code string + HelpURL string + Form url.Values // the complete contents of the form + Err error // any underlying error to report +} + +// Error satisfies the error interface so AuthResult can be used as an error +func (ar *AuthResult) Error() string { + status := "Error" + if ar.OK { + status = "OK" + } + return fmt.Sprintf("%s: %s\nCode: %q\nDescription: %s\nHelp: %s", + status, ar.Name, ar.Code, ar.Description, ar.HelpURL) +} + +// CheckAuthFn is called when a good Auth has been received +type CheckAuthFn func(*oauth2.Config, *AuthResult) error + +// Options for the oauth config +type Options struct { + NoOffline bool // If set then "access_type=offline" parameter is not passed + CheckAuth CheckAuthFn // When the AuthResult is known the checkAuth function is called if set + OAuth2Opts []oauth2.AuthCodeOption // extra oauth2 options + StateBlankOK bool // If set, state returned as "" is deemed to be OK +} + +// Config does the initial creation of the token +// +// If opt is nil it will use the default Options +// +// It may run an internal webserver to receive the results +func Config(ctx context.Context, id, name string, m configmap.Mapper, oauthConfig *oauth2.Config, opt *Options) error { + if opt == nil { + opt = &Options{} + } + oauthConfig, changed := overrideCredentials(name, m, oauthConfig) + authorizeOnlyValue, ok := m.Get(config.ConfigAuthorize) + authorizeOnly := ok && authorizeOnlyValue != "" // set if being run by "rclone authorize" + authorizeNoAutoBrowserValue, ok := m.Get(config.ConfigAuthNoBrowser) + authorizeNoAutoBrowser := ok && authorizeNoAutoBrowserValue != "" + + // See if already have a token + tokenString, ok := m.Get("token") + if ok && tokenString != "" { + fmt.Printf("Already have a token - refresh?\n") + if !config.ConfirmWithConfig(ctx, m, "config_refresh_token", true) { + return nil + } + } + + // Ask the user whether they are using a local machine + isLocal := func() bool { + fmt.Printf("Use auto config?\n") + fmt.Printf(" * Say Y if not sure\n") + fmt.Printf(" * Say N if you are working on a remote or headless machine\n") + return config.ConfirmWithConfig(ctx, m, "config_is_local", true) + } + + // Detect whether we should use internal web server + useWebServer := false + switch oauthConfig.RedirectURL { + case TitleBarRedirectURL: + useWebServer = authorizeOnly + if !authorizeOnly { + useWebServer = isLocal() + } + if useWebServer { + // copy the config and set to use the internal webserver + configCopy := *oauthConfig + oauthConfig = &configCopy + oauthConfig.RedirectURL = RedirectURL + } + default: + if changed { + fmt.Printf("Make sure your Redirect URL is set to %q in your custom config.\n", oauthConfig.RedirectURL) + } + useWebServer = true + if authorizeOnly { + break + } + if !isLocal() { + fmt.Printf(`For this to work, you will need rclone available on a machine that has +a web browser available. 
+
+For more help and alternate methods see: https://rclone.org/remote_setup/
+
+Execute the following on the machine with the web browser (same rclone
+version recommended):
+
+`)
+			if changed {
+				fmt.Printf("\trclone authorize %q -- %q %q\n", id, oauthConfig.ClientID, oauthConfig.ClientSecret)
+			} else {
+				fmt.Printf("\trclone authorize %q\n", id)
+			}
+			fmt.Println("\nThen paste the result below:")
+			code := config.ReadNonEmptyLine("result> ")
+			token := &oauth2.Token{}
+			err := json.Unmarshal([]byte(code), token)
+			if err != nil {
+				return err
+			}
+			return PutToken(name, m, token, true)
+		}
+	}
+
+	// Make random state
+	state, err := random.Password(128)
+	if err != nil {
+		return err
+	}
+
+	// Generate oauth URL
+	opts := opt.OAuth2Opts
+	if !opt.NoOffline {
+		opts = append(opts, oauth2.AccessTypeOffline)
+	}
+	authURL := oauthConfig.AuthCodeURL(state, opts...)
+
+	// Prepare webserver if needed
+	var server *authServer
+	if useWebServer {
+		server = newAuthServer(opt, bindAddress, state, authURL)
+		err := server.Init()
+		if err != nil {
+			return errors.Wrap(err, "failed to start auth webserver")
+		}
+		go server.Serve()
+		defer server.Stop()
+		authURL = "http://" + bindAddress + "/auth?state=" + state
+	}
+
+	if !authorizeNoAutoBrowser && oauthConfig.RedirectURL != TitleBarRedirectURL {
+		// Open the URL for the user to visit
+		_ = open.Start(authURL)
+		fmt.Printf("If your browser doesn't open automatically go to the following link: %s\n", authURL)
+	} else {
+		fmt.Printf("Please go to the following link: %s\n", authURL)
+	}
+	fmt.Printf("Log in and authorize rclone for access\n")
+
+	// Read the code via the webserver or manually
+	var auth *AuthResult
+	if useWebServer {
+		fmt.Printf("Waiting for code...\n")
+		auth = <-server.result
+		if !auth.OK || auth.Code == "" {
+			return auth
+		}
+		fmt.Printf("Got code\n")
+		if opt.CheckAuth != nil {
+			err = opt.CheckAuth(oauthConfig, auth)
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		auth = &AuthResult{
+			Code: config.ReadNonEmptyLine("Enter verification code> "),
+		}
+	}
+
+	// Exchange the code for a token
+	ctx = Context(ctx, fshttp.NewClient(ctx))
+	token, err := oauthConfig.Exchange(ctx, auth.Code)
+	if err != nil {
+		return errors.Wrap(err, "failed to get token")
+	}
+
+	// Print code if we are doing a manual auth
+	if authorizeOnly {
+		result, err := json.Marshal(token)
+		if err != nil {
+			return errors.Wrap(err, "failed to marshal token")
+		}
+		fmt.Printf("Paste the following into your remote machine --->\n%s\n<---End paste\n", result)
+	}
+	return PutToken(name, m, token, true)
+}
+
+// Local web server for collecting auth
+type authServer struct {
+	opt         *Options
+	state       string
+	listener    net.Listener
+	bindAddress string
+	authURL     string
+	server      *http.Server
+	result      chan *AuthResult
+}
+
+// newAuthServer makes the webserver for collecting auth
+func newAuthServer(opt *Options, bindAddress, state, authURL string) *authServer {
+	return &authServer{
+		opt:         opt,
+		state:       state,
+		bindAddress: bindAddress,
+		authURL:     authURL, // http://host/auth redirects to here
+		result:      make(chan *AuthResult, 1),
+	}
+}
+
+// Receive the auth request
+func (s *authServer) handleAuth(w http.ResponseWriter, req *http.Request) {
+	fs.Debugf(nil, "Received %s request on auth server to %q", req.Method, req.URL.Path)
+
+	// Reply with the response to the user and to the channel
+	reply := func(status int, res *AuthResult) {
+		// Set the Content-Type header before WriteHeader - headers
+		// added after the status is written have no effect.
+		w.Header().Set("Content-Type", "text/html")
+		w.WriteHeader(status)
+		var t = 
template.Must(template.New("authResponse").Parse(AuthResponseTemplate)) + if err := t.Execute(w, res); err != nil { + fs.Debugf(nil, "Could not execute template for web response.") + } + s.result <- res + } + + // Parse the form parameters and save them + err := req.ParseForm() + if err != nil { + reply(http.StatusBadRequest, &AuthResult{ + Name: "Parse form error", + Description: err.Error(), + }) + return + } + + // get code, error if empty + code := req.Form.Get("code") + if code == "" { + reply(http.StatusBadRequest, &AuthResult{ + Name: "Auth Error", + Description: "No code returned by remote server", + }) + return + } + + // check state + state := req.Form.Get("state") + if state != s.state && !(state == "" && s.opt.StateBlankOK) { + reply(http.StatusBadRequest, &AuthResult{ + Name: "Auth state doesn't match", + Description: fmt.Sprintf("Expecting %q got %q", s.state, state), + }) + return + } + + // code OK + reply(http.StatusOK, &AuthResult{ + OK: true, + Code: code, + Form: req.Form, + }) +} + +// Init gets the internal web server ready to receive config details +func (s *authServer) Init() error { + fs.Debugf(nil, "Starting auth server on %s", s.bindAddress) + mux := http.NewServeMux() + s.server = &http.Server{ + Addr: s.bindAddress, + Handler: mux, + } + s.server.SetKeepAlivesEnabled(false) + + mux.HandleFunc("/favicon.ico", func(w http.ResponseWriter, req *http.Request) { + http.Error(w, "", http.StatusNotFound) + return + }) + mux.HandleFunc("/auth", func(w http.ResponseWriter, req *http.Request) { + state := req.FormValue("state") + if state != s.state { + fs.Debugf(nil, "State did not match: want %q got %q", s.state, state) + http.Error(w, "State did not match - please try again", http.StatusForbidden) + return + } + http.Redirect(w, req, s.authURL, http.StatusTemporaryRedirect) + return + }) + mux.HandleFunc("/", s.handleAuth) + + var err error + s.listener, err = net.Listen("tcp", s.bindAddress) + if err != nil { + return err + } + return nil +} + +// Serve the auth server, doesn't return +func (s *authServer) Serve() { + err := s.server.Serve(s.listener) + fs.Debugf(nil, "Closed auth server with error: %v", err) +} + +// Stop the auth server by closing its socket +func (s *authServer) Stop() { + fs.Debugf(nil, "Closing auth server") + close(s.result) + _ = s.listener.Close() + + // close the server + _ = s.server.Close() +} diff --git a/vendor/github.com/rclone/rclone/lib/oauthutil/renew.go b/vendor/github.com/rclone/rclone/lib/oauthutil/renew.go new file mode 100644 index 00000000000..1fb96e77129 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/oauthutil/renew.go @@ -0,0 +1,69 @@ +package oauthutil + +import ( + "sync/atomic" + + "github.com/rclone/rclone/fs" +) + +// Renew allows tokens to be renewed on expiry if uploads are in progress. +type Renew struct { + name string // name to use in logs + ts *TokenSource // token source that needs renewing + uploads int32 // number of uploads in progress - atomic access required + run func() error // a transaction to run to renew the token on +} + +// NewRenew creates a new Renew struct and starts a background process +// which renews the token whenever it expires. It uses the run() call +// to run a transaction to do this. +// +// It will only renew the token if the number of uploads > 0 +func NewRenew(name string, ts *TokenSource, run func() error) *Renew { + r := &Renew{ + name: name, + ts: ts, + run: run, + } + go r.renewOnExpiry() + return r +} + +// renewOnExpiry renews the token whenever it expires. 
Useful when there
+// are lots of uploads in progress and the token doesn't get renewed.
+// Amazon seem to cancel your uploads if you don't renew your token
+// for 2hrs.
+func (r *Renew) renewOnExpiry() {
+	expiry := r.ts.OnExpiry()
+	for {
+		<-expiry
+		uploads := atomic.LoadInt32(&r.uploads)
+		if uploads != 0 {
+			fs.Debugf(r.name, "Token expired - %d uploads in progress - refreshing", uploads)
+			// Do a transaction
+			err := r.run()
+			if err == nil {
+				fs.Debugf(r.name, "Token refresh successful")
+			} else {
+				fs.Errorf(r.name, "Token refresh failed: %v", err)
+			}
+		} else {
+			fs.Debugf(r.name, "Token expired but no uploads in progress - doing nothing")
+		}
+	}
+}
+
+// Start should be called before starting an upload
+func (r *Renew) Start() {
+	atomic.AddInt32(&r.uploads, 1)
+}
+
+// Stop should be called after finishing an upload
+func (r *Renew) Stop() {
+	atomic.AddInt32(&r.uploads, -1)
+}
+
+// Invalidate invalidates the token source
+func (r *Renew) Invalidate() {
+	r.ts.Invalidate()
+}
diff --git a/vendor/github.com/rclone/rclone/lib/pacer/context.go b/vendor/github.com/rclone/rclone/lib/pacer/context.go
new file mode 100644
index 00000000000..904c318e5ff
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/pacer/context.go
@@ -0,0 +1,23 @@
+package pacer
+
+import "context"
+
+// ctxt is a context key type.
+type ctxt byte
+
+// ctxt enumeration.
+const (
+	ctxRetries ctxt = iota
+)
+
+// RetriesCtx returns number of retries set for the context.
+// If retries are not specified for the context it returns false.
+func RetriesCtx(ctx context.Context) (int, bool) {
+	retries, ok := ctx.Value(ctxRetries).(int)
+	return retries, ok
+}
+
+// WithRetries sets number of retries for the context.
+func WithRetries(ctx context.Context, count int) context.Context {
+	return context.WithValue(ctx, ctxRetries, count)
+}
diff --git a/vendor/github.com/rclone/rclone/lib/pacer/pacer.go b/vendor/github.com/rclone/rclone/lib/pacer/pacer.go
new file mode 100644
index 00000000000..bc2c4c45f12
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/pacer/pacer.go
@@ -0,0 +1,282 @@
+// Package pacer makes pacing and retrying API calls easy
package pacer
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/rclone/rclone/lib/errors"
+)
+
+// State represents the public Pacer state that will be passed to the
+// configured Calculator
+type State struct {
+	SleepTime          time.Duration // current time to sleep before adding the pacer token back
+	ConsecutiveRetries int           // number of consecutive retries, will be 0 when the last invoker call returned false
+	LastError          error         // the error returned by the last invoker call or nil
+}
+
+// Calculator is a generic calculation function for a Pacer.
+type Calculator interface {
+	// Calculate takes the current Pacer state and returns the sleep time after which
+	// the next Pacer call will be done.
+	Calculate(state State) time.Duration
+}
+
+// Pacer is the primary type of the pacer package. It allows calls to
+// be retried with a configurable delay in between.
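+//
+// A minimal usage sketch (illustrative only - the settings shown are
+// arbitrary, and doCall/shouldRetry are hypothetical helpers):
+//
+//	p := New(
+//		CalculatorOption(NewDefault(MinSleep(20*time.Millisecond))),
+//		RetriesOption(5),
+//	)
+//	err := p.Call(func() (bool, error) {
+//		err := doCall()
+//		return shouldRetry(err), err
+//	})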
+type Pacer struct { + pacerOptions + mu sync.Mutex // Protecting read/writes + pacer chan struct{} // To pace the operations + connTokens chan struct{} // Connection tokens + state State +} +type pacerOptions struct { + maxConnections int // Maximum number of concurrent connections + retries int // Max number of retries + calculator Calculator // switchable pacing algorithm - call with mu held + invoker InvokerFunc // wrapper function used to invoke the target function +} + +// InvokerFunc is the signature of the wrapper function used to invoke the +// target function in Pacer. +type InvokerFunc func(try, tries int, f Paced) (bool, error) + +// Option can be used in New to configure the Pacer. +type Option func(*pacerOptions) + +// CalculatorOption sets a Calculator for the new Pacer. +func CalculatorOption(c Calculator) Option { + return func(p *pacerOptions) { p.calculator = c } +} + +// RetriesOption sets the retries number for the new Pacer. +func RetriesOption(retries int) Option { + return func(p *pacerOptions) { p.retries = retries } +} + +// MaxConnectionsOption sets the maximum connections number for the new Pacer. +func MaxConnectionsOption(maxConnections int) Option { + return func(p *pacerOptions) { p.maxConnections = maxConnections } +} + +// InvokerOption sets an InvokerFunc for the new Pacer. +func InvokerOption(invoker InvokerFunc) Option { + return func(p *pacerOptions) { p.invoker = invoker } +} + +// Paced is a function which is called by the Call and CallNoRetry +// methods. It should return a boolean, true if it would like to be +// retried, and an error. This error may be returned or returned +// wrapped in a RetryError. +type Paced func() (bool, error) + +// New returns a Pacer with sensible defaults. +func New(options ...Option) *Pacer { + opts := pacerOptions{ + maxConnections: 10, + retries: 3, + } + for _, o := range options { + o(&opts) + } + p := &Pacer{ + pacerOptions: opts, + pacer: make(chan struct{}, 1), + } + if p.calculator == nil { + p.SetCalculator(nil) + } + p.state.SleepTime = p.calculator.Calculate(p.state) + if p.invoker == nil { + p.invoker = invoke + } + p.SetMaxConnections(p.maxConnections) + + // Put the first pacing token in + p.pacer <- struct{}{} + + return p +} + +// SetMaxConnections sets the maximum number of concurrent connections. +// Setting the value to 0 will allow unlimited number of connections. +// Should not be changed once you have started calling the pacer. +// By default this will be set to fs.Config.Checkers. +func (p *Pacer) SetMaxConnections(n int) { + p.mu.Lock() + defer p.mu.Unlock() + p.maxConnections = n + if n <= 0 { + p.connTokens = nil + } else { + p.connTokens = make(chan struct{}, n) + for i := 0; i < n; i++ { + p.connTokens <- struct{}{} + } + } +} + +// SetRetries sets the max number of retries for Call +func (p *Pacer) SetRetries(retries int) { + p.mu.Lock() + defer p.mu.Unlock() + p.retries = retries +} + +// SetCalculator sets the pacing algorithm. Don't modify the Calculator object +// afterwards, use the ModifyCalculator method when needed. +// +// It will choose the default algorithm if nil is passed in. +func (p *Pacer) SetCalculator(c Calculator) { + p.mu.Lock() + defer p.mu.Unlock() + if c == nil { + c = NewDefault() + } + p.calculator = c +} + +// ModifyCalculator calls the given function with the currently configured +// Calculator and the Pacer lock held. 
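+//
+// For example (an illustrative sketch, assuming the Pacer was built
+// with a *Default Calculator):
+//
+//	p.ModifyCalculator(func(c Calculator) {
+//		if d, ok := c.(*Default); ok {
+//			d.Update(MaxSleep(10 * time.Second))
+//		}
+//	})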
+func (p *Pacer) ModifyCalculator(f func(Calculator)) {
+	p.mu.Lock()
+	f(p.calculator)
+	p.mu.Unlock()
+}
+
+// Start a call to the API
+//
+// This must be called as a pair with endCall
+//
+// This waits for the pacer token
+func (p *Pacer) beginCall() {
+	// pacer starts with a token in and whenever we take one out
+	// XXX ms later we put another in. We could do this with a
+	// Ticker more accurately, but then we'd have to work out how
+	// not to run it when it wasn't needed
+	<-p.pacer
+	if p.maxConnections > 0 {
+		<-p.connTokens
+	}
+
+	p.mu.Lock()
+	// Restart the timer
+	go func(t time.Duration) {
+		time.Sleep(t)
+		p.pacer <- struct{}{}
+	}(p.state.SleepTime)
+	p.mu.Unlock()
+}
+
+// endCall implements the pacing algorithm
+//
+// This should calculate a new sleepTime. It takes a boolean as to
+// whether the operation should be retried or not.
+func (p *Pacer) endCall(retry bool, err error) {
+	if p.maxConnections > 0 {
+		p.connTokens <- struct{}{}
+	}
+	p.mu.Lock()
+	if retry {
+		p.state.ConsecutiveRetries++
+	} else {
+		p.state.ConsecutiveRetries = 0
+	}
+	p.state.LastError = err
+	p.state.SleepTime = p.calculator.Calculate(p.state)
+	p.mu.Unlock()
+}
+
+// call implements Call but with settable retries
+func (p *Pacer) call(fn Paced, retries int) (err error) {
+	var retry bool
+	for i := 1; i <= retries; i++ {
+		p.beginCall()
+		retry, err = p.invoker(i, retries, fn)
+		p.endCall(retry, err)
+		if !retry {
+			break
+		}
+	}
+	return err
+}
+
+// CallContext paces the remote operations to not exceed the limits and retry
+// on rate limit exceeded. Context can be used to control the number of retries
+// on per Call basis. Use WithRetries function to set custom retry count.
+//
+// This calls fn, expecting it to return a retry flag and an
+// error. This error may be returned wrapped in a RetryError if the
+// number of retries is exceeded.
+func (p *Pacer) CallContext(ctx context.Context, fn Paced) (err error) {
+	p.mu.Lock()
+	retries := p.retries
+	if r, ok := RetriesCtx(ctx); ok {
+		retries = r
+	}
+	p.mu.Unlock()
+	return p.call(fn, retries)
+}
+
+// Call paces the remote operations to not exceed the limits and retry
+// on rate limit exceeded
+//
+// This calls fn, expecting it to return a retry flag and an
+// error. This error may be returned wrapped in a RetryError if the
+// number of retries is exceeded.
+func (p *Pacer) Call(fn Paced) (err error) {
+	p.mu.Lock()
+	retries := p.retries
+	p.mu.Unlock()
+	return p.call(fn, retries)
+}
+
+// CallNoRetry paces the remote operations to not exceed the limits
+// and return a retry error on rate limit exceeded
+//
+// This calls fn and wraps the output in a RetryError if it would like
+// it to be retried
+func (p *Pacer) CallNoRetry(fn Paced) error {
+	return p.call(fn, 1)
+}
+
+func invoke(try, tries int, f Paced) (bool, error) {
+	return f()
+}
+
+type retryAfterError struct {
+	error
+	retryAfter time.Duration
+}
+
+func (r *retryAfterError) Error() string {
+	return r.error.Error()
+}
+
+func (r *retryAfterError) Cause() error {
+	return r.error
+}
+
+// RetryAfterError returns a wrapped error that can be used by Calculator implementations
+func RetryAfterError(err error, retryAfter time.Duration) error {
+	return &retryAfterError{
+		error:      err,
+		retryAfter: retryAfter,
+	}
+}
+
+// IsRetryAfter returns true if the error or any of its Causes is an error
+// returned by RetryAfterError. It also returns the associated Duration if possible.
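+//
+// A Calculator will typically use it like this to honour a server's
+// Retry-After hint (sketch - state is the State passed to Calculate):
+//
+//	if t, ok := IsRetryAfter(state.LastError); ok {
+//		return t
+//	}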
+func IsRetryAfter(err error) (retryAfter time.Duration, isRetryAfter bool) {
+	errors.Walk(err, func(err error) bool {
+		if r, ok := err.(*retryAfterError); ok {
+			retryAfter, isRetryAfter = r.retryAfter, true
+			return true
+		}
+		return false
+	})
+	return
+}
diff --git a/vendor/github.com/rclone/rclone/lib/pacer/pacers.go b/vendor/github.com/rclone/rclone/lib/pacer/pacers.go
new file mode 100644
index 00000000000..d24c1028513
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/pacer/pacers.go
@@ -0,0 +1,355 @@
+package pacer
+
+import (
+	"math/rand"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+type (
+	// MinSleep configures the minimum sleep time of a Calculator
+	MinSleep time.Duration
+	// MaxSleep configures the maximum sleep time of a Calculator
+	MaxSleep time.Duration
+	// DecayConstant configures the decay constant time of a Calculator
+	DecayConstant uint
+	// AttackConstant configures the attack constant of a Calculator
+	AttackConstant uint
+	// Burst configures the number of API calls to allow without sleeping
+	Burst int
+)
+
+// Default is a truncated exponential attack and decay.
+//
+// On retries the sleep time is doubled, on non errors then sleeptime decays
+// according to the decay constant as set with SetDecayConstant.
+//
+// The sleep never goes below that set with SetMinSleep or above that set
+// with SetMaxSleep.
+type Default struct {
+	minSleep       time.Duration // minimum sleep time
+	maxSleep       time.Duration // maximum sleep time
+	decayConstant  uint          // decay constant
+	attackConstant uint          // attack constant
+}
+
+// DefaultOption is the interface implemented by all options for the Default Calculator
+type DefaultOption interface {
+	ApplyDefault(*Default)
+}
+
+// NewDefault creates a Calculator used by Pacer as the default.
+func NewDefault(opts ...DefaultOption) *Default {
+	c := &Default{
+		minSleep:       10 * time.Millisecond,
+		maxSleep:       2 * time.Second,
+		decayConstant:  2,
+		attackConstant: 1,
+	}
+	c.Update(opts...)
+	return c
+}
+
+// Update applies the Calculator options.
+func (c *Default) Update(opts ...DefaultOption) {
+	for _, opt := range opts {
+		opt.ApplyDefault(c)
+	}
+}
+
+// ApplyDefault updates the value on the Calculator
+func (o MinSleep) ApplyDefault(c *Default) {
+	c.minSleep = time.Duration(o)
+}
+
+// ApplyDefault updates the value on the Calculator
+func (o MaxSleep) ApplyDefault(c *Default) {
+	c.maxSleep = time.Duration(o)
+}
+
+// ApplyDefault updates the value on the Calculator
+func (o DecayConstant) ApplyDefault(c *Default) {
+	c.decayConstant = uint(o)
+}
+
+// ApplyDefault updates the value on the Calculator
+func (o AttackConstant) ApplyDefault(c *Default) {
+	c.attackConstant = uint(o)
+}
+
+// Calculate takes the current Pacer state and return the wait time until the next try.
+func (c *Default) Calculate(state State) time.Duration {
+	if t, ok := IsRetryAfter(state.LastError); ok {
+		if t < c.minSleep {
+			return c.minSleep
+		}
+		return t
+	}
+
+	if state.ConsecutiveRetries > 0 {
+		sleepTime := c.maxSleep
+		if c.attackConstant != 0 {
+			sleepTime = (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
+		}
+		if sleepTime > c.maxSleep {
+			sleepTime = c.maxSleep
+		}
+		return sleepTime
+	}
+	sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
+	if sleepTime < c.minSleep {
+		sleepTime = c.minSleep
+	}
+	return sleepTime
+}
+
+// AmazonCloudDrive is a specialized pacer for Amazon Drive
+//
+// It implements a truncated exponential backoff strategy with randomization.
+// Normally operations are paced at the interval set with SetMinSleep. On errors +// the sleep timer is set to 0..2**retries seconds. +// +// See https://developer.amazon.com/public/apis/experience/cloud-drive/content/restful-api-best-practices +type AmazonCloudDrive struct { + minSleep time.Duration // minimum sleep time +} + +// AmazonCloudDriveOption is the interface implemented by all options for the AmazonCloudDrive Calculator +type AmazonCloudDriveOption interface { + ApplyAmazonCloudDrive(*AmazonCloudDrive) +} + +// NewAmazonCloudDrive returns a new AmazonCloudDrive Calculator with default values +func NewAmazonCloudDrive(opts ...AmazonCloudDriveOption) *AmazonCloudDrive { + c := &AmazonCloudDrive{ + minSleep: 10 * time.Millisecond, + } + c.Update(opts...) + return c +} + +// Update applies the Calculator options. +func (c *AmazonCloudDrive) Update(opts ...AmazonCloudDriveOption) { + for _, opt := range opts { + opt.ApplyAmazonCloudDrive(c) + } +} + +// ApplyAmazonCloudDrive updates the value on the Calculator +func (o MinSleep) ApplyAmazonCloudDrive(c *AmazonCloudDrive) { + c.minSleep = time.Duration(o) +} + +// Calculate takes the current Pacer state and return the wait time until the next try. +func (c *AmazonCloudDrive) Calculate(state State) time.Duration { + if t, ok := IsRetryAfter(state.LastError); ok { + if t < c.minSleep { + return c.minSleep + } + return t + } + + consecutiveRetries := state.ConsecutiveRetries + if consecutiveRetries == 0 { + return c.minSleep + } + if consecutiveRetries > 9 { + consecutiveRetries = 9 + } + // consecutiveRetries starts at 1 so + // maxSleep is 2**(consecutiveRetries-1) seconds + maxSleep := time.Second << uint(consecutiveRetries-1) + // actual sleep is random from 0..maxSleep + sleepTime := time.Duration(rand.Int63n(int64(maxSleep))) + if sleepTime < c.minSleep { + sleepTime = c.minSleep + } + return sleepTime +} + +// AzureIMDS is a pacer for the Azure instance metadata service. +type AzureIMDS struct { +} + +// NewAzureIMDS returns a new Azure IMDS calculator. +func NewAzureIMDS() *AzureIMDS { + c := &AzureIMDS{} + return c +} + +// Calculate takes the current Pacer state and return the wait time until the next try. +func (c *AzureIMDS) Calculate(state State) time.Duration { + var addBackoff time.Duration + + if state.ConsecutiveRetries == 0 { + // Initial condition: no backoff. + return 0 + } + + if state.ConsecutiveRetries > 4 { + // The number of consecutive retries shouldn't exceed five. + // In case it does for some reason, cap delay. 
+		addBackoff = 0
+	} else {
+		addBackoff = time.Duration(2<<uint(state.ConsecutiveRetries)) * time.Second
+	}
+	return addBackoff + time.Duration(rand.Int63n(int64(time.Second)))
+}
+
+// GoogleDrive is a specialized pacer for Google Drive
+//
+// It implements a truncated exponential backoff strategy with
+// randomization. Normally operations are paced at the interval set
+// with SetMinSleep. On errors the sleep timer is set to
+// (2 ^ n) + random_number_milliseconds seconds.
+//
+// See https://developers.google.com/drive/v2/web/handle-errors#exponential-backoff
+type GoogleDrive struct {
+	minSleep time.Duration // minimum sleep time
+	burst    int           // number of requests without sleeping
+	limiter  *rate.Limiter // rate limiter for the minSleep
+}
+
+// GoogleDriveOption is the interface implemented by all options for the GoogleDrive Calculator
+type GoogleDriveOption interface {
+	ApplyGoogleDrive(*GoogleDrive)
+}
+
+// NewGoogleDrive returns a new GoogleDrive Calculator with default values
+func NewGoogleDrive(opts ...GoogleDriveOption) *GoogleDrive {
+	c := &GoogleDrive{
+		minSleep: 10 * time.Millisecond,
+		burst:    1,
+	}
+	c.Update(opts...)
+	return c
+}
+
+// Update applies the Calculator options.
+func (c *GoogleDrive) Update(opts ...GoogleDriveOption) {
+	for _, opt := range opts {
+		opt.ApplyGoogleDrive(c)
+	}
+	if c.burst <= 0 {
+		c.burst = 1
+	}
+	c.limiter = rate.NewLimiter(rate.Every(c.minSleep), c.burst)
+}
+
+// ApplyGoogleDrive updates the value on the Calculator
+func (o MinSleep) ApplyGoogleDrive(c *GoogleDrive) {
+	c.minSleep = time.Duration(o)
+}
+
+// ApplyGoogleDrive updates the value on the Calculator
+func (o Burst) ApplyGoogleDrive(c *GoogleDrive) {
+	c.burst = int(o)
+}
+
+// Calculate takes the current Pacer state and return the wait time until the next try.
+func (c *GoogleDrive) Calculate(state State) time.Duration {
+	consecutiveRetries := state.ConsecutiveRetries
+	if consecutiveRetries == 0 {
+		return c.limiter.Reserve().Delay()
+	}
+	if consecutiveRetries > 5 {
+		consecutiveRetries = 5
+	}
+	// consecutiveRetries starts at 1 so go from 1,2,3,4,5,5 => 1,2,4,8,16,16
+	// maxSleep is 2**(consecutiveRetries-1) seconds + random milliseconds
+	return time.Second<<uint(consecutiveRetries-1) + time.Duration(rand.Int63n(int64(time.Millisecond*1000)))
+}
+
+// S3 is a specialized pacer for S3
+//
+// It is basically the Default pacer, but allows the sleep time to
+// decay to zero when things are going well.
+type S3 struct {
+	minSleep       time.Duration // minimum sleep time
+	maxSleep       time.Duration // maximum sleep time
+	decayConstant  uint          // decay constant
+	attackConstant uint          // attack constant
+}
+
+// S3Option is the interface implemented by all options for the S3 Calculator
+type S3Option interface {
+	ApplyS3(*S3)
+}
+
+// NewS3 returns a new S3 Calculator with default values
+func NewS3(opts ...S3Option) *S3 {
+	c := &S3{
+		maxSleep:       2 * time.Second,
+		decayConstant:  2,
+		attackConstant: 1,
+	}
+	c.Update(opts...)
+	return c
+}
+
+// Update applies the Calculator options.
+func (c *S3) Update(opts ...S3Option) {
+	for _, opt := range opts {
+		opt.ApplyS3(c)
+	}
+}
+
+// ApplyS3 updates the value on the Calculator
+func (o MinSleep) ApplyS3(c *S3) {
+	c.minSleep = time.Duration(o)
+}
+
+// ApplyS3 updates the value on the Calculator
+func (o MaxSleep) ApplyS3(c *S3) {
+	c.maxSleep = time.Duration(o)
+}
+
+// ApplyS3 updates the value on the Calculator
+func (o DecayConstant) ApplyS3(c *S3) {
+	c.decayConstant = uint(o)
+}
+
+// ApplyS3 updates the value on the Calculator
+func (o AttackConstant) ApplyS3(c *S3) {
+	c.attackConstant = uint(o)
+}
+
+// Calculate takes the current Pacer state and return the wait time until the next try.
+func (c *S3) Calculate(state State) time.Duration {
+	if t, ok := IsRetryAfter(state.LastError); ok {
+		if t < c.minSleep {
+			return c.minSleep
+		}
+		return t
+	}
+
+	if state.ConsecutiveRetries > 0 {
+		if c.attackConstant == 0 {
+			return c.maxSleep
+		}
+		if state.SleepTime == 0 {
+			return c.minSleep
+		}
+		sleepTime := (state.SleepTime << c.attackConstant) / ((1 << c.attackConstant) - 1)
+		if sleepTime > c.maxSleep {
+			sleepTime = c.maxSleep
+		}
+		return sleepTime
+	}
+	sleepTime := (state.SleepTime<<c.decayConstant - state.SleepTime) >> c.decayConstant
+	if sleepTime < c.minSleep {
+		sleepTime = 0
+	}
+	return sleepTime
+}
diff --git a/vendor/github.com/rclone/rclone/lib/pacer/tokens.go b/vendor/github.com/rclone/rclone/lib/pacer/tokens.go
new file mode 100644
index 00000000000..b4f905ba9be
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/pacer/tokens.go
@@ -0,0 +1,31 @@
+// Tokens for controlling concurrency
+
+package pacer
+
+// TokenDispenser is for controlling concurrency
+type TokenDispenser struct {
+	tokens chan struct{}
+}
+
+// NewTokenDispenser makes a pool of n tokens
+func NewTokenDispenser(n int) *TokenDispenser {
+	td := &TokenDispenser{
+		tokens: make(chan struct{}, n),
+	}
+	// Fill up the upload tokens
+	for i := 0; i < n; i++ {
+		td.tokens <- struct{}{}
+	}
+	return td
+}
+
+// Get gets a token from the pool - don't forget to return it with Put
+func (td *TokenDispenser) Get() {
+	<-td.tokens
+	return
+}
+
+// Put returns a token
+func (td *TokenDispenser) Put() {
+	td.tokens <- struct{}{}
+}
diff --git a/vendor/github.com/rclone/rclone/lib/pool/pool.go b/vendor/github.com/rclone/rclone/lib/pool/pool.go
new file mode 100644
index 00000000000..12f6f14617f
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/pool/pool.go
@@ -0,0 +1,215 @@
+// Package pool implements a memory pool similar in concept to
+// sync.Pool but with more determinism.
+package pool
+
+import (
+	"fmt"
+	"log"
+	"runtime"
+	"sync"
+	"time"
+
+	"github.com/rclone/rclone/lib/mmap"
+)
+
+// Pool of internal buffers
+//
+// We hold buffers in cache. Every time we Get or Put we update
+// minFill which is the minimum len(cache) seen.
+//
+// Every flushTime we remove minFill buffers from the cache as they
+// were not used in the previous flushTime interval.
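+//
+// Typical use (an illustrative sketch - the sizes are arbitrary):
+//
+//	bp := New(time.Minute, 1024*1024, 64, false)
+//	buf := bp.Get()
+//	// ... fill and use buf ...
+//	bp.Put(buf)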
+type Pool struct { + mu sync.Mutex + cache [][]byte + minFill int // the minimum fill of the cache + bufferSize int + poolSize int + timer *time.Timer + inUse int + alloced int + flushTime time.Duration + flushPending bool + alloc func(int) ([]byte, error) + free func([]byte) error +} + +// New makes a buffer pool +// +// flushTime is the interval the buffer pools is flushed +// bufferSize is the size of the allocations +// poolSize is the maximum number of free buffers in the pool +// useMmap should be set to use mmap allocations +func New(flushTime time.Duration, bufferSize, poolSize int, useMmap bool) *Pool { + bp := &Pool{ + cache: make([][]byte, 0, poolSize), + poolSize: poolSize, + flushTime: flushTime, + bufferSize: bufferSize, + } + if useMmap { + bp.alloc = mmap.Alloc + bp.free = mmap.Free + } else { + bp.alloc = func(size int) ([]byte, error) { + return make([]byte, size), nil + } + bp.free = func([]byte) error { + return nil + } + } + bp.timer = time.AfterFunc(flushTime, bp.flushAged) + runtime.SetFinalizer(bp, (*Pool).flushAndStop) + return bp +} + +// get gets the last buffer in bp.cache +// +// Call with mu held +func (bp *Pool) get() []byte { + n := len(bp.cache) - 1 + buf := bp.cache[n] + bp.cache[n] = nil // clear buffer pointer from bp.cache + bp.cache = bp.cache[:n] + return buf +} + +// put puts the buffer on the end of bp.cache +// +// Call with mu held +func (bp *Pool) put(buf []byte) { + bp.cache = append(bp.cache, buf) +} + +// flush n entries from the entire buffer pool +// Call with mu held +func (bp *Pool) flush(n int) { + for i := 0; i < n; i++ { + bp.freeBuffer(bp.get()) + } + bp.minFill = len(bp.cache) +} + +// Flush the entire buffer pool +func (bp *Pool) Flush() { + bp.mu.Lock() + bp.flush(len(bp.cache)) + bp.mu.Unlock() +} + +// Flush the entire buffer pool and stop flusher +func (bp *Pool) flushAndStop() { + bp.mu.Lock() + bp.timer.Stop() + bp.flush(len(bp.cache)) + bp.mu.Unlock() +} + +// Remove bp.minFill buffers +func (bp *Pool) flushAged() { + bp.mu.Lock() + bp.flushPending = false + bp.flush(bp.minFill) + // If there are still items in the cache, schedule another flush + if len(bp.cache) != 0 { + bp.kickFlusher() + } + bp.mu.Unlock() +} + +// InUse returns the number of buffers in use which haven't been +// returned to the pool +func (bp *Pool) InUse() int { + bp.mu.Lock() + defer bp.mu.Unlock() + return bp.inUse +} + +// InPool returns the number of buffers in the pool +func (bp *Pool) InPool() int { + bp.mu.Lock() + defer bp.mu.Unlock() + return len(bp.cache) +} + +// Alloced returns the number of buffers allocated and not yet freed +func (bp *Pool) Alloced() int { + bp.mu.Lock() + defer bp.mu.Unlock() + return bp.alloced +} + +// starts or resets the buffer flusher timer - call with mu held +func (bp *Pool) kickFlusher() { + if bp.flushPending { + return + } + bp.flushPending = true + bp.timer.Reset(bp.flushTime) +} + +// Make sure minFill is correct - call with mu held +func (bp *Pool) updateMinFill() { + if len(bp.cache) < bp.minFill { + bp.minFill = len(bp.cache) + } +} + +// Get a buffer from the pool or allocate one +func (bp *Pool) Get() []byte { + bp.mu.Lock() + var buf []byte + waitTime := time.Millisecond + for { + if len(bp.cache) > 0 { + buf = bp.get() + break + } else { + var err error + buf, err = bp.alloc(bp.bufferSize) + if err == nil { + bp.alloced++ + break + } + log.Printf("Failed to get memory for buffer, waiting for %v: %v", waitTime, err) + bp.mu.Unlock() + time.Sleep(waitTime) + bp.mu.Lock() + waitTime *= 2 + } + } + 
bp.inUse++
+	bp.updateMinFill()
+	bp.mu.Unlock()
+	return buf
+}
+
+// freeBuffer returns mem to the os if required - call with lock held
+func (bp *Pool) freeBuffer(mem []byte) {
+	err := bp.free(mem)
+	if err != nil {
+		log.Printf("Failed to free memory: %v", err)
+	}
+	bp.alloced--
+}
+
+// Put returns the buffer to the buffer cache or frees it
+//
+// Note that if you try to return a buffer of the wrong size to Put it
+// will panic.
+func (bp *Pool) Put(buf []byte) {
+	bp.mu.Lock()
+	defer bp.mu.Unlock()
+	buf = buf[0:cap(buf)]
+	if len(buf) != bp.bufferSize {
+		panic(fmt.Sprintf("Returning buffer sized %d but expecting %d", len(buf), bp.bufferSize))
+	}
+	if len(bp.cache) < bp.poolSize {
+		bp.put(buf)
+	} else {
+		bp.freeBuffer(buf)
+	}
+	bp.inUse--
+	bp.updateMinFill()
+	bp.kickFlusher()
+}
diff --git a/vendor/github.com/rclone/rclone/lib/random/random.go b/vendor/github.com/rclone/rclone/lib/random/random.go
new file mode 100644
index 00000000000..57a858bebb8
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/random/random.go
@@ -0,0 +1,71 @@
+// Package random holds a few functions for working with random numbers
+package random
+
+import (
+	cryptorand "crypto/rand"
+	"encoding/base64"
+	"encoding/binary"
+	mathrand "math/rand"
+
+	"github.com/pkg/errors"
+)
+
+// String creates a random string for test purposes.
+//
+// Do not use these for passwords.
+func String(n int) string {
+	const (
+		vowel     = "aeiou"
+		consonant = "bcdfghjklmnpqrstvwxyz"
+		digit     = "0123456789"
+	)
+	pattern := []string{consonant, vowel, consonant, vowel, consonant, vowel, consonant, digit}
+	out := make([]byte, n)
+	p := 0
+	for i := range out {
+		source := pattern[p]
+		p = (p + 1) % len(pattern)
+		out[i] = source[mathrand.Intn(len(source))]
+	}
+	return string(out)
+}
+
+// Password creates a crypto strong password which is just about
+// memorable. The password is composed of printable ASCII characters
+// from the base64 alphabet.
+//
+// Requires password strength in bits.
+// 64 is just about memorable
+// 128 is secure
+func Password(bits int) (password string, err error) {
+	bytes := bits / 8
+	if bits%8 != 0 {
+		bytes++
+	}
+	var pw = make([]byte, bytes)
+	n, err := cryptorand.Read(pw)
+	if err != nil {
+		return "", errors.Wrap(err, "password read failed")
+	}
+	if n != bytes {
+		return "", errors.Errorf("password short read: %d", n)
+	}
+	password = base64.RawURLEncoding.EncodeToString(pw)
+	return password, nil
+}
+
+// Seed the global math/rand with crypto strong data
+//
+// This doesn't make it OK to use math/rand in crypto sensitive
+// environments - don't do that! However it does help to mitigate the
+// problem if that happens accidentally. This would have helped with
+// CVE-2020-28924 - #4783
+func Seed() error {
+	var seed int64
+	err := binary.Read(cryptorand.Reader, binary.LittleEndian, &seed)
+	if err != nil {
+		return errors.Wrap(err, "failed to read random seed")
+	}
+	mathrand.Seed(seed)
+	return nil
+}
diff --git a/vendor/github.com/rclone/rclone/lib/readers/counting_reader.go b/vendor/github.com/rclone/rclone/lib/readers/counting_reader.go
new file mode 100644
index 00000000000..872b4c50e96
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/readers/counting_reader.go
@@ -0,0 +1,28 @@
+package readers
+
+import "io"
+
+// NewCountingReader returns a CountingReader, which will read from the given
+// reader while keeping track of how many bytes were read.
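+//
+// For example (illustrative only):
+//
+//	cr := NewCountingReader(strings.NewReader("some data"))
+//	_, _ = io.Copy(ioutil.Discard, cr)
+//	total := cr.BytesRead() // 9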
+func NewCountingReader(in io.Reader) *CountingReader {
+	return &CountingReader{in: in}
+}
+
+// CountingReader holds a reader and a read count of how many bytes were read
+// so far.
+type CountingReader struct {
+	in   io.Reader
+	read uint64
+}
+
+// Read reads from the underlying reader.
+func (cr *CountingReader) Read(b []byte) (int, error) {
+	n, err := cr.in.Read(b)
+	cr.read += uint64(n)
+	return n, err
+}
+
+// BytesRead returns how many bytes were read from the underlying reader so far.
+func (cr *CountingReader) BytesRead() uint64 {
+	return cr.read
+}
diff --git a/vendor/github.com/rclone/rclone/lib/readers/error.go b/vendor/github.com/rclone/rclone/lib/readers/error.go
new file mode 100644
index 00000000000..13d4f077ba6
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/readers/error.go
@@ -0,0 +1,11 @@
+package readers
+
+// ErrorReader wraps an error to return on Read
+type ErrorReader struct {
+	Err error
+}
+
+// Read always returns the error
+func (er ErrorReader) Read(p []byte) (n int, err error) {
+	return 0, er.Err
+}
diff --git a/vendor/github.com/rclone/rclone/lib/readers/limited.go b/vendor/github.com/rclone/rclone/lib/readers/limited.go
new file mode 100644
index 00000000000..218dd661a0a
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/readers/limited.go
@@ -0,0 +1,22 @@
+package readers
+
+import "io"
+
+// LimitedReadCloser adds io.Closer to io.LimitedReader. Create one with NewLimitedReadCloser
+type LimitedReadCloser struct {
+	*io.LimitedReader
+	io.Closer
+}
+
+// NewLimitedReadCloser returns a LimitedReadCloser wrapping rc to
+// limit it to reading limit bytes. If limit < 0 then it does not
+// wrap rc, it just returns it.
+func NewLimitedReadCloser(rc io.ReadCloser, limit int64) (lrc io.ReadCloser) {
+	if limit < 0 {
+		return rc
+	}
+	return &LimitedReadCloser{
+		LimitedReader: &io.LimitedReader{R: rc, N: limit},
+		Closer:        rc,
+	}
+}
diff --git a/vendor/github.com/rclone/rclone/lib/readers/noclose.go b/vendor/github.com/rclone/rclone/lib/readers/noclose.go
new file mode 100644
index 00000000000..dc36e8be248
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/readers/noclose.go
@@ -0,0 +1,29 @@
+package readers
+
+import "io"
+
+// noClose is used to wrap an io.Reader to stop it being upgraded
+type noClose struct {
+	in io.Reader
+}
+
+// Read implements io.Reader by passing it straight on
+func (nc noClose) Read(p []byte) (n int, err error) {
+	return nc.in.Read(p)
+}
+
+// NoCloser makes sure that the io.Reader passed in can't be upgraded
+// to an io.Closer.
+//
+// This is for use with http.NewRequest to make sure the body doesn't
+// get upgraded to an io.Closer and the body closed unexpectedly.
+func NoCloser(in io.Reader) io.Reader {
+	if in == nil {
+		return in
+	}
+	// if in doesn't implement io.Closer, just return it
+	if _, canClose := in.(io.Closer); !canClose {
+		return in
+	}
+	return noClose{in: in}
+}
diff --git a/vendor/github.com/rclone/rclone/lib/readers/pattern_reader.go b/vendor/github.com/rclone/rclone/lib/readers/pattern_reader.go
new file mode 100644
index 00000000000..a480dc18514
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/readers/pattern_reader.go
@@ -0,0 +1,59 @@
+package readers
+
+import (
+	"io"
+
+	"github.com/pkg/errors"
+)
+
+// This is the smallest prime less than 256
+//
+// Using a prime here means we are less likely to hit repeating patterns
+const patternReaderModulo = 251
+
+// NewPatternReader creates a reader that returns a deterministic byte pattern.
+// After length bytes are read it returns io.EOF.
+func NewPatternReader(length int64) io.ReadSeeker {
+	return &patternReader{
+		length: length,
+	}
+}
+
+type patternReader struct {
+	offset int64
+	length int64
+	c      byte
+}
+
+func (r *patternReader) Read(p []byte) (n int, err error) {
+	for i := range p {
+		if r.offset >= r.length {
+			return n, io.EOF
+		}
+		p[i] = r.c
+		r.c = (r.c + 1) % patternReaderModulo
+		r.offset++
+		n++
+	}
+	return
+}
+
+// Seek implements the io.Seeker interface.
+func (r *patternReader) Seek(offset int64, whence int) (abs int64, err error) {
+	switch whence {
+	case io.SeekStart:
+		abs = offset
+	case io.SeekCurrent:
+		abs = r.offset + offset
+	case io.SeekEnd:
+		abs = r.length + offset
+	default:
+		return 0, errors.New("patternReader: invalid whence")
+	}
+	if abs < 0 {
+		return 0, errors.New("patternReader: negative position")
+	}
+	r.offset = abs
+	r.c = byte(abs % patternReaderModulo)
+	return abs, nil
+}
diff --git a/vendor/github.com/rclone/rclone/lib/readers/readfill.go b/vendor/github.com/rclone/rclone/lib/readers/readfill.go
new file mode 100644
index 00000000000..64b5de44e89
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/readers/readfill.go
@@ -0,0 +1,18 @@
+package readers
+
+import "io"
+
+// ReadFill reads as much data from r into buf as it can
+//
+// It reads until the buffer is full or r.Read returned an error.
+//
+// This is io.ReadFull but when you just want as much data as
+// possible, not an exact size of block.
+func ReadFill(r io.Reader, buf []byte) (n int, err error) {
+	var nn int
+	for n < len(buf) && err == nil {
+		nn, err = r.Read(buf[n:])
+		n += nn
+	}
+	return n, err
+}
diff --git a/vendor/github.com/rclone/rclone/lib/readers/repeatable.go b/vendor/github.com/rclone/rclone/lib/readers/repeatable.go
new file mode 100644
index 00000000000..e61b083d82d
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/readers/repeatable.go
@@ -0,0 +1,104 @@
+package readers
+
+import (
+	"io"
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+// A RepeatableReader implements io.ReadSeeker; it allows seeking cached data
+// back and forth within the reader but will only read data from the internal Reader as necessary
+// and will play nicely with the Account and io.LimitedReader to reflect current speed
+type RepeatableReader struct {
+	mu sync.Mutex // protect against concurrent use
+	in io.Reader  // Input reader
+	i  int64      // current reading index
+	b  []byte     // internal cache buffer
+}
+
+var _ io.ReadSeeker = (*RepeatableReader)(nil)
+
+// Seek implements the io.Seeker interface.
+// If the seek position is beyond the length of the cached data the function will return
+// the maximum offset that can be used and a "fs.RepeatableReader.Seek: offset is unavailable" error
+func (r *RepeatableReader) Seek(offset int64, whence int) (int64, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	var abs int64
+	cacheLen := int64(len(r.b))
+	switch whence {
+	case io.SeekStart:
+		abs = offset
+	case io.SeekCurrent:
+		abs = r.i + offset
+	case io.SeekEnd:
+		abs = cacheLen + offset
+	default:
+		return 0, errors.New("fs.RepeatableReader.Seek: invalid whence")
+	}
+	if abs < 0 {
+		return 0, errors.New("fs.RepeatableReader.Seek: negative position")
+	}
+	if abs > cacheLen {
+		return offset - (abs - cacheLen), errors.New("fs.RepeatableReader.Seek: offset is unavailable")
+	}
+	r.i = abs
+	return abs, nil
+}
+
+// Read data from original Reader into bytes
+// Data is either served from the underlying Reader or from cache if it was already read
+func (r *RepeatableReader) Read(b []byte) (n int, err error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	cacheLen := int64(len(r.b))
+	if r.i == cacheLen {
+		n, err = r.in.Read(b)
+		if n > 0 {
+			r.b = append(r.b, b[:n]...)
+		}
+	} else {
+		n = copy(b, r.b[r.i:])
+	}
+	r.i += int64(n)
+	return n, err
+}
+
+// NewRepeatableReader creates a new repeatable reader from Reader r
+func NewRepeatableReader(r io.Reader) *RepeatableReader {
+	return &RepeatableReader{in: r}
+}
+
+// NewRepeatableReaderSized creates a new repeatable reader from Reader r
+// with an initial buffer of size.
+func NewRepeatableReaderSized(r io.Reader, size int) *RepeatableReader {
+	return &RepeatableReader{
+		in: r,
+		b:  make([]byte, 0, size),
+	}
+}
+
+// NewRepeatableLimitReader creates a new repeatable reader from Reader r
+// with an initial buffer of size wrapped in an io.LimitReader to read
+// only size.
+func NewRepeatableLimitReader(r io.Reader, size int) *RepeatableReader {
+	return NewRepeatableReaderSized(io.LimitReader(r, int64(size)), size)
+}
+
+// NewRepeatableReaderBuffer creates a new repeatable reader from Reader r
+// using the buffer passed in.
+func NewRepeatableReaderBuffer(r io.Reader, buf []byte) *RepeatableReader {
+	return &RepeatableReader{
+		in: r,
+		b:  buf[:0],
+	}
+}
+
+// NewRepeatableLimitReaderBuffer creates a new repeatable reader from
+// Reader r and buf wrapped in an io.LimitReader to read only size.
+func NewRepeatableLimitReaderBuffer(r io.Reader, buf []byte, size int64) *RepeatableReader {
+	return NewRepeatableReaderBuffer(io.LimitReader(r, size), buf)
+}
diff --git a/vendor/github.com/rclone/rclone/lib/rest/rest.go b/vendor/github.com/rclone/rclone/lib/rest/rest.go
new file mode 100644
index 00000000000..e6e0e68501a
--- /dev/null
+++ b/vendor/github.com/rclone/rclone/lib/rest/rest.go
@@ -0,0 +1,474 @@
+// Package rest implements a simple REST wrapper
+//
+// All methods are safe for concurrent calling.
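+//
+// A minimal usage sketch (illustrative only - the root URL and the
+// itemsResult type are assumptions, and ctx is a context.Context):
+//
+//	client := rest.NewClient(http.DefaultClient).SetRoot("https://api.example.com")
+//	opts := rest.Opts{Method: "GET", Path: "/items"}
+//	var result itemsResult
+//	_, err := client.CallJSON(ctx, &opts, nil, &result)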
+package rest
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"encoding/xml"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"sync"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/lib/readers"
+)
+
+// Client contains the info to sustain the API
+type Client struct {
+	mu           sync.RWMutex
+	c            *http.Client
+	rootURL      string
+	errorHandler func(resp *http.Response) error
+	headers      map[string]string
+	signer       SignerFn
+}
+
+// NewClient takes an oauth http.Client and makes a new api instance
+func NewClient(c *http.Client) *Client {
+	api := &Client{
+		c:            c,
+		errorHandler: defaultErrorHandler,
+		headers:      make(map[string]string),
+	}
+	return api
+}
+
+// ReadBody reads resp.Body into result, closing the body
+func ReadBody(resp *http.Response) (result []byte, err error) {
+	defer fs.CheckClose(resp.Body, &err)
+	return ioutil.ReadAll(resp.Body)
+}
+
+// defaultErrorHandler doesn't attempt to parse the http body, just
+// returns it in the error message closing resp.Body
+func defaultErrorHandler(resp *http.Response) (err error) {
+	body, err := ReadBody(resp)
+	if err != nil {
+		return errors.Wrap(err, "error reading error out of body")
+	}
+	return errors.Errorf("HTTP error %v (%v) returned body: %q", resp.StatusCode, resp.Status, body)
+}
+
+// SetErrorHandler sets the handler to decode an error response when
+// the HTTP status code is not 2xx. The handler should close resp.Body.
+func (api *Client) SetErrorHandler(fn func(resp *http.Response) error) *Client {
+	api.mu.Lock()
+	defer api.mu.Unlock()
+	api.errorHandler = fn
+	return api
+}
+
+// SetRoot sets the default RootURL. You can override this on a per
+// call basis using the RootURL field in Opts.
+func (api *Client) SetRoot(RootURL string) *Client {
+	api.mu.Lock()
+	defer api.mu.Unlock()
+	api.rootURL = RootURL
+	return api
+}
+
+// SetHeader sets a header for all requests.
+// Start the key with "*" to stop it being canonicalised.
+func (api *Client) SetHeader(key, value string) *Client {
+	api.mu.Lock()
+	defer api.mu.Unlock()
+	api.headers[key] = value
+	return api
+}
+
+// RemoveHeader unsets a header for all requests
+func (api *Client) RemoveHeader(key string) *Client {
+	api.mu.Lock()
+	defer api.mu.Unlock()
+	delete(api.headers, key)
+	return api
+}
+
+// SignerFn is used to sign an outgoing request
+type SignerFn func(*http.Request) error
+
+// SetSigner sets a signer for all requests
+func (api *Client) SetSigner(signer SignerFn) *Client {
+	api.mu.Lock()
+	defer api.mu.Unlock()
+	api.signer = signer
+	return api
+}
+
+// SetUserPass creates an Authorization header for all requests with
+// the UserName and Password passed in
+func (api *Client) SetUserPass(UserName, Password string) *Client {
+	req, _ := http.NewRequest("GET", "http://example.com", nil)
+	req.SetBasicAuth(UserName, Password)
+	api.SetHeader("Authorization", req.Header.Get("Authorization"))
+	return api
+}
+
+// SetCookie creates a Cookie header for all requests with the supplied
+// cookies passed in.
+// All cookies have to be supplied at once; all cookies will be overwritten
+// on a new call to the method
+func (api *Client) SetCookie(cks ...*http.Cookie) *Client {
+	req, _ := http.NewRequest("GET", "http://example.com", nil)
+	for _, ck := range cks {
+		req.AddCookie(ck)
+	}
+	api.SetHeader("Cookie", req.Header.Get("Cookie"))
+	return api
+}
+
+// Opts contains parameters for Call, CallJSON, etc.
+type Opts struct {
+	Method string // GET, POST, etc.
+	Path                  string // relative to RootURL
+	RootURL               string // override RootURL passed into SetRoot()
+	Body                  io.Reader
+	NoResponse            bool // set to close Body
+	ContentType           string
+	ContentLength         *int64
+	ContentRange          string
+	ExtraHeaders          map[string]string // extra headers; start them with "*" to stop canonicalisation
+	UserName              string            // username for Basic Auth
+	Password              string            // password for Basic Auth
+	Options               []fs.OpenOption
+	IgnoreStatus          bool       // if set then we don't check error status or parse error body
+	MultipartParams       url.Values // if set do multipart form upload with attached file
+	MultipartMetadataName string     // ..this is used for the name of the metadata form part if set
+	MultipartContentName  string     // ..name of the parameter which is the attached file
+	MultipartFileName     string     // ..name of the file for the attached file
+	Parameters            url.Values // any parameters for the final URL
+	TransferEncoding      []string   // transfer encoding, set to "identity" to disable chunked encoding
+	Close                 bool       // set to close the connection after this transaction
+	NoRedirect            bool       // if this is set then the client won't follow redirects
+}
+
+// Copy creates a copy of the options
+func (o *Opts) Copy() *Opts {
+	newOpts := *o
+	return &newOpts
+}
+
+// DecodeJSON decodes resp.Body into result
+func DecodeJSON(resp *http.Response, result interface{}) (err error) {
+	defer fs.CheckClose(resp.Body, &err)
+	decoder := json.NewDecoder(resp.Body)
+	return decoder.Decode(result)
+}
+
+// DecodeXML decodes resp.Body into result
+func DecodeXML(resp *http.Response, result interface{}) (err error) {
+	defer fs.CheckClose(resp.Body, &err)
+	decoder := xml.NewDecoder(resp.Body)
+	return decoder.Decode(result)
+}
+
+// ClientWithNoRedirects makes a new http client which won't follow redirects
+func ClientWithNoRedirects(c *http.Client) *http.Client {
+	clientCopy := *c
+	clientCopy.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+		return http.ErrUseLastResponse
+	}
+	return &clientCopy
+}
+
+// Call makes the call and returns the http.Response
+//
+// if err == nil then resp.Body will need to be closed unless
+// opt.NoResponse is set
+//
+// if err != nil then resp.Body will have been closed
+//
+// it will return resp if at all possible, even if err is set
+func (api *Client) Call(ctx context.Context, opts *Opts) (resp *http.Response, err error) {
+	api.mu.RLock()
+	defer api.mu.RUnlock()
+	if opts == nil {
+		return nil, errors.New("call() called with nil opts")
+	}
+	url := api.rootURL
+	if opts.RootURL != "" {
+		url = opts.RootURL
+	}
+	if url == "" {
+		return nil, errors.New("RootURL not set")
+	}
+	url += opts.Path
+	if opts.Parameters != nil && len(opts.Parameters) > 0 {
+		url += "?" + opts.Parameters.Encode()
+	}
+	body := readers.NoCloser(opts.Body)
+	// If length is set and zero then nil out the body to stop the
+	// use of chunked encoding and insert a "Content-Length: 0"
+	// header.
+	//
+	// If we don't do this we get "Content-Length" headers for all
+	// files except 0 length files.
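+	//
+	// (This is because http.NewRequest only fills in ContentLength
+	// automatically for in-memory body types such as *bytes.Buffer;
+	// any other non-nil reader would otherwise be sent chunked.)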
+	if opts.ContentLength != nil && *opts.ContentLength == 0 {
+		body = nil
+	}
+	req, err := http.NewRequest(opts.Method, url, body)
+	if err != nil {
+		return
+	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
+	headers := make(map[string]string)
+	// Set default headers
+	for k, v := range api.headers {
+		headers[k] = v
+	}
+	if opts.ContentType != "" {
+		headers["Content-Type"] = opts.ContentType
+	}
+	if opts.ContentLength != nil {
+		req.ContentLength = *opts.ContentLength
+	}
+	if opts.ContentRange != "" {
+		headers["Content-Range"] = opts.ContentRange
+	}
+	if len(opts.TransferEncoding) != 0 {
+		req.TransferEncoding = opts.TransferEncoding
+	}
+	if opts.Close {
+		req.Close = true
+	}
+	// Set any extra headers
+	if opts.ExtraHeaders != nil {
+		for k, v := range opts.ExtraHeaders {
+			headers[k] = v
+		}
+	}
+	// add any options to the headers
+	fs.OpenOptionAddHeaders(opts.Options, headers)
+	// Now set the headers
+	for k, v := range headers {
+		if k != "" && v != "" {
+			if k[0] == '*' {
+				// Add non-canonical version if header starts with *
+				k = k[1:]
+				req.Header[k] = append(req.Header[k], v)
+			} else {
+				req.Header.Add(k, v)
+			}
+		}
+	}
+
+	if opts.UserName != "" || opts.Password != "" {
+		req.SetBasicAuth(opts.UserName, opts.Password)
+	}
+	var c *http.Client
+	if opts.NoRedirect {
+		c = ClientWithNoRedirects(api.c)
+	} else {
+		c = api.c
+	}
+	if api.signer != nil {
+		api.mu.RUnlock()
+		err = api.signer(req)
+		api.mu.RLock()
+		if err != nil {
+			return nil, errors.Wrap(err, "signer failed")
+		}
+	}
+	api.mu.RUnlock()
+	resp, err = c.Do(req)
+	api.mu.RLock()
+	if err != nil {
+		return nil, err
+	}
+	if !opts.IgnoreStatus {
+		if resp.StatusCode < 200 || resp.StatusCode > 299 {
+			err = api.errorHandler(resp)
+			if err.Error() == "" {
+				// replace empty errors with something
+				err = errors.Errorf("http error %d: %v", resp.StatusCode, resp.Status)
+			}
+			return resp, err
+		}
+	}
+	if opts.NoResponse {
+		return resp, resp.Body.Close()
+	}
+	return resp, nil
+}
+
+// MultipartUpload creates an io.Reader which produces an encoded
+// multipart form upload from the params passed in and the io.Reader
+// passed in
+//
+// in - the body of the file (may be nil)
+// params - the form parameters
+// fileName - is the name of the attached file
+// contentName - the name of the parameter for the file
+//
+// the int64 returned is the overhead in addition to the file contents, in case Content-Length is required
+//
+// NB This doesn't allow setting the content type of the attachment
+func MultipartUpload(in io.Reader, params url.Values, contentName, fileName string) (io.ReadCloser, string, int64, error) {
+	bodyReader, bodyWriter := io.Pipe()
+	writer := multipart.NewWriter(bodyWriter)
+	contentType := writer.FormDataContentType()
+
+	// Create a Multipart Writer as base for calculating the Content-Length
+	buf := &bytes.Buffer{}
+	dummyMultipartWriter := multipart.NewWriter(buf)
+	err := dummyMultipartWriter.SetBoundary(writer.Boundary())
+	if err != nil {
+		return nil, "", 0, err
+	}
+
+	for key, vals := range params {
+		for _, val := range vals {
+			err := dummyMultipartWriter.WriteField(key, val)
+			if err != nil {
+				return nil, "", 0, err
+			}
+		}
+	}
+	if in != nil {
+		_, err = dummyMultipartWriter.CreateFormFile(contentName, fileName)
+		if err != nil {
+			return nil, "", 0, err
+		}
+	}
+
+	err = dummyMultipartWriter.Close()
+	if err != nil {
+		return nil, "", 0, err
+	}
+
+	multipartLength := int64(buf.Len())
+
+	// Pump the data in the background
+	go func() {
+		var err error
+
+		for key, vals := range params {
key, vals := range params { + for _, val := range vals { + err = writer.WriteField(key, val) + if err != nil { + _ = bodyWriter.CloseWithError(errors.Wrap(err, "create metadata part")) + return + } + } + } + + if in != nil { + part, err := writer.CreateFormFile(contentName, fileName) + if err != nil { + _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to create form file")) + return + } + + _, err = io.Copy(part, in) + if err != nil { + _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to copy data")) + return + } + } + + err = writer.Close() + if err != nil { + _ = bodyWriter.CloseWithError(errors.Wrap(err, "failed to close form")) + return + } + + _ = bodyWriter.Close() + }() + + return bodyReader, contentType, multipartLength, nil +} + +// CallJSON runs Call and decodes the body as a JSON object into response (if not nil) +// +// If request is not nil then it will be JSON encoded as the body of the request +// +// If response is not nil then the response will be JSON decoded into +// it and resp.Body will be closed. +// +// If response is nil then the resp.Body will be closed only if +// opts.NoResponse is set. +// +// If (opts.MultipartParams or opts.MultipartContentName) and +// opts.Body are set then CallJSON will do a multipart upload with a +// file attached. opts.MultipartContentName is the name of the +// parameter and opts.MultipartFileName is the name of the file. If +// MultipartContentName is set, and request != nil is supplied, then +// the request will be marshalled into JSON and added to the form with +// parameter name MultipartMetadataName. +// +// It will return resp if at all possible, even if err is set +func (api *Client) CallJSON(ctx context.Context, opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) { + return api.callCodec(ctx, opts, request, response, json.Marshal, DecodeJSON, "application/json") +} + +// CallXML runs Call and decodes the body as an XML object into response (if not nil) +// +// If request is not nil then it will be XML encoded as the body of the request +// +// If response is not nil then the response will be XML decoded into +// it and resp.Body will be closed. +// +// If response is nil then the resp.Body will be closed only if +// opts.NoResponse is set.
+// +// See CallJSON for a description of MultipartParams and related opts +// +// It will return resp if at all possible, even if err is set +func (api *Client) CallXML(ctx context.Context, opts *Opts, request interface{}, response interface{}) (resp *http.Response, err error) { + return api.callCodec(ctx, opts, request, response, xml.Marshal, DecodeXML, "application/xml") +} + +type marshalFn func(v interface{}) ([]byte, error) +type decodeFn func(resp *http.Response, result interface{}) (err error) + +func (api *Client) callCodec(ctx context.Context, opts *Opts, request interface{}, response interface{}, marshal marshalFn, decode decodeFn, contentType string) (resp *http.Response, err error) { + var requestBody []byte + // Marshal the request if given + if request != nil { + requestBody, err = marshal(request) + if err != nil { + return nil, err + } + // Set the body up as a marshalled object if no body passed in + if opts.Body == nil { + opts = opts.Copy() + opts.ContentType = contentType + opts.Body = bytes.NewBuffer(requestBody) + } + } + if opts.MultipartParams != nil || opts.MultipartContentName != "" { + params := opts.MultipartParams + if params == nil { + params = url.Values{} + } + if opts.MultipartMetadataName != "" { + params.Add(opts.MultipartMetadataName, string(requestBody)) + } + opts = opts.Copy() + + var overhead int64 + opts.Body, opts.ContentType, overhead, err = MultipartUpload(opts.Body, params, opts.MultipartContentName, opts.MultipartFileName) + if err != nil { + return nil, err + } + if opts.ContentLength != nil { + *opts.ContentLength += overhead + } + } + resp, err = api.Call(ctx, opts) + if err != nil { + return resp, err + } + // if opts.NoResponse is set, resp.Body will have been closed by Call() + if response == nil || opts.NoResponse { + return resp, nil + } + err = decode(resp, response) + return resp, err +} diff --git a/vendor/github.com/rclone/rclone/lib/rest/url.go b/vendor/github.com/rclone/rclone/lib/rest/url.go new file mode 100644 index 00000000000..07ce15958d9 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/rest/url.go @@ -0,0 +1,27 @@ +package rest + +import ( + "net/url" + + "github.com/pkg/errors" +) + +// URLJoin joins a URL and a path returning a new URL +// +// path should be URL escaped +func URLJoin(base *url.URL, path string) (*url.URL, error) { + rel, err := url.Parse(path) + if err != nil { + return nil, errors.Wrapf(err, "Error parsing %q as URL", path) + } + return base.ResolveReference(rel), nil +} + +// URLPathEscape escapes the URL path in the string using URL escaping rules +// +// This mimics url.PathEscape, which is only available from go 1.8 +func URLPathEscape(in string) string { + var u url.URL + u.Path = in + return u.String() +} diff --git a/vendor/github.com/rclone/rclone/lib/structs/structs.go b/vendor/github.com/rclone/rclone/lib/structs/structs.go new file mode 100644 index 00000000000..9c6ee05b55f --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/structs/structs.go @@ -0,0 +1,57 @@ +// Package structs is for manipulating structures with reflection +package structs + +import ( + "reflect" +) + +// SetFrom sets the public members of a from b +// +// a and b should be pointers to structs +// +// a can be a different type from b +// +// Only the fields which have the same name and assignable type on a +// and b will be set. +// +// This is useful for copying between almost identical structures that +// are frequently present in auto-generated code for cloud storage +// interfaces.
+func SetFrom(a, b interface{}) { + ta := reflect.TypeOf(a).Elem() + tb := reflect.TypeOf(b).Elem() + va := reflect.ValueOf(a).Elem() + vb := reflect.ValueOf(b).Elem() + for i := 0; i < tb.NumField(); i++ { + bField := vb.Field(i) + tbField := tb.Field(i) + name := tbField.Name + aField := va.FieldByName(name) + taField, found := ta.FieldByName(name) + if found && aField.IsValid() && bField.IsValid() && aField.CanSet() && tbField.Type.AssignableTo(taField.Type) { + aField.Set(bField) + } + } +} + +// SetDefaults for a from b +// +// a and b should be pointers to the same kind of struct +// +// This copies the public members only from b to a. This is useful if +// you can't just use a struct copy because it contains a private +// mutex, e.g. http.Transport. +func SetDefaults(a, b interface{}) { + pt := reflect.TypeOf(a) + t := pt.Elem() + va := reflect.ValueOf(a).Elem() + vb := reflect.ValueOf(b).Elem() + for i := 0; i < t.NumField(); i++ { + aField := va.Field(i) + // Set a from b if it is public + if aField.CanSet() { + bField := vb.Field(i) + aField.Set(bField) + } + } +} diff --git a/vendor/github.com/rclone/rclone/lib/terminal/hidden_other.go b/vendor/github.com/rclone/rclone/lib/terminal/hidden_other.go new file mode 100644 index 00000000000..2a15b21b132 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/terminal/hidden_other.go @@ -0,0 +1,7 @@ +// +build !windows + +package terminal + +// HideConsole is only supported on windows +func HideConsole() { +} diff --git a/vendor/github.com/rclone/rclone/lib/terminal/hidden_windows.go b/vendor/github.com/rclone/rclone/lib/terminal/hidden_windows.go new file mode 100644 index 00000000000..0745e87ed60 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/terminal/hidden_windows.go @@ -0,0 +1,19 @@ +// +build windows + +package terminal + +import ( + "syscall" +) + +// HideConsole hides the console window and activates another window +func HideConsole() { + getConsoleWindow := syscall.NewLazyDLL("kernel32.dll").NewProc("GetConsoleWindow") + showWindow := syscall.NewLazyDLL("user32.dll").NewProc("ShowWindow") + if getConsoleWindow.Find() == nil && showWindow.Find() == nil { + hwnd, _, _ := getConsoleWindow.Call() + if hwnd != 0 { + showWindow.Call(hwnd, 0) + } + } +} diff --git a/vendor/github.com/rclone/rclone/lib/terminal/terminal.go b/vendor/github.com/rclone/rclone/lib/terminal/terminal.go new file mode 100644 index 00000000000..4eda1ababaa --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/terminal/terminal.go @@ -0,0 +1,103 @@ +// Package terminal provides VT100 terminal codes and a windows +// implementation of that.
+package terminal + +import ( + "io" + "os" + "runtime" + "sync" + + colorable "github.com/mattn/go-colorable" +) + +// VT100 codes +const ( + EraseLine = "\x1b[2K" + MoveToStartOfLine = "\x1b[1G" + MoveUp = "\x1b[1A" + + Reset = "\x1b[0m" + Bright = "\x1b[1m" + Dim = "\x1b[2m" + Underscore = "\x1b[4m" + Blink = "\x1b[5m" + Reverse = "\x1b[7m" + Hidden = "\x1b[8m" + + BlackFg = "\x1b[30m" + RedFg = "\x1b[31m" + GreenFg = "\x1b[32m" + YellowFg = "\x1b[33m" + BlueFg = "\x1b[34m" + MagentaFg = "\x1b[35m" + CyanFg = "\x1b[36m" + WhiteFg = "\x1b[37m" + + BlackBg = "\x1b[40m" + RedBg = "\x1b[41m" + GreenBg = "\x1b[42m" + YellowBg = "\x1b[43m" + BlueBg = "\x1b[44m" + MagentaBg = "\x1b[45m" + CyanBg = "\x1b[46m" + WhiteBg = "\x1b[47m" + + HiBlackFg = "\x1b[90m" + HiRedFg = "\x1b[91m" + HiGreenFg = "\x1b[92m" + HiYellowFg = "\x1b[93m" + HiBlueFg = "\x1b[94m" + HiMagentaFg = "\x1b[95m" + HiCyanFg = "\x1b[96m" + HiWhiteFg = "\x1b[97m" + + HiBlackBg = "\x1b[100m" + HiRedBg = "\x1b[101m" + HiGreenBg = "\x1b[102m" + HiYellowBg = "\x1b[103m" + HiBlueBg = "\x1b[104m" + HiMagentaBg = "\x1b[105m" + HiCyanBg = "\x1b[106m" + HiWhiteBg = "\x1b[107m" + + ChangeTitle = "\033]0;" + BEL = "\007" +) + +var ( + // make sure that start is only called once + once sync.Once +) + +// Start the terminal - must be called before use +func Start() { + once.Do(func() { + f := os.Stdout + if !IsTerminal(int(f.Fd())) { + // If stdout not a tty then remove escape codes + Out = colorable.NewNonColorable(f) + } else if runtime.GOOS == "windows" && os.Getenv("TERM") != "" { + // If TERM is set just use stdout + Out = f + } else { + Out = colorable.NewColorable(f) + } + }) +} + +// WriteString writes the string passed in to the terminal +func WriteString(s string) { + Write([]byte(s)) +} + +// Out is an io.Writer which can be used to write to the terminal +// e.g. for use with fmt.Fprintf(terminal.Out, "terminal fun: %d\n", n) +var Out io.Writer + +// Write sends out to the VT100 terminal. +// It will initialise the terminal if this is the first call. +func Write(out []byte) { + Start() + _, _ = Out.Write(out) +} diff --git a/vendor/github.com/rclone/rclone/lib/terminal/terminal_normal.go b/vendor/github.com/rclone/rclone/lib/terminal/terminal_normal.go new file mode 100644 index 00000000000..d8531ebf5e5 --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/terminal/terminal_normal.go @@ -0,0 +1,37 @@ +//+build !js + +package terminal + +import ( + "fmt" + "os" + + "golang.org/x/crypto/ssh/terminal" +) + +// GetSize reads the dimensions of the current terminal or returns a +// sensible default +func GetSize() (w, h int) { + w, h, err := terminal.GetSize(int(os.Stdout.Fd())) + if err != nil { + w, h = 80, 25 + } + return w, h +} + +// IsTerminal returns whether the fd passed in is a terminal or not +func IsTerminal(fd int) bool { + return terminal.IsTerminal(fd) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + return terminal.ReadPassword(fd) +} + +// WriteTerminalTitle writes a string to the terminal title +func WriteTerminalTitle(title string) { + fmt.Print(ChangeTitle + title + BEL) +} diff --git a/vendor/github.com/rclone/rclone/lib/terminal/terminal_unsupported.go b/vendor/github.com/rclone/rclone/lib/terminal/terminal_unsupported.go new file mode 100644 index 00000000000..d26fb6291bb --- /dev/null +++ b/vendor/github.com/rclone/rclone/lib/terminal/terminal_unsupported.go @@ -0,0 +1,28 @@ +//+build js + +package terminal + +import "errors" + +// GetSize reads the dimensions of the current terminal or returns a +// sensible default +func GetSize() (w, h int) { + return 80, 25 +} + +// IsTerminal returns whether the fd passed in is a terminal or not +func IsTerminal(fd int) bool { + return false +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, errors.New("can't read password") +} + +// WriteTerminalTitle writes a string to the terminal title +func WriteTerminalTitle(title string) { + // There is no terminal on this platform, so this is a no-op +} diff --git a/vendor/github.com/rfjakob/eme/LICENSE b/vendor/github.com/rfjakob/eme/LICENSE new file mode 100644 index 00000000000..569ca028af0 --- /dev/null +++ b/vendor/github.com/rfjakob/eme/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Jakob Unterwurzacher + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rfjakob/eme/README.md b/vendor/github.com/rfjakob/eme/README.md new file mode 100644 index 00000000000..0db50da958b --- /dev/null +++ b/vendor/github.com/rfjakob/eme/README.md @@ -0,0 +1,116 @@ +EME for Go [![CI](https://github.com/rfjakob/eme/actions/workflows/ci.yml/badge.svg)](https://github.com/rfjakob/eme/actions/workflows/ci.yml) [![GoDoc](https://godoc.org/github.com/rfjakob/eme?status.svg)](https://godoc.org/github.com/rfjakob/eme) ![MIT License](https://img.shields.io/badge/license-MIT-blue.svg) +========== + +**EME** (ECB-Mix-ECB or, clearer, **Encrypt-Mix-Encrypt**) is a wide-block +encryption mode developed by Halevi +and Rogaway in 2003 [[eme]](#eme). + +EME uses multiple invocations of a block cipher to construct a new +cipher of bigger block size (in multiples of 16 bytes, up to 2048 bytes).
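+For orientation, here is a minimal usage sketch of this Go implementation (the `New`/`Encrypt`/`Decrypt` API is defined in `eme.go` below); the all-zero key, the sector-number tweak, and the 512-byte buffer are illustrative assumptions only:
+
+    package main
+
+    import (
+        "crypto/aes"
+        "encoding/binary"
+        "fmt"
+
+        "github.com/rfjakob/eme"
+    )
+
+    func main() {
+        key := make([]byte, 32) // AES-256 key; all-zero here purely for illustration
+        bc, err := aes.NewCipher(key)
+        if err != nil {
+            panic(err)
+        }
+        e := eme.New(bc)
+
+        // The tweak (IV) is 16 bytes, e.g. a disk sector number
+        tweak := make([]byte, 16)
+        binary.LittleEndian.PutUint64(tweak, 42)
+
+        sector := make([]byte, 512) // input must be a multiple of 16 bytes
+        ct := e.Encrypt(tweak, sector)
+        pt := e.Decrypt(tweak, ct)
+        fmt.Println(len(ct), len(pt)) // wide-block: output is the same size as input
+    }
+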
+ +Quoting from the original [[eme]](#eme) paper: + +> We describe a block-cipher mode of operation, EME, that turns an n-bit block cipher into +> a tweakable enciphering scheme that acts on strings of mn bits, where m ∈ [1..n]. The mode is +> parallelizable, but as serial-efficient as the non-parallelizable mode CMC [6]. EME can be used +> to solve the disk-sector encryption problem. The algorithm entails two layers of ECB encryption +> and a “lightweight mixing” in between. We prove EME secure, in the reduction-based sense of +> modern cryptography. + +Figure 2 from the [[eme]](#eme) paper shows an overview of the transformation: + +[![Figure 2 from [eme]](paper-eme-fig2.png)](#) + +This is an implementation of EME in Go, complete with test vectors from IEEE [[p1619-2]](#p1619-2) +and Halevi [[eme-32-testvec]](#eme-32-testvec). + +It has no dependencies outside the standard library. + +Is it patented? +--------------- + +In 2007, UC Davis decided to abandon [[patabandon]](#patabandon) +the patent application [[patappl]](#patappl) for EME. + +Related algorithms +------------------ + +**EME-32** is EME with the cipher set to AES and the length set to 512. +That is, EME-32 [[eme-32-pdf]](#eme-32-pdf) is a subset of EME. + +**EME2**, also known as EME\* [[emestar]](#emestar), is an extended version of EME +that has built-in handling for data that is not a multiple of 16 bytes +long. +EME2 has been selected for standardization in IEEE P1619.2 [[p1619-2]](#p1619-2). + +References +---------- + +#### [eme] +*A Parallelizable Enciphering Mode* +Shai Halevi, Phillip Rogaway, 28 Jul 2003 +https://eprint.iacr.org/2003/147.pdf + +Note: This is the original EME paper. EME is specified for an arbitrary +number of block-cipher blocks. EME-32 is a concrete implementation of +EME with a fixed length of 32 AES blocks. + +#### [eme-32-email] +*Re: EME-32-AES with editorial comments* +Shai Halevi, 07 Jun 2005 +http://grouper.ieee.org/groups/1619/email/msg00310.html + +#### [eme-32-pdf] +*Draft Standard for Tweakable Wide-block Encryption* +Shai Halevi, 02 June 2005 +http://grouper.ieee.org/groups/1619/email/pdf00020.pdf + +Note: This is the latest version of the EME-32 draft that I could find. It +includes test vectors and C source code. + +#### [eme-32-testvec] +*Re: Test vectors for LRW and EME* +Shai Halevi, 16 Nov 2004 +http://grouper.ieee.org/groups/1619/email/msg00218.html + +#### [emestar] +*EME\*: extending EME to handle arbitrary-length messages with associated data* +Shai Halevi, 27 May 2004 +https://eprint.iacr.org/2004/125.pdf + +#### [patabandon] +*Re: [P1619-2] Non-awareness patent statement made by UC Davis* +Mat Ball, 26 Nov 2007 +http://grouper.ieee.org/groups/1619/email-2/msg00005.html + +#### [patappl] +*Block cipher mode of operation for constructing a wide-blocksize block cipher from a conventional block cipher* +US patent application US20040131182 +http://www.google.com/patents/US20040131182 + +#### [p1619-2] +*IEEE P1619.2™/D9 Draft Standard for Wide-Block Encryption for Shared Storage Media* +IEEE, Dec 2008 +http://siswg.net/index2.php?option=com_docman&task=doc_view&gid=156&Itemid=41 + +Note: This is a draft version. The final version is not freely available +and must be bought from IEEE.
+ +Package Changelog +----------------- + +v1.1.2, 2021-06-27 +* Add `go.mod` file +* Switch from Travis CI to Github Actions +* No code changes + +v1.1.1, 2020-04-13 +* Update `go vet` call in `test.bash` to work on recent Go versions +* No code changes + +v1.1, 2017-03-05 +* Add eme.New() / \*EMECipher convenience wrapper +* Improve panic message and parameter wording + +v1.0, 2015-12-08 +* Stable release diff --git a/vendor/github.com/rfjakob/eme/benchmark.bash b/vendor/github.com/rfjakob/eme/benchmark.bash new file mode 100644 index 00000000000..8045e6a8dd0 --- /dev/null +++ b/vendor/github.com/rfjakob/eme/benchmark.bash @@ -0,0 +1,3 @@ +#!/bin/bash -eu + +go test -bench=. diff --git a/vendor/github.com/rfjakob/eme/eme.go b/vendor/github.com/rfjakob/eme/eme.go new file mode 100644 index 00000000000..a05a191fc9a --- /dev/null +++ b/vendor/github.com/rfjakob/eme/eme.go @@ -0,0 +1,206 @@ +// EME (ECB-Mix-ECB or, clearer, Encrypt-Mix-Encrypt) is a wide-block +// encryption mode developed by Halevi and Rogaway. +// +// It was presented in the 2003 paper "A Parallelizable Enciphering Mode" by +// Halevi and Rogaway. +// +// EME uses multiple invocations of a block cipher to construct a new cipher +// of bigger block size (in multiples of 16 bytes, up to 2048 bytes). +package eme + +import ( + "crypto/cipher" + "log" +) + +type directionConst bool + +const ( + // Encrypt "inputData" + DirectionEncrypt = directionConst(true) + // Decrypt "inputData" + DirectionDecrypt = directionConst(false) +) + +// multByTwo - GF multiplication as specified in the EME-32 draft +func multByTwo(out []byte, in []byte) { + if len(in) != 16 { + panic("len must be 16") + } + tmp := make([]byte, 16) + + tmp[0] = 2 * in[0] + if in[15] >= 128 { + tmp[0] = tmp[0] ^ 135 + } + for j := 1; j < 16; j++ { + tmp[j] = 2 * in[j] + if in[j-1] >= 128 { + tmp[j] += 1 + } + } + copy(out, tmp) +} + +func xorBlocks(out []byte, in1 []byte, in2 []byte) { + if len(in1) != len(in2) { + log.Panicf("len(in1)=%d is not equal to len(in2)=%d", len(in1), len(in2)) + } + + for i := range in1 { + out[i] = in1[i] ^ in2[i] + } +} + +// aesTransform - encrypt or decrypt (according to "direction") using block +// cipher "bc" (typically AES) +func aesTransform(dst []byte, src []byte, direction directionConst, bc cipher.Block) { + if direction == DirectionEncrypt { + bc.Encrypt(dst, src) + return + } else if direction == DirectionDecrypt { + bc.Decrypt(dst, src) + return + } +} + +// tabulateL - calculate L_i for messages up to a length of m cipher blocks +func tabulateL(bc cipher.Block, m int) [][]byte { + /* set L0 = 2*AESenc(K; 0) */ + eZero := make([]byte, 16) + Li := make([]byte, 16) + bc.Encrypt(Li, eZero) + + LTable := make([][]byte, m) + // Allocate pool once and slice into m pieces in the loop + pool := make([]byte, m*16) + for i := 0; i < m; i++ { + multByTwo(Li, Li) + LTable[i] = pool[i*16 : (i+1)*16] + copy(LTable[i], Li) + } + return LTable +} + +// Transform - EME-encrypt or EME-decrypt, according to "direction" +// (defined in the constants DirectionEncrypt and DirectionDecrypt). +// The data in "inputData" is en- or decrypted with the block cipher "bc" under +// "tweak" (also known as IV). +// +// The tweak is used to randomize the encryption in the same way as an +// IV. A use of this encryption mode envisioned by the authors of the +// algorithm was to encrypt each sector of a disk, with the tweak +// being the sector number. If you encipher the same data with the +// same tweak you will get the same ciphertext.
+// +// The result is returned in a freshly allocated slice of the same +// size as inputData. +// +// Limitations: +// * The block cipher must have block size 16 (usually AES). +// * The size of "tweak" must be 16 bytes +// * "inputData" must be a multiple of 16 bytes long +// If any of these pre-conditions are not met, the function will panic. +// +// Note that you probably don't want to call this function directly and instead +// use eme.New(), which provides convenient wrappers. +func Transform(bc cipher.Block, tweak []byte, inputData []byte, direction directionConst) []byte { + // In the paper, the tweak is just called "T". Call it the same here to + // make following the paper easy. + T := tweak + // In the paper, the plaintext data is called "P" and the ciphertext is + // called "C". Because encryption and decryption are virtually identical, + // we share the code and always call the input data "P" and the output data + // "C", regardless of the direction. + P := inputData + + if bc.BlockSize() != 16 { + log.Panicf("Using a block size other than 16 is not implemented") + } + if len(T) != 16 { + log.Panicf("Tweak must be 16 bytes long, is %d", len(T)) + } + if len(P)%16 != 0 { + log.Panicf("Data P must be a multiple of 16 long, is %d", len(P)) + } + m := len(P) / 16 + if m == 0 || m > 16*8 { + log.Panicf("EME operates on 1 to %d block-cipher blocks, you passed %d", 16*8, m) + } + + C := make([]byte, len(P)) + + LTable := tabulateL(bc, m) + + PPj := make([]byte, 16) + for j := 0; j < m; j++ { + Pj := P[j*16 : (j+1)*16] + /* PPj = 2**(j-1)*L xor Pj */ + xorBlocks(PPj, Pj, LTable[j]) + /* PPPj = AESenc(K; PPj) */ + aesTransform(C[j*16:(j+1)*16], PPj, direction, bc) + } + + /* MP =(xorSum PPPj) xor T */ + MP := make([]byte, 16) + xorBlocks(MP, C[0:16], T) + for j := 1; j < m; j++ { + xorBlocks(MP, MP, C[j*16:(j+1)*16]) + } + + /* MC = AESenc(K; MP) */ + MC := make([]byte, 16) + aesTransform(MC, MP, direction, bc) + + /* M = MP xor MC */ + M := make([]byte, 16) + xorBlocks(M, MP, MC) + CCCj := make([]byte, 16) + for j := 1; j < m; j++ { + multByTwo(M, M) + /* CCCj = 2**(j-1)*M xor PPPj */ + xorBlocks(CCCj, C[j*16:(j+1)*16], M) + copy(C[j*16:(j+1)*16], CCCj) + } + + /* CCC1 = (xorSum CCCj) xor T xor MC */ + CCC1 := make([]byte, 16) + xorBlocks(CCC1, MC, T) + for j := 1; j < m; j++ { + xorBlocks(CCC1, CCC1, C[j*16:(j+1)*16]) + } + copy(C[0:16], CCC1) + + for j := 0; j < m; j++ { + /* CCj = AES-enc(K; CCCj) */ + aesTransform(C[j*16:(j+1)*16], C[j*16:(j+1)*16], direction, bc) + /* Cj = 2**(j-1)*L xor CCj */ + xorBlocks(C[j*16:(j+1)*16], C[j*16:(j+1)*16], LTable[j]) + } + + return C +} + +// EMECipher provides EME-Encryption and -Decryption functions that are more +// convenient than calling Transform directly. +type EMECipher struct { + bc cipher.Block +} + +// New returns a new EMECipher object. "bc" must have a block size of 16, +// or subsequent calls to Encrypt and Decrypt will panic. +func New(bc cipher.Block) *EMECipher { + return &EMECipher{ + bc: bc, + } +} + +// Encrypt is equivalent to calling Transform with direction=DirectionEncrypt. +func (e *EMECipher) Encrypt(tweak []byte, inputData []byte) []byte { + return Transform(e.bc, tweak, inputData, DirectionEncrypt) +} + +// Decrypt is equivalent to calling Transform with direction=DirectionDecrypt.
+func (e *EMECipher) Decrypt(tweak []byte, inputData []byte) []byte { + return Transform(e.bc, tweak, inputData, DirectionDecrypt) +} diff --git a/vendor/github.com/rfjakob/eme/paper-eme-fig2.png b/vendor/github.com/rfjakob/eme/paper-eme-fig2.png new file mode 100644 index 00000000000..c59c7c10cc5 Binary files /dev/null and b/vendor/github.com/rfjakob/eme/paper-eme-fig2.png differ diff --git a/vendor/github.com/rfjakob/eme/test.bash b/vendor/github.com/rfjakob/eme/test.bash new file mode 100644 index 00000000000..876b4caa7c6 --- /dev/null +++ b/vendor/github.com/rfjakob/eme/test.bash @@ -0,0 +1,5 @@ +#!/bin/bash -eu + +go build +go test -v "$@" +go vet -all . diff --git a/vendor/github.com/robfig/cron/v3/.gitignore b/vendor/github.com/robfig/cron/v3/.gitignore new file mode 100644 index 00000000000..00268614f04 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/robfig/cron/v3/.travis.yml b/vendor/github.com/robfig/cron/v3/.travis.yml new file mode 100644 index 00000000000..4f2ee4d9733 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/robfig/cron/v3/LICENSE b/vendor/github.com/robfig/cron/v3/LICENSE new file mode 100644 index 00000000000..3a0f627ffeb --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/v3/README.md b/vendor/github.com/robfig/cron/v3/README.md new file mode 100644 index 00000000000..984c537c014 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/README.md @@ -0,0 +1,125 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Cron V3 has been released! + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. 
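+Before the upgrade notes, a minimal quick-start sketch (the schedule strings are illustrative; `New`, `AddFunc`, `Start` and `Stop` are all defined in `cron.go` later in this diff):
+
+    package main
+
+    import (
+        "fmt"
+        "time"
+
+        "github.com/robfig/cron/v3"
+    )
+
+    func main() {
+        c := cron.New() // standard 5-field parser, schedules interpreted in time.Local
+        c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
+        c.AddFunc("@every 1h30m", func() { fmt.Println("Every ninety minutes") })
+        c.Start() // the scheduler runs in its own goroutine
+
+        time.Sleep(5 * time.Minute) // stand-in for real application work
+
+        // Stop the scheduler and wait for any running jobs to finish.
+        <-c.Stop().Done()
+    }
+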
+ +Refer to the documentation here: +http://godoc.org/github.com/robfig/cron + +The rest of this document describes the advances in v3 and lists the +breaking changes for users who wish to upgrade from an earlier version. + +## Upgrading to v3 (June 2019) + +cron v3 is a major upgrade to the library that addresses all outstanding bugs, +feature requests, and rough edges. It is based on a merge of master, which +contains various fixes to issues found over the years, and the v2 branch, which +contains some backwards-incompatible features like the ability to remove cron +jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like +the timezone support, and fixes a number of bugs. + +New features: + +- Support for Go modules. Callers must now import this library as + `github.com/robfig/cron/v3`, instead of `gopkg.in/...` + +- Fixed bugs: + - 0f01e6b parser: fix combining of Dow and Dom (#70) + - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157) + - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144) + - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97) + - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206) + +- Standard cron spec parsing by default (first field is "minute"), with an easy + way to opt into the seconds field (quartz-compatible). Note, however, that the + year field (optional in Quartz) is not supported. + +- Extensible, key/value logging via an interface that complies with + the https://github.com/go-logr/logr project. + +- The new Chain & JobWrapper types allow you to install "interceptors" to add + cross-cutting behavior like the following: + - Recover any panics from jobs + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + - Notification when jobs are completed + +It is backwards incompatible with both v1 and v2. These updates are required: + +- The v1 branch accepted an optional seconds field at the beginning of the cron + spec. This is non-standard and has led to a lot of confusion. The new default + parser conforms to the standard as described by [the Cron wikipedia page]. + + UPDATING: To retain the old behavior, construct your Cron with a custom + parser: + + // Seconds field, required + cron.New(cron.WithSeconds()) + + // Seconds field, optional + cron.New( + cron.WithParser( + cron.NewParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) + +- The Cron type now accepts functional options on construction rather than the + previous ad-hoc behavior modification mechanisms (setting a field, calling a setter). + + UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be + updated to provide those values on construction. + +- CRON_TZ is now the recommended way to specify the timezone of a single + schedule, which is sanctioned by the specification. The legacy "TZ=" prefix + will continue to be supported since it is unambiguous and easy to do so. + + UPDATING: No update is required. + +- By default, cron will no longer recover panics in jobs that it runs. + Recovering can be surprising (see issue #192) and seems to be at odds with + typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option + has been removed to accommodate the more general JobWrapper type.
+ + UPDATING: To opt into panic recovery and configure the panic logger: + + cron.New(cron.WithChain( + cron.Recover(logger), // or use cron.DefaultLogger + )) + +- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was + removed, since it is duplicative with the leveled logging. + + UPDATING: Callers should use `WithLogger` and specify a logger that does not + discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`: + + cron.New( + cron.WithLogger(cron.VerbosePrintfLogger(logger))) + + +### Background - Cron spec format + +There are two cron spec formats in common usage: + +- The "standard" cron format, described on [the Cron wikipedia page] and used by + the cron Linux system utility. + +- The cron format used by [the Quartz Scheduler], commonly used for scheduled + jobs in Java software + +[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron +[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html + +The original version of this package included an optional "seconds" field, which +made it incompatible with both of these formats. Now, the "standard" format is +the default format accepted, and the Quartz format is opt-in. diff --git a/vendor/github.com/robfig/cron/v3/chain.go b/vendor/github.com/robfig/cron/v3/chain.go new file mode 100644 index 00000000000..9565b418e0e --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/chain.go @@ -0,0 +1,92 @@ +package cron + +import ( + "fmt" + "runtime" + "sync" + "time" +) + +// JobWrapper decorates the given Job with some behavior. +type JobWrapper func(Job) Job + +// Chain is a sequence of JobWrappers that decorates submitted jobs with +// cross-cutting behaviors like logging or synchronization. +type Chain struct { + wrappers []JobWrapper +} + +// NewChain returns a Chain consisting of the given JobWrappers. +func NewChain(c ...JobWrapper) Chain { + return Chain{c} +} + +// Then decorates the given job with all JobWrappers in the chain. +// +// This: +// NewChain(m1, m2, m3).Then(job) +// is equivalent to: +// m1(m2(m3(job))) +func (c Chain) Then(j Job) Job { + for i := range c.wrappers { + j = c.wrappers[len(c.wrappers)-i-1](j) + } + return j +} + +// Recover panics in wrapped jobs and log them with the provided logger. +func Recover(logger Logger) JobWrapper { + return func(j Job) Job { + return FuncJob(func() { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + err, ok := r.(error) + if !ok { + err = fmt.Errorf("%v", r) + } + logger.Error(err, "panic", "stack", "...\n"+string(buf)) + } + }() + j.Run() + }) + } +} + +// DelayIfStillRunning serializes jobs, delaying subsequent runs until the +// previous one is complete. Jobs running after a delay of more than a minute +// have the delay logged at Info. +func DelayIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var mu sync.Mutex + return FuncJob(func() { + start := time.Now() + mu.Lock() + defer mu.Unlock() + if dur := time.Since(start); dur > time.Minute { + logger.Info("delay", "duration", dur) + } + j.Run() + }) + } +} + +// SkipIfStillRunning skips an invocation of the Job if a previous invocation is +// still running. It logs skips to the given logger at Info level. 
+func SkipIfStillRunning(logger Logger) JobWrapper { + return func(j Job) Job { + var ch = make(chan struct{}, 1) + ch <- struct{}{} + return FuncJob(func() { + select { + case v := <-ch: + j.Run() + ch <- v + default: + logger.Info("skip") + } + }) + } +} diff --git a/vendor/github.com/robfig/cron/v3/constantdelay.go b/vendor/github.com/robfig/cron/v3/constantdelay.go new file mode 100644 index 00000000000..cd6e7b1be91 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/v3/cron.go b/vendor/github.com/robfig/cron/v3/cron.go new file mode 100644 index 00000000000..c7e91766589 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/cron.go @@ -0,0 +1,355 @@ +package cron + +import ( + "context" + "sort" + "sync" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + chain Chain + stop chan struct{} + add chan *Entry + remove chan EntryID + snapshot chan chan []Entry + running bool + logger Logger + runningMu sync.Mutex + location *time.Location + parser ScheduleParser + nextID EntryID + jobWaiter sync.WaitGroup +} + +// ScheduleParser is an interface for schedule spec parsers that return a Schedule +type ScheduleParser interface { + Parse(spec string) (Schedule, error) +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// Schedule describes a job's duty cycle. +type Schedule interface { + // Next returns the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// EntryID identifies an entry within a Cron instance +type EntryID int + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // ID is the cron-assigned ID of this entry, which may be used to look up a + // snapshot or remove it. + ID EntryID + + // Schedule on which this job should be run. + Schedule Schedule + + // Next time the job will run, or the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // Prev is the last time this job was run, or the zero time if never. + Prev time.Time + + // WrappedJob is the thing to run when the Schedule is activated. + WrappedJob Job + + // Job is the thing that was submitted to cron. 
+ // It is kept around so that user code that needs to get at the job later, + // e.g. via Entries() can do so. + Job Job +} + +// Valid returns true if this is not the zero entry. +func (e Entry) Valid() bool { return e.ID != 0 } + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, modified by the given options. +// +// Available Settings +// +// Time Zone +// Description: The time zone in which schedules are interpreted +// Default: time.Local +// +// Parser +// Description: Parser converts cron spec strings into cron.Schedules. +// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron +// +// Chain +// Description: Wrap submitted jobs to customize behavior. +// Default: A chain that recovers panics and logs them to stderr. +// +// See "cron.With*" to modify the default behavior. +func New(opts ...Option) *Cron { + c := &Cron{ + entries: nil, + chain: NewChain(), + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan chan []Entry), + remove: make(chan EntryID), + running: false, + runningMu: sync.Mutex{}, + logger: DefaultLogger, + location: time.Local, + parser: standardParser, + } + for _, opt := range opts { + opt(c) + } + return c +} + +// FuncJob is a wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +// The spec is parsed using the time zone of this Cron instance as the default. +// An opaque ID is returned that can be used to later remove it. +func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) { + schedule, err := c.parser.Parse(spec) + if err != nil { + return 0, err + } + return c.Schedule(schedule, cmd), nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +// The job is wrapped with the configured Chain. +func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID { + c.runningMu.Lock() + defer c.runningMu.Unlock() + c.nextID++ + entry := &Entry{ + ID: c.nextID, + Schedule: schedule, + WrappedJob: c.chain.Then(cmd), + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + } else { + c.add <- entry + } + return entry.ID +} + +// Entries returns a snapshot of the cron entries. +func (c *Cron) Entries() []Entry { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + replyChan := make(chan []Entry, 1) + c.snapshot <- replyChan + return <-replyChan + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Entry returns a snapshot of the given entry, or the zero Entry if it couldn't be found.
+func (c *Cron) Entry(id EntryID) Entry { + for _, entry := range c.Entries() { + if id == entry.ID { + return entry + } + } + return Entry{} +} + +// Remove an entry from being run in the future. +func (c *Cron) Remove(id EntryID) { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.remove <- id + } else { + c.removeEntry(id) + } +} + +// Start the cron scheduler in its own goroutine, or no-op if already started. +func (c *Cron) Start() { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + c.runningMu.Lock() + if c.running { + c.runningMu.Unlock() + return + } + c.running = true + c.runningMu.Unlock() + c.run() +} + +// run the scheduler. This is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + c.logger.Info("start") + + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + c.logger.Info("wake", "now", now) + + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + c.startJob(e.WrappedJob) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next) + + case replyChan := <-c.snapshot: + replyChan <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + c.logger.Info("stop") + return + + case id := <-c.remove: + timer.Stop() + now = c.now() + c.removeEntry(id) + c.logger.Info("removed", "entry", id) + } + + break + } + } +} + +// startJob runs the given job in a new goroutine. +func (c *Cron) startJob(j Job) { + c.jobWaiter.Add(1) + go func() { + defer c.jobWaiter.Done() + j.Run() + }() +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +// A context is returned so the caller can wait for running jobs to complete. +func (c *Cron) Stop() context.Context { + c.runningMu.Lock() + defer c.runningMu.Unlock() + if c.running { + c.stop <- struct{}{} + c.running = false + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + c.jobWaiter.Wait() + cancel() + }() + return ctx +} + +// entrySnapshot returns a copy of the current cron entry list.
+func (c *Cron) entrySnapshot() []Entry { + var entries = make([]Entry, len(c.entries)) + for i, e := range c.entries { + entries[i] = *e + } + return entries +} + +func (c *Cron) removeEntry(id EntryID) { + var entries []*Entry + for _, e := range c.entries { + if e.ID != id { + entries = append(entries, e) + } + } + c.entries = entries +} diff --git a/vendor/github.com/robfig/cron/v3/doc.go b/vendor/github.com/robfig/cron/v3/doc.go new file mode 100644 index 00000000000..fa5d08b4dbb --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/doc.go @@ -0,0 +1,231 @@ +/* +Package cron implements a cron spec parser and job runner. + +Installation + +To download the specific tagged release, run: + + go get github.com/robfig/cron/v3@v3.0.0 + +Import it in your program as: + + import "github.com/robfig/cron/v3" + +It requires Go 1.11 or later due to usage of Go Modules. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") }) + c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 5 space-separated fields. + + Field name | Mandatory? | Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Month and Day-of-week field values are case insensitive. "SUN", "Sun", and +"sun" are equally accepted. + +The specific interpretation of the format is based on the Cron Wikipedia page: +https://en.wikipedia.org/wiki/Cron + +Alternative Formats + +Alternative Cron expression formats support other fields like seconds. You can +implement that by creating a custom Parser as follows. + + cron.New( + cron.WithParser( + cron.NewParser( + cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))) + +Since adding Seconds is the most common modification to the standard cron spec, +cron provides a builtin function to do that, which is equivalent to the custom +parser you saw earlier, except that its seconds field is REQUIRED: + + cron.New(cron.WithSeconds()) + +That emulates Quartz, the most popular alternative Cron schedule format: +http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 4th field (month) would indicate every +month. + +Slash ( / ) + +Slashes are used to describe increments of ranges.
For example 3-59/15 in the +1st field (minutes) would indicate the 3rd minute of the hour and every 15 +minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", +that is, an increment over the largest possible range of the field. The form +"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the +increment until the end of that specific range. It does not wrap around. + +Comma ( , ) + +Commas are used to separate items of a list. For example, using "MON,WED,FRI" in +the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. + +Hyphen ( - ) + +Hyphens are used to define ranges. For example, 9-17 would indicate every +hour between 9am and 5pm inclusive. + +Question mark ( ? ) + +Question mark may be used instead of '*' for leaving either day-of-month or +day-of-week blank. + +Predefined schedules + +You may use one of several pre-defined schedules in place of a cron expression. + + Entry | Description | Equivalent To + ----- | ----------- | ------------- + @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 * + @monthly | Run once a month, midnight, first of month | 0 0 1 * * + @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0 + @daily (or @midnight) | Run once a day, midnight | 0 0 * * * + @hourly | Run once an hour, beginning of hour | 0 * * * * + +Intervals + +You may also schedule a job to execute at fixed intervals, starting at the time it's added +or cron is run. This is supported by formatting the cron spec like this: + + @every <duration> + +where "duration" is a string accepted by time.ParseDuration +(http://golang.org/pkg/time/#ParseDuration). + +For example, "@every 1h30m10s" would indicate a schedule that activates after +1 hour, 30 minutes, 10 seconds, and then every interval after that. + +Note: The interval does not take the job runtime into account. For example, +if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes, +it will have only 2 minutes of idle time between each run. + +Time zones + +By default, all interpretation and scheduling is done in the machine's local +time zone (time.Local). You can specify a different time zone on construction: + + cron.New( + cron.WithLocation(time.UTC)) + +Individual cron schedules may also override the time zone they are to be +interpreted in by providing an additional space-separated field at the beginning +of the cron spec, of the form "CRON_TZ=Asia/Tokyo". + +For example: + + # Runs at 6am in time.Local + cron.New().AddFunc("0 6 * * ?", ...) + + # Runs at 6am in America/New_York + nyc, _ := time.LoadLocation("America/New_York") + c := cron.New(cron.WithLocation(nyc)) + c.AddFunc("0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo + cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + + # Runs at 6am in Asia/Tokyo: CRON_TZ overrides the instance location + c := cron.New(cron.WithLocation(nyc)) + c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...) + +The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility. + +Be aware that jobs scheduled during daylight-savings leap-ahead transitions will +not be run! + +Job Wrappers + +A Cron runner may be configured with a chain of job wrappers to add +cross-cutting functionality to all submitted jobs.
For example, they may be used +to achieve the following effects: + + - Recover any panics from jobs (activated by default) + - Delay a job's execution if the previous run hasn't completed yet + - Skip a job's execution if the previous run hasn't completed yet + - Log each job's invocations + +Install wrappers for all jobs added to a cron using the `cron.WithChain` option: + + cron.New(cron.WithChain( + cron.SkipIfStillRunning(logger), + )) + +Install wrappers for individual jobs by explicitly wrapping them: + + job = cron.NewChain( + cron.SkipIfStillRunning(logger), + ).Then(job) + +Thread safety + +Since the Cron service runs concurrently with the calling code, some amount of +care must be taken to ensure proper synchronization. + +All cron methods are designed to be correctly synchronized as long as the caller +ensures that invocations have a clear happens-before ordering between them. + +Logging + +Cron defines a Logger interface that is a subset of the one defined in +github.com/go-logr/logr. It has two logging levels (Info and Error), and +parameters are key/value pairs. This makes it possible for cron logging to plug +into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided +to wrap the standard library *log.Logger. + +For additional insight into Cron operations, verbose logging may be activated +which will record job runs, scheduling decisions, and added or removed jobs. +Activate it with a one-off logger as follows: + + cron.New( + cron.WithLogger( + cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)))) + + +Implementation + +Cron entries are stored in an array, sorted by their next activation time. Cron +sleeps until the next job is due to be run. + +Upon waking: + - it runs each entry that is active on that second + - it calculates the next run times for the jobs that were run + - it re-sorts the array of entries by next activation time. + - it goes to sleep until the soonest job. +*/ +package cron diff --git a/vendor/github.com/robfig/cron/v3/logger.go b/vendor/github.com/robfig/cron/v3/logger.go new file mode 100644 index 00000000000..b4efcc05356 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/logger.go @@ -0,0 +1,86 @@ +package cron + +import ( + "io/ioutil" + "log" + "os" + "strings" + "time" +) + +// DefaultLogger is used by Cron if none is specified. +var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags)) + +// DiscardLogger can be used by callers to discard all log messages. +var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0)) + +// Logger is the interface used in this package for logging, so that any backend +// can be plugged in. It is a subset of the github.com/go-logr/logr interface. +type Logger interface { + // Info logs routine messages about cron's operation. + Info(msg string, keysAndValues ...interface{}) + // Error logs an error condition. + Error(err error, msg string, keysAndValues ...interface{}) +} + +// PrintfLogger wraps a Printf-based logger (such as the standard library "log") +// into an implementation of the Logger interface which logs errors only. +func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, false} +} + +// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library +// "log") into an implementation of the Logger interface which logs everything. 
+func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger { + return printfLogger{l, true} +} + +type printfLogger struct { + logger interface{ Printf(string, ...interface{}) } + logInfo bool +} + +func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) { + if pl.logInfo { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)), + append([]interface{}{msg}, keysAndValues...)...) + } +} + +func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) { + keysAndValues = formatTimes(keysAndValues) + pl.logger.Printf( + formatString(len(keysAndValues)+2), + append([]interface{}{msg, "error", err}, keysAndValues...)...) +} + +// formatString returns a logfmt-like format string for the number of +// key/values. +func formatString(numKeysAndValues int) string { + var sb strings.Builder + sb.WriteString("%s") + if numKeysAndValues > 0 { + sb.WriteString(", ") + } + for i := 0; i < numKeysAndValues/2; i++ { + if i > 0 { + sb.WriteString(", ") + } + sb.WriteString("%v=%v") + } + return sb.String() +} + +// formatTimes formats any time.Time values as RFC3339. +func formatTimes(keysAndValues []interface{}) []interface{} { + var formattedArgs []interface{} + for _, arg := range keysAndValues { + if t, ok := arg.(time.Time); ok { + arg = t.Format(time.RFC3339) + } + formattedArgs = append(formattedArgs, arg) + } + return formattedArgs +} diff --git a/vendor/github.com/robfig/cron/v3/option.go b/vendor/github.com/robfig/cron/v3/option.go new file mode 100644 index 00000000000..09e4278e779 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/option.go @@ -0,0 +1,45 @@ +package cron + +import ( + "time" +) + +// Option represents a modification to the default behavior of a Cron. +type Option func(*Cron) + +// WithLocation overrides the timezone of the cron instance. +func WithLocation(loc *time.Location) Option { + return func(c *Cron) { + c.location = loc + } +} + +// WithSeconds overrides the parser used for interpreting job schedules to +// include a seconds field as the first one. +func WithSeconds() Option { + return WithParser(NewParser( + Second | Minute | Hour | Dom | Month | Dow | Descriptor, + )) +} + +// WithParser overrides the parser used for interpreting job schedules. +func WithParser(p ScheduleParser) Option { + return func(c *Cron) { + c.parser = p + } +} + +// WithChain specifies Job wrappers to apply to all jobs added to this cron. +// Refer to the Chain* functions in this package for provided wrappers. +func WithChain(wrappers ...JobWrapper) Option { + return func(c *Cron) { + c.chain = NewChain(wrappers...) + } +} + +// WithLogger uses the provided logger. +func WithLogger(logger Logger) Option { + return func(c *Cron) { + c.logger = logger + } +} diff --git a/vendor/github.com/robfig/cron/v3/parser.go b/vendor/github.com/robfig/cron/v3/parser.go new file mode 100644 index 00000000000..3cf8879f7e7 --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/parser.go @@ -0,0 +1,434 @@ +package cron + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Configuration options for creating a parser. Most options specify which +// fields should be included, while others enable features. If a field is not +// included, the parser will assume a default value. These options do not change +// the order fields are parsed in.
+type ParseOption int
+
+const (
+	Second         ParseOption = 1 << iota // Seconds field, default 0
+	SecondOptional                         // Optional seconds field, default 0
+	Minute                                 // Minutes field, default 0
+	Hour                                   // Hours field, default 0
+	Dom                                    // Day of month field, default *
+	Month                                  // Month field, default *
+	Dow                                    // Day of week field, default *
+	DowOptional                            // Optional day of week field, default *
+	Descriptor                             // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+	Second,
+	Minute,
+	Hour,
+	Dom,
+	Month,
+	Dow,
+}
+
+var defaults = []string{
+	"0",
+	"0",
+	"0",
+	"*",
+	"*",
+	"*",
+}
+
+// A custom Parser that can be configured.
+type Parser struct {
+	options ParseOption
+}
+
+// NewParser creates a Parser with custom options.
+//
+// It panics if more than one Optional is given, since it would be impossible to
+// correctly infer which optional is provided or missing in general.
+//
+// Examples
+//
+//	// Standard parser without descriptors
+//	specParser := NewParser(Minute | Hour | Dom | Month | Dow)
+//	sched, err := specParser.Parse("0 0 15 */3 *")
+//
+//	// Same as above, just excludes time fields
+//	subsParser := NewParser(Dom | Month | Dow)
+//	sched, err := subsParser.Parse("15 */3 *")
+//
+//	// Same as above, just makes Dow optional
+//	subsParser := NewParser(Dom | Month | DowOptional)
+//	sched, err := subsParser.Parse("15 */3")
+//
+func NewParser(options ParseOption) Parser {
+	optionals := 0
+	if options&DowOptional > 0 {
+		optionals++
+	}
+	if options&SecondOptional > 0 {
+		optionals++
+	}
+	if optionals > 1 {
+		panic("multiple optionals may not be configured")
+	}
+	return Parser{options}
+}
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) {
+	if len(spec) == 0 {
+		return nil, fmt.Errorf("empty spec string")
+	}
+
+	// Extract timezone if present
+	var loc = time.Local
+	if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
+		var err error
+		i := strings.Index(spec, " ")
+		eq := strings.Index(spec, "=")
+		if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
+			return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
+		}
+		spec = strings.TrimSpace(spec[i:])
+	}
+
+	// Handle named schedules (descriptors), if configured
+	if strings.HasPrefix(spec, "@") {
+		if p.options&Descriptor == 0 {
+			return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
+		}
+		return parseDescriptor(spec, loc)
+	}
+
+	// Split on whitespace.
+	fields := strings.Fields(spec)
+
+	// Validate & fill in any omitted or optional fields
+	var err error
+	fields, err = normalizeFields(fields, p.options)
+	if err != nil {
+		return nil, err
+	}
+
+	field := func(field string, r bounds) uint64 {
+		if err != nil {
+			return 0
+		}
+		var bits uint64
+		bits, err = getField(field, r)
+		return bits
+	}
+
+	var (
+		second     = field(fields[0], seconds)
+		minute     = field(fields[1], minutes)
+		hour       = field(fields[2], hours)
+		dayofmonth = field(fields[3], dom)
+		month      = field(fields[4], months)
+		dayofweek  = field(fields[5], dow)
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &SpecSchedule{
+		Second:   second,
+		Minute:   minute,
+		Hour:     hour,
+		Dom:      dayofmonth,
+		Month:    month,
+		Dow:      dayofweek,
+		Location: loc,
+	}, nil
+}
+
+// normalizeFields takes a subset of the time fields and returns the full set
+// with defaults (zeroes) populated for unset fields.
+//
+// As part of performing this function, it also validates that the provided
+// fields are compatible with the configured options.
+func normalizeFields(fields []string, options ParseOption) ([]string, error) {
+	// Validate optionals & add their field to options
+	optionals := 0
+	if options&SecondOptional > 0 {
+		options |= Second
+		optionals++
+	}
+	if options&DowOptional > 0 {
+		options |= Dow
+		optionals++
+	}
+	if optionals > 1 {
+		return nil, fmt.Errorf("multiple optionals may not be configured")
+	}
+
+	// Figure out how many fields we need
+	max := 0
+	for _, place := range places {
+		if options&place > 0 {
+			max++
+		}
+	}
+	min := max - optionals
+
+	// Validate number of fields
+	if count := len(fields); count < min || count > max {
+		if min == max {
+			return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
+		}
+		return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
+	}
+
+	// Populate the optional field if not provided
+	if min < max && len(fields) == min {
+		switch {
+		case options&DowOptional > 0:
+			fields = append(fields, defaults[5]) // TODO: improve access to default
+		case options&SecondOptional > 0:
+			fields = append([]string{defaults[0]}, fields...)
+		default:
+			return nil, fmt.Errorf("unknown optional field")
+		}
+	}
+
+	// Populate all fields not part of options with their defaults
+	n := 0
+	expandedFields := make([]string, len(places))
+	copy(expandedFields, defaults)
+	for i, place := range places {
+		if options&place > 0 {
+			expandedFields[i] = fields[n]
+			n++
+		}
+	}
+	return expandedFields, nil
+}
+
+var standardParser = NewParser(
+	Minute | Hour | Dom | Month | Dow | Descriptor,
+)
+
+// ParseStandard returns a new crontab schedule representing the given
+// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
+// representing: minute, hour, day of month, month and day of week, in that
+// order. It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+//   - Standard crontab specs, e.g. "* * * * ?"
+//   - Descriptors, e.g. "@midnight", "@every 1h30m"
+func ParseStandard(standardSpec string) (Schedule, error) {
+	return standardParser.Parse(standardSpec)
+}
+
+// getField returns an Int with the bits set representing all of the times that
+// the field represents, or an error parsing the field value. A "field" is a
+// comma-separated list of "ranges".
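The comma-separated "ranges" that getField splits apart (and getRange interprets below) are easiest to exercise end to end through ParseStandard; a small sketch with an illustrative spec:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// "0-29/10" is a range with a step, "8-17" a plain range, and "mon-fri"
	// uses named day-of-week values; each is one "field" made of "ranges".
	sched, err := cron.ParseStandard("0-29/10 8-17 * * mon-fri")
	if err != nil {
		panic(err)
	}
	fmt.Println(sched.Next(time.Now()))
}
```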
+func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + if step > 1 { + extra = 0 + } + default: + return 0, fmt.Errorf("too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
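parseDescriptor below handles the @-prefixed shortcuts, and Parse strips a TZ=/CRON_TZ= prefix before field parsing; a sketch combining both, where the location and duration are arbitrary examples:

```go
package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	// "@every" is parsed with time.ParseDuration into a fixed-interval schedule.
	every, err := cron.ParseStandard("@every 1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(every.Next(time.Now()))

	// A CRON_TZ= prefix resolves the spec in that location: 06:00 Tokyo time.
	tokyo, err := cron.ParseStandard("CRON_TZ=Asia/Tokyo 0 6 * * *")
	if err != nil {
		panic(err)
	}
	fmt.Println(tokyo.Next(time.Now()))
}
```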
+func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + Location: loc, + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + Location: loc, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + Location: loc, + }, nil + + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/v3/spec.go b/vendor/github.com/robfig/cron/v3/spec.go new file mode 100644 index 00000000000..fa1e241e5fb --- /dev/null +++ b/vendor/github.com/robfig/cron/v3/spec.go @@ -0,0 +1,188 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 + + // Override location for this schedule. + Location *time.Location +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach + // + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Convert the given time into the schedule's timezone, if one is specified. 
+	// Save the original timezone so we can convert back after we find a time.
+	// Note that schedules without a time zone specified (time.Local) are treated
+	// as local to the time provided.
+	origLocation := t.Location()
+	loc := s.Location
+	if loc == time.Local {
+		loc = t.Location()
+	}
+	if s.Location != time.Local {
+		t = t.In(s.Location)
+	}
+
+	// Start at the earliest possible time (the upcoming second).
+	t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
+
+	// This flag indicates whether a field has been incremented.
+	added := false
+
+	// If no time is found within five years, return zero.
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	//
+	// NOTE: This causes issues for daylight savings regimes where midnight does
+	// not exist. For example: Sao Paulo has DST that transforms midnight on
+	// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 0, 1)
+		// Notice if the hour is no longer midnight due to DST.
+		// Add an hour if it's 23, subtract an hour if it's 1.
+		if t.Hour() != 0 {
+			if t.Hour() > 12 {
+				t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
+			} else {
+				t = t.Add(time.Duration(-t.Hour()) * time.Hour)
+			}
+		}
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t.In(origLocation)
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/vendor/github.com/scylladb/go-log/.gitignore b/vendor/github.com/scylladb/go-log/.gitignore
new file mode 100644
index 00000000000..ae9c9c4e3ab
--- /dev/null
+++ b/vendor/github.com/scylladb/go-log/.gitignore
@@ -0,0 +1,18 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories
+vendor/
+
diff --git a/vendor/github.com/scylladb/go-log/README.md b/vendor/github.com/scylladb/go-log/README.md
new file mode 100644
index 00000000000..ca94da2fc11
--- /dev/null
+++ b/vendor/github.com/scylladb/go-log/README.md
@@ -0,0 +1,39 @@
+# Log
+
+This is a wrapper over the [Uber zap](https://github.com/uber-go/zap) library that replaces the sugared logger.
+
+Features:
+
+* Syslog integration
+* Automatic stacktraces for errors
+* Context aware tracing ID
+* Easy to use
+* Fast!
+
+Example:
+
+```go
+logger, err := log.NewProduction(log.Config{
+	Mode:     log.SyslogMode,
+	Level:    zapcore.InfoLevel,
+	Encoding: log.JSONEncoding,
+})
+if err != nil {
+	t.Fatal(err)
+}
+logger.Info(ctx, "Could not connect to database",
+	"sleep", 5*time.Second,
+	"error", errors.New("I/O error"),
+)
+logger.Named("sub").Error(ctx, "Unexpected error", "error", errors.New("unexpected"))
+```
+
+## Benchmarks
+
+Benchmark results of running against zap and zap sugared loggers on Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz.
+
+```
+BenchmarkZap-4          2000000   978 ns/op    256 B/op   1 allocs/op
+BenchmarkZapSugared-4   1000000   1353 ns/op   528 B/op   2 allocs/op
+BenchmarkLogger-4       1000000   1167 ns/op   256 B/op   1 allocs/op
+```
diff --git a/vendor/github.com/scylladb/go-log/config.go b/vendor/github.com/scylladb/go-log/config.go
new file mode 100644
index 00000000000..fcac96c8ae3
--- /dev/null
+++ b/vendor/github.com/scylladb/go-log/config.go
@@ -0,0 +1,161 @@
+// Copyright (C) 2017 ScyllaDB
+
+package log
+
+import (
+	"fmt"
+	"time"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// Mode specifies logs destination.
+type Mode int8
+
+const (
+	// StderrMode logs are written to standard error.
+	StderrMode Mode = iota
+	// StdoutMode logs are written to standard output.
+ StdoutMode +) + +func (m Mode) String() string { + switch m { + case StderrMode: + return "stderr" + case StdoutMode: + return "stdout" + } + + return "" +} + +// MarshalText implements encoding.TextMarshaler. +func (m Mode) MarshalText() ([]byte, error) { + return []byte(m.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (m *Mode) UnmarshalText(text []byte) error { + switch string(text) { + case "stderr", "STDERR": + *m = StderrMode + case "stdout": + *m = StdoutMode + default: + return fmt.Errorf("unrecognized mode: %q", string(text)) + } + + return nil +} + +// Encoding specifies log encoding. +type Encoding int8 + +const ( + JSONEncoding Encoding = iota + ConsoleEncoding +) + +func (e Encoding) String() string { + switch e { + case JSONEncoding: + return "json" + case ConsoleEncoding: + return "console" + } + + return "" +} + +// MarshalText implements encoding.TextMarshaler. +func (e Encoding) MarshalText() ([]byte, error) { + return []byte(e.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (e *Encoding) UnmarshalText(text []byte) (err error) { + *e, err = ParseEncoding(string(text)) + return +} + +// ParseLevel parses an AtomicLevel from a string. +func ParseLevel(level string) (zap.AtomicLevel, error) { + return zap.ParseAtomicLevel(level) +} + +// ParseEncoding parses an Encoding from a string. +func ParseEncoding(encoding string) (Encoding, error) { + switch encoding { + case "JSON", "json": + return JSONEncoding, nil + case "CONSOLE", "console": + return ConsoleEncoding, nil + default: + return 0, fmt.Errorf("unrecognized encoding: %q", encoding) + } +} + +// Config specifies log mode and level. +type Config struct { + Mode Mode `json:"mode" yaml:"mode"` + Level zap.AtomicLevel `json:"level" yaml:"level"` + Sampling *zap.SamplingConfig `json:"sampling" yaml:"sampling"` + Encoding Encoding `json:"encoding" yaml:"encoding"` +} + +// NewProduction builds a production Logger based on the configuration. +func NewProduction(c Config, opts ...zap.Option) (Logger, error) { + enc := zapcore.EncoderConfig{ + // Keys can be anything except the empty string. + TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } + + cfg := zap.NewProductionConfig() + cfg.EncoderConfig = enc + cfg.OutputPaths = []string{c.Mode.String()} + cfg.Sampling = c.Sampling + cfg.Level = c.Level + cfg.Encoding = c.Encoding.String() + cfg.DisableCaller = true + + l, err := cfg.Build(opts...) + if err != nil { + return NopLogger, err + } + return NewLogger(l), nil +} + +// NewDevelopment creates a new logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +func NewDevelopment() Logger { + return NewDevelopmentWithLevel(zapcore.DebugLevel) +} + +// NewDevelopmentWithLevel creates a new logger that writes level and above +// logs to standard error in a human-friendly format. 
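A minimal sketch of using the development constructor documented above; the message and key/value pairs are illustrative:

```go
package main

import (
	"context"
	"time"

	"github.com/scylladb/go-log"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger := log.NewDevelopmentWithLevel(zapcore.InfoLevel)
	defer logger.Sync() // flush buffered entries on exit

	logger.Info(context.Background(), "connected to database",
		"host", "127.0.0.1", // illustrative key/value pairs
		"timeout", 5*time.Second,
	)
}
```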
+func NewDevelopmentWithLevel(level zapcore.Level) Logger { + cfg := zap.NewDevelopmentConfig() + cfg.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + cfg.EncoderConfig.EncodeTime = shortTimeEncoder + cfg.EncoderConfig.CallerKey = "" + cfg.Level.SetLevel(level) + + l, _ := cfg.Build() + return Logger{base: l} +} + +func shortTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(t.Format("15:04:05.000")) +} diff --git a/vendor/github.com/scylladb/go-log/context.go b/vendor/github.com/scylladb/go-log/context.go new file mode 100644 index 00000000000..b66fb7d8f9b --- /dev/null +++ b/vendor/github.com/scylladb/go-log/context.go @@ -0,0 +1,74 @@ +// Copyright (C) 2017 ScyllaDB + +package log + +import ( + "context" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +// ctxt is a context key type. +type ctxt byte + +// ctxt enumeration. +const ( + ctxTraceID ctxt = iota + ctxFields +) + +// WithFields assigns additional fields to the logging context. +// Existing fields are overridden and new fields are added. +func WithFields(ctx context.Context, keyvals ...interface{}) context.Context { + if len(keyvals)%2 != 0 || len(keyvals) == 0 { + return ctx + } + + fields := make([]zapcore.Field, 0, len(keyvals)/2) + for i := 0; i < len(keyvals); i += 2 { + // Consume this value and the next, treating them as a key-value pair. + key, val := keyvals[i], keyvals[i+1] + if keyStr, ok := key.(string); !ok { + break + } else { + fields = append(fields, zap.Any(keyStr, val)) + } + } + + var value []zapcore.Field + v := ctx.Value(ctxFields) + if v == nil { + value = make([]zapcore.Field, 0, len(fields)) + } else { + value = v.([]zapcore.Field) + } + for i := range fields { + found := false + for j := range value { + if value[j].Key == fields[i].Key { + value[j] = fields[i] + found = true + break + } + } + if !found { + value = append(value, fields[i]) + } + } + return context.WithValue(ctx, ctxFields, value) +} + +// contextFields returns key-value pairs assigned to the context sorted by the +// key. +func contextFields(ctx context.Context) []zapcore.Field { + if ctx == nil { + return nil + } + v, ok := ctx.Value(ctxFields).([]zapcore.Field) + if !ok { + return nil + } + + return v +} diff --git a/vendor/github.com/scylladb/go-log/error.go b/vendor/github.com/scylladb/go-log/error.go new file mode 100644 index 00000000000..cba56b7a401 --- /dev/null +++ b/vendor/github.com/scylladb/go-log/error.go @@ -0,0 +1,45 @@ +package log + +import ( + "fmt" + "io" + + pkgErrors "github.com/pkg/errors" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func stringifyErrors(fields []zapcore.Field, withStack bool) []zapcore.Field { + var stacks []zapcore.Field + + for i, f := range fields { + if f.Type == zapcore.ErrorType { + if withStack { + if s, ok := fields[i].Interface.(stackTracer); ok { + stacks = append(stacks, zap.String(f.Key+"Stack", fmt.Sprintf("%+v", stackPrinter{s.StackTrace()}))) + } + } + + fields[i].Type = zapcore.StringType + fields[i].String = f.Interface.(error).Error() + fields[i].Interface = nil + } + } + + return append(fields, stacks...) 
+}
+
+type stackTracer interface {
+	StackTrace() pkgErrors.StackTrace
+}
+
+type stackPrinter struct {
+	stack []pkgErrors.Frame
+}
+
+func (p stackPrinter) Format(s fmt.State, verb rune) {
+	for _, f := range p.stack {
+		f.Format(s, 'v')
+		io.WriteString(s, "\n")
+	}
+}
diff --git a/vendor/github.com/scylladb/go-log/logger.go b/vendor/github.com/scylladb/go-log/logger.go
new file mode 100644
index 00000000000..68d81dc0acd
--- /dev/null
+++ b/vendor/github.com/scylladb/go-log/logger.go
@@ -0,0 +1,182 @@
+// Copyright (C) 2017 ScyllaDB
+
+package log
+
+import (
+	"context"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// Logger logs messages.
+type Logger struct {
+	base       *zap.Logger
+	baseFields []zapcore.Field
+}
+
+// NewLogger creates a new logger backed by a zap.Logger.
+func NewLogger(base *zap.Logger) Logger {
+	return Logger{base: base}
+}
+
+// NopLogger doesn't log anything.
+var NopLogger = Logger{}
+
+// Named adds a new path segment to the logger's name. Segments are joined by
+// periods. By default, Loggers are unnamed.
+func (l Logger) Named(name string) Logger {
+	if l.base == nil {
+		return l
+	}
+
+	l.base = l.base.Named(name)
+
+	return l
+}
+
+// WithOptions clones the current Logger, applies the supplied Options, and
+// returns the resulting Logger. It's safe to use concurrently.
+func (l Logger) WithOptions(opts ...zap.Option) Logger {
+	if l.base == nil {
+		return l
+	}
+
+	l.base = l.base.WithOptions(opts...)
+
+	return l
+}
+
+// With adds a variadic number of fields to the logging context.
+func (l Logger) With(keyvals ...interface{}) Logger {
+	if l.base == nil {
+		return l
+	}
+
+	fields := l.zapify(context.Background(), keyvals)
+	l.baseFields = append(l.baseFields, fields...)
+	l.base = l.base.With(fields...)
+
+	return l
+}
+
+// Sync flushes any buffered log entries. Applications should take care to call
+// Sync before exiting.
+func (l Logger) Sync() error {
+	if l.base == nil {
+		return nil
+	}
+	return l.base.Sync()
+}
+
+// Debug logs a message with some additional context.
+func (l Logger) Debug(ctx context.Context, msg string, keyvals ...interface{}) {
+	l.log(ctx, zapcore.DebugLevel, msg, keyvals)
+}
+
+// Info logs a message with some additional context.
+func (l Logger) Info(ctx context.Context, msg string, keyvals ...interface{}) {
+	l.log(ctx, zapcore.InfoLevel, msg, keyvals)
+}
+
+// Error logs a message with some additional context.
+func (l Logger) Error(ctx context.Context, msg string, keyvals ...interface{}) {
+	l.log(ctx, zapcore.ErrorLevel, msg, keyvals)
+}
+
+// Fatal logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (l Logger) Fatal(ctx context.Context, msg string, keyvals ...interface{}) {
+	l.log(ctx, zapcore.FatalLevel, msg, keyvals)
+}
+
+func (l Logger) log(ctx context.Context, lvl zapcore.Level, msg string, keyvals []interface{}) {
+	if l.base == nil {
+		return
+	}
+	if !l.base.Core().Enabled(lvl) {
+		return
+	}
+
+	if ce := l.base.Check(lvl, msg); ce != nil {
+		fields := stringifyErrors(l.zapify(ctx, keyvals), lvl >= zapcore.ErrorLevel)
+		ce.Write(fields...)
+	}
+}
+
+// Field ordering: logger fields > context fields > passed fields > trace_id.
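The ordering documented above can be seen by combining the three sources of fields; a sketch with illustrative keys:

```go
package main

import (
	"context"

	"github.com/scylladb/go-log"
)

func main() {
	logger := log.NewDevelopment().With("component", "repair") // logger fields
	ctx := log.WithNewTraceID(context.Background())            // appended last as _trace_id
	ctx = log.WithFields(ctx, "cluster", "prod")               // context fields

	// Output carries component, cluster, shard, and _trace_id, in that order.
	logger.Info(ctx, "starting", "shard", 0) // passed fields
}
```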
+func (l Logger) zapify(ctx context.Context, keyvals []interface{}) []zapcore.Field { + if len(keyvals)%2 != 0 { + l.base.DPanic("odd number of elements") + return nil + } + + var ( + extraFields int + extra []zapcore.Field + trace *zapcore.Field + ok bool + ) + + if ctx != nil { + trace, ok = ctx.Value(ctxTraceID).(*zapcore.Field) + if ok { + extraFields++ + } + extra = contextFields(ctx) + extraFields += len(extra) + } + + if len(keyvals)+extraFields == 0 { + return nil + } + + fields := make([]zapcore.Field, 0, len(keyvals)/2+extraFields) + + if len(extra) > 0 { + // Exclude fields that are set by calling With on logger. + for i := range extra { + if containsKey(l.baseFields, extra[i].Key) > -1 { + continue + } + fields = append(fields, extra[i]) + } + } + + for i := 0; i < len(keyvals); i += 2 { + // Consume this value and the next, treating them as a key-value pair. + key, val := keyvals[i], keyvals[i+1] + + if keyStr, ok := key.(string); !ok { + l.base.DPanic("key not a string", zap.Any("key", key)) + break + } else { + j := containsKey(fields, keyStr) + if j > -1 { + fields[j] = zap.Any(keyStr, val) + continue + } + fields = append(fields, zap.Any(keyStr, val)) + } + } + + if trace != nil { + fields = append(fields, *trace) + } + + return fields +} + +func containsKey(fields []zapcore.Field, key string) int { + for i := range fields { + if fields[i].Key == key { + return i + } + } + return -1 +} + +// BaseOf unwraps l and returns the base zap.Logger. +func BaseOf(l Logger) *zap.Logger { + return l.base +} diff --git a/vendor/github.com/scylladb/go-log/syslog.go b/vendor/github.com/scylladb/go-log/syslog.go new file mode 100644 index 00000000000..2bc3a35f615 --- /dev/null +++ b/vendor/github.com/scylladb/go-log/syslog.go @@ -0,0 +1,97 @@ +// Copyright (C) 2017 ScyllaDB + +package log + +import ( + "log/syslog" + + "go.uber.org/zap/zapcore" +) + +// SyslogCore is a zapcore.Core that logs to syslog. +type SyslogCore struct { + zapcore.LevelEnabler + enc zapcore.Encoder + out *syslog.Writer +} + +// NewSyslogCore creates a Core that writes logs to a syslog. +func NewSyslogCore(enc zapcore.Encoder, out *syslog.Writer, enab zapcore.LevelEnabler) *SyslogCore { + return &SyslogCore{ + LevelEnabler: enab, + enc: enc, + out: out, + } +} + +// With implements zapcore.Core. +func (c *SyslogCore) With(fields []zapcore.Field) zapcore.Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +// Check implements zapcore.Core. +func (c *SyslogCore) Check(ent zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +// Write implements zapcore.Core. +func (c *SyslogCore) Write(ent zapcore.Entry, fields []zapcore.Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + + switch ent.Level { + case zapcore.DebugLevel: + err = c.out.Debug(buf.String()) + case zapcore.InfoLevel: + err = c.out.Info(buf.String()) + case zapcore.WarnLevel: + err = c.out.Warning(buf.String()) + case zapcore.ErrorLevel: + err = c.out.Err(buf.String()) + case zapcore.DPanicLevel: + err = c.out.Crit(buf.String()) + case zapcore.PanicLevel: + err = c.out.Crit(buf.String()) + case zapcore.FatalLevel: + err = c.out.Crit(buf.String()) + default: + _, err = c.out.Write(buf.Bytes()) + } + buf.Free() + if err != nil { + return err + } + if ent.Level > zapcore.ErrorLevel { + // Since we may be crashing the program, sync the output. 
Ignore Sync
+		// errors, pending a clean solution to issue #370.
+		c.Sync()
+	}
+	return nil
+}
+
+// Sync implements zapcore.Core. It closes the underlying connection to the
+// syslog daemon.
+func (c *SyslogCore) Sync() error {
+	return c.out.Close()
+}
+
+func (c *SyslogCore) clone() *SyslogCore {
+	return &SyslogCore{
+		LevelEnabler: c.LevelEnabler,
+		enc:          c.enc.Clone(),
+		out:          c.out,
+	}
+}
+
+func addFields(enc zapcore.ObjectEncoder, fields []zapcore.Field) {
+	for i := range fields {
+		fields[i].AddTo(enc)
+	}
+}
diff --git a/vendor/github.com/scylladb/go-log/trace.go b/vendor/github.com/scylladb/go-log/trace.go
new file mode 100644
index 00000000000..dffc06f0bc9
--- /dev/null
+++ b/vendor/github.com/scylladb/go-log/trace.go
@@ -0,0 +1,63 @@
+// Copyright (C) 2017 ScyllaDB
+
+package log
+
+import (
+	"context"
+	"crypto/rand"
+	"encoding/base64"
+	"io"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// WithTraceID ensures that the context has a trace ID; if not, it returns a
+// new context with a random trace ID.
+func WithTraceID(ctx context.Context) context.Context {
+	if ctx.Value(ctxTraceID) != nil {
+		return ctx
+	}
+
+	return WithNewTraceID(ctx)
+}
+
+// WithNewTraceID returns a new context with a random trace ID.
+func WithNewTraceID(ctx context.Context) context.Context {
+	v := zap.String("_trace_id", newTraceID())
+	return context.WithValue(ctx, ctxTraceID, &v)
+}
+
+// CopyTraceID allows for copying the trace ID from one context to another.
+func CopyTraceID(ctx, from context.Context) context.Context {
+	v, ok := from.Value(ctxTraceID).(*zapcore.Field)
+	if !ok {
+		return ctx
+	}
+
+	return context.WithValue(ctx, ctxTraceID, v)
+}
+
+// TraceID returns trace ID of the context.
+func TraceID(ctx context.Context) string {
+	if ctx == nil {
+		return ""
+	}
+	v, ok := ctx.Value(ctxTraceID).(*zapcore.Field)
+	if !ok {
+		return ""
+	}
+	return v.String
+}
+
+func newTraceID() string {
+	var uuid [16]byte
+	_, err := io.ReadFull(rand.Reader, uuid[:])
+	if err != nil {
+		return ""
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+
+	return base64.RawURLEncoding.EncodeToString(uuid[:])
+}
diff --git a/vendor/github.com/scylladb/go-set/b16set/b16set.go b/vendor/github.com/scylladb/go-set/b16set/b16set.go
new file mode 100644
index 00000000000..3fa2b28f185
--- /dev/null
+++ b/vendor/github.com/scylladb/go-set/b16set/b16set.go
@@ -0,0 +1,279 @@
+// Copyright (C) 2017 ScyllaDB
+// Use of this source code is governed by a ALv2-style
+// license that can be found at https://github.com/scylladb/go-set/LICENSE.
+
+package b16set
+
+import (
+	"fmt"
+	"math"
+	"strings"
+)
+
+var (
+	// helpful to not write everywhere struct{}{}
+	keyExists   = struct{}{}
+	nonExistent [16]byte
+)
+
+// Set is the main set structure that holds all the data
+// and methods used for working with the set.
+type Set struct {
+	m map[[16]byte]struct{}
+}
+
+// New creates and initializes a new Set.
+func New(ts ...[16]byte) *Set {
+	s := NewWithSize(len(ts))
+	s.Add(ts...)
+	return s
+}
+
+// NewWithSize creates a new Set and gives make map a size hint.
+func NewWithSize(size int) *Set {
+	return &Set{make(map[[16]byte]struct{}, size)}
+}
+
+// Add includes the specified items (one or more) to the Set. The underlying
+// Set s is modified. If passed nothing it silently returns.
+func (s *Set) Add(items ...[16]byte) {
+	for _, item := range items {
+		s.m[item] = keyExists
+	}
+}
+
+// Remove deletes the specified items from the Set.
The underlying Set s is
+// modified. If passed nothing it silently returns.
+func (s *Set) Remove(items ...[16]byte) {
+	for _, item := range items {
+		delete(s.m, item)
+	}
+}
+
+// Pop deletes and returns an item from the Set. The underlying Set s is
+// modified. If Set is empty, the zero value is returned.
+func (s *Set) Pop() [16]byte {
+	for item := range s.m {
+		delete(s.m, item)
+		return item
+	}
+	return nonExistent
+}
+
+// Pop2 tries to delete and return an item from the Set. The underlying Set s
+// is modified. The second value is a bool that is true if the item existed in
+// the set, and false if not. If Set is empty, the zero value and false are
+// returned.
+func (s *Set) Pop2() ([16]byte, bool) {
+	for item := range s.m {
+		delete(s.m, item)
+		return item, true
+	}
+	return nonExistent, false
+}
+
+// Has looks for the existence of items passed. It returns false if nothing is
+// passed. For multiple items it returns true only if all of the items exist.
+func (s *Set) Has(items ...[16]byte) bool {
+	has := false
+	for _, item := range items {
+		if _, has = s.m[item]; !has {
+			break
+		}
+	}
+	return has
+}
+
+// HasAny looks for the existence of any of the items passed.
+// It returns false if nothing is passed.
+// For multiple items it returns true if any of the items exist.
+func (s *Set) HasAny(items ...[16]byte) bool {
+	has := false
+	for _, item := range items {
+		if _, has = s.m[item]; has {
+			break
+		}
+	}
+	return has
+}
+
+// Size returns the number of items in a Set.
+func (s *Set) Size() int {
+	return len(s.m)
+}
+
+// Clear removes all items from the Set.
+func (s *Set) Clear() {
+	s.m = make(map[[16]byte]struct{})
+}
+
+// IsEmpty reports whether the Set is empty.
+func (s *Set) IsEmpty() bool {
+	return s.Size() == 0
+}
+
+// IsEqual tests whether s and t are the same in size and have the same items.
+func (s *Set) IsEqual(t *Set) bool {
+	// return false if they are not the same size
+	if s.Size() != t.Size() {
+		return false
+	}
+
+	equal := true
+	t.Each(func(item [16]byte) bool {
+		_, equal = s.m[item]
+		return equal // if false, Each() will end
+	})
+
+	return equal
+}
+
+// IsSubset tests whether t is a subset of s.
+func (s *Set) IsSubset(t *Set) bool {
+	if s.Size() < t.Size() {
+		return false
+	}
+
+	subset := true
+
+	t.Each(func(item [16]byte) bool {
+		_, subset = s.m[item]
+		return subset
+	})
+
+	return subset
+}
+
+// IsSuperset tests whether t is a superset of s.
+func (s *Set) IsSuperset(t *Set) bool {
+	return t.IsSubset(s)
+}
+
+// Each traverses the items in the Set, calling the provided function for each
+// Set member. Traversal will continue until all items in the Set have been
+// visited, or if the closure returns false.
+func (s *Set) Each(f func(item [16]byte) bool) {
+	for item := range s.m {
+		if !f(item) {
+			break
+		}
+	}
+}
+
+// Copy returns a new Set with a copy of s.
+func (s *Set) Copy() *Set {
+	u := NewWithSize(s.Size())
+	for item := range s.m {
+		u.m[item] = keyExists
+	}
+	return u
+}
+
+// String returns a string representation of s
+func (s *Set) String() string {
+	v := make([]string, 0, s.Size())
+	for item := range s.m {
+		v = append(v, fmt.Sprintf("%v", item))
+	}
+	return fmt.Sprintf("[%s]", strings.Join(v, ", "))
+}
+
+// List returns a slice of all items. There are also StringSlice() and
+// IntSlice() methods for returning slices of type string or int.
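A short sketch of the Set API above; the 16-byte keys are arbitrary stand-ins for UUID-like values:

```go
package main

import (
	"fmt"

	"github.com/scylladb/go-set/b16set"
)

func main() {
	a, b := [16]byte{1}, [16]byte{2}

	s := b16set.New(a, b)
	t := b16set.New(b)

	fmt.Println(s.Has(a, b))     // true: all listed items exist
	fmt.Println(s.IsSuperset(t)) // true
	if item, ok := t.Pop2(); ok {
		fmt.Println(item == b) // true, and t is now empty
	}
}
```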
+func (s *Set) List() [][16]byte {
+	v := make([][16]byte, 0, s.Size())
+	for item := range s.m {
+		v = append(v, item)
+	}
+	return v
+}
+
+// Merge is like Union, however it modifies the current Set it's applied on
+// with the given t Set.
+func (s *Set) Merge(t *Set) {
+	for item := range t.m {
+		s.m[item] = keyExists
+	}
+}
+
+// Separate removes the Set items contained in t from Set s. Please be aware
+// that it's not the opposite of Merge.
+func (s *Set) Separate(t *Set) {
+	for item := range t.m {
+		delete(s.m, item)
+	}
+}
+
+// Union is the merger of multiple sets. It returns a new set with all the
+// elements present in any of the sets that are passed.
+func Union(sets ...*Set) *Set {
+	maxPos := -1
+	maxSize := 0
+	for i, set := range sets {
+		if l := set.Size(); l > maxSize {
+			maxSize = l
+			maxPos = i
+		}
+	}
+	if maxSize == 0 {
+		return New()
+	}
+
+	u := sets[maxPos].Copy()
+	for i, set := range sets {
+		if i == maxPos {
+			continue
+		}
+		for item := range set.m {
+			u.m[item] = keyExists
+		}
+	}
+	return u
+}
+
+// Difference returns a new set which contains items which are in the first
+// set but not in the others.
+func Difference(set1 *Set, sets ...*Set) *Set {
+	s := set1.Copy()
+	for _, set := range sets {
+		s.Separate(set)
+	}
+	return s
+}
+
+// Intersection returns a new set which contains items that only exist in all
+// given sets.
+func Intersection(sets ...*Set) *Set {
+	minPos := -1
+	minSize := math.MaxInt64
+	for i, set := range sets {
+		if l := set.Size(); l < minSize {
+			minSize = l
+			minPos = i
+		}
+	}
+	if minSize == math.MaxInt64 || minSize == 0 {
+		return New()
+	}
+
+	t := sets[minPos].Copy()
+	for i, set := range sets {
+		if i == minPos {
+			continue
+		}
+		for item := range t.m {
+			if _, has := set.m[item]; !has {
+				delete(t.m, item)
+			}
+		}
+	}
+	return t
+}
+
+// SymmetricDifference returns a new set which contains items that are in one
+// of the sets, but not in both.
+func SymmetricDifference(s *Set, t *Set) *Set {
+	u := Difference(s, t)
+	v := Difference(t, s)
+	return Union(u, v)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/auth/auth.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/auth/auth.go
new file mode 100644
index 00000000000..f75f16ccd50
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/auth/auth.go
@@ -0,0 +1,76 @@
+// Copyright (C) 2017 ScyllaDB
+
+package auth
+
+import (
+	"crypto/subtle"
+	"encoding/json"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/scylladb/scylla-manager/v3/pkg/util/httpx"
+)
+
+// AddToken sets the authorization header. If token is empty it immediately
+// returns the next handler.
+func AddToken(next http.RoundTripper, token string) http.RoundTripper {
+	if token == "" {
+		return next
+	}
+
+	return httpx.RoundTripperFunc(func(req *http.Request) (resp *http.Response, err error) {
+		r := httpx.CloneRequest(req)
+		r.Header.Set("Authorization", "Bearer "+token)
+		return next.RoundTrip(r)
+	})
+}
+
+// ValidateToken is HTTP server middleware that checks if the Authorization
+// header contains `Bearer token`.
+// If not, execution is held for the penalty duration and then a 401 status
+// code with the provided body is returned.
+// If token is empty it immediately returns the next handler.
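The two halves of this package pair up: AddToken decorates a client transport while ValidateToken (below) guards the server side; a sketch where the token, penalty, and address are illustrative:

```go
package main

import (
	"encoding/json"
	"net/http"
	"time"

	"github.com/scylladb/scylla-manager/v3/pkg/auth"
)

func main() {
	const token = "s3cret" // illustrative shared secret

	// Client side: every request gains "Authorization: Bearer s3cret".
	client := &http.Client{Transport: auth.AddToken(http.DefaultTransport, token)}
	_ = client

	// Server side: bad tokens wait out a 1s penalty, then receive 401 + body.
	body, _ := json.Marshal(map[string]string{"message": "unauthorized"})
	mw := auth.ValidateToken(token, time.Second, body)
	_ = http.ListenAndServe("127.0.0.1:8080", mw(http.NewServeMux()))
}
```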
+func ValidateToken(token string, penalty time.Duration, + unauthorizedBody json.RawMessage, +) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + if token == "" { + return next + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !secureCompare(bearerAuth(r), token) { + if penalty > 0 { + time.Sleep(penalty) + } + w.WriteHeader(http.StatusUnauthorized) + w.Write(unauthorizedBody) // nolint: errcheck + } else { + next.ServeHTTP(w, r) + } + }) + } +} + +// bearerAuth returns the token provided in the request's Authorization header. +func bearerAuth(r *http.Request) (token string) { + auth := r.Header.Get("Authorization") + if auth == "" { + return + } + return parseBearerAuth(auth) +} + +func parseBearerAuth(auth string) (token string) { + const prefix = "Bearer " + // Case insensitive prefix match. See Issue 22736. + if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) { + return "" + } + return auth[len(prefix):] +} + +func secureCompare(x, y string) bool { + return subtle.ConstantTimeCompare([]byte(x), []byte(y)) == 1 +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/dht/doc.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/dht/doc.go new file mode 100644 index 00000000000..3f71a5ff7b6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/dht/doc.go @@ -0,0 +1,5 @@ +// Copyright (C) 2017 ScyllaDB + +// Package dht reimplements selected elements of +// https://github.com/scylladb/scylla/tree/master/dht. +package dht diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/dht/murmur3partitioner.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/dht/murmur3partitioner.go new file mode 100644 index 00000000000..3d465ed4c1a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/dht/murmur3partitioner.go @@ -0,0 +1,13 @@ +// Copyright (C) 2017 ScyllaDB + +package dht + +import ( + "math" +) + +// Full token range. 
+const ( + Murmur3MinToken = int64(math.MinInt64) + Murmur3MaxToken = int64(math.MaxInt64) +) diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/managerclient/model.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/managerclient/model.go index 3e1acb6b355..82821df4103 100644 --- a/vendor/github.com/scylladb/scylla-manager/v3/pkg/managerclient/model.go +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/managerclient/model.go @@ -3,6 +3,7 @@ package managerclient import ( + "encoding/json" "fmt" "io" "sort" @@ -14,7 +15,9 @@ import ( "github.com/pkg/errors" "github.com/scylladb/go-set/strset" "github.com/scylladb/scylla-manager/v3/pkg/managerclient/table" + "github.com/scylladb/scylla-manager/v3/pkg/service/scheduler" "github.com/scylladb/scylla-manager/v3/pkg/util/inexlist" + "github.com/scylladb/scylla-manager/v3/pkg/util/timeutc" "github.com/scylladb/scylla-manager/v3/pkg/util/version" "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla-manager/models" "github.com/scylladb/termtables" @@ -513,7 +516,18 @@ func (li TaskListItems) Render(w io.Writer) error { var schedule string if t.Schedule.Cron != "" { - schedule = t.Schedule.Cron + var cronSpec scheduler.CronSpecification + err := json.Unmarshal([]byte(t.Schedule.Cron), &cronSpec) + if err != nil { + schedule = t.Schedule.Cron + } else { + schedule = cronSpec.Spec + if cronSpec.StartDate.After(timeutc.Now()) { + c := scheduler.MustCron(cronSpec.Spec, cronSpec.StartDate) + schedule += fmt.Sprintf(" with first activation after %s", + c.Next(cronSpec.StartDate).Format("2006-01-02 15:04:05")) + } + } } else if t.Schedule.Interval != "" { schedule = t.Schedule.Interval } diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/backup.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/backup.go new file mode 100644 index 00000000000..286b51b7fec --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/backup.go @@ -0,0 +1,99 @@ +// Copyright (C) 2017 ScyllaDB + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +type BackupMetrics struct { + snapshot *prometheus.GaugeVec + filesSizeBytes *prometheus.GaugeVec + filesUploadedBytes *prometheus.GaugeVec + filesSkippedBytes *prometheus.GaugeVec + filesFailedBytes *prometheus.GaugeVec + purgeFiles *prometheus.GaugeVec + purgeDeletedFiles *prometheus.GaugeVec +} + +func NewBackupMetrics() BackupMetrics { + g := gaugeVecCreator("backup") + + return BackupMetrics{ + snapshot: g("Indicates if snapshot was taken.", + "snapshot", "cluster", "keyspace", "host"), + filesSizeBytes: g("Total size of backup files in bytes.", + "files_size_bytes", "cluster", "keyspace", "table", "host"), + filesUploadedBytes: g("Number of bytes uploaded to backup location.", + "files_uploaded_bytes", "cluster", "keyspace", "table", "host"), + filesSkippedBytes: g("Number of deduplicated bytes already uploaded to backup location.", + "files_skipped_bytes", "cluster", "keyspace", "table", "host"), + filesFailedBytes: g("Number of bytes failed to upload to backup location.", + "files_failed_bytes", "cluster", "keyspace", "table", "host"), + purgeFiles: g("Number of files that need to be deleted due to retention policy.", + "purge_files", "cluster", "host"), + purgeDeletedFiles: g("Number of files that were deleted.", + "purge_deleted_files", "cluster", "host"), + } +} + +// MustRegister shall be called to make the metrics visible by prometheus client. 
+func (m BackupMetrics) MustRegister() BackupMetrics { + prometheus.MustRegister(m.all()...) + return m +} + +func (m BackupMetrics) all() []prometheus.Collector { + return []prometheus.Collector{ + m.snapshot, + m.filesSizeBytes, + m.filesUploadedBytes, + m.filesSkippedBytes, + m.filesFailedBytes, + m.purgeFiles, + m.purgeDeletedFiles, + } +} + +// ResetClusterMetrics resets all backup metrics labeled with the cluster. +func (m BackupMetrics) ResetClusterMetrics(clusterID uuid.UUID) { + for _, c := range m.all() { + setGaugeVecMatching(c.(*prometheus.GaugeVec), unspecifiedValue, clusterMatcher(clusterID)) + } +} + +// SetSnapshot updates backup "snapshot" metric. +func (m BackupMetrics) SetSnapshot(clusterID uuid.UUID, keyspace, host string, taken bool) { + l := prometheus.Labels{ + "cluster": clusterID.String(), + "keyspace": keyspace, + "host": host, + } + v := 0. + if taken { + v = 1 + } + m.snapshot.With(l).Set(v) +} + +// SetFilesProgress updates backup "files_{uploaded,skipped,failed}_bytes" metrics. +func (m BackupMetrics) SetFilesProgress(clusterID uuid.UUID, keyspace, table, host string, size, uploaded, skipped, failed int64) { + l := prometheus.Labels{ + "cluster": clusterID.String(), + "keyspace": keyspace, + "table": table, + "host": host, + } + m.filesSizeBytes.With(l).Set(float64(size)) + m.filesUploadedBytes.With(l).Set(float64(uploaded)) + m.filesSkippedBytes.With(l).Set(float64(skipped)) + m.filesFailedBytes.With(l).Set(float64(failed)) +} + +// SetPurgeFiles updates backup "purge_files" and "purge_deleted_files" metrics. +func (m BackupMetrics) SetPurgeFiles(clusterID uuid.UUID, host string, total, deleted int) { + m.purgeFiles.WithLabelValues(clusterID.String(), host).Set(float64(total)) + m.purgeDeletedFiles.WithLabelValues(clusterID.String(), host).Set(float64(deleted)) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/cluster.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/cluster.go new file mode 100644 index 00000000000..c37a7cf5f3d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/cluster.go @@ -0,0 +1,34 @@ +// Copyright (C) 2017 ScyllaDB + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +type ClusterMetrics struct { + name *prometheus.GaugeVec +} + +func NewClusterMetrics() ClusterMetrics { + g := gaugeVecCreator("cluster") + + return ClusterMetrics{ + name: g("Mapping from cluster ID to name.", "name", "cluster", "name"), + } +} + +// MustRegister shall be called to make the metrics visible by prometheus client. +func (m ClusterMetrics) MustRegister() ClusterMetrics { + prometheus.MustRegister( + m.name, + ) + return m +} + +// SetName updates "name" metric. +func (m ClusterMetrics) SetName(clusterID uuid.UUID, name string) { + DeleteMatching(m.name, LabelMatcher("cluster", clusterID.String())) + m.name.WithLabelValues(clusterID.String(), name).Add(0) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/delete.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/delete.go new file mode 100644 index 00000000000..4b325a8aa36 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/delete.go @@ -0,0 +1,82 @@ +// Copyright (C) 2017 ScyllaDB + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// CollectorDeleter extends prometheus.Collector with Delete. 
+type CollectorDeleter interface { + prometheus.Collector + Delete(labels prometheus.Labels) bool +} + +// DeleteMatching removes metric instances with matching labels. +func DeleteMatching(c CollectorDeleter, matcher func(*dto.Metric) bool) { + var data dto.Metric + var toDelete []prometheus.Labels + + for m := range collect(c) { + if err := m.Write(&data); err != nil { + continue + } + if matcher(&data) { + toDelete = append(toDelete, makeLabels(data.Label)) + } + } + + for _, labels := range toDelete { + c.Delete(labels) + } +} + +const unspecifiedValue = float64(-1) + +// setGaugeVecMatching sets metric instances with matching labels to the +// given value. +func setGaugeVecMatching(c *prometheus.GaugeVec, value float64, matcher func(*dto.Metric) bool) { // nolint: unparam + var ( + data dto.Metric + labels []prometheus.Labels + ) + + for m := range collect(c) { + if err := m.Write(&data); err != nil { + continue + } + if matcher(&data) { + labels = append(labels, makeLabels(data.Label)) + } + } + + for _, l := range labels { + m, err := c.GetMetricWith(l) + if err != nil { + panic(err) + } + m.Set(value) + } +} + +func collect(c prometheus.Collector) chan prometheus.Metric { + ch := make(chan prometheus.Metric) + go func() { + c.Collect(ch) + close(ch) + }() + return ch +} + +func makeLabels(pairs []*dto.LabelPair) prometheus.Labels { + labels := make(prometheus.Labels) + + for _, kv := range pairs { + if kv != nil { + labels[kv.GetName()] = kv.GetValue() + } + } + + return labels +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/matcher.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/matcher.go new file mode 100644 index 00000000000..c129674c742 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/matcher.go @@ -0,0 +1,24 @@ +// Copyright (C) 2017 ScyllaDB + +package metrics + +import ( + dto "github.com/prometheus/client_model/go" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +// LabelMatcher returns a matcher checking only single label. 
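DeleteMatching and the LabelMatcher helper below combine to drop stale series from a metric vector; a sketch with an illustrative gauge (*prometheus.GaugeVec satisfies CollectorDeleter because it has a Delete method):

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/scylladb/scylla-manager/v3/pkg/metrics"
)

func main() {
	gauge := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_bytes", Help: "Example gauge."},
		[]string{"cluster", "host"},
	)
	gauge.WithLabelValues("c1", "h1").Set(1)
	gauge.WithLabelValues("c2", "h1").Set(2)

	// Remove every series whose "cluster" label equals "c1".
	metrics.DeleteMatching(gauge, metrics.LabelMatcher("cluster", "c1"))
}
```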
+func LabelMatcher(name, value string) func(m *dto.Metric) bool { + return func(m *dto.Metric) bool { + for _, l := range m.GetLabel() { + if l.GetName() == name && l.GetValue() == value { + return true + } + } + return false + } +} + +func clusterMatcher(clusterID uuid.UUID) func(m *dto.Metric) bool { + return LabelMatcher("cluster", clusterID.String()) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/metrics.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/metrics.go new file mode 100644 index 00000000000..22ae0d51895 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/metrics.go @@ -0,0 +1,18 @@ +// Copyright (C) 2017 ScyllaDB + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +func gaugeVecCreator(subsystem string) func(help, name string, labels ...string) *prometheus.GaugeVec { + return func(help, name string, labels ...string) *prometheus.GaugeVec { + return prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "scylla_manager", + Subsystem: subsystem, + Name: name, + Help: help, + }, labels) + } +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/repair.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/repair.go new file mode 100644 index 00000000000..8c26f6e0647 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/repair.go @@ -0,0 +1,108 @@ +// Copyright (C) 2017 ScyllaDB + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +type RepairMetrics struct { + progress *prometheus.GaugeVec + tokenRangesTotal *prometheus.GaugeVec + tokenRangesSuccess *prometheus.GaugeVec + tokenRangesError *prometheus.GaugeVec + inFlightJobs *prometheus.GaugeVec + inFlightTokenRanges *prometheus.GaugeVec +} + +func NewRepairMetrics() RepairMetrics { + g := gaugeVecCreator("repair") + + return RepairMetrics{ + progress: g("Total percentage repair progress.", "progress", "cluster"), + tokenRangesTotal: g("Total number of token ranges to repair.", + "token_ranges_total", "cluster", "keyspace", "table", "host"), + tokenRangesSuccess: g("Number of repaired token ranges.", + "token_ranges_success", "cluster", "keyspace", "table", "host"), + tokenRangesError: g("Number of segments that failed to repair.", + "token_ranges_error", "cluster", "keyspace", "table", "host"), + inFlightJobs: g("Number of currently running Scylla repair jobs.", + "inflight_jobs", "cluster", "host"), + inFlightTokenRanges: g("Number of token ranges that are being repaired.", + "inflight_token_ranges", "cluster", "host"), + } +} + +func (m RepairMetrics) all() []prometheus.Collector { + return []prometheus.Collector{ + m.progress, + m.tokenRangesTotal, + m.tokenRangesSuccess, + m.tokenRangesError, + m.inFlightJobs, + m.inFlightTokenRanges, + } +} + +// MustRegister shall be called to make the metrics visible by prometheus client. +func (m RepairMetrics) MustRegister() RepairMetrics { + prometheus.MustRegister(m.all()...) + return m +} + +// ResetClusterMetrics resets all metrics labeled with the cluster. +func (m RepairMetrics) ResetClusterMetrics(clusterID uuid.UUID) { + for _, c := range m.all() { + setGaugeVecMatching(c.(*prometheus.GaugeVec), unspecifiedValue, clusterMatcher(clusterID)) + } +} + +// SetTokenRanges updates "token_ranges_{total,success,error}" metrics. 
+func (m RepairMetrics) SetTokenRanges(clusterID uuid.UUID, keyspace, table, host string, total, success, errcnt int64) { + l := prometheus.Labels{ + "cluster": clusterID.String(), + "keyspace": keyspace, + "table": table, + "host": host, + } + m.tokenRangesTotal.With(l).Set(float64(total)) + m.tokenRangesSuccess.With(l).Set(float64(success)) + m.tokenRangesError.With(l).Set(float64(errcnt)) +} + +// AddJob updates "inflight_{jobs,token_ranges}" metrics. +func (m RepairMetrics) AddJob(clusterID uuid.UUID, host string, tokenRanges int) { + l := prometheus.Labels{ + "cluster": clusterID.String(), + "host": host, + } + m.inFlightJobs.With(l).Add(1) + m.inFlightTokenRanges.With(l).Add(float64(tokenRanges)) +} + +// SubJob updates "inflight_{jobs,token_ranges}" metrics. +func (m RepairMetrics) SubJob(clusterID uuid.UUID, host string, tokenRanges int) { + l := prometheus.Labels{ + "cluster": clusterID.String(), + "host": host, + } + m.inFlightJobs.With(l).Sub(1) + m.inFlightTokenRanges.With(l).Sub(float64(tokenRanges)) +} + +// SetProgress sets "progress" metric. +func (m RepairMetrics) SetProgress(clusterID uuid.UUID, progress float64) { + l := prometheus.Labels{ + "cluster": clusterID.String(), + } + m.progress.With(l).Set(progress) +} + +// AddProgress updates "progress" metric. +func (m RepairMetrics) AddProgress(clusterID uuid.UUID, delta float64) { + l := prometheus.Labels{ + "cluster": clusterID.String(), + } + m.progress.With(l).Add(delta) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/restore.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/restore.go new file mode 100644 index 00000000000..509adf439ff --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/restore.go @@ -0,0 +1,183 @@ +// Copyright (C) 2023 ScyllaDB + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +type RestoreMetrics struct { + batchSize *prometheus.GaugeVec + remainingBytes *prometheus.GaugeVec + state *prometheus.GaugeVec + progress *prometheus.GaugeVec + viewBuildStatus *prometheus.GaugeVec +} + +func NewRestoreMetrics() RestoreMetrics { + g := gaugeVecCreator("restore") + + return RestoreMetrics{ + batchSize: g("Cumulative size of the batches of files taken by the host to restore the data.", "batch_size", "cluster", "host"), + remainingBytes: g("Remaining bytes of backup to be restored yet.", "remaining_bytes", + "cluster", "snapshot_tag", "location", "dc", "node", "keyspace", "table"), + state: g("Defines current state of the restore process (idle/download/load/error).", "state", "cluster", "location", "snapshot_tag", "host"), + progress: g("Defines current progress of the restore process.", "progress", "cluster", "snapshot_tag"), + viewBuildStatus: g("Defines build status of recreated view.", "view_build_status", "cluster", "keyspace", "view"), + } +} + +// MustRegister shall be called to make the metrics visible by prometheus client. +func (m RestoreMetrics) MustRegister() RestoreMetrics { + prometheus.MustRegister(m.all()...) + return m +} + +func (m RestoreMetrics) all() []prometheus.Collector { + return []prometheus.Collector{ + m.batchSize, + m.remainingBytes, + m.state, + m.progress, + m.viewBuildStatus, + } +} + +// ResetClusterMetrics resets all restore metrics labeled with the cluster. 
+func (m RestoreMetrics) ResetClusterMetrics(clusterID uuid.UUID) {
+	for _, c := range m.all() {
+		setGaugeVecMatching(c.(*prometheus.GaugeVec), unspecifiedValue, clusterMatcher(clusterID))
+	}
+}
+
+// IncreaseBatchSize updates restore "batch_size" metric.
+func (m RestoreMetrics) IncreaseBatchSize(clusterID uuid.UUID, host string, size int64) {
+	l := prometheus.Labels{
+		"cluster": clusterID.String(),
+		"host":    host,
+	}
+
+	m.batchSize.With(l).Add(float64(size))
+}
+
+// DecreaseBatchSize updates restore "batch_size" metric.
+func (m RestoreMetrics) DecreaseBatchSize(clusterID uuid.UUID, host string, size int64) {
+	l := prometheus.Labels{
+		"cluster": clusterID.String(),
+		"host":    host,
+	}
+
+	m.batchSize.With(l).Sub(float64(size))
+}
+
+// SetRemainingBytes sets restore "remaining_bytes" metric.
+func (m RestoreMetrics) SetRemainingBytes(labels RestoreBytesLabels, remainingBytes int64) {
+	l := prometheus.Labels{
+		"cluster":      labels.ClusterID,
+		"snapshot_tag": labels.SnapshotTag,
+		"location":     labels.Location,
+		"dc":           labels.DC,
+		"node":         labels.Node,
+		"keyspace":     labels.Keyspace,
+		"table":        labels.Table,
+	}
+	m.remainingBytes.With(l).Set(float64(remainingBytes))
+}
+
+// RestoreBytesLabels is a set of labels for restore metrics.
+type RestoreBytesLabels struct {
+	ClusterID   string
+	SnapshotTag string
+	Location    string
+	DC          string
+	Node        string
+	Keyspace    string
+	Table       string
+}
+
+// DecreaseRemainingBytes decreases restore "remaining_bytes" metric.
+func (m RestoreMetrics) DecreaseRemainingBytes(labels RestoreBytesLabels, restoredBytes int64) {
+	l := prometheus.Labels{
+		"cluster":      labels.ClusterID,
+		"snapshot_tag": labels.SnapshotTag,
+		"location":     labels.Location,
+		"dc":           labels.DC,
+		"node":         labels.Node,
+		"keyspace":     labels.Keyspace,
+		"table":        labels.Table,
+	}
+	m.remainingBytes.With(l).Sub(float64(restoredBytes))
+}
+
+// RestoreProgressLabels is a set of labels for restore "progress" metric.
+// RestoreProgressLabels does not contain DC and Node labels since we only care about global restore progress.
+type RestoreProgressLabels struct {
+	ClusterID   string
+	SnapshotTag string
+}
+
+// SetProgress sets the restore "progress" metric.
+// Progress should be a value between 0 and 100 that indicates global restore progress.
+func (m RestoreMetrics) SetProgress(labels RestoreProgressLabels, progress float64) {
+	l := prometheus.Labels{
+		"cluster":      labels.ClusterID,
+		"snapshot_tag": labels.SnapshotTag,
+	}
+	m.progress.With(l).Set(progress)
+}
+
+// RestoreState is the enum that defines how a node is used during the restore.
+type RestoreState int
+
+const (
+	// RestoreStateIdle defines the idle state.
+	RestoreStateIdle RestoreState = iota
+	// RestoreStateDownloading means that the node is downloading data from the backup location.
+	RestoreStateDownloading
+	// RestoreStateLoading means that the node is calling load&stream.
+	RestoreStateLoading
+	// RestoreStateError means that the node ended up with an error.
+	RestoreStateError
+)
+
+// SetRestoreState sets restore "state" metric.
+func (m RestoreMetrics) SetRestoreState(clusterID uuid.UUID, location backupspec.Location, snapshotTag, host string, state RestoreState) {
+	l := prometheus.Labels{
+		"cluster":      clusterID.String(),
+		"location":     location.String(),
+		"snapshot_tag": snapshotTag,
+		"host":         host,
+	}
+	m.state.With(l).Set(float64(state))
+}
+
+// ViewBuildStatus defines build status of a view.
+type ViewBuildStatus int
+
+// ViewBuildStatus enumeration.
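+// The gauge stores the enum's integer value; with the iota order below this is
+// 0=unknown, 1=started, 2=success, 3=error.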
+const (
+	BuildStatusUnknown ViewBuildStatus = iota
+	BuildStatusStarted
+	BuildStatusSuccess
+	BuildStatusError
+)
+
+// RestoreViewBuildStatusLabels is a set of labels for restore "view_build_status" metric.
+type RestoreViewBuildStatusLabels struct {
+	ClusterID string
+	Keyspace  string
+	View      string
+}
+
+// SetViewBuildStatus sets restore "view_build_status" metric.
+func (m RestoreMetrics) SetViewBuildStatus(labels RestoreViewBuildStatusLabels, status ViewBuildStatus) {
+	l := prometheus.Labels{
+		"cluster":  labels.ClusterID,
+		"keyspace": labels.Keyspace,
+		"view":     labels.View,
+	}
+	m.viewBuildStatus.With(l).Set(float64(status))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/scheduler.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/scheduler.go
new file mode 100644
index 00000000000..2e220ae4455
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/metrics/scheduler.go
@@ -0,0 +1,84 @@
+// Copyright (C) 2017 ScyllaDB
+
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/uuid"
+)
+
+type SchedulerMetrics struct {
+	suspended    *prometheus.GaugeVec
+	runIndicator *prometheus.GaugeVec
+	runsTotal    *prometheus.GaugeVec
+	lastSuccess  *prometheus.GaugeVec
+}
+
+func NewSchedulerMetrics() SchedulerMetrics {
+	g := gaugeVecCreator("scheduler")
+
+	return SchedulerMetrics{
+		suspended: g("If the cluster is suspended the value is 1 otherwise it's 0.",
+			"suspended", "cluster"),
+		runIndicator: g("If the task is running the value is 1 otherwise it's 0.",
+			"run_indicator", "cluster", "type", "task"),
+		runsTotal: g("Total number of task runs parametrized by status.",
+			"run_total", "cluster", "type", "task", "status"),
+		lastSuccess: g("Start time of the last successful run as a Unix timestamp.",
+			"last_success", "cluster", "type", "task"),
+	}
+}
+
+func (m SchedulerMetrics) all() []prometheus.Collector {
+	return []prometheus.Collector{
+		m.suspended,
+		m.runIndicator,
+		m.runsTotal,
+		m.lastSuccess,
+	}
+}
+
+// MustRegister shall be called to make the metrics visible to the Prometheus client.
+func (m SchedulerMetrics) MustRegister() SchedulerMetrics {
+	prometheus.MustRegister(m.all()...)
+	return m
+}
+
+// ResetClusterMetrics resets all metrics labeled with the cluster.
+func (m SchedulerMetrics) ResetClusterMetrics(clusterID uuid.UUID) {
+	for _, c := range m.all() {
+		setGaugeVecMatching(c.(*prometheus.GaugeVec), unspecifiedValue, clusterMatcher(clusterID))
+	}
+}
+
+// Init sets 0 values for all metrics.
+func (m SchedulerMetrics) Init(clusterID uuid.UUID, taskType string, taskID uuid.UUID, statuses ...string) {
+	m.runIndicator.WithLabelValues(clusterID.String(), taskType, taskID.String()).Add(0)
+	for _, s := range statuses {
+		m.runsTotal.WithLabelValues(clusterID.String(), taskType, taskID.String(), s).Add(0)
+	}
+}
+
+// BeginRun updates "run_indicator".
+func (m SchedulerMetrics) BeginRun(clusterID uuid.UUID, taskType string, taskID uuid.UUID) {
+	m.runIndicator.WithLabelValues(clusterID.String(), taskType, taskID.String()).Inc()
+}
+
+// EndRun updates "run_indicator", "run_total", and "last_success".
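+//
+// A hedged usage sketch pairing BeginRun with EndRun (task type, status, and
+// time source are illustrative):
+//
+//	m.BeginRun(clusterID, "repair", taskID)
+//	// ... task executes ...
+//	m.EndRun(clusterID, "repair", taskID, "DONE", startedAt.Unix())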
+func (m SchedulerMetrics) EndRun(clusterID uuid.UUID, taskType string, taskID uuid.UUID, status string, startTime int64) {
+	m.runIndicator.WithLabelValues(clusterID.String(), taskType, taskID.String()).Dec()
+	m.runsTotal.WithLabelValues(clusterID.String(), taskType, taskID.String(), status).Inc()
+	if status == "DONE" {
+		m.lastSuccess.WithLabelValues(clusterID.String(), taskType, taskID.String()).Set(float64(startTime))
+	}
+}
+
+// Suspend sets "suspended" to 1.
+func (m SchedulerMetrics) Suspend(clusterID uuid.UUID) {
+	m.suspended.WithLabelValues(clusterID.String()).Set(1)
+}
+
+// Resume sets "suspended" to 0.
+func (m SchedulerMetrics) Resume(clusterID uuid.UUID) {
+	m.suspended.WithLabelValues(clusterID.String()).Set(0)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/accounting.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/accounting.go
new file mode 100644
index 00000000000..fede639720e
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/accounting.go
@@ -0,0 +1,20 @@
+// Copyright (C) 2017 ScyllaDB
+
+package rclone
+
+import (
+	"context"
+
+	"github.com/rclone/rclone/fs/accounting"
+)
+
+// StartAccountingOperations starts token bucket and transaction limiter
+// tracking.
+func StartAccountingOperations() {
+	// Start the token bucket limiter
+	accounting.TokenBucket.StartTokenBucket(context.Background())
+	// Start the bandwidth update ticker
+	accounting.TokenBucket.StartTokenTicker(context.Background())
+	// Start the transactions per second limiter
+	accounting.StartLimitTPS(context.Background())
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/aws.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/aws.go
new file mode 100644
index 00000000000..ec8bf47ca26
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/aws.go
@@ -0,0 +1,87 @@
+// Copyright (C) 2017 ScyllaDB
+
+package rclone
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/fs"
+)
+
+// awsRegionFromMetadataAPI uses instance metadata API v2 to fetch the region of
+// the running instance, see https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html.
+// Returns an empty string if the region can't be obtained for whatever reason.
+// Falls back to IMDSv1 if the session token cannot be obtained.
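+//
+// The two-step IMDSv2 exchange implemented below, as a curl sketch:
+//
+//	TOKEN=$(curl -X PUT -H "X-aws-ec2-metadata-token-ttl-seconds: 21600" http://169.254.169.254/latest/api/token)
+//	curl -H "X-aws-ec2-metadata-token: $TOKEN" http://169.254.169.254/latest/dynamic/instance-identity/document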
+func awsRegionFromMetadataAPI() string {
+	const docURL = "http://169.254.169.254/latest/dynamic/instance-identity/document"
+
+	// Step 1: Request an IMDSv2 session token
+	token, err := awsAPIToken()
+	// Fall back to IMDSv1 when token retrieval fails
+	if err != nil {
+		fs.Errorf(nil, "%+v", err)
+		token = ""
+	}
+
+	// Step 2: Use the session token to retrieve instance metadata
+	reqMetadata, err := http.NewRequestWithContext(context.Background(), http.MethodGet, docURL, http.NoBody)
+	if err != nil {
+		fs.Errorf(nil, "create metadata request: %+v", err)
+		return ""
+	}
+	if token != "" {
+		reqMetadata.Header.Set("X-aws-ec2-metadata-token", token)
+	}
+
+	metadataClient := http.Client{
+		Timeout: 2 * time.Second,
+	}
+	resMetadata, err := metadataClient.Do(reqMetadata)
+	if err != nil {
+		fs.Errorf(nil, "IMDSv2 failed to fetch instance identity: %+v", err)
+		return ""
+	}
+	defer resMetadata.Body.Close()
+
+	metadata := struct {
+		Region string `json:"region"`
+	}{}
+	if err := json.NewDecoder(resMetadata.Body).Decode(&metadata); err != nil {
+		fs.Errorf(nil, "parse instance region: %+v", err)
+		return ""
+	}
+
+	return metadata.Region
+}
+
+func awsAPIToken() (string, error) {
+	const tokenURL = "http://169.254.169.254/latest/api/token"
+
+	reqToken, err := http.NewRequestWithContext(context.Background(), http.MethodPut, tokenURL, http.NoBody)
+	if err != nil {
+		return "", errors.Wrap(err, "create token request")
+	}
+	reqToken.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "21600")
+	tokenClient := http.Client{
+		Timeout: 2 * time.Second,
+	}
+	resToken, err := tokenClient.Do(reqToken)
+	if err != nil {
+		return "", errors.Wrap(err, "IMDSv2 failed to fetch session token")
+	}
+	defer resToken.Body.Close()
+
+	if resToken.StatusCode != http.StatusOK {
+		// err is nil at this point, so wrapping it would return a nil error;
+		// report the unexpected HTTP status instead.
+		return "", errors.Errorf("failed to retrieve session token: %s", resToken.Status)
+	}
+	token, err := io.ReadAll(resToken.Body)
+	if err != nil {
+		return "", errors.Wrap(err, "failed to read session token")
+	}
+	return string(token), nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/backend/localdir/localdir.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/backend/localdir/localdir.go
new file mode 100644
index 00000000000..ce56dffb757
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/backend/localdir/localdir.go
@@ -0,0 +1,130 @@
+// Copyright (C) 2017 ScyllaDB
+
+// Package localdir is an rclone backend based on the local backend provided by
+// rclone. The difference from local is that data is always rooted at a
+// directory that can be specified dynamically on creation.
+package localdir
+
+import (
+	"context"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/local"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config/configmap"
+)
+
+const (
+	linkSuffix = ".rclonelink"
+)
+
+// Init registers a new data provider with rclone.
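+//
+// A hedged usage sketch (provider name, description, and root directory are
+// illustrative, not taken from this change):
+//
+//	localdir.Init("data", "Scylla data directory", "/var/lib/scylla/data")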
+func Init(name, description, rootDir string) { + fsi := &fs.RegInfo{ + Name: name, + Description: description, + NewFs: NewFs(rootDir), + Options: []fs.Option{{ + Name: "nounc", + Help: "Disable UNC (long path names) conversion on Windows", + Examples: []fs.OptionExample{{ + Value: "true", + Help: "Disables long file names", + }}, + }, { + Name: "copy_links", + Help: "Follow symlinks and copy the pointed to item.", + Default: false, + NoPrefix: true, + ShortOpt: "L", + Advanced: true, + }, { + Name: "links", + Help: "Translate symlinks to/from regular files with a '" + linkSuffix + "' extension", + Default: false, + NoPrefix: true, + ShortOpt: "l", + Advanced: true, + }, { + Name: "skip_links", + Help: `Don't warn about skipped symlinks. +This flag disables warning messages on skipped symlinks or junction +points, as you explicitly acknowledge that they should be skipped.`, + Default: false, + NoPrefix: true, + Advanced: true, + }, { + Name: "no_unicode_normalization", + Help: `Don't apply unicode normalization to paths and filenames (Deprecated) + +This flag is deprecated now. Rclone no longer normalizes unicode file +names, but it compares them with unicode normalization in the sync +routine instead.`, + Default: false, + Advanced: true, + }, { + Name: "no_check_updated", + Help: `Don't check to see if the files change during upload + +Normally rclone checks the size and modification time of files as they +are being uploaded and aborts with a message which starts "can't copy +- source file is being updated" if the file changes during upload. + +However on some file systems this modification time check may fail (eg +[Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this +check can be disabled with this flag.`, + Default: false, + Advanced: true, + }, { + Name: "one_file_system", + Help: "Don't cross filesystem boundaries (unix/macOS only).", + Default: false, + NoPrefix: true, + ShortOpt: "x", + Advanced: true, + }, { + Name: "case_sensitive", + Help: `Force the filesystem to report itself as case sensitive. + +Normally the local backend declares itself as case insensitive on +Windows/macOS and case sensitive for everything else. Use this flag +to override the default choice.`, + Default: false, + Advanced: true, + }, { + Name: "case_insensitive", + Help: `Force the filesystem to report itself as case insensitive + +Normally the local backend declares itself as case insensitive on +Windows/macOS and case sensitive for everything else. Use this flag +to override the default choice.`, + Default: false, + Advanced: true, + }}, + } + + fs.Register(fsi) +} + +func NewFs(rootDir string) func(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + return func(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) { + // filepath.Clean will turn everything that goes up and beyond root into + // a single /. + // We are prepending slash to turn input into an absolute path. + p := filepath.Clean("/" + root) + if len(root) > 1 && p == "/" { + // If root has more than one byte and after cleanPath we end up with + // empty path then we received invalid input. 
+			return nil, errors.Wrap(fs.ErrorObjectNotFound, "accessing path outside of root")
+		}
+		var path string
+		if strings.HasPrefix(p, rootDir) {
+			path = p
+		} else {
+			path = filepath.Join(rootDir, p)
+		}
+		return local.NewFs(ctx, name, path, m)
+	}
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/config.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/config.go
new file mode 100644
index 00000000000..88a60f371b8
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/config.go
@@ -0,0 +1,85 @@
+// Copyright (C) 2017 ScyllaDB
+
+package rclone
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/rclone/rclone/fs"
+	"github.com/scylladb/scylla-manager/v3/pkg"
+)
+
+// GetConfig returns the rclone global config.
+func GetConfig() *fs.ConfigInfo {
+	return fs.GetConfig(nil) // nolint: staticcheck
+}
+
+// InitFsConfig enables in-memory config and sets default config values.
+func InitFsConfig() {
+	InitFsConfigWithOptions(DefaultGlobalOptions())
+}
+
+// InitFsConfigWithOptions enables in-memory config and sets custom config
+// values.
+func InitFsConfigWithOptions(o GlobalOptions) {
+	initInMemoryConfig()
+	*GetConfig() = o
+}
+
+func initInMemoryConfig() {
+	c := new(inMemoryConf)
+	fs.ConfigFileGet = c.Get
+	fs.ConfigFileSet = c.Set
+	fs.Infof(nil, "registered in-memory fs config")
+}
+
+// inMemoryConf is an in-memory implementation of rclone configuration for
+// remote file systems.
+type inMemoryConf struct {
+	mu       sync.Mutex
+	sections map[string]map[string]string
+}
+
+// Get config key under section returning the value and true if found or
+// ("", false) otherwise.
+func (c *inMemoryConf) Get(section, key string) (string, bool) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.sections == nil {
+		return "", false
+	}
+	s, ok := c.sections[section]
+	if !ok {
+		return "", false
+	}
+	v, ok := s[key]
+	return v, ok
+}
+
+// Set the key in section to value.
+// It doesn't save the config file.
+func (c *inMemoryConf) Set(section, key, value string) (err error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.sections == nil {
+		c.sections = make(map[string]map[string]string)
+	}
+	s, ok := c.sections[section]
+	if !ok {
+		s = make(map[string]string)
+	}
+	if value == "" {
+		// Delete by key, not by value, so that Set(section, key, "") removes
+		// the entry.
+		delete(s, key)
+	} else {
+		s[key] = value
+	}
+	c.sections[section] = s
+	return
+}
+
+// UserAgent returns a string value that can be used as an identifier in client
+// calls to the service providers.
+func UserAgent() string {
+	return fmt.Sprintf("Scylla Manager Agent %s", pkg.Version())
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/logger.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/logger.go
new file mode 100644
index 00000000000..525faa88908
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/logger.go
@@ -0,0 +1,26 @@
+// Copyright (C) 2017 ScyllaDB
+
+package rclone
+
+import (
+	"context"
+
+	"github.com/rclone/rclone/fs"
+	"github.com/scylladb/go-log"
+)
+
+// RedirectLogPrint redirects fs.LogPrint to the logger.
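+// rclone's syslog-style levels collapse onto the four go-log levels in the
+// switch below: Emergency/Alert/Critical map to Fatal, Error/Warning to Error,
+// Notice/Info to Info, and Debug to Debug.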
+func RedirectLogPrint(logger log.Logger) {
+	fs.LogPrint = func(level fs.LogLevel, text string) {
+		switch level {
+		case fs.LogLevelEmergency, fs.LogLevelAlert, fs.LogLevelCritical:
+			logger.Fatal(context.TODO(), text)
+		case fs.LogLevelError, fs.LogLevelWarning:
+			logger.Error(context.TODO(), text)
+		case fs.LogLevelNotice, fs.LogLevelInfo:
+			logger.Info(context.TODO(), text)
+		case fs.LogLevelDebug:
+			logger.Debug(context.TODO(), text)
+		}
+	}
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/operations/errors.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/operations/errors.go
new file mode 100644
index 00000000000..a4d0de65ddd
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/operations/errors.go
@@ -0,0 +1,47 @@
+// Copyright (C) 2017 ScyllaDB
+
+package operations
+
+import (
+	"encoding/xml"
+	"errors"
+	"strings"
+)
+
+// BackendXMLError is a general error parsed from an Error XML message as specified in
+// https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList
+// https://cloud.google.com/storage/docs/xml-api/reference-status
+// https://learn.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2
+type BackendXMLError struct {
+	XMLName xml.Name `xml:"Error"`
+	Code    string   `xml:"Code"`
+	Message string   `xml:"Message"`
+}
+
+// ParseBackendXMLError reads the error as a string and tries to parse the XML structure.
+// The reason is that the error returned from rclone is flattened, e.g. `*errors.fundamental s3 upload: 404 Not Found: `.
+func ParseBackendXMLError(err error) (*BackendXMLError, error) {
+	s := err.Error()
+
+	idx := strings.Index(s, "<Error>")
+	// az ad sp create-for-rbac --name "<name>" \
+	//   --role "Storage Blob Data Owner" \
+	//   --scopes "/subscriptions/<subscription>/resourceGroups/<resource-group>/providers/Microsoft.Storage/storageAccounts/<storage-account>/blobServices/default/containers/<container>" \
+	//   > azure-principal.json
+	//
+	// See [Use Azure CLI to assign an Azure role for access to blob and queue data](https://docs.microsoft.com/en-us/azure/storage/common/storage-auth-aad-rbac-cli)
+	// for more details.
+	ServicePrincipalFile string `yaml:"service_principal_file"`
+	// Storage Account Key (leave blank to use SAS URL or Emulator)
+	Key string `yaml:"key"`
+	// SAS URL for container level access only
+	// (leave blank if using account/key or Emulator)
+	SasUrl string `yaml:"sas_url"`
+	// Use a managed service identity to authenticate (only works in Azure)
+	//
+	// When true, use a [managed service identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/)
+	// to authenticate to Azure Storage instead of a SAS token or account key.
+	//
+	// If the VM(SS) on which this program is running has a system-assigned identity, it will
+	// be used by default. If the resource has no system-assigned but exactly one user-assigned identity,
+	// the user-assigned identity will be used by default. If the resource has multiple user-assigned
+	// identities, the identity to use must be explicitly specified using exactly one of the msi_object_id,
+	// msi_client_id, or msi_mi_res_id parameters.
+	UseMsi string `yaml:"use_msi"`
+	// Object ID of the user-assigned MSI to use, if any. Leave blank if msi_client_id or msi_mi_res_id specified.
+	MsiObjectID string `yaml:"msi_object_id"`
+	// Client ID of the user-assigned MSI to use, if any. Leave blank if msi_object_id or msi_mi_res_id specified.
+	MsiClientID string `yaml:"msi_client_id"`
+	// Azure resource ID of the user-assigned MSI to use, if any.
Leave blank if msi_client_id or msi_object_id specified. + MsiMiResID string `yaml:"msi_mi_res_id"` + // Uses local storage emulator if provided as 'true' (leave blank if using real azure storage endpoint) + UseEmulator string `yaml:"use_emulator"` + // Endpoint for the service + // Leave blank normally. + Endpoint string `yaml:"endpoint"` + // Cutoff for switching to chunked upload (<= 256MB). (Deprecated) + UploadCutoff string `yaml:"upload_cutoff"` + // Upload chunk size (<= 100MB). + // + // Note that this is stored in memory and there may be up to + // "--transfers" chunks stored at once in memory. + ChunkSize string `yaml:"chunk_size"` + // Size of blob list. + // + // This sets the number of blobs requested in each listing chunk. Default + // is the maximum, 5000. "List blobs" requests are permitted 2 minutes + // per megabyte to complete. If an operation is taking longer than 2 + // minutes per megabyte on average, it will time out ( + // [source](https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-blob-service-operations#exceptions-to-default-timeout-interval) + // ). This can be used to limit the number of blobs items to return, to + // avoid the time out. + ListChunk string `yaml:"list_chunk"` + // Access tier of blob: hot, cool or archive. + // + // Archived blobs can be restored by setting access tier to hot or + // cool. Leave blank if you intend to use default access tier, which is + // set at account level + // + // If there is no "access tier" specified, rclone doesn't apply any tier. + // rclone performs "Set Tier" operation on blobs while uploading, if objects + // are not modified, specifying "access tier" to new one will have no effect. + // If blobs are in "archive tier" at remote, trying to perform data transfer + // operations from remote will not be allowed. User should first restore by + // tiering blob to "Hot" or "Cool". + AccessTier string `yaml:"access_tier"` + // Delete archive tier blobs before overwriting. + // + // Archive tier blobs cannot be updated. So without this flag, if you + // attempt to update an archive tier blob, then rclone will produce the + // error: + // + // can't update archive tier blob without --azureblob-archive-tier-delete + // + // With this flag set then before rclone attempts to overwrite an archive + // tier blob, it will delete the existing blob before uploading its + // replacement. This has the potential for data loss if the upload fails + // (unlike updating a normal blob) and also may cost more since deleting + // archive tier blobs early may be chargable. + ArchiveTierDelete string `yaml:"archive_tier_delete"` + // Don't store MD5 checksum with object metadata. + // + // Normally rclone will calculate the MD5 checksum of the input before + // uploading it so it can add it to metadata on the object. This is great + // for data integrity checking but can cause long delays for large files + // to start uploading. + DisableChecksum string `yaml:"disable_checksum"` + // How often internal memory buffer pools will be flushed. + // Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. + // This option controls how often unused buffers will be removed from the pool. + MemoryPoolFlushTime string `yaml:"memory_pool_flush_time"` + // Whether to use mmap buffers in internal memory pool. + MemoryPoolUseMmap string `yaml:"memory_pool_use_mmap"` + // This sets the encoding for the backend. 
+ // + // See: the [encoding section in the overview](/overview/#encoding) for more info. + Encoding string `yaml:"encoding"` +} + +// GCSOptions is a clone rclone file system Options designed for inclusion +// in Scylla Manager Agent config, and YAML parsing. +type GCSOptions struct { + // OAuth Client Id + // Leave blank normally. + ClientID string `yaml:"client_id"` + // OAuth Client Secret + // Leave blank normally. + ClientSecret string `yaml:"client_secret"` + // OAuth Access Token as a JSON blob. + Token string `yaml:"token"` + // Auth server URL. + // Leave blank to use the provider defaults. + AuthUrl string `yaml:"auth_url"` + // Token server url. + // Leave blank to use the provider defaults. + TokenUrl string `yaml:"token_url"` + // Project number. + // Optional - needed only for list/create/delete buckets - see your developer console. + ProjectNumber string `yaml:"project_number"` + // Service Account Credentials JSON file path + // Leave blank normally. + // Needed only if you want use SA instead of interactive login. + // + // Leading `~` will be expanded in the file name as will environment variables such as `${RCLONE_CONFIG_DIR}`. + ServiceAccountFile string `yaml:"service_account_file"` + // Service Account Credentials JSON blob + // Leave blank normally. + // Needed only if you want use SA instead of interactive login. + ServiceAccountCredentials string `yaml:"service_account_credentials"` + // Access public buckets and objects without credentials + // Set to 'true' if you just want to download files and don't configure credentials. + Anonymous string `yaml:"anonymous"` + // Access Control List for new objects. + ObjectAcl string `yaml:"object_acl"` + // Access Control List for new buckets. + BucketAcl string `yaml:"bucket_acl"` + // Access checks should use bucket-level IAM policies. + // + // If you want to upload objects to a bucket with Bucket Policy Only set + // then you will need to set this. + // + // When it is set, rclone: + // + // - ignores ACLs set on buckets + // - ignores ACLs set on objects + // - creates buckets with Bucket Policy Only set + // + // Docs: https://cloud.google.com/storage/docs/bucket-policy-only + BucketPolicyOnly string `yaml:"bucket_policy_only"` + // Location for the newly created buckets. + Location string `yaml:"location"` + // The storage class to use when storing objects in Google Cloud Storage. + StorageClass string `yaml:"storage_class"` + // This sets the encoding for the backend. + // + // See: the [encoding section in the overview](/overview/#encoding) for more info. + Encoding string `yaml:"encoding"` + // How often internal memory buffer pools will be flushed. + // Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. + // This option controls how often unused buffers will be removed from the pool. + MemoryPoolFlushTime string `yaml:"memory_pool_flush_time"` + // Whether to use mmap buffers in internal memory pool. + MemoryPoolUseMmap string `yaml:"memory_pool_use_mmap"` + // Chunk size to use for uploading. + // + // When uploading large files or files with unknown + // size (eg from "rclone rcat" or uploaded with "rclone mount" or google + // photos or google docs) they will be uploaded as multi chunk uploads + // using this chunk size. + // + // Files which contains fewer than size bytes will be uploaded in a single request. + // Files which contains size bytes or more will be uploaded in separate chunks. + // If size is zero, media will be uploaded in a single request. 
+ ChunkSize string `yaml:"chunk_size"` + // How many items are returned in one chunk during directory listing + ListChunk string `yaml:"list_chunk"` + // Whether to create bucket if it doesn't exists. + // If bucket doesn't exists, error will be returned.' + AllowCreateBucket string `yaml:"allow_create_bucket"` +} + +// LocalOptions is a clone rclone file system Options designed for inclusion +// in Scylla Manager Agent config, and YAML parsing. +type LocalOptions struct { + // Disable UNC (long path names) conversion on Windows + Nounc string `yaml:"nounc"` + // Follow symlinks and copy the pointed to item. + CopyLinks string `yaml:"copy_links"` + // Translate symlinks to/from regular files with a '.rclonelink' extension + Links string `yaml:"links"` + // Don't warn about skipped symlinks. + // This flag disables warning messages on skipped symlinks or junction + // points, as you explicitly acknowledge that they should be skipped. + SkipLinks string `yaml:"skip_links"` + // Assume the Stat size of links is zero (and read them instead) + // + // On some virtual filesystems (such ash LucidLink), reading a link size via a Stat call always returns 0. + // However, on unix it reads as the length of the text in the link. This may cause errors like this when + // syncing: + // + // Failed to copy: corrupted on transfer: sizes differ 0 vs 13 + // + // Setting this flag causes rclone to read the link and use that as the size of the link + // instead of 0 which in most cases fixes the problem. + ZeroSizeLinks string `yaml:"zero_size_links"` + // Don't apply unicode normalization to paths and filenames (Deprecated) + // + // This flag is deprecated now. Rclone no longer normalizes unicode file + // names, but it compares them with unicode normalization in the sync + // routine instead. + NoUnicodeNormalization string `yaml:"no_unicode_normalization"` + // Don't check to see if the files change during upload + // + // Normally rclone checks the size and modification time of files as they + // are being uploaded and aborts with a message which starts "can't copy + // - source file is being updated" if the file changes during upload. + // + // However on some file systems this modification time check may fail (e.g. + // [Glusterfs #2206](https://github.com/rclone/rclone/issues/2206)) so this + // check can be disabled with this flag. + // + // If this flag is set, rclone will use its best efforts to transfer a + // file which is being updated. If the file is only having things + // appended to it (e.g. a log) then rclone will transfer the log file with + // the size it had the first time rclone saw it. + // + // If the file is being modified throughout (not just appended to) then + // the transfer may fail with a hash check failure. + // + // In detail, once the file has had stat() called on it for the first + // time we: + // + // - Only transfer the size that stat gave + // - Only checksum the size that stat gave + // - Don't update the stat info for the file + NoCheckUpdated string `yaml:"no_check_updated"` + // Don't cross filesystem boundaries (unix/macOS only). + OneFileSystem string `yaml:"one_file_system"` + // Force the filesystem to report itself as case sensitive. + // + // Normally the local backend declares itself as case insensitive on + // Windows/macOS and case sensitive for everything else. Use this flag + // to override the default choice. 
+ CaseSensitive string `yaml:"case_sensitive"` + // Force the filesystem to report itself as case insensitive + // + // Normally the local backend declares itself as case insensitive on + // Windows/macOS and case sensitive for everything else. Use this flag + // to override the default choice. + CaseInsensitive string `yaml:"case_insensitive"` + // Disable sparse files for multi-thread downloads + // + // On Windows platforms rclone will make sparse files when doing + // multi-thread downloads. This avoids long pauses on large files where + // the OS zeros the file. However sparse files may be undesirable as they + // cause disk fragmentation and can be slow to work with. + NoSparse string `yaml:"no_sparse"` + // Disable setting modtime + // + // Normally rclone updates modification time of files after they are done + // uploading. This can cause permissions issues on Linux platforms when + // the user rclone is running as does not own the file uploaded, such as + // when copying to a CIFS mount owned by another user. If this option is + // enabled, rclone will no longer update the modtime after copying a file. + NoSetModtime string `yaml:"no_set_modtime"` + // This sets the encoding for the backend. + // + // See: the [encoding section in the overview](/overview/#encoding) for more info. + Encoding string `yaml:"encoding"` +} + +// S3Options is a clone rclone file system Options designed for inclusion +// in Scylla Manager Agent config, and YAML parsing. +type S3Options struct { + // Choose your S3 provider. + Provider string `yaml:"provider"` + // Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars). + // Only applies if access_key_id and secret_access_key is blank. + EnvAuth string `yaml:"env_auth"` + // AWS Access Key ID. + // Leave blank for anonymous access or runtime credentials. + AccessKeyID string `yaml:"access_key_id"` + // AWS Secret Access Key (password) + // Leave blank for anonymous access or runtime credentials. + SecretAccessKey string `yaml:"secret_access_key"` + // Region to connect to. + Region string `yaml:"region"` + // Endpoint for S3 API. + // Leave blank if using AWS to use the default endpoint for the region. + Endpoint string `yaml:"endpoint"` + // Location constraint - must be set to match the Region. + // Used when creating buckets only. + LocationConstraint string `yaml:"location_constraint"` + // Canned ACL used when creating buckets and storing or copying objects. + // + // This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too. + // + // For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + // + // Note that this ACL is applied when server-side copying objects as S3 + // doesn't copy the ACL from the source but rather writes a fresh one. + Acl string `yaml:"acl"` + // Canned ACL used when creating buckets. + // + // For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl + // + // Note that this ACL is applied when only when creating buckets. If it + // isn't set then "acl" is used instead. + BucketAcl string `yaml:"bucket_acl"` + // Enables requester pays option when interacting with S3 bucket. + RequesterPays string `yaml:"requester_pays"` + // The server-side encryption algorithm used when storing this object in S3. + ServerSideEncryption string `yaml:"server_side_encryption"` + // If using SSE-C, the server-side encryption algorithm used when storing this object in S3. 
+ SseCustomerAlgorithm string `yaml:"sse_customer_algorithm"` + // If using KMS ID you must provide the ARN of Key. + SseKmsKeyID string `yaml:"sse_kms_key_id"` + // If using SSE-C you must provide the secret encryption key used to encrypt/decrypt your data. + SseCustomerKey string `yaml:"sse_customer_key"` + // If using SSE-C you may provide the secret encryption key MD5 checksum (optional). + // + // If you leave it blank, this is calculated automatically from the sse_customer_key provided. + SseCustomerKeyMd5 string `yaml:"sse_customer_key_md5"` + // The storage class to use when storing new objects in S3. + StorageClass string `yaml:"storage_class"` + // Cutoff for switching to chunked upload + // + // Any files larger than this will be uploaded in chunks of chunk_size. + // The minimum is 0 and the maximum is 5GB. + UploadCutoff string `yaml:"upload_cutoff"` + // Chunk size to use for uploading. + // + // When uploading files larger than upload_cutoff or files with unknown + // size (e.g. from "rclone rcat" or uploaded with "rclone mount" or google + // photos or google docs) they will be uploaded as multipart uploads + // using this chunk size. + // + // Note that "--s3-upload-concurrency" chunks of this size are buffered + // in memory per transfer. + // + // If you are transferring large files over high-speed links and you have + // enough memory, then increasing this will speed up the transfers. + // + // Rclone will automatically increase the chunk size when uploading a + // large file of known size to stay below the 10,000 chunks limit. + // + // Files of unknown size are uploaded with the configured + // chunk_size. Since the default chunk size is 5MB and there can be at + // most 10,000 chunks, this means that by default the maximum size of + // a file you can stream upload is 48GB. If you wish to stream upload + // larger files then you will need to increase chunk_size. + ChunkSize string `yaml:"chunk_size"` + // Maximum number of parts in a multipart upload. + // + // This option defines the maximum number of multipart chunks to use + // when doing a multipart upload. + // + // This can be useful if a service does not support the AWS S3 + // specification of 10,000 chunks. + // + // Rclone will automatically increase the chunk size when uploading a + // large file of a known size to stay below this number of chunks limit. + MaxUploadParts string `yaml:"max_upload_parts"` + // Cutoff for switching to multipart copy + // + // Any files larger than this that need to be server-side copied will be + // copied in chunks of this size. + // + // The minimum is 0 and the maximum is 5GB. + CopyCutoff string `yaml:"copy_cutoff"` + // Don't store MD5 checksum with object metadata + // + // Normally rclone will calculate the MD5 checksum of the input before + // uploading it so it can add it to metadata on the object. This is great + // for data integrity checking but can cause long delays for large files + // to start uploading. + DisableChecksum string `yaml:"disable_checksum"` + // Path to the shared credentials file + // + // If env_auth = true then rclone can use a shared credentials file. + // + // If this variable is empty rclone will look for the + // "AWS_SHARED_CREDENTIALS_FILE" env variable. If the env value is empty + // it will default to the current user's home directory. 
+ // + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + SharedCredentialsFile string `yaml:"shared_credentials_file"` + // Profile to use in the shared credentials file + // + // If env_auth = true then rclone can use a shared credentials file. This + // variable controls which profile is used in that file. + // + // If empty it will default to the environment variable "AWS_PROFILE" or + // "default" if that environment variable is also not set. + Profile string `yaml:"profile"` + // An AWS session token + SessionToken string `yaml:"session_token"` + // Concurrency for multipart uploads. + // + // This is the number of chunks of the same file that are uploaded + // concurrently. + // + // If you are uploading small numbers of large files over high-speed links + // and these uploads do not fully utilize your bandwidth, then increasing + // this may help to speed up the transfers. + UploadConcurrency string `yaml:"upload_concurrency"` + // If true use path style access if false use virtual hosted style. + // + // If this is true (the default) then rclone will use path style access, + // if false then rclone will use virtual path style. See [the AWS S3 + // docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) + // for more info. + // + // Some providers (e.g. AWS, Aliyun OSS, Netease COS, or Tencent COS) require this set to + // false - rclone will do this automatically based on the provider + // setting. + ForcePathStyle string `yaml:"force_path_style"` + // If true use v2 authentication. + // + // If this is false (the default) then rclone will use v4 authentication. + // If it is set then rclone will use v2 authentication. + // + // Use this only if v4 signatures don't work, e.g. pre Jewel/v10 CEPH. + V2Auth string `yaml:"v2_auth"` + // If true use the AWS S3 accelerated endpoint. + // + // See: [AWS S3 Transfer acceleration](https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration-examples.html) + UseAccelerateEndpoint string `yaml:"use_accelerate_endpoint"` + // If true avoid calling abort upload on a failure, leaving all successfully uploaded parts on S3 for manual recovery. + // + // It should be set to true for resuming uploads across different sessions. + // + // WARNING: Storing parts of an incomplete multipart upload counts towards space usage on S3 and will add additional costs if not cleaned up. + LeavePartsOnError string `yaml:"leave_parts_on_error"` + // Size of listing chunk (response list for each ListObject S3 request). + // + // This option is also known as "MaxKeys", "max-items", or "page-size" from the AWS S3 specification. + // Most services truncate the response list to 1000 objects even if requested more than that. + // In AWS S3 this is a global maximum and cannot be changed, see [AWS S3](https://docs.aws.amazon.com/cli/latest/reference/s3/ls.html). + // In Ceph, this can be increased with the "rgw list buckets max chunk" option. + ListChunk string `yaml:"list_chunk"` + // If set, don't attempt to check the bucket exists or create it + // + // This can be useful when trying to minimise the number of transactions + // rclone does if you know the bucket exists already. + // + // It can also be needed if the user you are using does not have bucket + // creation permissions. Before v1.52.0 this would have passed silently + // due to a bug. 
+ NoCheckBucket string `yaml:"no_check_bucket"` + // If set, don't HEAD uploaded objects to check integrity + // + // This can be useful when trying to minimise the number of transactions + // rclone does. + // + // Setting it means that if rclone receives a 200 OK message after + // uploading an object with PUT then it will assume that it got uploaded + // properly. + // + // In particular it will assume: + // + // - the metadata, including modtime, storage class and content type was as uploaded + // - the size was as uploaded + // + // It reads the following items from the response for a single part PUT: + // + // - the MD5SUM + // - The uploaded date + // + // For multipart uploads these items aren't read. + // + // If an source object of unknown length is uploaded then rclone **will** do a + // HEAD request. + // + // Setting this flag increases the chance for undetected upload failures, + // in particular an incorrect size, so it isn't recommended for normal + // operation. In practice the chance of an undetected upload failure is + // very small even with this flag. + NoHead string `yaml:"no_head"` + // This sets the encoding for the backend. + // + // See: the [encoding section in the overview](/overview/#encoding) for more info. + Encoding string `yaml:"encoding"` + // How often internal memory buffer pools will be flushed. + // Uploads which requires additional buffers (f.e multipart) will use memory pool for allocations. + // This option controls how often unused buffers will be removed from the pool. + MemoryPoolFlushTime string `yaml:"memory_pool_flush_time"` + // Whether to use mmap buffers in internal memory pool. + MemoryPoolUseMmap string `yaml:"memory_pool_use_mmap"` + // Disable usage of http2 for S3 backends + // + // There is currently an unsolved issue with the s3 (specifically minio) backend + // and HTTP/2. HTTP/2 is enabled by default for the s3 backend but can be + // disabled here. When the issue is solved this flag will be removed. + // + // See: https://github.com/rclone/rclone/issues/4673, https://github.com/rclone/rclone/issues/3631 + DisableHttp2 string `yaml:"disable_http2"` +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/progress.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/progress.go new file mode 100644 index 00000000000..a251c4d0f37 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/progress.go @@ -0,0 +1,109 @@ +// Copyright (C) 2017 ScyllaDB + +package rclone + +import ( + "bytes" + "fmt" + "strings" + "sync" + "time" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/operations" + "github.com/rclone/rclone/lib/terminal" +) + +const ( + // Interval between progress prints. + defaultProgressInterval = 500 * time.Millisecond +) + +// StartProgress starts the progress bar printing +// +// It returns a func which should be called to stop the stats. 
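+//
+// A hedged usage sketch:
+//
+//	stop := rclone.StartProgress()
+//	defer stop()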
+func StartProgress() func() { + stopStats := make(chan struct{}) + oldLogPrint := fs.LogPrint + oldSyncPrint := operations.SyncPrintf + + // Intercept output from functions such as HashLister to stdout + operations.SyncPrintf = func(format string, a ...interface{}) { + printProgress(fmt.Sprintf(format, a...)) + } + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + progressInterval := defaultProgressInterval + ticker := time.NewTicker(progressInterval) + for { + select { + case <-ticker.C: + printProgress("") + case <-stopStats: + ticker.Stop() + printProgress("") + fs.LogPrint = oldLogPrint + operations.SyncPrintf = oldSyncPrint + fmt.Println("") + return + } + } + }() + return func() { + close(stopStats) + wg.Wait() + } +} + +// State for the progress printing. +var ( + nlines = 0 // number of lines in the previous stats block + progressMu sync.Mutex +) + +// printProgress prints the progress with an optional log. +func printProgress(logMessage string) { + progressMu.Lock() + defer progressMu.Unlock() + + var buf bytes.Buffer + w, _ := terminal.GetSize() + stats := strings.TrimSpace(accounting.GlobalStats().String()) + logMessage = strings.TrimSpace(logMessage) + + out := func(s string) { + buf.WriteString(s) + } + + if logMessage != "" { + out("\n") + out(terminal.MoveUp) + } + // Move to the start of the block we wrote erasing all the previous lines + for i := 0; i < nlines-1; i++ { + out(terminal.EraseLine) + out(terminal.MoveUp) + } + out(terminal.EraseLine) + out(terminal.MoveToStartOfLine) + if logMessage != "" { + out(terminal.EraseLine) + out(logMessage + "\n") + } + fixedLines := strings.Split(stats, "\n") + nlines = len(fixedLines) + for i, line := range fixedLines { + if len(line) > w { + line = line[:w] + } + out(line) + if i != nlines-1 { + out("\n") + } + } + terminal.Write(buf.Bytes()) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/prometheus.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/prometheus.go new file mode 100644 index 00000000000..4051a5ab21e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/prometheus.go @@ -0,0 +1,25 @@ +// Copyright (C) 2017 ScyllaDB + +package rclone + +import ( + "context" + + "github.com/prometheus/client_golang/prometheus" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/fshttp" +) + +// MustRegisterPrometheusMetrics registers rclone metrics with prometheus. +func MustRegisterPrometheusMetrics(namespace string) { + // Accounting + a := accounting.NewRcloneCollector(context.Background(), namespace) + prometheus.MustRegister(a) + + // HTTP level metrics + m := fshttp.NewMetrics(namespace) + for _, c := range m.Collectors() { + prometheus.MustRegister(c) + } + fshttp.DefaultMetrics = m +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/providers.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/providers.go new file mode 100644 index 00000000000..b1dcc26f03f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/providers.go @@ -0,0 +1,130 @@ +// Copyright (C) 2017 ScyllaDB + +package rclone + +import ( + "os" + "reflect" + "strings" + + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/scylladb/go-reflectx" + "github.com/scylladb/go-set/strset" + "github.com/scylladb/scylla-manager/v3/pkg/rclone/backend/localdir" + "go.uber.org/multierr" +) + +var providers = strset.New() + +// HasProvider returns true iff provider was registered. 
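+//
+// A hedged usage sketch (provider name illustrative):
+//
+//	if !rclone.HasProvider("s3") {
+//		// reject the request for an unknown provider
+//	}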
+func HasProvider(name string) bool {
+	return providers.Has(name)
+}
+
+// RegisterLocalDirProvider must be called before server is started.
+// It allows dynamically adding localdir providers.
+func RegisterLocalDirProvider(name, description, rootDir string) error {
+	if _, err := os.Stat(rootDir); os.IsNotExist(err) {
+		return errors.Wrapf(err, "register local dir provider %s", rootDir)
+	}
+	localdir.Init(name, description, rootDir)
+
+	return errors.Wrap(registerProvider(name, name, LocalOptions{}), "register provider")
+}
+
+// MustRegisterLocalDirProvider calls RegisterLocalDirProvider and panics on
+// error.
+func MustRegisterLocalDirProvider(name, description, rootDir string) {
+	if err := RegisterLocalDirProvider(name, description, rootDir); err != nil {
+		panic(err)
+	}
+}
+
+// RegisterS3Provider must be called before server is started.
+// It allows dynamically adding an s3 provider named s3.
+func RegisterS3Provider(opts S3Options) error {
+	const (
+		name    = "s3"
+		backend = "s3"
+	)
+
+	opts.AutoFill()
+	if err := opts.Validate(); err != nil {
+		return err
+	}
+
+	return errors.Wrap(registerProvider(name, backend, opts), "register provider")
+}
+
+// MustRegisterS3Provider calls RegisterS3Provider and panics on error.
+func MustRegisterS3Provider(provider, endpoint, accessKeyID, secretAccessKey string) {
+	opts := DefaultS3Options()
+	opts.Provider = provider
+	opts.Endpoint = endpoint
+	opts.AccessKeyID = accessKeyID
+	opts.SecretAccessKey = secretAccessKey
+
+	if err := RegisterS3Provider(opts); err != nil {
+		panic(err)
+	}
+}
+
+// RegisterGCSProvider must be called before server is started.
+// It allows dynamically adding a gcs provider named gcs.
+func RegisterGCSProvider(opts GCSOptions) error {
+	const (
+		name    = "gcs"
+		backend = "gcs"
+	)
+
+	opts.AutoFill()
+
+	return errors.Wrap(registerProvider(name, backend, opts), "register provider")
+}
+
+// RegisterAzureProvider must be called before server is started.
+// It allows dynamically adding an azure provider named azure.
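+//
+// A hedged usage sketch (field values illustrative; AzureOptions fields beyond
+// those shown in this hunk are assumed):
+//
+//	opts := rclone.AzureOptions{Key: "…"}
+//	if err := rclone.RegisterAzureProvider(opts); err != nil {
+//		// handle registration failure
+//	}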
+func RegisterAzureProvider(opts AzureOptions) error {
+	const (
+		name    = "azure"
+		backend = "azureblob"
+	)
+
+	opts.AutoFill()
+
+	return errors.Wrap(registerProvider(name, backend, opts), "register provider")
+}
+
+func registerProvider(name, backend string, options interface{}) error {
+	var (
+		m     = reflectx.NewMapper("yaml").FieldMap(reflect.ValueOf(options))
+		extra = []string{"name=" + name}
+		errs  error
+	)
+
+	// Set type
+	errs = multierr.Append(errs, fs.ConfigFileSet(name, "type", backend))
+
+	// Set and log options
+	for key, rval := range m {
+		if s := rval.String(); s != "" {
+			errs = multierr.Append(errs, fs.ConfigFileSet(name, key, s))
+			if strings.Contains(key, "secret") || strings.Contains(key, "key") {
+				extra = append(extra, key+"="+strings.Repeat("*", len(s)))
+			} else {
+				extra = append(extra, key+"="+s)
+			}
+		}
+	}
+
+	// Check for errors
+	if errs != nil {
+		return errors.Wrapf(errs, "register %s provider", name)
+	}
+
+	providers.Add(name)
+	fs.Infof(nil, "registered %s provider [%s]", name, strings.Join(extra, ", "))
+
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rate.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rate.go
new file mode 100644
index 00000000000..6812bfdda03
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rate.go
@@ -0,0 +1,19 @@
+// Copyright (C) 2017 ScyllaDB
+
+package rclone
+
+import (
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
+)
+
+// SetRateLimit sets the global rate limit to a given amount of MiB per second.
+// Set to 0 for full throttle.
+func SetRateLimit(mib int) {
+	var bw fs.BwPair
+	if mib > 0 {
+		bw.Tx = fs.SizeSuffix(mib) * fs.MebiByte
+		bw.Rx = fs.SizeSuffix(mib) * fs.MebiByte
+	}
+	accounting.TokenBucket.SetBwLimit(bw)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/imports.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/imports.go
new file mode 100644
index 00000000000..4ed26414543
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/imports.go
@@ -0,0 +1,15 @@
+// Copyright (C) 2017 ScyllaDB
+
+package rcserver
+
+// Needed for triggering global registrations in rclone.
+import (
+	_ "github.com/rclone/rclone/backend/azureblob"
+	_ "github.com/rclone/rclone/backend/googlecloudstorage"
+	_ "github.com/rclone/rclone/backend/local"
+	_ "github.com/rclone/rclone/backend/s3"
+	_ "github.com/rclone/rclone/fs/accounting"
+	_ "github.com/rclone/rclone/fs/operations"
+	_ "github.com/rclone/rclone/fs/rc/jobs"
+	_ "github.com/rclone/rclone/fs/sync"
+)
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internal/rclone_supported_calls.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internal/rclone_supported_calls.go
new file mode 100644
index 00000000000..f2d9450cc83
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internal/rclone_supported_calls.go
@@ -0,0 +1,28 @@
+// Copyright (C) 2017 ScyllaDB
+// Code generated by go-swagger; DO NOT EDIT.
+ +package internal + +import ( + "github.com/scylladb/go-set/strset" +) + +var RcloneSupportedCalls = strset.New( + "core/bwlimit", + "core/stats-delete", + "core/stats-reset", + "job/info", + "job/progress", + "job/stop", + "operations/about", + "operations/check-permissions", + "operations/copyfile", + "operations/deletefile", + "operations/fileinfo", + "operations/list", + "operations/movefile", + "operations/purge", + "sync/copydir", + "sync/copypaths", + "sync/movedir", +) diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internalgen.sh b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internalgen.sh new file mode 100644 index 00000000000..3d335603b45 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internalgen.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 ScyllaDB +# + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +cd "${SCRIPT_DIR}" || exit 1 +rm -f internal/rclone_supported_calls.go +jq '.paths | [keys[] | select(. | startswith("/rclone")) | sub("^/rclone/"; "")]' $(git rev-parse --show-toplevel)/swagger/agent.json | \ + go run internal/templates/jsontemplate.go internal/templates/rclone_supported_calls.gotmpl > \ + internal/rclone_supported_calls.go \ No newline at end of file diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/json.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/json.go new file mode 100644 index 00000000000..0b9ed6b967b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/json.go @@ -0,0 +1,77 @@ +// Copyright (C) 2017 ScyllaDB + +package rcserver + +import ( + "encoding/json" + "io" + "net/http" +) + +type writerFlusher interface { + io.Writer + http.Flusher +} + +type jsonEncoder struct { + wf writerFlusher + enc *json.Encoder + err error +} + +func newJSONEncoder(wf writerFlusher) *jsonEncoder { + return &jsonEncoder{ + wf: wf, + enc: json.NewEncoder(wf), + } +} + +func (e *jsonEncoder) OpenObject() { + e.writeString(`{`) +} + +func (e *jsonEncoder) CloseObject() { + e.writeString(`}`) +} + +func (e *jsonEncoder) OpenList(name string) { + e.writeString(`"` + name + `":[`) +} + +func (e *jsonEncoder) CloseList() { + e.writeString("]") +} + +func (e *jsonEncoder) Field(key string, value interface{}) { + e.writeString(`"` + key + `":`) + e.Encode(value) +} + +func (e *jsonEncoder) Encode(v interface{}) { + if e.err != nil { + return + } + e.err = e.enc.Encode(v) +} + +func (e *jsonEncoder) Delim() { + e.writeString(`,`) +} + +func (e *jsonEncoder) writeString(s string) { + if e.err != nil { + return + } + _, e.err = e.wf.Write([]byte(s)) +} + +func (e *jsonEncoder) Flush() { + if e.err != nil { + return + } + e.wf.Flush() +} + +func (e *jsonEncoder) Error() error { + return e.err +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/list.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/list.go new file mode 100644 index 00000000000..bef65f92508 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/list.go @@ -0,0 +1,97 @@ +// Copyright (C) 2017 ScyllaDB + +package rcserver + +import ( + rcops "github.com/rclone/rclone/fs/operations" + "github.com/rclone/rclone/fs/rc" +) + +const defaultListEncoderMaxItems = 200 + +type listJSONEncoder struct { + enc *jsonEncoder + buf []*rcops.ListJSONItem + maxItems int + started bool +} + +func newListJSONEncoder(wf writerFlusher, 
maxItems int) *listJSONEncoder { + return &listJSONEncoder{ + enc: newJSONEncoder(wf), + buf: make([]*rcops.ListJSONItem, 0, maxItems), + maxItems: maxItems, + } +} + +func (e *listJSONEncoder) Callback(item *rcops.ListJSONItem) error { + // Aggregate items + e.buf = append(e.buf, item) + if len(e.buf) < e.maxItems { + return nil + } + + // Write and flush buffer + if !e.started { + e.enc.OpenObject() + e.enc.OpenList("list") + e.enc.Encode(e.buf[0]) + + e.started = true + e.buf = e.buf[1:] + } + for i := range e.buf { + e.enc.Delim() + e.enc.Encode(e.buf[i]) + } + e.enc.Flush() + + e.reset() + + return e.enc.Error() +} + +func (e *listJSONEncoder) reset() { + e.buf = e.buf[:0] +} + +func (e *listJSONEncoder) Close() { + if !e.started { + return + } + + // Write remaining list items + for i := range e.buf { + e.enc.Delim() + e.enc.Encode(e.buf[i]) + } + + // Close and flush json + e.enc.CloseList() + e.enc.CloseObject() + e.enc.Flush() +} + +func (e *listJSONEncoder) Result(err error) (rc.Params, error) { + if err != nil { + return e.errorResult(err) + } + return e.result() +} + +func (e *listJSONEncoder) errorResult(err error) (rc.Params, error) { + if !e.started { + return nil, err + } + return nil, errResponseWritten +} + +func (e *listJSONEncoder) result() (rc.Params, error) { + // If not sent business as usual + if !e.started { + return rc.Params{"list": e.buf}, nil + } + + // Notify response sent + return nil, errResponseWritten +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/metrics.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/metrics.go new file mode 100644 index 00000000000..9004b6ed8b7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/metrics.go @@ -0,0 +1,20 @@ +// Copyright (C) 2017 ScyllaDB + +package rcserver + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +var agentUnexposedAccess = prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "scylla_manager", + Subsystem: "agent", + Name: "unexposed_access", + Help: "Attempted access to the unexposed endpoint of the agent", +}, []string{"addr", "path"}) + +func init() { + prometheus.MustRegister( + agentUnexposedAccess, + ) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rc.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rc.go new file mode 100644 index 00000000000..57d9a92edee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rc.go @@ -0,0 +1,655 @@ +// Copyright (C) 2017 ScyllaDB + +package rcserver + +import ( + "context" + "fmt" + "io" + "os" + "time" + + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/accounting" + "github.com/rclone/rclone/fs/filter" + "github.com/rclone/rclone/fs/object" + rcops "github.com/rclone/rclone/fs/operations" + "github.com/rclone/rclone/fs/rc" + "github.com/rclone/rclone/fs/rc/jobs" + "github.com/rclone/rclone/fs/sync" + "github.com/scylladb/scylla-manager/v3/pkg/rclone" + "github.com/scylladb/scylla-manager/v3/pkg/rclone/operations" + "github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internal" + "github.com/scylladb/scylla-manager/v3/pkg/util/timeutc" + "go.uber.org/multierr" +) + +// rcJobInfo aggregates core, transferred, and job stats into a single call. +// If jobid parameter is provided but job is not found then nil is returned for +// all three aggregated stats. 
+// If jobid parameter is not provided then transferred and core stats are +// returned for all groups to allow access to global transfer stats. +func rcJobInfo(ctx context.Context, in rc.Params) (out rc.Params, err error) { + var ( + jobOut, statsOut, transOut map[string]interface{} + jobErr, statsErr, transErr error + ) + // Load Job status only if jobid is explicitly set. + if jobid, err := in.GetInt64("jobid"); err == nil { + wait, err := in.GetInt64("wait") + if err != nil && !rc.IsErrParamNotFound(err) { + jobErr = err + } else if wait > 0 { + jobErr = waitForJobFinish(ctx, jobid, wait) + } + if jobErr == nil { + jobOut, jobErr = rcCalls.Get("job/status").Fn(ctx, in) + in["group"] = fmt.Sprintf("job/%d", jobid) + } + } + + if jobErr == nil { + statsOut, statsErr = rcCalls.Get("core/stats").Fn(ctx, in) + transOut, transErr = rcCalls.Get("core/transferred").Fn(ctx, in) + } else if errors.Is(jobErr, errJobNotFound) { + jobErr = nil + fs.Errorf(nil, "Job not found") + } + + return rc.Params{ + "job": jobOut, + "stats": statsOut, + "transferred": transOut["transferred"], + }, multierr.Combine(jobErr, statsErr, transErr) +} + +func init() { + rc.Add(rc.Call{ + Path: "job/info", + AuthRequired: true, + Fn: rcJobInfo, + Title: "Group all status calls into one", + Help: `This takes the following parameters + +- jobid - id of the job to get status of +- wait - seconds to wait for job operation to complete + +Returns + +job: job status +stats: running stats +transferred: transferred stats +`, + }) +} + +// rcJobProgress aggregates and returns prepared job progress information. +func rcJobProgress(ctx context.Context, in rc.Params) (out rc.Params, err error) { + var jobOut, aggregatedOut map[string]interface{} + jobid, err := in.GetInt64("jobid") + if err != nil { + return nil, err + } + wait, err := in.GetInt64("wait") + if err != nil && !rc.IsErrParamNotFound(err) { + return nil, err + } + + if wait > 0 { + err = waitForJobFinish(ctx, jobid, wait) + if err != nil { + return nil, err + } + } + + jobOut, err = rcCalls.Get("job/status").Fn(ctx, in) + if err != nil { + return nil, err + } + in["group"] = fmt.Sprintf("job/%d", jobid) + aggregatedOut, err = rcCalls.Get("core/aggregated").Fn(ctx, in) + if err != nil { + return nil, err + } + + if err := rc.Reshape(&out, aggregateJobInfo(jobOut, aggregatedOut)); err != nil { + return nil, err + } + return out, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "job/progress", + AuthRequired: true, + Fn: rcJobProgress, + Title: "Return job progress", + Help: `This takes the following parameters + +- jobid - id of the job to get progress of +- wait - seconds to wait for job operation to complete + +Returns + +status: string +completed_at: string +started_at: string +error: string +failed: int64 +skipped: int64 +uploaded: int64 +`, + }) +} + +type jobProgress struct { + // status of the job + // Enum: [success error running not_found] + Status JobStatus `json:"status"` + // time at which job completed + // Format: date-time + CompletedAt time.Time `json:"completed_at"` + // time at which job started + // Format: date-time + StartedAt time.Time `json:"started_at"` + // string description of the error (empty if successful) + Error string `json:"error"` + // number of bytes that failed transfer + Failed int64 `json:"failed"` + // number of bytes that were skipped + Skipped int64 `json:"skipped"` + // number of bytes that are successfully uploaded + Uploaded int64 `json:"uploaded"` +} + +type jobFields struct { + ID int64 `mapstructure:"id"` + StartTime 
string `mapstructure:"startTime"` + EndTime string `mapstructure:"endTime"` + Finished bool `mapstructure:"finished"` + Success bool `mapstructure:"success"` + Error string `mapstructure:"error"` +} + +type aggFields struct { + Aggregated accounting.AggregatedTransferInfo `mapstructure:"aggregated"` +} + +func aggregateJobInfo(jobParam, aggregatedParam rc.Params) jobProgress { + // Parse parameters + var job jobFields + if err := mapstructure.Decode(jobParam, &job); err != nil { + panic(err) + } + var aggregated aggFields + if err := mapstructure.Decode(aggregatedParam, &aggregated); err != nil { + panic(err) + } + + // Init job progress + p := jobProgress{ + Status: statusOfJob(job), + Error: job.Error, + } + if t, err := timeutc.Parse(time.RFC3339, job.StartTime); err == nil && !t.IsZero() { + p.StartedAt = t + } + if t, err := timeutc.Parse(time.RFC3339, job.EndTime); err == nil && !t.IsZero() { + p.CompletedAt = t + } + + p.Uploaded = aggregated.Aggregated.Uploaded + p.Skipped = aggregated.Aggregated.Skipped + p.Failed = aggregated.Aggregated.Failed + + return p +} + +// JobStatus represents one of the available job statuses. +type JobStatus string + +// JobStatus enumeration. +const ( + JobError JobStatus = "error" + JobSuccess JobStatus = "success" + JobRunning JobStatus = "running" + JobNotFound JobStatus = "not_found" +) + +func statusOfJob(job jobFields) (status JobStatus) { + status = JobRunning + + switch { + case job.ID == 0: + status = JobNotFound + case job.Finished && job.Success: + status = JobSuccess + case job.Finished && !job.Success: + status = JobError + } + + return +} + +var errJobNotFound = errors.New("job not found") + +func waitForJobFinish(ctx context.Context, jobid, wait int64) error { + w := time.Second * time.Duration(wait) + done := make(chan struct{}) + + stop, err := jobs.OnFinish(jobid, func() { + close(done) + }) + if err != nil { + // Returning errJobNotFound because jobs.OnFinish can fail only if job + // is not available and it doesn't return any specific error to signal + // that higher up the call chain. + return errJobNotFound + } + defer stop() + + timer := time.NewTimer(w) + defer timer.Stop() + + select { + case <-done: + return nil + case <-timer.C: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// rcFileInfo returns basic object information. +func rcFileInfo(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, remote, err := rc.GetFsAndRemote(ctx, in) + if err != nil { + return nil, err + } + o, err := f.NewObject(ctx, remote) + if err != nil { + return nil, err + } + out = rc.Params{ + "modTime": o.ModTime(ctx), + "size": o.Size(), + } + return +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/fileinfo", + AuthRequired: true, + Fn: rcFileInfo, + Title: "Get basic file information", + Help: `This takes the following parameters + +- fs - a remote name string eg "s3:path/to/dir"`, + }) +} + +// rcCat returns the whole remote object in body. 
+func rcCat(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, remote, err := rc.GetFsAndRemote(ctx, in) + if err != nil { + return nil, err + } + o, err := f.NewObject(ctx, remote) + if err != nil { + return nil, err + } + w, err := in.GetHTTPResponseWriter() + if err != nil { + return nil, err + } + r, err := o.Open(ctx) + if err != nil { + return nil, err + } + defer r.Close() + + h := w.Header() + h.Set("Content-Type", "application/octet-stream") + h.Set("Content-Length", fmt.Sprint(o.Size())) + + n, err := io.Copy(w, r) + if err != nil { + if n == 0 { + return nil, err + } + fs.Errorf(o, "copy error %s", err) + } + + return nil, errResponseWritten +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/cat", + AuthRequired: true, + Fn: wrap(rcCat, pathHasPrefix("backup/meta/")), + Title: "Concatenate any files and send them in response", + Help: `This takes the following parameters + +- fs - a remote name string eg "s3:path/to/dir" + +Returns + +- body - file content`, + NeedsResponse: true, + }) + + // Adding it here because it is not part of the agent.json. + // It should be removed once we are able to generate client for this call. + internal.RcloneSupportedCalls.Add("operations/cat") +} + +func rcPut(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, remote, err := rc.GetFsAndRemote(ctx, in) + if err != nil { + return nil, err + } + + r, err := in.GetHTTPRequest() + if err != nil { + return nil, err + } + defer r.Body.Close() + + info := object.NewStaticObjectInfo(remote, timeutc.Now(), r.ContentLength, true, nil, f) + + dst, err := f.NewObject(ctx, remote) + if err == nil { + if rcops.Equal(ctx, info, dst) { + return nil, nil + } else if rclone.GetConfig().Immutable { + fs.Errorf(dst, "Source and destination exist but do not match: immutable file modified") + return nil, fs.ErrorImmutableModified + } + } else if !errors.Is(err, fs.ErrorObjectNotFound) { + return nil, err + } + + obj, err := rcops.RcatSize(ctx, f, remote, r.Body, r.ContentLength, info.ModTime(ctx)) + if err != nil { + return nil, err + } + fs.Debugf(obj, "Upload Succeeded") + + return nil, err +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/put", + Fn: rcPut, + Title: "Save provided content as file", + AuthRequired: true, + Help: `This takes the following parameters: + +- fs - a remote name string eg "s3:path/to/file" +- body - file content`, + NeedsRequest: true, + }) + + // Adding it here because it is not part of the agent.json. + // It should be removed once we are able to generate client for this call. + internal.RcloneSupportedCalls.Add("operations/put") +} + +// rcCheckPermissions checks if location is available for listing, getting, +// creating, and deleting objects. 
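A hypothetical client-side sketch of invoking the call registered below. The base URL, HTTP client and error handling are assumptions; only the path and the "fs" parameter come from this definition:

	import (
		"context"
		"fmt"
		"net/http"
		"strings"
	)

	// checkLocation POSTs to operations/check-permissions and treats any
	// non-200 response as an inaccessible location.
	func checkLocation(ctx context.Context, base string, hc *http.Client) error {
		req, err := http.NewRequestWithContext(ctx, http.MethodPost,
			base+"/operations/check-permissions",
			strings.NewReader(`{"fs":"s3:repository"}`))
		if err != nil {
			return err
		}
		req.Header.Set("Content-Type", "application/json")
		resp, err := hc.Do(req)
		if err != nil {
			return err
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("location not accessible: %s", resp.Status)
		}
		return nil
	}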
+func rcCheckPermissions(ctx context.Context, in rc.Params) (out rc.Params, err error) { + l, err := rc.GetFs(ctx, in) + if err != nil { + return nil, errors.Wrap(err, "init location") + } + + if err := operations.CheckPermissions(ctx, l); err != nil { + fs.Errorf(nil, "Location check: error=%s", err) + return nil, err + } + + fs.Infof(nil, "Location check done") + return nil, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "operations/check-permissions", + AuthRequired: true, + Fn: rcCheckPermissions, + Title: "Checks listing, getting, creating, and deleting objects", + Help: `This takes the following parameters + +- fs - a remote name string eg "s3:repository" + +`, + }) +} + +func init() { + c := rc.Calls.Get("operations/movefile") + c.Fn = wrap(c.Fn, sameDir()) +} + +// VersionedFileRegex is a rclone formatted regex that can be used to distinguish versioned files. +const VersionedFileRegex = `{**.sm_*UTC}` + +// rcChunkedList supports streaming output of the listing. +func rcChunkedList(ctx context.Context, in rc.Params) (out rc.Params, err error) { + f, remote, err := rc.GetFsAndRemote(ctx, in) + if err != nil { + return nil, err + } + + var opt rcops.ListJSONOpt + err = in.GetStruct("opt", &opt) + if rc.NotErrParamNotFound(err) { + return nil, err + } + + newest, err := in.GetBool("newestOnly") + if rc.NotErrParamNotFound(err) { + return nil, err + } + versioned, err := in.GetBool("versionedOnly") + if rc.NotErrParamNotFound(err) { + return nil, err + } + if newest && versioned { + return nil, errors.New("newestOnly and versionedOnly parameters can't be specified at the same time") + } + if (newest || versioned) && opt.DirsOnly { + return nil, errors.New("newestOnly and versionedOnly doesn't work on directories") + } + + ctx, cfg := filter.AddConfig(ctx) + if newest { + if err := cfg.Add(false, VersionedFileRegex); err != nil { + return nil, err + } + } + if versioned { + if err := cfg.Add(true, VersionedFileRegex); err != nil { + return nil, err + } + if err := cfg.Add(false, `{**}`); err != nil { + return nil, err + } + } + + w, err := in.GetHTTPResponseWriter() + if err != nil { + return nil, err + } + enc := newListJSONEncoder(w.(writerFlusher), defaultListEncoderMaxItems) + err = rcops.ListJSON(ctx, f, remote, &opt, enc.Callback) + if err != nil { + return enc.Result(err) + } + // Localdir fs implementation ignores permission errors, but stores them in + // statistics. We must inform user about them. + if err := accounting.Stats(ctx).GetLastError(); err != nil { + if os.IsPermission(errors.Cause(err)) { + return enc.Result(err) + } + } + + enc.Close() + + return enc.Result(nil) +} + +func init() { + c := rc.Calls.Get("operations/list") + c.Fn = rcChunkedList + c.NeedsResponse = true +} + +// rcMoveOrCopyDir returns an rc function that moves or copies files from +// source to destination directory depending on the constructor argument. +// Only works for directories with single level depth. 
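For reference, a sketch of the rc.Params a caller might pass to the movedir/copydir calls built below. All values are illustrative; note that the validators registered later in this file additionally require a local source and a remote destination for these calls:

	in := rc.Params{
		"srcFs":     "local:",           // source file system (must be local for sync/movedir)
		"srcRemote": "backup/snapshots", // directory path within the source
		"dstFs":     "s3:bucket",        // destination file system
		"dstRemote": "backup/snapshots", // directory path within the destination
		"suffix":    ".bak",             // optional: suffix for files that would be overwritten or deleted
	}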
+func rcMoveOrCopyDir(doMove bool) func(ctx context.Context, in rc.Params) (rc.Params, error) { + return func(ctx context.Context, in rc.Params) (rc.Params, error) { + srcFs, srcRemote, err := getFsAndRemoteNamed(ctx, in, "srcFs", "srcRemote") + if err != nil { + return nil, err + } + dstFs, dstRemote, err := getFsAndRemoteNamed(ctx, in, "dstFs", "dstRemote") + if err != nil { + return nil, err + } + + // Set suffix for files that would be otherwise overwritten or deleted + ctx, cfg := fs.AddConfig(ctx) + cfg.Suffix, err = in.GetString("suffix") + if err != nil && !rc.IsErrParamNotFound(err) { + return nil, err + } + + return nil, sync.CopyDir2(ctx, dstFs, dstRemote, srcFs, srcRemote, doMove) + } +} + +// rcCopyPaths returns rc function that copies paths from +// source to destination. +func rcCopyPaths() func(ctx context.Context, in rc.Params) (rc.Params, error) { + return func(ctx context.Context, in rc.Params) (rc.Params, error) { + srcFs, srcRemote, err := getFsAndRemoteNamed(ctx, in, "srcFs", "srcRemote") + if err != nil { + return nil, err + } + dstFs, dstRemote, err := getFsAndRemoteNamed(ctx, in, "dstFs", "dstRemote") + if err != nil { + return nil, err + } + paths, err := getStringSlice(in, "paths") + if err != nil { + return nil, err + } + + return nil, sync.CopyPaths(ctx, dstFs, dstRemote, srcFs, srcRemote, paths, false) + } +} + +// getFsAndRemoteNamed gets fs and remote path from the params, but it doesn't +// fail if remote path is not provided. +// In that case it is assumed that path is empty and root of the fs is used. +func getFsAndRemoteNamed(ctx context.Context, in rc.Params, fsName, remoteName string) (f fs.Fs, remote string, err error) { + remote, err = in.GetString(remoteName) + if err != nil && !rc.IsErrParamNotFound(err) { + return + } + f, err = rc.GetFsNamed(ctx, in, fsName) + return +} + +func getStringSlice(in rc.Params, key string) ([]string, error) { + value, err := in.Get(key) + if err != nil { + return nil, err + } + + tmp, ok := value.([]interface{}) + if !ok { + return nil, errors.Errorf("expecting []interface{} value for key %q (was %T)", key, value) + } + + var res []string + for i, v := range tmp { + str, ok := v.(string) + if !ok { + return nil, errors.Errorf("expecting string value for slice index nr %d (was %T)", i, str) + } + res = append(res, str) + } + + return res, nil +} + +func init() { + rc.Add(rc.Call{ + Path: "sync/movedir", + AuthRequired: true, + Fn: wrap(rcMoveOrCopyDir(true), localToRemote()), + Title: "Move contents of source directory to destination", + Help: `This takes the following parameters: + +- srcFs - a remote name string eg "s3:" for the source +- srcRemote - a directory path within that remote for the source +- dstFs - a remote name string eg "gcs:" for the destination +- dstRemote - a directory path within that remote for the destination`, + }) + + rc.Add(rc.Call{ + Path: "sync/copydir", + AuthRequired: true, + Fn: wrap(rcMoveOrCopyDir(false), localToRemote()), + Title: "Copy contents from source directory to destination", + Help: `This takes the following parameters: + +- srcFs - a remote name string eg "s3:" for the source +- srcRemote - a directory path within that remote for the source +- dstFs - a remote name string eg "gcs:" for the destination +- dstRemote - a directory path within that remote for the destination`, + }) + + rc.Add(rc.Call{ + Path: "sync/copypaths", + AuthRequired: true, + Fn: wrap(rcCopyPaths(), remoteToLocal()), + Title: "Copy paths from source directory to destination", + Help: `This takes the 
following parameters: + +- srcFs - a remote name string eg "s3:" for the source +- srcRemote - a directory path within that remote for the source +- dstFs - a remote name string eg "gcs:" for the destination +- dstRemote - a directory path within that remote for the destination +- paths - slice of paths to be copied from source directory to destination`, + }) +} + +// rcCalls contains the original rc.Calls before filtering with all the added +// custom calls in this file. +var rcCalls *rc.Registry + +func init() { + rcCalls = rc.Calls + filterRcCalls() +} + +// filterRcCalls disables all default calls and whitelists only supported calls. +func filterRcCalls() { + rc.Calls = rc.NewRegistry() + + for _, c := range rcCalls.List() { + if internal.RcloneSupportedCalls.Has(c.Path) { + rc.Add(*c) + } + } +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rchardening.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rchardening.go new file mode 100644 index 00000000000..3083e48d24d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rchardening.go @@ -0,0 +1,122 @@ +// Copyright (C) 2017 ScyllaDB + +package rcserver + +import ( + "context" + "path" + "path/filepath" + "strings" + + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/fspath" + "github.com/rclone/rclone/fs/rc" +) + +type paramsValidator func(ctx context.Context, in rc.Params) error + +func wrap(fn rc.Func, v paramsValidator) rc.Func { + return func(ctx context.Context, in rc.Params) (rc.Params, error) { + if err := v(ctx, in); err != nil { + return nil, err + } + return fn(ctx, in) + } +} + +// pathHasPrefix reads "fs" and "remote" params, evaluates absolute path and +// ensures it has the required prefix. 
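An in-package sketch of how these validators compose with wrap from this file (rcExample and guardedExample are stand-ins, not part of the vendored code):

	var rcExample rc.Func = func(ctx context.Context, in rc.Params) (rc.Params, error) {
		return rc.Params{"ok": true}, nil
	}

	// Reject any "fs"/"remote" pair that does not resolve under backup/meta/
	// (the bucket segment is stripped before the prefix check).
	var guardedExample = wrap(rcExample, pathHasPrefix("backup/meta/"))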
+func pathHasPrefix(prefix string) paramsValidator { + return func(ctx context.Context, in rc.Params) error { + _, p, err := joined(in, "fs", "remote") + if err != nil { + return err + } + + // Strip bucket name + i := strings.Index(p, "/") + p = p[i+1:] + + if !strings.HasPrefix(p, prefix) { + return fs.ErrorPermissionDenied + } + return nil + } +} + +func localToRemote() paramsValidator { + return func(ctx context.Context, in rc.Params) error { + fsrc, err := rc.GetFsNamed(ctx, in, "srcFs") + if err != nil { + return err + } + if !fsrc.Features().IsLocal { + return fs.ErrorPermissionDenied + } + fdst, err := rc.GetFsNamed(ctx, in, "dstFs") + if err != nil { + return err + } + if fdst.Features().IsLocal { + return fs.ErrorPermissionDenied + } + return nil + } +} + +func remoteToLocal() paramsValidator { + return func(ctx context.Context, in rc.Params) error { + fsrc, err := rc.GetFsNamed(ctx, in, "srcFs") + if err != nil { + return err + } + if fsrc.Features().IsLocal { + return fs.ErrorPermissionDenied + } + fdst, err := rc.GetFsNamed(ctx, in, "dstFs") + if err != nil { + return err + } + if !fdst.Features().IsLocal { + return fs.ErrorPermissionDenied + } + return nil + } +} + +func sameDir() paramsValidator { + return func(ctx context.Context, in rc.Params) error { + srcName, srcPath, err := joined(in, "srcFs", "srcRemote") + if err != nil { + return err + } + dstName, dstPath, err := joined(in, "dstFs", "dstRemote") + if err != nil { + return err + } + if srcName != dstName || path.Dir(srcPath) != path.Dir(dstPath) { + return fs.ErrorPermissionDenied + } + return nil + } +} + +func joined(in rc.Params, fsName, remoteName string) (configName, remotePath string, err error) { + f, err := in.GetString(fsName) + if err != nil { + return "", "", err + } + remote, err := in.GetString(remoteName) + if err != nil { + return "", "", err + } + return join(f, remote) +} + +func join(f, remote string) (configName, remotePath string, err error) { + configName, fsPath, err := fspath.Parse(f) + if err != nil { + return "", "", err + } + return configName, filepath.Clean(path.Join(fsPath, remote)), nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rcserver.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rcserver.go new file mode 100644 index 00000000000..fe89b5a2652 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/rcserver.go @@ -0,0 +1,327 @@ +// Copyright (C) 2017 ScyllaDB + +// Package rcserver implements the HTTP endpoint to serve the remote control +package rcserver + +//go:generate ./internalgen.sh + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "mime" + "net/http" + "os" + "strings" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/rclone/rclone/fs" + "github.com/rclone/rclone/fs/rc" + "github.com/rclone/rclone/fs/rc/jobs" + "github.com/scylladb/scylla-manager/v3/pkg/rclone" + "github.com/scylladb/scylla-manager/v3/pkg/rclone/operations" + "github.com/scylladb/scylla-manager/v3/pkg/util/timeutc" +) + +var initOnce sync.Once + +var ( + // ErrNotFound is returned when remote call is not available. + ErrNotFound = errors.New("not found") + errResponseWritten = errors.New("response already written") +) + +// Server implements http.Handler interface. +type Server struct { + memoryPool *sync.Pool +} + +// New creates new rclone server. 
+// Since we are overriding default behavior of saving remote configuration to +// files, we need to include code that was called in +// rclone/fs/config.LoadConfig, which initializes accounting processes but is +// no longer called. +// It's probably done this way to make sure that configuration has opportunity +// to modify global config object before these processes are started as they +// depend on it. +// We are initializing it once here to make sure it's executed only when server +// is needed and configuration is completely loaded. +func New() Server { + initOnce.Do(func() { + rclone.StartAccountingOperations() + + // Set jobs options + opts := rc.DefaultOpt + opts.JobExpireDuration = 12 * time.Hour + opts.JobExpireInterval = 1 * time.Minute + jobs.SetOpt(&opts) + // Rewind job ID to new values + jobs.SetInitialJobID(timeutc.Now().Unix()) + }) + return Server{ + memoryPool: &sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, + }, + } +} + +// writeError writes a formatted error to the output. +func (s Server) writeError(path string, in rc.Params, w http.ResponseWriter, err error, status int) { + // Ignore if response was already written + if errors.Is(err, errResponseWritten) { + return + } + + fs.Errorf(nil, "rc: %q: error: %v", path, err) + // Adjust the error return for some well known errors + if e, ok := err.(operations.PermissionError); ok { // nolint: errorlint + status = e.StatusCode() + } else { + switch { + case isNotFoundErr(err): + status = http.StatusNotFound + case isBadRequestErr(err): + status = http.StatusBadRequest + case isForbiddenErr(err): + status = http.StatusForbidden + } + } + // Try to parse xml errors for increased readability + if xmlErr, e := operations.ParseBackendXMLError(err); e == nil { + err = xmlErr + } + w.WriteHeader(status) + err = s.writeJSON(w, rc.Params{ + "status": status, + "message": err.Error(), + "input": in, + "path": path, + }) + if err != nil { + // can't return the error at this point + fs.Errorf(nil, "rc: write JSON output: %v", err) + } +} + +func (s Server) writeJSON(w http.ResponseWriter, out rc.Params) error { + buf := s.memoryPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + s.memoryPool.Put(buf) + }() + + if err := json.NewEncoder(buf).Encode(out); err != nil { + return err + } + w.Header().Set("Content-Length", fmt.Sprint(buf.Len())) + _, err := io.Copy(w, buf) + return err +} + +// nolint: errorlint +func isBadRequestErr(err error) bool { + cause := errors.Cause(err) + return rc.IsErrParamInvalid(err) || + rc.IsErrParamNotFound(err) || + IsErrParamInvalid(err) || + cause == fs.ErrorIsFile || + cause == fs.ErrorNotAFile || + cause == fs.ErrorDirectoryNotEmpty || + cause == fs.ErrorDirExists || + cause == fs.ErrorListBucketRequired +} + +// nolint: errorlint +func isNotFoundErr(err error) bool { + cause := errors.Cause(err) + return cause == fs.ErrorDirNotFound || + cause == fs.ErrorObjectNotFound || + cause == fs.ErrorNotFoundInConfigFile || + cause == errJobNotFound +} + +func isForbiddenErr(err error) bool { + return os.IsPermission(errors.Cause(err)) +} + +const ( + bodySizeLimit int64 = 1024 * 1024 + notFoundJSON = `{"message":"Not found","status":404}` +) + +// ServeHTTP implements http.Handler interface. 
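A short sketch of driving the handler defined below. The listen address and the rate value are assumptions; "core/bwlimit" is one of the calls whitelisted in rclone_supported_calls.go:

	srv := rcserver.New()
	go func() {
		// Every rc call is an HTTP POST with parameters in a JSON body.
		_ = http.ListenAndServe("127.0.0.1:5572", srv)
	}()

	resp, err := http.Post("http://127.0.0.1:5572/core/bwlimit",
		"application/json", strings.NewReader(`{"rate":"100M"}`))
	if err == nil {
		resp.Body.Close()
	}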
+func (s Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + path := strings.TrimLeft(r.URL.Path, "/") + + if r.Method != http.MethodPost { + s.writeError(path, nil, w, errors.Errorf("method %q not allowed", r.Method), http.StatusMethodNotAllowed) + return + } + + contentType, err := parseContentType(r.Header) + if err != nil { + s.writeError(path, nil, w, errors.Wrap(err, "parse Content-Type header"), http.StatusBadRequest) + return + } + + w.Header().Set("Content-Type", "application/json") + + values := r.URL.Query() + if contentType == "application/x-www-form-urlencoded" || contentType == "application/octet-stream" { + // Parse the POST and URL parameters into r.Form, for others r.Form will be empty value + err := r.ParseForm() + if err != nil { + s.writeError(path, nil, w, errors.Wrap(err, "parse form/URL parameters"), http.StatusBadRequest) + return + } + values = r.Form + } + + // Merge POST and URL parameters into in + in := make(rc.Params) + for k, vs := range values { + if len(vs) > 0 { + in[k] = vs[len(vs)-1] + } + } + // Add additional parameters to pass down to a function + extra := make(rc.Params) + + // Parse a JSON blob from the input + if contentType == "application/json" { + j, err := io.ReadAll(&io.LimitedReader{R: r.Body, N: bodySizeLimit}) + if err != nil { + s.writeError(path, in, w, errors.Wrap(err, "read request body"), http.StatusBadRequest) + return + } + if len(j) > 0 { + if err := json.Unmarshal(j, &in); err != nil { + s.writeError(path, in, w, errors.Wrap(err, "read input JSON"), http.StatusBadRequest) + return + } + } + } + + // Find the call + call := rc.Calls.Get(path) + if call == nil { + agentUnexposedAccess.With(prometheus.Labels{"addr": r.RemoteAddr, "path": path}).Inc() + fs.Errorf(nil, "SECURITY call to unexported endpoint [path=%s, ip=%s]", path, r.RemoteAddr) + http.Error(w, notFoundJSON, http.StatusNotFound) + return + } + if call.NeedsRequest { + extra["_request"] = r + } + if call.NeedsResponse { + extra["_response"] = w + } + fn := call.Fn + + if err := validateFsName(in); err != nil { + s.writeError(path, in, w, err, http.StatusBadRequest) + return + } + + // Check to see if it is async or not + isAsync, err := in.GetBool("_async") + if rc.NotErrParamNotFound(err) { + s.writeError(path, in, w, err, http.StatusBadRequest) + return + } + + fs.Debugf(nil, "rc: %q: with parameters %+v", path, in) + var ( + out rc.Params + jobID int64 + ) + + // Merge in and extra to one + var inExt rc.Params + if len(extra) == 0 { + inExt = in + } else { + inExt = in.Copy() + for k, v := range extra { + inExt[k] = v + } + } + + if isAsync { + out, err = jobs.StartAsyncJob(fn, inExt) + jobID = out["jobid"].(int64) + } else { + out, err = fn(r.Context(), inExt) + } + + if rc.IsErrParamNotFound(err) || errors.Is(err, ErrNotFound) { + s.writeError(path, in, w, err, http.StatusNotFound) + return + } else if err != nil { + s.writeError(path, in, w, err, http.StatusInternalServerError) + return + } + if out == nil { + out = make(rc.Params) + } + + fs.Debugf(nil, "rc: %q: reply %+v: %v", path, out, err) + w.Header().Add("x-rclone-jobid", fmt.Sprintf("%d", jobID)) + + if err := s.writeJSON(w, out); err != nil { + s.writeError(path, in, w, err, http.StatusInternalServerError) + return + } +} + +func parseContentType(headers http.Header) (string, error) { + if headers.Get("Content-Type") == "" { + return "", nil + } + + contentType, _, err := mime.ParseMediaType(headers.Get("Content-Type")) + if err != nil { + return "", err + } + + return contentType, nil +} + +// 
validateFsName ensures that only allowed file systems can be used in
+// parameters with file system format.
+func validateFsName(in rc.Params) error {
+	for _, name := range []string{"fs", "srcFs", "dstFs"} {
+		v, err := in.GetString(name)
+		if err != nil {
+			if rc.IsErrParamNotFound(err) {
+				continue
+			}
+			return err
+		}
+		_, remote, _, err := fs.ParseRemote(v)
+		if err != nil {
+			return err
+		}
+		if !rclone.HasProvider(remote) {
+			return errParamInvalid{errors.Errorf("invalid provider %s in %s param", remote, name)}
+		}
+	}
+	return nil
+}
+
+type errParamInvalid struct {
+	error
+}
+
+// IsErrParamInvalid checks if the provided error is an errParamInvalid.
+// Added as a workaround for the private error field of fs.ErrParamInvalid.
+func IsErrParamInvalid(err error) bool {
+	_, ok := err.(errParamInvalid) // nolint: errorlint
+	return ok
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/activation.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/activation.go
new file mode 100644
index 00000000000..af5d586e973
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/activation.go
@@ -0,0 +1,112 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scheduler
+
+import (
+	"container/heap"
+	"time"
+
+	"github.com/scylladb/scylla-manager/v3/pkg/util/uuid"
+)
+
+// Activation represents when a Key will be executed.
+// Properties are optional; they are only set for retries to ensure we retry
+// the same thing.
+// Stop is also optional; if present it specifies the window end.
+// Parametrized by scheduler key type.
+type Activation[K comparable] struct {
+	time.Time
+	Key        K
+	Retry      int8
+	Properties Properties
+	Stop       time.Time
+}
+
+// activationHeap implements heap.Interface.
+// The activations are sorted by time in ascending order.
+type activationHeap[K comparable] []Activation[K]
+
+// uuid.UUID key type is used in pkg/service/scheduler package.
+var _ heap.Interface = (*activationHeap[uuid.UUID])(nil)
+
+func (h activationHeap[_]) Len() int { return len(h) }
+
+func (h activationHeap[_]) Less(i, j int) bool {
+	return h[i].Time.Before(h[j].Time)
+}
+
+func (h activationHeap[_]) Swap(i, j int) {
+	h[i], h[j] = h[j], h[i]
+}
+
+func (h *activationHeap[K]) Push(x interface{}) {
+	*h = append(*h, x.(Activation[K]))
+}
+
+func (h *activationHeap[_]) Pop() interface{} {
+	old := *h
+	n := len(old)
+	item := old[n-1]
+	*h = old[0 : n-1]
+	return item
+}
+
+// activationQueue is a priority queue based on activationHeap.
+// There may be only a single activation for a given activation key.
+// On Push, if the key exists it is updated.
+type activationQueue[K comparable] struct {
+	h activationHeap[K]
+}
+
+func newActivationQueue[K comparable]() *activationQueue[K] {
+	return &activationQueue[K]{
+		h: []Activation[K]{},
+	}
+}
+
+// Push returns true iff the head was changed.
+func (q *activationQueue[K]) Push(a Activation[K]) bool {
+	if idx := q.find(a.Key); idx >= 0 {
+		[]Activation[K](q.h)[idx] = a
+		heap.Fix(&q.h, idx)
+	} else {
+		heap.Push(&q.h, a)
+	}
+	return q.h[0].Key == a.Key
+}
+
+func (q *activationQueue[K]) Pop() (Activation[K], bool) {
+	if len(q.h) == 0 {
+		return Activation[K]{}, false
+	}
+	return heap.Pop(&q.h).(Activation[K]), true
+}
+
+func (q *activationQueue[K]) Top() (Activation[K], bool) {
+	if len(q.h) == 0 {
+		return Activation[K]{}, false
+	}
+	return []Activation[K](q.h)[0], true
+}
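An in-package sketch of the queue contract (the types are unexported, so this would live in a scheduler package test; keys are illustrative):

	q := newActivationQueue[string]()
	t0 := time.Now()

	_ = q.Push(Activation[string]{Key: "repair", Time: t0.Add(time.Hour)})     // true: new head
	_ = q.Push(Activation[string]{Key: "backup", Time: t0.Add(2 * time.Hour)}) // false: sorts after head
	_ = q.Push(Activation[string]{Key: "backup", Time: t0.Add(time.Minute)})   // true: update moves the key to head

	if a, ok := q.Top(); ok {
		fmt.Println(a.Key) // backup
	}

+// Remove returns true iff the head was changed.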
+func (q *activationQueue[K]) Remove(key K) bool { + idx := q.find(key) + if idx >= 0 { + heap.Remove(&q.h, idx) + } + return idx == 0 +} + +func (q *activationQueue[K]) find(key K) int { + for i, v := range q.h { + if v.Key == key { + return i + } + } + return -1 +} + +func (q *activationQueue[_]) Size() int { + return len(q.h) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/listener.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/listener.go new file mode 100644 index 00000000000..c7f20783386 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/listener.go @@ -0,0 +1,102 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "context" + "time" + + "github.com/scylladb/go-log" +) + +// Listener specifies pluggable hooks for scheduler events. +// Parametrized by scheduler key type. +type Listener[K comparable] interface { + OnSchedulerStart(context.Context) + OnSchedulerStop(context.Context) + OnRunStart(ctx *RunContext[K]) + OnRunSuccess(ctx *RunContext[K]) + OnRunStop(ctx *RunContext[K], err error) + OnRunWindowEnd(ctx *RunContext[K], err error) + OnRunError(ctx *RunContext[K], err error) + OnSchedule(ctx context.Context, key K, begin, end time.Time, retno int8) + OnUnschedule(ctx context.Context, key K) + OnTrigger(ctx context.Context, key K, success bool) + OnStop(ctx context.Context, key K) + OnRetryBackoff(ctx context.Context, key K, backoff time.Duration, retno int8) + OnNoTrigger(ctx context.Context, key K) + OnSleep(ctx context.Context, key K, d time.Duration) +} + +type nopListener[K comparable] struct{} + +func (l nopListener[_]) OnSchedulerStart(context.Context) { +} + +func (l nopListener[_]) OnSchedulerStop(context.Context) { +} + +func (l nopListener[K]) OnRunStart(*RunContext[K]) { +} + +func (l nopListener[K]) OnRunSuccess(*RunContext[K]) { +} + +func (l nopListener[K]) OnRunStop(*RunContext[K], error) { +} + +func (l nopListener[K]) OnRunWindowEnd(*RunContext[K], error) { +} + +func (l nopListener[K]) OnRunError(*RunContext[K], error) { +} + +func (l nopListener[K]) OnSchedule(_ context.Context, _ K, _, _ time.Time, _ int8) { +} + +func (l nopListener[K]) OnUnschedule(context.Context, K) { +} + +func (l nopListener[K]) OnTrigger(context.Context, K, bool) { +} + +func (l nopListener[K]) OnStop(context.Context, K) { +} + +func (l nopListener[K]) OnRetryBackoff(context.Context, K, time.Duration, int8) { +} + +func (l nopListener[K]) OnNoTrigger(context.Context, K) { +} + +func (l nopListener[K]) OnSleep(context.Context, K, time.Duration) { +} + +// NopListener returns a Listener implementation that has no effects. +func NopListener[K comparable]() Listener[K] { + return nopListener[K]{} +} + +type errorLogListener[K comparable] struct { + nopListener[K] + logger log.Logger +} + +func (l errorLogListener[K]) OnRunError(ctx *RunContext[K], err error) { + l.logger.Error(ctx, "OnRunError", "key", ctx.Key, "retry", ctx.Retry, "error", err) +} + +func (l errorLogListener[K]) OnRunWindowEnd(ctx *RunContext[K], err error) { + l.logger.Info(ctx, "OnRunWindowEnd", "key", ctx.Key, "retry", ctx.Retry, "error", err) +} + +func (l errorLogListener[K]) OnRunStop(ctx *RunContext[K], err error) { + l.logger.Info(ctx, "OnRunStop", "key", ctx.Key, "retry", ctx.Retry, "error", err) +} + +// ErrorLogListener returns listener that logs errors. 
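A custom listener only needs to override the hooks it cares about by embedding nopListener, the same pattern errorLogListener uses above. A hypothetical in-package example:

	type countingListener[K comparable] struct {
		nopListener[K]
		errs atomic.Int64 // import "sync/atomic"
	}

	// OnRunError counts failed runs; all other hooks fall through to nopListener.
	func (l *countingListener[K]) OnRunError(ctx *RunContext[K], err error) {
		l.errs.Add(1)
	}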
+func ErrorLogListener[K comparable](logger log.Logger) Listener[K] {
+	return errorLogListener[K]{
+		logger: logger,
+	}
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/scheduler.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/scheduler.go
new file mode 100644
index 00000000000..65de2887be9
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/scheduler.go
@@ -0,0 +1,422 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scheduler
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/scylladb/scylla-manager/v3/pkg/util/retry"
+)
+
+// Properties are externally defined task parameters.
+type Properties = any
+
+// RunContext is a bundle of Context, Key, Properties and additional runtime
+// information.
+type RunContext[K comparable] struct {
+	context.Context //nolint:containedctx
+	Key        K
+	Properties Properties
+	Retry      int8
+
+	err error
+}
+
+func newRunContext[K comparable](key K, properties Properties, stop time.Time) (*RunContext[K], context.CancelFunc) {
+	var (
+		ctx    context.Context
+		cancel context.CancelFunc
+	)
+	if stop.IsZero() {
+		ctx, cancel = context.WithCancel(context.Background())
+	} else {
+		ctx, cancel = context.WithDeadline(context.Background(), stop)
+	}
+
+	return &RunContext[K]{
+		Context:    ctx,
+		Key:        key,
+		Properties: properties,
+	}, cancel
+}
+
+// RunFunc specifies the interface for key execution.
+// When the provided context is cancelled the function must return with a
+// context.Canceled error, or an error caused by it.
+// Compatible functions can be passed to the Scheduler constructor.
+type RunFunc[K comparable] func(ctx RunContext[K]) error
+
+// Trigger provides the next activation date.
+// Implementations must return the same values for the same now parameter.
+// A zero time can be returned to indicate no more executions.
+type Trigger interface {
+	Next(now time.Time) time.Time
+}
+
+// Details holds Properties, Trigger and auxiliary Key configuration.
+type Details struct {
+	Properties Properties
+	Trigger    Trigger
+	Backoff    retry.Backoff
+	Window     Window
+	Location   *time.Location
+}
+
+// Scheduler manages keys and triggers.
+// A key uniquely identifies a scheduler task.
+// There can be a single instance of a key scheduled or running at all times.
+// Scheduler gets the next activation time for a key from a trigger.
+// On key activation the RunFunc is called.
+type Scheduler[K comparable] struct {
+	now      func() time.Time
+	run      RunFunc[K]
+	listener Listener[K]
+	timer    *time.Timer
+
+	queue   *activationQueue[K]
+	details map[K]Details
+	running map[K]context.CancelFunc
+	closed  bool
+	mu      sync.Mutex
+
+	wakeupCh chan struct{}
+	wg       sync.WaitGroup
+}
+
+func NewScheduler[K comparable](now func() time.Time, run RunFunc[K], listener Listener[K]) *Scheduler[K] {
+	return &Scheduler[K]{
+		now:      now,
+		run:      run,
+		listener: listener,
+		timer:    time.NewTimer(0),
+		queue:    newActivationQueue[K](),
+		details:  make(map[K]Details),
+		running:  make(map[K]context.CancelFunc),
+		wakeupCh: make(chan struct{}, 1),
+	}
+}
+
+// Schedule updates properties and trigger of an existing key or adds a new key.
+func (s *Scheduler[K]) Schedule(ctx context.Context, key K, d Details) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.details[key] = d
+	// If the key is currently running it will be rescheduled when the run
+	// finishes, in reschedule.
+	if _, running := s.running[key]; running {
+		return
+	}
+
+	now := s.now()
+	if d.Location != nil {
+		now = now.In(d.Location)
+	}
+	next := d.Trigger.Next(now)
+
+	s.scheduleLocked(ctx, key, next, 0, nil, d.Window)
+}
+
+func (s *Scheduler[K]) reschedule(ctx *RunContext[K]) {
+	key := ctx.Key
+
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	cancel, ok := s.running[key]
+	if ok {
+		cancel()
+	}
+	delete(s.running, key)
+
+	if s.closed {
+		return
+	}
+	d, ok := s.details[key]
+	if !ok {
+		return
+	}
+
+	now := s.now()
+	if d.Location != nil {
+		now = now.In(d.Location)
+	}
+	next := d.Trigger.Next(now)
+
+	var (
+		retno int8
+		p     Properties
+	)
+	switch {
+	case shouldContinue(ctx.err):
+		next = now
+		retno = ctx.Retry
+		p = ctx.Properties
+	case shouldRetry(ctx.err):
+		if d.Backoff != nil {
+			if b := d.Backoff.NextBackOff(); b != retry.Stop {
+				next = now.Add(b)
+				retno = ctx.Retry + 1
+				p = ctx.Properties
+				s.listener.OnRetryBackoff(ctx, key, b, retno)
+			}
+		}
+	default:
+		if d.Backoff != nil {
+			d.Backoff.Reset()
+		}
+	}
+	s.scheduleLocked(ctx, key, next, retno, p, d.Window)
+}
+
+func shouldContinue(err error) bool {
+	return errors.Is(err, context.DeadlineExceeded)
+}
+
+func shouldRetry(err error) bool {
+	return !(err == nil || errors.Is(err, context.Canceled) || retry.IsPermanent(err))
+}
+
+func (s *Scheduler[K]) scheduleLocked(ctx context.Context, key K, next time.Time, retno int8, p Properties, w Window) {
+	if next.IsZero() {
+		s.listener.OnNoTrigger(ctx, key)
+		s.unscheduleLocked(key)
+		return
+	}
+
+	begin, end := w.Next(next)
+
+	s.listener.OnSchedule(ctx, key, begin, end, retno)
+	a := Activation[K]{Key: key, Time: begin, Retry: retno, Properties: p, Stop: end}
+	if s.queue.Push(a) {
+		s.wakeup()
+	}
+}
+
+// Unschedule cancels the schedule of a key. It does not stop an active run.
+func (s *Scheduler[K]) Unschedule(ctx context.Context, key K) {
+	s.listener.OnUnschedule(ctx, key)
+	s.mu.Lock()
+	s.unscheduleLocked(key)
+	s.mu.Unlock()
+}
+
+func (s *Scheduler[K]) unscheduleLocked(key K) {
+	delete(s.details, key)
+	if s.queue.Remove(key) {
+		s.wakeup()
+	}
+}
+
+// Trigger immediately runs a scheduled key.
+// If the key is already running the call has no effect and true is returned.
+// If the key is not scheduled the call has no effect and false is returned.
+func (s *Scheduler[K]) Trigger(ctx context.Context, key K) bool {
+	s.mu.Lock()
+	if _, running := s.running[key]; running {
+		s.mu.Unlock()
+		s.listener.OnTrigger(ctx, key, true)
+		return true
+	}
+
+	if s.queue.Remove(key) {
+		s.wakeup()
+	}
+	_, ok := s.details[key]
+	var runCtx *RunContext[K]
+	if ok {
+		runCtx = s.newRunContextLocked(Activation[K]{Key: key})
+	}
+	s.mu.Unlock()
+
+	s.listener.OnTrigger(ctx, key, ok)
+	if ok {
+		s.asyncRun(runCtx)
+	}
+	return ok
+}
+
+// Stop notifies RunFunc to stop by cancelling the context.
+func (s *Scheduler[K]) Stop(ctx context.Context, key K) {
+	s.listener.OnStop(ctx, key)
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if cancel, ok := s.running[key]; ok {
+		cancel()
+	}
+}
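A usage sketch tying the pieces together (the run function, key and cron spec are illustrative; trigger.NewCron is defined later in this diff):

	run := func(ctx scheduler.RunContext[string]) error {
		fmt.Println("running", ctx.Key, "retry", ctx.Retry)
		return nil
	}
	s := scheduler.NewScheduler[string](time.Now, run, scheduler.NopListener[string]())

	tr, err := trigger.NewCron("@every 1h")
	if err != nil {
		panic(err)
	}
	s.Schedule(context.Background(), "backup", scheduler.Details{Trigger: tr})
	go s.Start(context.Background())

	// Shutdown: Close cancels runs and reports what was in flight or queued.
	running, pending := s.Close()
	fmt.Println("canceled:", running, "pending:", pending)
	s.Wait()

+// Close makes the Start function exit and stops all runs; call Wait to wait
+// for the runs to return.
+// It returns two sets of keys: the running ones that were canceled and the
+// pending ones that were scheduled to run.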
+func (s *Scheduler[K]) Close() (running, pending []K) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.closed = true
+	s.wakeup()
+	for k, cancel := range s.running {
+		running = append(running, k)
+		cancel()
+	}
+	for _, a := range s.queue.h {
+		pending = append(pending, a.Key)
+	}
+	return
+}
+
+// Wait waits for runs to return; call it after Close.
+func (s *Scheduler[_]) Wait() {
+	s.wg.Wait()
+}
+
+// Activations returns activation information for given keys.
+func (s *Scheduler[K]) Activations(keys ...K) []Activation[K] {
+	pos := make(map[K]int, len(keys))
+	for i, k := range keys {
+		pos[k] = i
+	}
+	r := make([]Activation[K], len(keys))
+	s.mu.Lock()
+	for _, a := range s.queue.h {
+		if i, ok := pos[a.Key]; ok {
+			r[i] = a
+		}
+	}
+	s.mu.Unlock()
+	return r
+}
+
+// Start is the scheduler's main loop.
+func (s *Scheduler[_]) Start(ctx context.Context) {
+	s.listener.OnSchedulerStart(ctx)
+
+	for {
+		var d time.Duration
+		s.mu.Lock()
+		if s.closed {
+			s.mu.Unlock()
+			break
+		}
+		top, ok := s.queue.Top()
+		s.mu.Unlock()
+
+		if !ok {
+			d = -1
+		} else {
+			d = s.activateIn(top)
+		}
+		s.listener.OnSleep(ctx, top.Key, d)
+		if !s.sleep(ctx, d) {
+			if ctx.Err() != nil {
+				break
+			}
+			continue
+		}
+
+		s.mu.Lock()
+		a, ok := s.queue.Pop()
+		if a.Key != top.Key {
+			if ok {
+				s.queue.Push(a)
+			}
+			s.mu.Unlock()
+			continue
+		}
+		runCtx := s.newRunContextLocked(a)
+		s.mu.Unlock()
+
+		s.asyncRun(runCtx)
+	}
+
+	s.listener.OnSchedulerStop(ctx)
+}
+
+func (s *Scheduler[K]) activateIn(a Activation[K]) time.Duration {
+	d := a.Sub(s.now())
+	if d < 0 {
+		d = 0
+	}
+	return d
+}
+
+// sleep waits for one of the following events: the context is cancelled, the
+// duration expires (if d >= 0), or the wakeup function is called.
+// If d < 0 the timer is disabled.
+// Returns true iff the timer expired.
+func (s *Scheduler[_]) sleep(ctx context.Context, d time.Duration) bool { + if !s.timer.Stop() { + select { + case <-s.timer.C: + default: + } + } + + if d == 0 { + return true + } + + var timer <-chan time.Time + if d > 0 { + s.timer.Reset(d) + timer = s.timer.C + } + + select { + case <-ctx.Done(): + return false + case <-s.wakeupCh: + return false + case <-timer: + return true + } +} + +func (s *Scheduler[_]) wakeup() { + select { + case s.wakeupCh <- struct{}{}: + default: + } +} + +func (s *Scheduler[K]) newRunContextLocked(a Activation[K]) *RunContext[K] { + var p Properties + if a.Properties != nil { + p = a.Properties + } else { + p = s.details[a.Key].Properties + } + + ctx, cancel := newRunContext(a.Key, p, a.Stop) + ctx.Retry = a.Retry + s.running[a.Key] = cancel + return ctx +} + +func (s *Scheduler[K]) asyncRun(ctx *RunContext[K]) { + s.listener.OnRunStart(ctx) + s.wg.Add(1) + go func(ctx *RunContext[K]) { + defer s.wg.Done() + ctx.err = s.run(*ctx) + s.onRunEnd(ctx) + s.reschedule(ctx) + }(ctx) +} + +func (s *Scheduler[K]) onRunEnd(ctx *RunContext[K]) { + err := ctx.err + switch { + case err == nil: + s.listener.OnRunSuccess(ctx) + case errors.Is(err, context.Canceled): + s.listener.OnRunStop(ctx, err) + case errors.Is(err, context.DeadlineExceeded): + s.listener.OnRunWindowEnd(ctx, err) + default: + s.listener.OnRunError(ctx, err) + } +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/cron.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/cron.go new file mode 100644 index 00000000000..5dcb9e65473 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/cron.go @@ -0,0 +1,15 @@ +// Copyright (C) 2017 ScyllaDB + +package trigger + +import ( + "github.com/robfig/cron/v3" + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" +) + +var cronParser = cron.NewParser(cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor) + +// NewCron returns a cron Trigger for a given spec. +func NewCron(spec string) (scheduler.Trigger, error) { + return cronParser.Parse(spec) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/legacy.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/legacy.go new file mode 100644 index 00000000000..6ed4e773ee8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/legacy.go @@ -0,0 +1,34 @@ +// Copyright (C) 2017 ScyllaDB + +package trigger + +import ( + "time" + + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" +) + +type legacy struct { + startDate time.Time + interval time.Duration +} + +// NewLegacy returns Trigger based on interval duration that was used in +// Scylla Manager 2.x and before. 
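A small sketch of the semantics implemented by Next below: the first activation is startDate, after which activations repeat every interval (the dates are illustrative):

	start := time.Date(2024, 4, 1, 2, 0, 0, 0, time.UTC)
	tr := trigger.NewLegacy(start, 24*time.Hour)

	now := time.Date(2024, 4, 3, 9, 0, 0, 0, time.UTC)
	next := tr.Next(now) // 2024-04-04 02:00:00 UTC, the first start+N*interval after now
	_ = next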
+func NewLegacy(startDate time.Time, interval time.Duration) scheduler.Trigger { + return legacy{startDate: startDate, interval: interval} +} + +func (l legacy) Next(now time.Time) time.Time { + if l.startDate.After(now) { + return l.startDate + } + if l.interval == 0 { + return time.Time{} + } + lastStart := l.startDate.Add(now.Sub(l.startDate).Round(l.interval)) + for lastStart.Before(now) { + lastStart = lastStart.Add(l.interval) + } + return lastStart +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/multi.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/multi.go new file mode 100644 index 00000000000..50cd0acab09 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/multi.go @@ -0,0 +1,29 @@ +// Copyright (C) 2017 ScyllaDB + +package trigger + +import ( + "time" + + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" +) + +type multi []scheduler.Trigger + +// NewMulti returns a trigger that joins multiple triggers. +func NewMulti(t ...scheduler.Trigger) scheduler.Trigger { + return multi(t) +} + +func (m multi) Next(now time.Time) time.Time { + var min time.Time + for _, t := range m { + next := t.Next(now) + if min.IsZero() { + min = next + } else if !next.IsZero() && next.Before(min) { + min = next + } + } + return min +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/once.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/once.go new file mode 100644 index 00000000000..6eef8ab5294 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger/once.go @@ -0,0 +1,29 @@ +// Copyright (C) 2017 ScyllaDB + +package trigger + +import ( + "time" + + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" + "go.uber.org/atomic" +) + +type once struct { + v *atomic.Bool +} + +// NewOnce creates a trigger that fires once at a specified time. +// There is a sub second threshold to enable starting once now. +func NewOnce() scheduler.Trigger { + return once{ + v: atomic.NewBool(false), + } +} + +func (o once) Next(now time.Time) time.Time { + if o.v.CAS(false, true) { + return now + } + return time.Time{} +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/window.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/window.go new file mode 100644 index 00000000000..cf33631ba3e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scheduler/window.go @@ -0,0 +1,262 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" +) + +// EachDay is a special weekday marker that matches any day. +const EachDay = time.Weekday(7) + +// WeekdayTime specifies weekday and time in that day. +// The time must be less than 24h. 
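The textual form handled by MarshalText/UnmarshalText below is "[Day-]HH:MM", where the day prefix is optional and its absence means each day. A quick sketch:

	var wdt scheduler.WeekdayTime
	if err := wdt.UnmarshalText([]byte("sat-23:00")); err != nil {
		panic(err)
	}
	// wdt.Weekday == time.Saturday, wdt.Time == 23*time.Hour

	b, _ := wdt.MarshalText() // "Sat-23:00"
	_ = b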
+type WeekdayTime struct { + Weekday time.Weekday + Time time.Duration +} + +func (i WeekdayTime) MarshalText() (text []byte, err error) { + day := weekday[i.Weekday] + if day != "" { + day += "-" + } + var hh, mm int + m := int(i.Time.Minutes()) + hh = m / 60 + mm = m % 60 + return []byte(fmt.Sprintf("%s%d:%02d", day, hh, mm)), nil +} + +var ( + weekdayTimeRegexp = regexp.MustCompile("(?i)^((Mon|Tue|Wed|Thu|Fri|Sat|Sun)-)?([0-9]{1,2}):([0-9]{2})$") + weekday = map[time.Weekday]string{ + time.Monday: "Mon", + time.Tuesday: "Tue", + time.Wednesday: "Wed", + time.Thursday: "Thu", + time.Friday: "Fri", + time.Saturday: "Sat", + time.Sunday: "Sun", + } + weekdayRev = map[string]time.Weekday{ + "": EachDay, + "mon": time.Monday, + "tue": time.Tuesday, + "wed": time.Wednesday, + "thu": time.Thursday, + "fri": time.Friday, + "sat": time.Saturday, + "sun": time.Sunday, + } +) + +func (i *WeekdayTime) UnmarshalText(text []byte) error { + m := weekdayTimeRegexp.FindSubmatch(text) + if len(m) == 0 { + return errors.New("invalid format") + } + var wdt WeekdayTime + + w, ok := weekdayRev[strings.ToLower(string(m[2]))] + if !ok { + return errors.Errorf("unknown day of week %q", string(m[2])) + } + wdt.Weekday = w + + hh, _ := strconv.Atoi(string(m[3])) // nolint: errcheck + if hh >= 24 { + return errors.Errorf("invalid hour %d", hh) + } + mm, _ := strconv.Atoi(string(m[4])) // nolint: errcheck + if mm >= 60 { + return errors.Errorf("invalid minute %d", mm) + } + wdt.Time = time.Duration(hh*60+mm) * time.Minute + + *i = wdt + return nil +} + +const day = 24 * time.Hour + +// Next returns the closest time after now that matches the weekday and time. +// It is Location aware, the same time in different locations will have +// different results. +func (i WeekdayTime) Next(now time.Time) time.Time { + t := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, now.Location()) + w := i.Weekday - t.Weekday() + if w < 0 || w == 0 && now.Sub(t) > i.Time { + w += 7 + } + + d := t.Add(time.Duration(w) * day) + + return time.Date(d.Year(), d.Month(), d.Day(), int(i.Time.Hours()), int(i.Time.Minutes())%60, + int(i.Time.Seconds())%60, int(i.Time.Nanoseconds())%1e9, t.Location()) +} + +type slot struct { + Begin WeekdayTime + End WeekdayTime + pos int +} + +func (s slot) validate() error { + if s.Begin.Weekday == EachDay || s.End.Weekday == EachDay { + if s.Begin.Weekday != s.End.Weekday { + return errors.New("begin and end must be each day") + } + } + if s.Begin.Time >= 24*time.Hour { + return errors.New("time must be less than 24h") + } + if s.End.Time >= 24*time.Hour { + return errors.New("time must be less than 24h") + } + if s.Begin == s.End { + return errors.New("equal") + } + return nil +} + +func (s slot) expand() []slot { + if s.Begin.Weekday != EachDay { + return []slot{s} + } + w := make([]slot, 7) + for i := 0; i < 7; i++ { + w[i].Begin.Weekday = time.Weekday(i) + w[i].Begin.Time = s.Begin.Time + w[i].End.Weekday = time.Weekday(i) + w[i].End.Time = s.End.Time + if s.Begin.Time > s.End.Time { + w[i].End.Weekday++ + w[i].End.Weekday %= 7 + } + } + return w +} + +// Window specifies repeatable time windows when scheduler can run a function. +// When window ends the scheduler schedules a continuation in a next window. 
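A sketch of building and querying a window from begin/end pairs via NewWindow below (a weekend-only slot; the times are illustrative):

	w, err := scheduler.NewWindow(
		scheduler.WeekdayTime{Weekday: time.Saturday, Time: 0},
		scheduler.WeekdayTime{Weekday: time.Monday, Time: 6 * time.Hour},
	)
	if err != nil {
		panic(err)
	}
	begin, end := w.Next(time.Now())
	_, _ = begin, end // end is always after now; begin may be in the past if now is inside the slot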
+type Window []slot + +func NewWindow(wdt ...WeekdayTime) (Window, error) { + if len(wdt) == 0 { + return nil, errors.New("empty") + } + if len(wdt)%2 != 0 { + return nil, errors.New("number of points must be even") + } + + l := len(wdt) / 2 + w := make(Window, 0, l) + for i := 0; i < l; i++ { + j := 2 * i + s := slot{ + Begin: wdt[j], + End: wdt[j+1], + pos: j, + } + if err := s.validate(); err != nil { + return nil, errors.Wrapf(err, "[%d,%d]", j, j+1) + } + w = append(w, s.expand()...) + } + + index := func(i int) int64 { + return w[i].Begin.Next(time.Time{}).UnixNano() + } + sort.Slice(w, func(i, j int) bool { + return index(i) < index(j) + }) + + return joinSlots(w), nil +} + +func joinSlots(w Window) Window { + out := make(Window, 0, len(w)) + out = append(out, w[0]) + + cur := func() int { + return len(out) - 1 + } + + // Keep rolling time and join overlapping slots. + t := time.Time{} + for i := 1; i < len(w); i++ { + b := w[i].Begin.Next(t) + e := out[cur()].End.Next(t) + if b.After(e) { + out = append(out, w[i]) + t = b + } else { // nolint: gocritic + if ew := w[i].End.Next(t); ew.After(e) { + out[cur()].End = w[i].End + } + } + } + // Wrap around and see if the last element can ingest the first. + if len(out) > 1 { + b := out[0].Begin.Next(t) + e := out[cur()].End.Next(t) + if !b.After(e) { + if ew := out[0].End.Next(t); ew.After(e) { + out[cur()].End = out[0].End + out = out[1:] + } + } + } + + return out +} + +// Next returns the closest open slot begin and end time given now value. +// The end time is always > now, begin may be before now in case now is +// inside an open slot. +func (w Window) Next(now time.Time) (begin, end time.Time) { + if w == nil { + return now, time.Time{} + } + + // To find the smallest value of w[i].End.Time(now) over i we use + // binary search on a helper function that compares n-th value with 0-th + // value. The calculated values are sorted but may be shifted. + // + // Ex 1, now=W + // + // M | T | W | T | F | S | S + // --+---+---+---+---+---+-- + // 5 | 6 | 0 | 1 | 2 | 3 | 4 (values) + // 0 | 0 | 1 | 1 | 1 | 1 | 1 (indicator) + // + // Ex 2, now=S + // --+---+---+---+---+---+-- + // 1 | 2 | 3 | 4 | 5 | 6 | 0 (values) + // 0 | 0 | 0 | 0 | 0 | 0 | 1 (indicator) + + u0 := w[0].End.Next(now).Unix() + i := sort.Search(len(w), func(i int) bool { + u := w[i].End.Next(now).Unix() + return u < u0 + }) + if i == len(w) { + i = 0 + } + + begin = w[i].Begin.Next(now) + end = w[i].End.Next(now) + + if begin.After(end) { + begin = now + } + return // nolint: nakedret +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/generate.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/generate.go new file mode 100644 index 00000000000..8a6f721e378 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/generate.go @@ -0,0 +1,5 @@ +// Copyright (C) 2017 ScyllaDB + +package table + +//go:generate ../../../bin/schemagen -keyspace scylla_manager -output . -pkgname table diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/table.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/table.go new file mode 100644 index 00000000000..476c630a2dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/table.go @@ -0,0 +1,342 @@ +// Code generated by "gocqlx/cmd/schemagen"; DO NOT EDIT. + +package table + +import "github.com/scylladb/gocqlx/v2/table" + +// Table models. 
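A sketch of consuming this metadata with the gocqlx/v2 query builders (session, cid, tid and the row struct are placeholders, not part of the package):

	// Select all backup runs of a task using the generated table metadata.
	stmt, names := table.BackupRun.Select()
	q := session.Query(stmt, names).BindMap(qb.M{
		"cluster_id": cid,
		"task_id":    tid,
	})

	var runs []backupRunRow // hypothetical struct matching the columns above
	if err := q.SelectRelease(&runs); err != nil {
		// handle error
	}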
+var ( + BackupRun = table.New(table.Metadata{ + Name: "backup_run", + Columns: []string{ + "cluster_id", + "dc", + "id", + "location", + "nodes", + "prev_id", + "snapshot_tag", + "stage", + "start_time", + "task_id", + "units", + }, + PartKey: []string{ + "cluster_id", + "task_id", + }, + SortKey: []string{ + "id", + }, + }) + + BackupRunProgress = table.New(table.Metadata{ + Name: "backup_run_progress", + Columns: []string{ + "agent_job_id", + "cluster_id", + "completed_at", + "error", + "failed", + "host", + "run_id", + "size", + "skipped", + "started_at", + "table_name", + "task_id", + "unit", + "uploaded", + }, + PartKey: []string{ + "cluster_id", + "task_id", + "run_id", + }, + SortKey: []string{ + "host", + "unit", + "table_name", + }, + }) + + Cluster = table.New(table.Metadata{ + Name: "cluster", + Columns: []string{ + "auth_token", + "id", + "known_hosts", + "name", + "port", + "force_tls_disabled", + "force_non_ssl_session_port", + "host", + }, + PartKey: []string{ + "id", + }, + SortKey: []string{}, + }) + + Drawer = table.New(table.Metadata{ + Name: "drawer", + Columns: []string{ + "cluster_id", + "key", + "value", + }, + PartKey: []string{ + "cluster_id", + }, + SortKey: []string{ + "key", + }, + }) + + GocqlxMigrate = table.New(table.Metadata{ + Name: "gocqlx_migrate", + Columns: []string{ + "checksum", + "done", + "end_time", + "name", + "start_time", + }, + PartKey: []string{ + "name", + }, + SortKey: []string{}, + }) + + RepairRun = table.New(table.Metadata{ + Name: "repair_run", + Columns: []string{ + "cluster_id", + "dc", + "end_time", + "host", + "id", + "intensity", + "parallel", + "prev_id", + "start_time", + "task_id", + }, + PartKey: []string{ + "cluster_id", + "task_id", + }, + SortKey: []string{ + "id", + }, + }) + + RepairRunProgress = table.New(table.Metadata{ + Name: "repair_run_progress", + Columns: []string{ + "cluster_id", + "completed_at", + "duration", + "duration_started_at", + "error", + "host", + "keyspace_name", + "run_id", + "size", + "started_at", + "success", + "table_name", + "task_id", + "token_ranges", + }, + PartKey: []string{ + "cluster_id", + "task_id", + "run_id", + }, + SortKey: []string{ + "host", + "keyspace_name", + "table_name", + }, + }) + + RepairRunState = table.New(table.Metadata{ + Name: "repair_run_state", + Columns: []string{ + "cluster_id", + "keyspace_name", + "run_id", + "success_ranges", + "table_name", + "task_id", + }, + PartKey: []string{ + "cluster_id", + "task_id", + "run_id", + }, + SortKey: []string{ + "keyspace_name", + "table_name", + }, + }) + + RestoreRun = table.New(table.Metadata{ + Name: "restore_run", + Columns: []string{ + "cluster_id", + "id", + "keyspace_name", + "location", + "manifest_path", + "prev_id", + "repair_task_id", + "snapshot_tag", + "stage", + "table_name", + "task_id", + "units", + "views", + }, + PartKey: []string{ + "cluster_id", + "task_id", + }, + SortKey: []string{ + "id", + }, + }) + + RestoreRunProgress = table.New(table.Metadata{ + Name: "restore_run_progress", + Columns: []string{ + "agent_job_id", + "cluster_id", + "download_completed_at", + "download_started_at", + "downloaded", + "error", + "failed", + "host", + "keyspace_name", + "manifest_path", + "restore_completed_at", + "restore_started_at", + "run_id", + "skipped", + "sstable_id", + "table_name", + "task_id", + "versioned_progress", + }, + PartKey: []string{ + "cluster_id", + "task_id", + "run_id", + }, + SortKey: []string{ + "manifest_path", + "keyspace_name", + "table_name", + "host", + "agent_job_id", + }, + }) + + 
SchedulerTask = table.New(table.Metadata{ + Name: "scheduler_task", + Columns: []string{ + "cluster_id", + "deleted", + "enabled", + "error_count", + "id", + "last_error", + "last_success", + "name", + "properties", + "sched", + "status", + "success_count", + "tags", + "type", + }, + PartKey: []string{ + "cluster_id", + }, + SortKey: []string{ + "type", + "id", + }, + }) + + SchedulerTaskRun = table.New(table.Metadata{ + Name: "scheduler_task_run", + Columns: []string{ + "cause", + "cluster_id", + "end_time", + "id", + "owner", + "start_time", + "status", + "task_id", + "type", + }, + PartKey: []string{ + "cluster_id", + "type", + "task_id", + }, + SortKey: []string{ + "id", + }, + }) + + Secrets = table.New(table.Metadata{ + Name: "secrets", + Columns: []string{ + "cluster_id", + "key", + "value", + }, + PartKey: []string{ + "cluster_id", + }, + SortKey: []string{ + "key", + }, + }) + + ValidateBackupRunProgress = table.New(table.Metadata{ + Name: "validate_backup_run_progress", + Columns: []string{ + "broken_snapshots", + "cluster_id", + "completed_at", + "dc", + "deleted_files", + "host", + "location", + "manifests", + "missing_files", + "orphaned_bytes", + "orphaned_files", + "run_id", + "scanned_files", + "started_at", + "task_id", + }, + PartKey: []string{ + "cluster_id", + "task_id", + "run_id", + }, + SortKey: []string{ + "dc", + "host", + "location", + }, + }) +) diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/update.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/update.go new file mode 100644 index 00000000000..84c609860b5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/schema/table/update.go @@ -0,0 +1,29 @@ +// Copyright (C) 2017 ScyllaDB + +package table + +import ( + "github.com/scylladb/gocqlx/v2/table" +) + +// SchedulerTaskUpdate is subset of SchedulerTask that contain task specification, and can be used for updates. +var SchedulerTaskUpdate = table.New(table.Metadata{ + Name: "scheduler_task", + Columns: []string{ + "cluster_id", + "enabled", + "id", + "name", + "properties", + "sched", + "tags", + "type", + }, + PartKey: []string{ + "cluster_id", + }, + SortKey: []string{ + "type", + "id", + }, +}) diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client.go new file mode 100644 index 00000000000..ebf1b717c76 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client.go @@ -0,0 +1,159 @@ +// Copyright (C) 2017 ScyllaDB + +package scyllaclient + +import ( + "crypto/tls" + "net" + "net/http" + "sync" + "time" + + api "github.com/go-openapi/runtime/client" + apiMiddleware "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/hailocab/go-hostpool" + "github.com/pkg/errors" + "github.com/scylladb/go-log" + + "github.com/scylladb/scylla-manager/v3/pkg/auth" + "github.com/scylladb/scylla-manager/v3/pkg/util/httpx" + agentClient "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client" + agentOperations "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations" + scyllaClient "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client" + scyllaOperations "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations" +) + +var setOpenAPIGlobalsOnce sync.Once + +func setOpenAPIGlobals() { + setOpenAPIGlobalsOnce.Do(func() { + // Timeout is defined in http client that we provide in api.NewWithClient. 
+ // If Context is provided to operation, which is always the case here, + // this value has no meaning since OpenAPI runtime ignores it. + api.DefaultTimeout = 0 + // Disable debug output to stderr, it could have been enabled by setting + // SWAGGER_DEBUG or DEBUG env variables. + apiMiddleware.Debug = false + }) +} + +//go:generate ../rclone/rcserver/internalgen.sh + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport. Do not use this for transient transports as it can +// leak file descriptors over time. Only use this for transports that will be +// re-used for the same host(s). +func DefaultTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: 100, + + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } +} + +// Client provides means to interact with Scylla nodes. +type Client struct { + config Config + logger log.Logger + + scyllaOps scyllaOperations.ClientService + agentOps agentOperations.ClientService + client retryableClient + hostPool hostpool.HostPool + + mu sync.RWMutex + dcCache map[string]string +} + +// NewClient creates new scylla HTTP client. +func NewClient(config Config, logger log.Logger) (*Client, error) { + if err := config.Validate(); err != nil { + return nil, errors.Wrap(err, "invalid config") + } + setOpenAPIGlobals() + + // Copy hosts + hosts := make([]string, len(config.Hosts)) + copy(hosts, config.Hosts) + + pool := hostpool.NewEpsilonGreedy(hosts, config.PoolDecayDuration, &hostpool.LinearEpsilonValueCalculator{}) + + if config.Transport == nil { + config.Transport = DefaultTransport() + } + transport := config.Transport + transport = timeout(transport, config.Timeout) + transport = requestLogger(transport, logger) + transport = hostPool(transport, pool, config.Port) + transport = auth.AddToken(transport, config.AuthToken) + transport = fixContentType(transport) + + client := &http.Client{Transport: transport} + + scyllaRuntime := api.NewWithClient( + scyllaClient.DefaultHost, scyllaClient.DefaultBasePath, []string{config.Scheme}, client, + ) + agentRuntime := api.NewWithClient( + agentClient.DefaultHost, agentClient.DefaultBasePath, []string{config.Scheme}, client, + ) + + // Debug can be turned on by SWAGGER_DEBUG or DEBUG env variable + scyllaRuntime.Debug = false + agentRuntime.Debug = false + + rc := newRetryConfig(config) + scyllaOps := scyllaOperations.New(retryableWrapTransport(scyllaRuntime, rc, logger), strfmt.Default) + agentOps := agentOperations.New(retryableWrapTransport(agentRuntime, rc, logger), strfmt.Default) + + return &Client{ + config: config, + logger: logger, + scyllaOps: scyllaOps, + agentOps: agentOps, + hostPool: pool, + client: retryableWrapClient(client, rc, logger), + dcCache: make(map[string]string), + }, nil +} + +// Config returns a copy of client config. +func (c *Client) Config() Config { + return c.config +} + +// Close closes all the idle connections. +func (c *Client) Close() error { + if t, ok := c.config.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + if c.hostPool != nil { + c.hostPool.Close() + } + return nil +} + +// fixContentType adjusts Scylla REST API response so that it can be consumed +// by Open API. 
+func fixContentType(next http.RoundTripper) http.RoundTripper { + return httpx.RoundTripperFunc(func(req *http.Request) (resp *http.Response, err error) { + defer func() { + if resp != nil { + // Force JSON, Scylla returns "text/plain" that misleads the + // unmarshaller and breaks processing. + resp.Header.Set("Content-Type", "application/json") + } + }() + return next.RoundTrip(req) + }) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_agent.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_agent.go new file mode 100644 index 00000000000..1a78a77d6e1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_agent.go @@ -0,0 +1,168 @@ +// Copyright (C) 2017 ScyllaDB + +package scyllaclient + +import ( + "context" + "net" + "net/url" + + "github.com/pkg/errors" + scyllaversion "github.com/scylladb/scylla-manager/v3/pkg/util/version" + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations" + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NodeInfo provides basic information about Scylla node. +type NodeInfo models.NodeInfo + +// NodeInfo returns basic information about `host` node. +func (c *Client) NodeInfo(ctx context.Context, host string) (*NodeInfo, error) { + p := operations.NodeInfoParams{ + Context: forceHost(ctx, host), + } + resp, err := c.agentOps.NodeInfo(&p) + if err != nil { + return nil, errors.Wrap(err, "node info") + } + return (*NodeInfo)(resp.Payload), nil +} + +// AnyNodeInfo returns basic information about any node. +func (c *Client) AnyNodeInfo(ctx context.Context) (*NodeInfo, error) { + p := operations.NodeInfoParams{ + Context: ctx, + } + resp, err := c.agentOps.NodeInfo(&p) + if err != nil { + return nil, errors.Wrap(err, "node info") + } + return (*NodeInfo)(resp.Payload), nil +} + +// CQLAddr returns CQL address from NodeInfo. +// Scylla can have separate rpc_address (CQL), listen_address and respectfully +// broadcast_rpc_address and broadcast_address if some 3rd party routing +// is added. +// `fallback` argument is used in case any of above addresses is zero address. +func (ni *NodeInfo) CQLAddr(fallback string) string { + addr, port := ni.cqlAddr(fallback), ni.CQLPort() + return net.JoinHostPort(addr, port) +} + +// CQLSSLAddr returns CQL SSL address from NodeInfo. +// Scylla can have separate rpc_address (CQL), listen_address and respectfully +// broadcast_rpc_address and broadcast_address if some 3rd party routing +// is added. +// `fallback` argument is used in case any of above addresses is zero address. +func (ni *NodeInfo) CQLSSLAddr(fallback string) string { + addr, port := ni.cqlAddr(fallback), ni.CQLSSLPort() + return net.JoinHostPort(addr, port) +} + +func (ni *NodeInfo) cqlAddr(fallback string) string { + const ipv4Zero, ipv6Zero = "0.0.0.0", "::0" + + if ni.BroadcastRPCAddress != "" { + return ni.BroadcastRPCAddress + } + if ni.RPCAddress != "" { + if ni.RPCAddress == ipv4Zero || ni.RPCAddress == ipv6Zero { + return fallback + } + return ni.RPCAddress + } + if ni.ListenAddress == ipv4Zero || ni.ListenAddress == ipv6Zero { + return fallback + } + + return ni.ListenAddress +} + +// CQLPort returns CQL port from NodeInfo. +func (ni *NodeInfo) CQLPort() string { + return ni.NativeTransportPort +} + +// CQLSSLPort returns CQL SSL port from NodeInfo. +func (ni *NodeInfo) CQLSSLPort() string { + return ni.NativeTransportPortSsl +} + +// AlternatorEnabled returns if Alternator is enabled on host. 
+func (ni *NodeInfo) AlternatorEnabled() bool { + return (ni.AlternatorHTTPSPort != "0" && ni.AlternatorHTTPSPort != "") || + (ni.AlternatorPort != "0" && ni.AlternatorPort != "") +} + +// AlternatorEncryptionEnabled returns if Alternator uses encrypted traffic. +func (ni *NodeInfo) AlternatorEncryptionEnabled() bool { + return ni.AlternatorHTTPSPort != "0" && ni.AlternatorHTTPSPort != "" +} + +// SupportsAlternatorQuery returns if Alternator supports querying system tables. +func (ni NodeInfo) SupportsAlternatorQuery() (bool, error) { + // Detect master builds + if scyllaversion.MasterVersion(ni.ScyllaVersion) { + return true, nil + } + + supports, err := scyllaversion.CheckConstraint(ni.ScyllaVersion, ">= 4.1, < 2000") + if err != nil { + return false, errors.Errorf("Unsupported Scylla version: %s", ni.ScyllaVersion) + } + + return supports, nil +} + +// AlternatorAddr returns Alternator address from NodeInfo. +// It chooses right address and port based on information stored in NodeInfo. +// HTTPS port has preference over HTTP. +// `fallback` argument is used in case alternator_addresses is zero address. +func (ni *NodeInfo) AlternatorAddr(fallback string) string { + const ipv4Zero, ipv6Zero = "0.0.0.0", "::0" + + u := url.URL{ + Scheme: "http", + } + + port := ni.AlternatorPort + if ni.AlternatorHTTPSPort != "" && ni.AlternatorHTTPSPort != "0" { + port = ni.AlternatorHTTPSPort + u.Scheme = "https" + } + if ni.AlternatorAddress != "" { + if ni.AlternatorAddress == ipv4Zero || ni.AlternatorAddress == ipv6Zero { + u.Host = net.JoinHostPort(fallback, port) + } else { + u.Host = net.JoinHostPort(ni.AlternatorAddress, port) + } + } else { + u.Host = net.JoinHostPort(fallback, port) + } + + return u.String() +} + +// CQLTLSEnabled returns whether TLS and client certificate +// authorization is enabled for CQL frontend. +func (ni NodeInfo) CQLTLSEnabled() (tlsEnabled, certAuth bool) { + return ni.ClientEncryptionEnabled, ni.ClientEncryptionRequireAuth +} + +// AlternatorTLSEnabled returns whether TLS and client certificate +// authorization is enabled for Alternator frontend. +func (ni NodeInfo) AlternatorTLSEnabled() (tlsEnabled, certAuth bool) { + // Alternator doesn't support client cert authorization. + certAuth = false + return ni.AlternatorEncryptionEnabled(), certAuth +} + +// FreeOSMemory calls debug.FreeOSMemory on the agent to return memory to OS. +func (c *Client) FreeOSMemory(ctx context.Context, host string) error { + p := operations.FreeOSMemoryParams{ + Context: forceHost(ctx, host), + } + _, err := c.agentOps.FreeOSMemory(&p) + return errors.Wrap(err, "free OS memory") +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_ping.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_ping.go new file mode 100644 index 00000000000..57e4907d38c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_ping.go @@ -0,0 +1,300 @@ +// Copyright (C) 2017 ScyllaDB + +package scyllaclient + +import ( + "bytes" + "context" + "fmt" + "math" + "math/rand" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "sync" + "time" + + "github.com/pkg/errors" + "github.com/scylladb/scylla-manager/v3/pkg/util/parallel" + "github.com/scylladb/scylla-manager/v3/pkg/util/timeutc" + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations" + "go.uber.org/multierr" +) + +// GetNodesWithLocationAccess returns subset of nodes which have access to remote location. 
+func (c *Client) GetNodesWithLocationAccess(ctx context.Context, nodes NodeStatusInfoSlice, remotePath string) (NodeStatusInfoSlice, error) { + nodeErr := make([]error, len(nodes)) + err := parallel.Run(len(nodes), parallel.NoLimit, func(i int) error { + n := nodes[i] + + err := c.RcloneCheckPermissions(ctx, n.Addr, remotePath) + if err == nil { + c.logger.Info(ctx, "Host location access check OK", + "host", n.Addr, + "location", remotePath, + ) + } else { + c.logger.Info(ctx, "Host location access check FAILED", + "hosts", n.Addr, + "location", remotePath, + "err", err, + ) + } + nodeErr[i] = err + + return nil + }, parallel.NopNotify) + if err != nil { + return nil, errors.Wrap(err, "check location access") + } + + checked := make(NodeStatusInfoSlice, 0) + for i, err := range nodeErr { + if err == nil { + checked = append(checked, nodes[i]) + } + } + + if len(checked) == 0 { + combinedErr := multierr.Combine(nodeErr...) + return nil, errors.Wrapf(combinedErr, "no nodes with access to loaction: %s", remotePath) + } + + return checked, nil +} + +// GetLiveNodes returns subset of nodes that passed connectivity check. +func (c *Client) GetLiveNodes(ctx context.Context, nodes NodeStatusInfoSlice) (NodeStatusInfoSlice, error) { + var ( + liveNodes NodeStatusInfoSlice + nodeErr = c.CheckHostsConnectivity(ctx, nodes.Hosts()) + ) + + for i, err := range nodeErr { + if err == nil { + liveNodes = append(liveNodes, nodes[i]) + } + } + if len(liveNodes) == 0 { + return nil, errors.Errorf("no live nodes") + } + + return liveNodes, nil +} + +// CheckHostsConnectivity returns a slice of errors, error at position i +// corresponds to host at position i. +func (c *Client) CheckHostsConnectivity(ctx context.Context, hosts []string) []error { + c.logger.Info(ctx, "Checking hosts connectivity", "hosts", hosts) + defer c.logger.Info(ctx, "Done checking hosts connectivity") + + size := len(hosts) + + var wg sync.WaitGroup + wg.Add(size) + + errs := make([]error, size) + for i := range hosts { + go func(i int) { + err := c.ping(ctx, hosts[i]) + if err == nil { + c.logger.Info(ctx, "Host check OK", "host", hosts[i]) + } else { + c.logger.Info(ctx, "Host check FAILED", "hosts", hosts[i], "err", err) + } + errs[i] = err + wg.Done() + }(i) + } + + wg.Wait() + + return errs +} + +// ClosestDC takes output of Datacenters, a map from DC to it's hosts and +// returns DCs sorted by speed the hosts respond. It's determined by +// the lowest latency over 3 Ping() invocations across random selection of +// hosts for each DC. +func (c *Client) ClosestDC(ctx context.Context, dcs map[string][]string) ([]string, error) { + c.logger.Info(ctx, "Measuring datacenter latencies", "dcs", extractKeys(dcs)) + + if len(dcs) == 0 { + return nil, errors.Errorf("no dcs to choose from") + } + + // Single DC no need to measure anything. + if len(dcs) == 1 { + for dc := range dcs { + return []string{dc}, nil + } + } + + type dcRTT struct { + dc string + rtt time.Duration + } + out := make(chan dcRTT, runtime.NumCPU()+1) + size := 0 + + // Test latency of 3 random hosts from each DC. 
+ for dc, hosts := range dcs { + dc := dc + hosts := pickNRandomHosts(3, hosts) + size += len(hosts) + + for _, h := range hosts { + go func(h string) { + c.logger.Debug(ctx, "Measuring host RTT", "dc", dc, "host", h) + rtt, err := c.Ping(ctx, h, 0) + if err != nil { + c.logger.Info(ctx, "Host RTT measurement failed", + "dc", dc, + "host", h, + "err", err, + ) + rtt = math.MaxInt64 + } else { + c.logger.Debug(ctx, "Host RTT", "dc", dc, "host", h, "rtt", rtt) + } + out <- dcRTT{dc: dc, rtt: rtt} + }(h) + } + } + + // Select the lowest latency for each DC. + min := make(map[string]time.Duration, len(dcs)) + for i := 0; i < size; i++ { + v := <-out + if m, ok := min[v.dc]; !ok || m > v.rtt { + min[v.dc] = v.rtt + } + } + + // Sort DCs by lowest latency. + sorted := make([]string, 0, len(dcs)) + for dc := range dcs { + sorted = append(sorted, dc) + } + sort.Slice(sorted, func(i, j int) bool { + return min[sorted[i]] < min[sorted[j]] + }) + + // All hosts failed... + if min[sorted[0]] == math.MaxInt64 { + return nil, errors.New("could not connect to any node") + } + + c.logger.Info(ctx, "Datacenters by latency (dec)", "dcs", sorted) + + return sorted, nil +} + +func extractKeys(m map[string][]string) (keys []string) { + for k := range m { + keys = append(keys, k) + } + return +} + +func pickNRandomHosts(n int, hosts []string) []string { + if n >= len(hosts) { + return hosts + } + + rand := rand.New(rand.NewSource(timeutc.Now().UnixNano())) + + idxs := make(map[int]struct{}) + rh := make([]string, 0, n) + for ; n > 0; n-- { + idx := rand.Intn(len(hosts)) + if _, ok := idxs[idx]; !ok { + idxs[idx] = struct{}{} + rh = append(rh, hosts[idx]) + } else { + n++ + } + } + return rh +} + +// Ping checks if host is available using HTTP ping and returns RTT. +// Ping requests are not retried, use this function with caution. +func (c *Client) Ping(ctx context.Context, host string, timeout time.Duration) (time.Duration, error) { + if timeout == 0 { + timeout = c.config.Timeout + } + + if ctxTimeout, hasCustomTimeout := hasCustomTimeout(ctx); hasCustomTimeout { + timeout = min(ctxTimeout, timeout) + } + + ctx = customTimeout(ctx, timeout) + ctx = noRetry(ctx) + + t := timeutc.Now() + err := c.ping(ctx, host) + return timeutc.Since(t), err +} + +func (c *Client) newURL(host, path string) url.URL { + port := "80" + if c.config.Scheme == "https" { + port = "443" + } + + return url.URL{ + Scheme: c.config.Scheme, + Host: net.JoinHostPort(host, port), + Path: path, + } +} + +func (c *Client) ping(ctx context.Context, host string) error { + _, err := c.scyllaOps.StorageServiceScyllaReleaseVersionGet(&operations.StorageServiceScyllaReleaseVersionGetParams{ + Context: forceHost(ctx, host), + }) + if err != nil { + return err + } + return nil +} + +func min(a, b time.Duration) time.Duration { + if a > b { + return b + } + return a +} + +// PingAgent is a simple heartbeat ping to agent. 
+func (c *Client) PingAgent(ctx context.Context, host string, timeout time.Duration) (time.Duration, error) { + if timeout == 0 { + timeout = c.config.Timeout + } + if ctxTimeout, hasCustomTimeout := hasCustomTimeout(ctx); hasCustomTimeout { + timeout = min(ctxTimeout, timeout) + } + ctx = customTimeout(ctx, timeout) + ctx = noRetry(ctx) + + u := c.newURL(host, "/ping") + req, err := http.NewRequestWithContext(forceHost(ctx, host), http.MethodGet, u.String(), bytes.NewReader(nil)) + if err != nil { + return 0, err + } + + t := timeutc.Now() + resp, err := c.client.Do("PingAgent", req) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusNoContent { + return 0, fmt.Errorf("expected %d status code from ping response, got %d", http.StatusNoContent, resp.StatusCode) + } + return timeutc.Since(t), nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_rclone.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_rclone.go new file mode 100644 index 00000000000..ebe930b4cb8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_rclone.go @@ -0,0 +1,659 @@ +// Copyright (C) 2017 ScyllaDB + +package scyllaclient + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver" + "github.com/scylladb/scylla-manager/v3/pkg/util/pointer" + agentClient "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client" + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations" + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// RcloneSetBandwidthLimit sets bandwidth limit of all the current and future +// transfers performed under current client session. +// Limit is expressed in MiB per second. +// To turn off limitation set it to 0. +func (c *Client) RcloneSetBandwidthLimit(ctx context.Context, host string, limit int) error { + p := operations.CoreBwlimitParams{ + Context: forceHost(ctx, host), + BandwidthRate: &models.Bandwidth{Rate: fmt.Sprintf("%dM", limit)}, + } + _, err := c.agentOps.CoreBwlimit(&p) // nolint: errcheck + return err +} + +// RcloneJobStop stops running job. +func (c *Client) RcloneJobStop(ctx context.Context, host string, jobID int64) error { + p := operations.JobStopParams{ + Context: forceHost(ctx, host), + Jobid: &models.Jobid{Jobid: jobID}, + } + _, err := c.agentOps.JobStop(&p) // nolint: errcheck + return err +} + +// RcloneJobInfo groups stats for job, running, and completed transfers. +type RcloneJobInfo = models.JobInfo + +// RcloneJobProgress aggregates job progress stats. +type RcloneJobProgress = models.JobProgress + +// RcloneTransfer represents a single file transfer in RcloneJobProgress Transferred. +type RcloneTransfer = models.Transfer + +// GlobalProgressID represents empty job id. +// Use this value to return global stats by job info. +var GlobalProgressID int64 + +// RcloneJobInfo returns job stats, and transfers info about running stats and +// completed transfers. +// If waitSeconds > 0 then long polling will be used with number of seconds. 
+func (c *Client) RcloneJobInfo(ctx context.Context, host string, jobID int64, waitSeconds int) (*RcloneJobInfo, error) { + ctx = customTimeout(forceHost(ctx, host), c.longPollingTimeout(waitSeconds)) + + p := operations.JobInfoParams{ + Context: ctx, + Jobinfo: &models.JobInfoParams{ + Jobid: jobID, + Wait: int64(waitSeconds), + }, + } + resp, err := c.agentOps.JobInfo(&p) + if err != nil { + return nil, err + } + + return resp.Payload, nil +} + +// RcloneJobProgress returns aggregated stats for the job along with its status. +func (c *Client) RcloneJobProgress(ctx context.Context, host string, jobID int64, waitSeconds int) (*RcloneJobProgress, error) { + ctx = customTimeout(forceHost(ctx, host), c.longPollingTimeout(waitSeconds)) + + p := operations.JobProgressParams{ + Context: ctx, + Jobinfo: &models.JobInfoParams{ + Jobid: jobID, + Wait: int64(waitSeconds), + }, + } + resp, err := c.agentOps.JobProgress(&p) + if StatusCodeOf(err) == http.StatusNotFound { + // If we got 404 then return empty progress with not found status. + return &RcloneJobProgress{ + Status: string(rcserver.JobNotFound), + }, nil + } + if err != nil { + return nil, err + } + + return resp.Payload, nil +} + +// RcloneJobStatus returns status of the job. +// There is one running state and three completed states error, not_found, and +// success. +type RcloneJobStatus string + +const ( + // JobError signals that job completed with error. + JobError RcloneJobStatus = "error" + // JobSuccess signals that job completed with success. + JobSuccess RcloneJobStatus = "success" + // JobRunning signals that job is still running. + JobRunning RcloneJobStatus = "running" + // JobNotFound signals that job is no longer available. + JobNotFound RcloneJobStatus = "not_found" +) + +// WorthWaitingForJob checks if rclone job can (or already did) succeed. +func WorthWaitingForJob(status string) bool { + return status == string(JobSuccess) || status == string(JobRunning) +} + +// RcloneDeleteJobStats deletes job stats group. +func (c *Client) RcloneDeleteJobStats(ctx context.Context, host string, jobID int64) error { + p := operations.CoreStatsDeleteParams{ + Context: forceHost(ctx, host), + StatsParams: &models.StatsParams{ + Group: rcloneDefaultGroup(jobID), + }, + } + _, err := c.agentOps.CoreStatsDelete(&p) // nolint: errcheck + return err +} + +// RcloneResetStats resets stats. +func (c *Client) RcloneResetStats(ctx context.Context, host string) error { + p := operations.CoreStatsResetParams{ + Context: forceHost(ctx, host), + } + _, err := c.agentOps.CoreStatsReset(&p) // nolint: errcheck + return err +} + +// RcloneDefaultGroup returns default group name based on job id. +func rcloneDefaultGroup(jobID int64) string { + return fmt.Sprintf("job/%d", jobID) +} + +// RcloneMoveFile moves file from srcRemotePath to dstRemotePath. +// Remotes need to be registered with the server first. +// Remote path format is "name:bucket/path". +// Both dstRemotePath and srRemotePath must point to a file. +func (c *Client) RcloneMoveFile(ctx context.Context, host, dstRemotePath, srcRemotePath string) error { + return c.rcloneMoveOrCopyFile(ctx, host, dstRemotePath, srcRemotePath, true) +} + +// RcloneCopyFile copies file from srcRemotePath to dstRemotePath. +// Remotes need to be registered with the server first. +// Remote path format is "name:bucket/path". +// Both dstRemotePath and srRemotePath must point to a file. 
+func (c *Client) RcloneCopyFile(ctx context.Context, host, dstRemotePath, srcRemotePath string) error { + return c.rcloneMoveOrCopyFile(ctx, host, dstRemotePath, srcRemotePath, false) +} + +func (c *Client) rcloneMoveOrCopyFile(ctx context.Context, host, dstRemotePath, srcRemotePath string, doMove bool) error { + dstFs, dstRemote, err := rcloneSplitRemotePath(dstRemotePath) + if err != nil { + return err + } + srcFs, srcRemote, err := rcloneSplitRemotePath(srcRemotePath) + if err != nil { + return err + } + + if doMove { + p := operations.OperationsMovefileParams{ + Context: forceHost(ctx, host), + Options: &models.MoveOrCopyFileOptions{ + DstFs: dstFs, + DstRemote: dstRemote, + SrcFs: srcFs, + SrcRemote: srcRemote, + }, + } + _, err = c.agentOps.OperationsMovefile(&p) + } else { + p := operations.OperationsCopyfileParams{ + Context: forceHost(ctx, host), + Options: &models.MoveOrCopyFileOptions{ + DstFs: dstFs, + DstRemote: dstRemote, + SrcFs: srcFs, + SrcRemote: srcRemote, + }, + } + _, err = c.agentOps.OperationsCopyfile(&p) + } + + return err +} + +// RcloneMoveDir moves contents of the directory pointed by srcRemotePath to +// the directory pointed by dstRemotePath. +// Remotes need to be registered with the server first. +// Returns ID of the asynchronous job. +// Remote path format is "name:bucket/path". +// If specified, a suffix will be added to otherwise overwritten or deleted files. +func (c *Client) RcloneMoveDir(ctx context.Context, host, dstRemotePath, srcRemotePath, suffix string) (int64, error) { + return c.rcloneMoveOrCopyDir(ctx, host, dstRemotePath, srcRemotePath, true, suffix) +} + +// RcloneCopyDir copies contents of the directory pointed by srcRemotePath to +// the directory pointed by dstRemotePath. +// Remotes need to be registered with the server first. +// Returns ID of the asynchronous job. +// Remote path format is "name:bucket/path". +// If specified, a suffix will be added to otherwise overwritten or deleted files. +func (c *Client) RcloneCopyDir(ctx context.Context, host, dstRemotePath, srcRemotePath, suffix string) (int64, error) { + return c.rcloneMoveOrCopyDir(ctx, host, dstRemotePath, srcRemotePath, false, suffix) +} + +func (c *Client) rcloneMoveOrCopyDir(ctx context.Context, host, dstRemotePath, srcRemotePath string, doMove bool, suffix string) (int64, error) { + dstFs, dstRemote, err := rcloneSplitRemotePath(dstRemotePath) + if err != nil { + return 0, err + } + srcFs, srcRemote, err := rcloneSplitRemotePath(srcRemotePath) + if err != nil { + return 0, err + } + m := models.MoveOrCopyFileOptions{ + DstFs: dstFs, + DstRemote: dstRemote, + SrcFs: srcFs, + SrcRemote: srcRemote, + Suffix: suffix, + } + + var jobID int64 + if doMove { + p := operations.SyncMoveDirParams{ + Context: forceHost(ctx, host), + Options: &m, + Async: true, + } + resp, err := c.agentOps.SyncMoveDir(&p) + if err != nil { + return 0, err + } + jobID = resp.Payload.Jobid + } else { + p := operations.SyncCopyDirParams{ + Context: forceHost(ctx, host), + Options: &m, + Async: true, + } + resp, err := c.agentOps.SyncCopyDir(&p) + if err != nil { + return 0, err + } + jobID = resp.Payload.Jobid + } + + return jobID, nil +} + +// RcloneCopyPaths copies paths from srcRemoteDir/path to dstRemoteDir/path. +// Remotes need to be registered with the server first. +// Remote path format is "name:bucket/path". +// Both dstRemoteDir and srRemoteDir must point to a directory. 
+func (c *Client) RcloneCopyPaths(ctx context.Context, host, dstRemoteDir, srcRemoteDir string, paths []string) (int64, error) { + dstFs, dstRemote, err := rcloneSplitRemotePath(dstRemoteDir) + if err != nil { + return 0, err + } + srcFs, srcRemote, err := rcloneSplitRemotePath(srcRemoteDir) + if err != nil { + return 0, err + } + + p := operations.SyncCopyPathsParams{ + Context: forceHost(ctx, host), + Options: &models.CopyPathsOptions{ + DstFs: dstFs, + DstRemote: dstRemote, + SrcFs: srcFs, + SrcRemote: srcRemote, + Paths: paths, + }, + Async: true, + } + + resp, err := c.agentOps.SyncCopyPaths(&p) + if err != nil { + return 0, err + } + + return resp.Payload.Jobid, nil +} + +// RcloneDeleteDir removes a directory or container and all of its contents +// from the remote. +// Remote path format is "name:bucket/path". +func (c *Client) RcloneDeleteDir(ctx context.Context, host, remotePath string) error { + fs, remote, err := rcloneSplitRemotePath(remotePath) + if err != nil { + return err + } + p := operations.OperationsPurgeParams{ + Context: forceHost(ctx, host), + RemotePath: &models.RemotePath{ + Fs: fs, + Remote: remote, + }, + Async: false, + } + _, err = c.agentOps.OperationsPurge(&p) // nolint: errcheck + return err +} + +// RcloneDeleteFile removes the single file pointed to by remotePath +// Remote path format is "name:bucket/path". +func (c *Client) RcloneDeleteFile(ctx context.Context, host, remotePath string) error { + fs, remote, err := rcloneSplitRemotePath(remotePath) + if err != nil { + return err + } + p := operations.OperationsDeletefileParams{ + Context: forceHost(ctx, host), + RemotePath: &models.RemotePath{ + Fs: fs, + Remote: remote, + }, + Async: false, + } + _, err = c.agentOps.OperationsDeletefile(&p) // nolint: errcheck + return err +} + +// RcloneDiskUsage get disk space usage. +// Remote path format is "name:bucket/path". +func (c *Client) RcloneDiskUsage(ctx context.Context, host, remotePath string) (*models.FileSystemDetails, error) { + p := operations.OperationsAboutParams{ + Context: forceHost(ctx, host), + RemotePath: &models.RemotePath{ + Fs: remotePath, + }, + } + resp, err := c.agentOps.OperationsAbout(&p) + if err != nil { + return nil, err + } + return resp.Payload, nil +} + +// RcloneFileInfo returns basic remote object information. +func (c *Client) RcloneFileInfo(ctx context.Context, host, remotePath string) (*models.FileInfo, error) { + fs, remote, err := rcloneSplitRemotePath(remotePath) + if err != nil { + return nil, err + } + + p := operations.OperationsFileInfoParams{ + Context: forceHost(ctx, host), + RemotePath: &models.RemotePath{ + Fs: fs, + Remote: remote, + }, + } + resp, err := c.agentOps.OperationsFileInfo(&p) + if err != nil { + return nil, err + } + return resp.Payload, nil +} + +// RcloneOpen streams remote file content. The stream is an HTTP body. +// Callers must close the body after use. +func (c *Client) RcloneOpen(ctx context.Context, host, remotePath string) (io.ReadCloser, error) { + fs, remote, err := rcloneSplitRemotePath(remotePath) + if err != nil { + return nil, err + } + + // Due to missing generator for Swagger 3.0, and poor implementation of 2.0 + // raw file download we are downloading manually. 
+ const urlPath = agentClient.DefaultBasePath + "/rclone/operations/cat" + + // Body + b, err := (&models.RemotePath{ + Fs: fs, + Remote: remote, + }).MarshalBinary() + if err != nil { + return nil, err + } + + u := c.newURL(host, urlPath) + req, err := http.NewRequestWithContext(forceHost(ctx, host), http.MethodPost, u.String(), bytes.NewBuffer(b)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := c.client.Do("OperationsCat", req) + if err != nil { + return nil, err + } + return resp.Body, nil +} + +// RcloneCat returns a content of a remote path. +func (c *Client) RcloneCat(ctx context.Context, host, remotePath string) ([]byte, error) { + r, err := c.RcloneOpen(ctx, host, remotePath) + if err != nil { + return nil, err + } + defer r.Close() + return io.ReadAll(r) +} + +// RcloneListDirOpts specifies options for RcloneListDir. +type RcloneListDirOpts struct { + // Show only directories in the listing + DirsOnly bool + // Show only files in the listing + FilesOnly bool + // Recurse into the listing + Recurse bool + // Read the modification time. + // For rclone backends declared as SlowModTime setting this to true will + // send additional HEAD request for each listed file if the rclone + // configuration option UseServerModTime is false. + ShowModTime bool + // Show only the newest versions of files in the listing (no snapshot tag suffix attached) + NewestOnly bool + // Show only older versions of files in the listing (snapshot tag suffix attached) + VersionedOnly bool +} + +func (opts *RcloneListDirOpts) asModelOpts() *models.ListOptionsOpt { + if opts == nil { + return &models.ListOptionsOpt{ + NoModTime: true, + NoMimeType: true, + } + } + + return &models.ListOptionsOpt{ + DirsOnly: opts.DirsOnly, + FilesOnly: opts.FilesOnly, + Recurse: opts.Recurse, + NoModTime: !opts.ShowModTime, + NoMimeType: true, + } +} + +// RcloneListDirItem represents a file in a listing with RcloneListDir. +type RcloneListDirItem = models.ListItem + +// RcloneListDir returns contents of a directory specified by remotePath. +// The remotePath is given in the following format "provider:bucket/path". +// Resulting item path is relative to the remote path. +// +// LISTING REMOTE DIRECTORIES IS KNOWN TO BE EXTREMELY SLOW. +// DO NOT USE THIS FUNCTION FOR ANYTHING BUT A FLAT DIRECTORY LISTING. +// FOR OTHER NEEDS, OR WHEN THE NUMBER OF FILES CAN BE BIG, USE RcloneListDirIter. +// +// This function must execute in the standard Timeout (15s by default) and +// will be retried if failed. +func (c *Client) RcloneListDir(ctx context.Context, host, remotePath string, opts *RcloneListDirOpts) ([]*RcloneListDirItem, error) { + p := operations.OperationsListParams{ + Context: forceHost(ctx, host), + ListOpts: &models.ListOptions{ + Fs: &remotePath, + Remote: pointer.StringPtr(""), + Opt: opts.asModelOpts(), + NewestOnly: opts != nil && opts.NewestOnly, + VersionedOnly: opts != nil && opts.VersionedOnly, + }, + } + resp, err := c.agentOps.OperationsList(&p) + if err != nil { + return nil, err + } + + return resp.Payload.List, nil +} + +// RcloneListDirIter returns contents of a directory specified by remotePath. +// The remotePath is given in the following format "provider:bucket/path". +// Resulting item path is relative to the remote path. 
+func (c *Client) RcloneListDirIter(ctx context.Context, host, remotePath string, opts *RcloneListDirOpts, f func(item *RcloneListDirItem)) error { + ctx = noTimeout(ctx) + ctx = noRetry(ctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Due to OpenAPI limitations we manually construct and sent the request + // object to stream process the response body. + const urlPath = agentClient.DefaultBasePath + "/rclone/operations/list" + + listOpts := &models.ListOptions{ + Fs: &remotePath, + Remote: pointer.StringPtr(""), + Opt: opts.asModelOpts(), + NewestOnly: opts != nil && opts.NewestOnly, + VersionedOnly: opts != nil && opts.VersionedOnly, + } + b, err := listOpts.MarshalBinary() + if err != nil { + return err + } + + u := c.newURL(host, urlPath) + req, err := http.NewRequestWithContext(forceHost(ctx, host), http.MethodPost, u.String(), bytes.NewReader(b)) + if err != nil { + return err + } + req.Header.Add("Content-Type", "application/json") + + resp, err := c.client.Do("OperationsList", req) + if err != nil { + return err + } + defer resp.Body.Close() + + dec := json.NewDecoder(resp.Body) + + // Skip tokens down to array opening + expected := []string{"{", "list", "["} + for i := range expected { + tok, err := dec.Token() + if err != nil { + return errors.Wrap(err, "read token") + } + if fmt.Sprint(tok) != expected[i] { + return errors.Errorf("json unexpected token %s expected %s", tok, expected[i]) + } + } + + errCh := make(chan error) + go func() { + var v RcloneListDirItem + for dec.More() { + // Detect context cancellation + if ctx.Err() != nil { + errCh <- ctx.Err() + return + } + + // Read value + v = RcloneListDirItem{} + if err := dec.Decode(&v); err != nil { + errCh <- err + return + } + f(&v) + + errCh <- nil + } + close(errCh) + }() + + // Rclone filters versioned files on its side. + // Since the amount of versioned files is little (usually 0), + // the timer won't be refreshed even though rclone is correctly iterating over + // remote files. To solve that, we use the MaxTimeout instead of the usual ListTimeout (#3615). + resetTimeout := c.config.ListTimeout + if listOpts.VersionedOnly { + resetTimeout = c.config.MaxTimeout + } + timer := time.NewTimer(resetTimeout) + defer timer.Stop() + + for { + select { + case err, ok := <-errCh: + if !ok { + return nil + } + if err != nil { + return err + } + timer.Reset(resetTimeout) + case <-timer.C: + return errors.Errorf("rclone list dir timeout") + } + } +} + +// RcloneCheckPermissions checks if location is available for listing, getting, +// creating, and deleting objects. +func (c *Client) RcloneCheckPermissions(ctx context.Context, host, remotePath string) error { + p := operations.OperationsCheckPermissionsParams{ + Context: forceHost(ctx, host), + RemotePath: &models.RemotePath{ + Fs: remotePath, + Remote: "", + }, + } + _, err := c.agentOps.OperationsCheckPermissions(&p) + return err +} + +// RclonePut uploads file with provided content under remotePath. +// WARNING: This API call doesn't compare checksums. It relies on sizes only. This call cannot be used for moving sstables. +func (c *Client) RclonePut(ctx context.Context, host, remotePath string, body *bytes.Buffer) error { + fs, remote, err := rcloneSplitRemotePath(remotePath) + if err != nil { + return err + } + + // Due to missing generator for Swagger 3.0, and poor implementation of 2.0 file upload + // we are uploading manually. 
+ const urlPath = agentClient.DefaultBasePath + "/rclone/operations/put" + + u := c.newURL(host, urlPath) + req, err := http.NewRequestWithContext(forceHost(ctx, host), http.MethodPost, u.String(), body) + if err != nil { + return err + } + + q := req.URL.Query() + q.Add("fs", fs) + q.Add("remote", remote) + req.URL.RawQuery = q.Encode() + req.Header.Add("Content-Type", "application/octet-stream") + + resp, err := c.client.Do("OperationsPut", req) + if err != nil { + return err + } + defer resp.Body.Close() + if _, err := io.CopyN(io.Discard, resp.Body, resp.ContentLength); err != nil { + return err + } + return nil +} + +// rcloneSplitRemotePath splits string path into file system and file path. +func rcloneSplitRemotePath(remotePath string) (fs, path string, err error) { + parts := strings.Split(remotePath, ":") + if len(parts) != 2 { + err = errors.New("remote path without file system name") + return + } + + dirParts := strings.SplitN(parts[1], "/", 2) + root := dirParts[0] + fs = fmt.Sprintf("%s:%s", parts[0], root) + if len(dirParts) > 1 { + path = dirParts[1] + } + return +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_scylla.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_scylla.go new file mode 100644 index 00000000000..a172e57067c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/client_scylla.go @@ -0,0 +1,1001 @@ +// Copyright (C) 2017 ScyllaDB + +package scyllaclient + +import ( + "bytes" + "context" + stdErrors "errors" + "fmt" + "net" + "net/http" + "regexp" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/pkg/errors" + "github.com/scylladb/go-set/strset" + "github.com/scylladb/scylla-manager/v3/pkg/dht" + "github.com/scylladb/scylla-manager/v3/pkg/util/slice" + "go.uber.org/multierr" + + "github.com/scylladb/scylla-manager/v3/pkg/util/parallel" + "github.com/scylladb/scylla-manager/v3/pkg/util/pointer" + "github.com/scylladb/scylla-manager/v3/pkg/util/prom" + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations" + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ErrHostInvalidResponse is to indicate that one of the root-causes is the invalid response from scylla-server. +var ErrHostInvalidResponse = fmt.Errorf("invalid response from host") + +// ClusterName returns cluster name. +func (c *Client) ClusterName(ctx context.Context) (string, error) { + resp, err := c.scyllaOps.StorageServiceClusterNameGet(&operations.StorageServiceClusterNameGetParams{Context: ctx}) + if err != nil { + return "", err + } + + return resp.Payload, nil +} + +// Status returns nodetool status alike information, items are sorted by +// Datacenter and Address. 
+func (c *Client) Status(ctx context.Context) (NodeStatusInfoSlice, error) { + // Get all hosts + resp, err := c.scyllaOps.StorageServiceHostIDGet(&operations.StorageServiceHostIDGetParams{Context: ctx}) + if err != nil { + return nil, err + } + + all := make([]NodeStatusInfo, len(resp.Payload)) + for i, p := range resp.Payload { + all[i].Addr = p.Key + all[i].HostID = p.Value + } + + // Get host datacenter (hopefully cached) + for i := range all { + all[i].Datacenter, err = c.HostDatacenter(ctx, all[i].Addr) + if err != nil { + return nil, err + } + } + + // Get live nodes + live, err := c.scyllaOps.GossiperEndpointLiveGet(&operations.GossiperEndpointLiveGetParams{Context: ctx}) + if err != nil { + return nil, err + } + setNodeStatus(all, NodeStatusUp, live.Payload) + + // Get joining nodes + joining, err := c.scyllaOps.StorageServiceNodesJoiningGet(&operations.StorageServiceNodesJoiningGetParams{Context: ctx}) + if err != nil { + return nil, err + } + setNodeState(all, NodeStateJoining, joining.Payload) + + // Get leaving nodes + leaving, err := c.scyllaOps.StorageServiceNodesLeavingGet(&operations.StorageServiceNodesLeavingGetParams{Context: ctx}) + if err != nil { + return nil, err + } + setNodeState(all, NodeStateLeaving, leaving.Payload) + + // Sort by Datacenter and Address + sort.Slice(all, func(i, j int) bool { + if all[i].Datacenter != all[j].Datacenter { + return all[i].Datacenter < all[j].Datacenter + } + return all[i].Addr < all[j].Addr + }) + + return all, nil +} + +// VerifyNodesAvailability checks if all nodes passed connectivity check and are in the UN state. +func (c *Client) VerifyNodesAvailability(ctx context.Context) error { + status, err := c.Status(ctx) + if err != nil { + return errors.Wrap(err, "get status") + } + + available, err := c.GetLiveNodes(ctx, status) + if err != nil { + return errors.Wrap(err, "get live nodes") + } + + availableUN := available.Live() + if len(status) == len(availableUN) { + return nil + } + + checked := strset.New() + for _, n := range availableUN { + checked.Add(n.HostID) + } + + var unavailable []string + for _, n := range status { + if !checked.Has(n.HostID) { + unavailable = append(unavailable, n.Addr) + } + } + + return fmt.Errorf("unavailable nodes: %v", unavailable) +} + +func setNodeStatus(all []NodeStatusInfo, status NodeStatus, addrs []string) { + if len(addrs) == 0 { + return + } + m := strset.New(addrs...) + + for i := range all { + if m.Has(all[i].Addr) { + all[i].Status = status + } + } +} + +func setNodeState(all []NodeStatusInfo, state NodeState, addrs []string) { + if len(addrs) == 0 { + return + } + m := strset.New(addrs...) + + for i := range all { + if m.Has(all[i].Addr) { + all[i].State = state + } + } +} + +// Datacenters returns the available datacenters in this cluster. +func (c *Client) Datacenters(ctx context.Context) (map[string][]string, error) { + resp, err := c.scyllaOps.StorageServiceHostIDGet(&operations.StorageServiceHostIDGetParams{Context: ctx}) + if err != nil { + return nil, err + } + + res := make(map[string][]string) + var errs error + + for _, p := range resp.Payload { + dc, err := c.HostDatacenter(ctx, p.Key) + if err != nil { + errs = multierr.Append(errs, err) + continue + } + res[dc] = append(res[dc], p.Key) + } + + return res, errs +} + +// HostDatacenter looks up the datacenter that the given host belongs to. 
+func (c *Client) HostDatacenter(ctx context.Context, host string) (dc string, err error) { + // Try reading from cache + c.mu.RLock() + dc = c.dcCache[host] + c.mu.RUnlock() + if dc != "" { + return + } + + resp, err := c.scyllaOps.SnitchDatacenterGet(&operations.SnitchDatacenterGetParams{ + Context: ctx, + Host: &host, + }) + if err != nil { + return "", err + } + dc = resp.Payload + + // Update cache + c.mu.Lock() + c.dcCache[host] = dc + c.mu.Unlock() + + return +} + +// HostIDs returns a mapping from host IP to UUID. +func (c *Client) HostIDs(ctx context.Context) (map[string]string, error) { + resp, err := c.scyllaOps.StorageServiceHostIDGet(&operations.StorageServiceHostIDGetParams{Context: ctx}) + if err != nil { + return nil, err + } + + v := make(map[string]string, len(resp.Payload)) + for i := 0; i < len(resp.Payload); i++ { + v[resp.Payload[i].Key] = resp.Payload[i].Value + } + return v, nil +} + +// CheckHostsChanged returns true iff a host was added or removed from cluster. +// In such a case the client should be discarded. +func (c *Client) CheckHostsChanged(ctx context.Context) (bool, error) { + cur, err := c.hosts(ctx) + if err != nil { + return false, err + } + if len(cur) != len(c.config.Hosts) { + return true, err + } + return !strset.New(c.config.Hosts...).Has(cur...), nil +} + +// hosts returns a list of all hosts in a cluster. +func (c *Client) hosts(ctx context.Context) ([]string, error) { + resp, err := c.scyllaOps.StorageServiceHostIDGet(&operations.StorageServiceHostIDGetParams{Context: ctx}) + if err != nil { + return nil, err + } + + v := make([]string, len(resp.Payload)) + for i := 0; i < len(resp.Payload); i++ { + v[i] = resp.Payload[i].Key + } + return v, nil +} + +// Keyspaces return a list of all the keyspaces. +func (c *Client) Keyspaces(ctx context.Context) ([]string, error) { + resp, err := c.scyllaOps.StorageServiceKeyspacesGet(&operations.StorageServiceKeyspacesGetParams{Context: ctx}) + if err != nil { + return nil, err + } + return resp.Payload, nil +} + +// Tables returns a slice of table names in a given keyspace. +func (c *Client) Tables(ctx context.Context, keyspace string) ([]string, error) { + resp, err := c.scyllaOps.ColumnFamilyNameGet(&operations.ColumnFamilyNameGetParams{Context: ctx}) + if err != nil { + return nil, err + } + + var ( + prefix = keyspace + ":" + tables []string + ) + for _, v := range resp.Payload { + if strings.HasPrefix(v, prefix) { + tables = append(tables, v[len(prefix):]) + } + } + + return tables, nil +} + +// Tokens returns list of tokens for a node. +func (c *Client) Tokens(ctx context.Context, host string) ([]int64, error) { + resp, err := c.scyllaOps.StorageServiceTokensByEndpointGet(&operations.StorageServiceTokensByEndpointGetParams{ + Endpoint: host, + Context: ctx, + }) + if err != nil { + return nil, err + } + + tokens := make([]int64, len(resp.Payload)) + for i, s := range resp.Payload { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return tokens, errors.Wrapf(err, "parsing error at pos %d", i) + } + tokens[i] = v + } + return tokens, nil +} + +// ShardCount returns number of shards in a node. +// If host is empty it will pick one from the pool. 
+func (c *Client) ShardCount(ctx context.Context, host string) (uint, error) { + const ( + queryMetricName = "database_total_writes" + metricName = "scylla_" + queryMetricName + ) + + metrics, err := c.metrics(ctx, host, queryMetricName) + if err != nil { + return 0, err + } + + if _, ok := metrics[metricName]; !ok { + return 0, errors.Errorf("scylla doest not expose %s metric", metricName) + } + + shards := len(metrics[metricName].Metric) + if shards == 0 { + return 0, errors.New("missing shard count") + } + + return uint(shards), nil +} + +// HostsShardCount runs ShardCount for many hosts. +func (c *Client) HostsShardCount(ctx context.Context, hosts []string) (map[string]uint, error) { + shards := make([]uint, len(hosts)) + + f := func(i int) error { + sh, err := c.ShardCount(ctx, hosts[i]) + if err != nil { + return parallel.Abort(errors.Wrapf(err, "%s: get shard count", hosts[i])) + } + shards[i] = sh + return nil + } + if err := parallel.Run(len(hosts), parallel.NoLimit, f, parallel.NopNotify); err != nil { + return nil, err + } + + out := make(map[string]uint) + for i, h := range hosts { + out[h] = shards[i] + } + return out, nil +} + +// metrics returns Scylla Prometheus metrics, `name` pattern be used to filter +// out only subset of metrics. +// If host is empty it will pick one from the pool. +func (c *Client) metrics(ctx context.Context, host, name string) (map[string]*prom.MetricFamily, error) { + u := c.newURL(host, "/metrics") + + // In case host is not set select a host from a pool. + if host != "" { + ctx = forceHost(ctx, host) + } + r, err := http.NewRequestWithContext(ctx, http.MethodGet, u.String(), http.NoBody) + if err != nil { + return nil, err + } + + if name != "" { + q := r.URL.Query() + q.Add("name", name) + r.URL.RawQuery = q.Encode() + } + + resp, err := c.client.Do("Metrics", r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return prom.ParseText(resp.Body) +} + +// DescribeRing returns a description of token range of a given keyspace. 
+func (c *Client) DescribeRing(ctx context.Context, keyspace string) (Ring, error) { + resp, err := c.scyllaOps.StorageServiceDescribeRingByKeyspaceGet(&operations.StorageServiceDescribeRingByKeyspaceGetParams{ + Context: ctx, + Keyspace: keyspace, + }) + if err != nil { + return Ring{}, err + } + + ring := Ring{ + ReplicaTokens: make([]ReplicaTokenRanges, 0), + HostDC: map[string]string{}, + } + dcTokens := make(map[string]int) + + replicaTokens := make(map[uint64][]TokenRange) + replicaHash := make(map[uint64][]string) + + for _, p := range resp.Payload { + // Parse tokens + startToken, err := strconv.ParseInt(p.StartToken, 10, 64) + if err != nil { + return Ring{}, errors.Wrap(err, "parse StartToken") + } + endToken, err := strconv.ParseInt(p.EndToken, 10, 64) + if err != nil { + return Ring{}, errors.Wrap(err, "parse EndToken") + } + + // Ensure deterministic order or nodes in replica set + sort.Strings(p.Endpoints) + + // Aggregate replica set token ranges + hash := ReplicaHash(p.Endpoints) + replicaHash[hash] = p.Endpoints + replicaTokens[hash] = append(replicaTokens[hash], TokenRange{ + StartToken: startToken, + EndToken: endToken, + }) + + // Update host to DC mapping + for _, e := range p.EndpointDetails { + ring.HostDC[e.Host] = e.Datacenter + } + + // Update DC token metrics + dcs := strset.New() + for _, e := range p.EndpointDetails { + if !dcs.Has(e.Datacenter) { + dcTokens[e.Datacenter]++ + dcs.Add(e.Datacenter) + } + } + } + + for hash, tokens := range replicaTokens { + // Ensure deterministic order of tokens + sort.Slice(tokens, func(i, j int) bool { + return tokens[i].StartToken < tokens[j].StartToken + }) + + ring.ReplicaTokens = append(ring.ReplicaTokens, ReplicaTokenRanges{ + ReplicaSet: replicaHash[hash], + Ranges: tokens, + }) + } + + // Detect replication strategy + if len(ring.HostDC) == 1 { + ring.Replication = LocalStrategy + } else { + ring.Replication = NetworkTopologyStrategy + for _, tokens := range dcTokens { + if tokens != len(resp.Payload) { + ring.Replication = SimpleStrategy + break + } + } + } + + return ring, nil +} + +// ReplicaHash hashes replicas so that it can be used as a map key. +func ReplicaHash(replicaSet []string) uint64 { + hash := xxhash.New() + for _, r := range replicaSet { + _, _ = hash.WriteString(r) // nolint: errcheck + _, _ = hash.WriteString(",") // nolint: errcheck + } + return hash.Sum64() +} + +// Repair invokes async repair and returns the repair command ID. 
+func (c *Client) Repair(ctx context.Context, keyspace, table, master string, replicaSet []string, ranges []TokenRange) (int32, error) {
+	dr := dumpRanges(ranges)
+	p := operations.StorageServiceRepairAsyncByKeyspacePostParams{
+		Context:        forceHost(ctx, master),
+		Keyspace:       keyspace,
+		ColumnFamilies: &table,
+		Ranges:         &dr,
+	}
+	// Single node cluster repair fails with the hosts param
+	if len(replicaSet) > 1 {
+		hosts := strings.Join(replicaSet, ",")
+		p.Hosts = &hosts
+	}
+
+	resp, err := c.scyllaOps.StorageServiceRepairAsyncByKeyspacePost(&p)
+	if err != nil {
+		return 0, err
+	}
+	return resp.Payload, nil
+}
+
+func dumpRanges(ranges []TokenRange) string {
+	var buf bytes.Buffer
+	for i, ttr := range ranges {
+		if i > 0 {
+			_ = buf.WriteByte(',')
+		}
+		if ttr.StartToken > ttr.EndToken {
+			_, _ = fmt.Fprintf(&buf, "%d:%d,%d:%d", dht.Murmur3MinToken, ttr.EndToken, ttr.StartToken, dht.Murmur3MaxToken)
+		} else {
+			_, _ = fmt.Fprintf(&buf, "%d:%d", ttr.StartToken, ttr.EndToken)
+		}
+	}
+	return buf.String()
+}
+
+func repairStatusShouldRetryHandler(err error) *bool {
+	s, m := StatusCodeAndMessageOf(err)
+	if s == http.StatusInternalServerError && strings.Contains(m, "unknown repair id") {
+		return pointer.BoolPtr(false)
+	}
+	return nil
+}
+
+const repairStatusTimeout = 30 * time.Minute
+
+// RepairStatus waits for a repair job to finish and returns its status.
+func (c *Client) RepairStatus(ctx context.Context, host string, id int32) (CommandStatus, error) {
+	ctx = forceHost(ctx, host)
+	ctx = customTimeout(ctx, repairStatusTimeout)
+	ctx = withShouldRetryHandler(ctx, repairStatusShouldRetryHandler)
+	var (
+		resp interface {
+			GetPayload() models.RepairAsyncStatusResponse
+		}
+		err error
+	)
+
+	resp, err = c.scyllaOps.StorageServiceRepairStatus(&operations.StorageServiceRepairStatusParams{
+		Context: ctx,
+		ID:      id,
+	})
+	if err != nil {
+		return "", err
+	}
+	return CommandStatus(resp.GetPayload()), nil
+}
+
+// When using long polling, the wait duration starts only when the node
+// receives the request.
+// longPollingTimeout extends the wait duration by the client timeout so that
+// the context is not canceled before the response is received.
+func (c *Client) longPollingTimeout(waitSeconds int) time.Duration {
+	return time.Second*time.Duration(waitSeconds) + c.config.Timeout
+}
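+
+// wrapAroundRangeDump is an illustrative sketch (a hypothetical helper, not
+// part of the upstream source): a wrap-around range, where StartToken >
+// EndToken, is split at the ring boundary into two linear ranges, assuming
+// dht.Murmur3MinToken and dht.Murmur3MaxToken are the int64 extremes.
+func wrapAroundRangeDump() string {
+	// Yields "-9223372036854775808:-100,100:9223372036854775807".
+	return dumpRanges([]TokenRange{{StartToken: 100, EndToken: -100}})
+}
+
+// ActiveRepairs returns a subset of hosts that are coordinators of a repair.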
+func (c *Client) ActiveRepairs(ctx context.Context, hosts []string) ([]string, error) {
+	type hostError struct {
+		host   string
+		active bool
+		err    error
+	}
+	out := make(chan hostError, runtime.NumCPU()+1)
+
+	for _, h := range hosts {
+		h := h
+		go func() {
+			a, err := c.hasActiveRepair(ctx, h)
+			out <- hostError{
+				host:   h,
+				active: a,
+				err:    errors.Wrapf(err, "host %s", h),
+			}
+		}()
+	}
+
+	var (
+		active []string
+		errs   error
+	)
+	for range hosts {
+		v := <-out
+		if v.err != nil {
+			errs = multierr.Append(errs, v.err)
+		}
+		if v.active {
+			active = append(active, v.host)
+		}
+	}
+	return active, errs
+}
+
+func (c *Client) hasActiveRepair(ctx context.Context, host string) (bool, error) {
+	const wait = 50 * time.Millisecond
+	for i := 0; i < 10; i++ {
+		resp, err := c.scyllaOps.StorageServiceActiveRepairGet(&operations.StorageServiceActiveRepairGetParams{
+			Context: forceHost(ctx, host),
+		})
+		if err != nil {
+			return false, err
+		}
+		if len(resp.Payload) > 0 {
+			return true, nil
+		}
+		// wait before trying again
+		t := time.NewTimer(wait)
+		select {
+		case <-ctx.Done():
+			t.Stop()
+			return false, ctx.Err()
+		case <-t.C:
+		}
+	}
+	return false, nil
+}
+
+// KillAllRepairs forces a termination of all repairs running on a host; the
+// operation is not retried to avoid side effects of a deferred kill.
+func (c *Client) KillAllRepairs(ctx context.Context, hosts ...string) error {
+	ctx = noRetry(ctx)
+
+	f := func(i int) error {
+		host := hosts[i]
+		_, err := c.scyllaOps.StorageServiceForceTerminateRepairPost(&operations.StorageServiceForceTerminateRepairPostParams{
+			Context: forceHost(ctx, host),
+		})
+		return err
+	}
+
+	notify := func(i int, err error) {
+		host := hosts[i]
+		c.logger.Error(ctx, "Failed to terminate repair",
+			"host", host,
+			"error", err,
+		)
+	}
+
+	return parallel.Run(len(hosts), parallel.NoLimit, f, notify)
+}
+
+const snapshotTimeout = 30 * time.Minute
+
+// Snapshots lists available snapshots.
+func (c *Client) Snapshots(ctx context.Context, host string) ([]string, error) {
+	ctx = customTimeout(ctx, snapshotTimeout)
+
+	resp, err := c.scyllaOps.StorageServiceSnapshotsGet(&operations.StorageServiceSnapshotsGetParams{
+		Context: forceHost(ctx, host),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	var tags []string
+	for _, p := range resp.Payload {
+		tags = append(tags, p.Key)
+	}
+
+	return tags, nil
+}
+
+// SnapshotDetails returns an index of keyspaces and tables present in the given
+// snapshot.
+func (c *Client) SnapshotDetails(ctx context.Context, host, tag string) ([]Unit, error) {
+	ctx = customTimeout(ctx, snapshotTimeout)
+
+	resp, err := c.scyllaOps.StorageServiceSnapshotsGet(&operations.StorageServiceSnapshotsGetParams{
+		Context: forceHost(ctx, host),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	m := make(map[string]Unit)
+	for _, p := range resp.Payload {
+		if p.Key != tag {
+			continue
+		}
+		for _, v := range p.Value {
+			k, ok := m[v.Ks]
+			if !ok {
+				k = Unit{
+					Keyspace: v.Ks,
+				}
+			}
+			k.Tables = append(k.Tables, v.Cf)
+			m[v.Ks] = k
+		}
+	}
+
+	var s []Unit
+	for _, v := range m {
+		s = append(s, v)
+	}
+	sort.Slice(s, func(i, j int) bool {
+		return s[i].Keyspace < s[j].Keyspace
+	})
+
+	return s, nil
+}
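+
+// snapshotRoundTrip is an illustrative sketch (a hypothetical flow and tag,
+// not part of the upstream source): take a snapshot, list the available tags
+// and delete the snapshot again on a single host.
+func snapshotRoundTrip(ctx context.Context, c *Client, host string) error {
+	const tag = "sm_20240101000000UTC" // assumed snapshot tag format
+	if err := c.TakeSnapshot(ctx, host, tag, "my_keyspace"); err != nil {
+		return err
+	}
+	tags, err := c.Snapshots(ctx, host)
+	if err != nil {
+		return err
+	}
+	_ = tags // inspect the available snapshot tags here
+	return c.DeleteSnapshot(ctx, host, tag)
+}
+
+// TakeSnapshot flushes and takes a snapshot of a keyspace.
+// Multiple keyspaces may have the same tag.
+// Flush is taken care of by Scylla, see table::snapshot for details.
+// If the snapshot already exists no error is returned.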
+func (c *Client) TakeSnapshot(ctx context.Context, host, tag, keyspace string, tables ...string) error {
+	ctx = customTimeout(ctx, snapshotTimeout)
+	ctx = withShouldRetryHandler(ctx, takeSnapshotShouldRetryHandler)
+
+	var cf *string
+	if len(tables) > 0 {
+		cf = pointer.StringPtr(strings.Join(tables, ","))
+	}
+
+	p := operations.StorageServiceSnapshotsPostParams{
+		Context: forceHost(ctx, host),
+		Tag:     &tag,
+		Kn:      &keyspace,
+		Cf:      cf,
+	}
+	_, err := c.scyllaOps.StorageServiceSnapshotsPost(&p)
+
+	// Ignore SnapshotAlreadyExists error
+	if err != nil && isSnapshotAlreadyExists(err) {
+		err = nil
+	}
+
+	return err
+}
+
+var snapshotAlreadyExistsRegex = regexp.MustCompile(`snapshot \w+ already exists`)
+
+func isSnapshotAlreadyExists(err error) bool {
+	_, msg := StatusCodeAndMessageOf(err)
+	return snapshotAlreadyExistsRegex.MatchString(msg)
+}
+
+func takeSnapshotShouldRetryHandler(err error) *bool {
+	if isSnapshotAlreadyExists(err) {
+		return pointer.BoolPtr(false)
+	}
+	return nil
+}
+
+// DeleteSnapshot removes a snapshot with a given tag.
+func (c *Client) DeleteSnapshot(ctx context.Context, host, tag string) error {
+	ctx = customTimeout(ctx, snapshotTimeout)
+
+	_, err := c.scyllaOps.StorageServiceSnapshotsDelete(&operations.StorageServiceSnapshotsDeleteParams{ // nolint: errcheck
+		Context: forceHost(ctx, host),
+		Tag:     &tag,
+	})
+	return err
+}
+
+// DeleteTableSnapshot removes a snapshot with a given tag.
+// Removed data is restricted to the provided keyspace and table.
+func (c *Client) DeleteTableSnapshot(ctx context.Context, host, tag, keyspace, table string) error {
+	ctx = customTimeout(ctx, snapshotTimeout)
+
+	_, err := c.scyllaOps.StorageServiceSnapshotsDelete(&operations.StorageServiceSnapshotsDeleteParams{ // nolint: errcheck
+		Context: forceHost(ctx, host),
+		Tag:     &tag,
+		Kn:      pointer.StringPtr(keyspace),
+		Cf:      pointer.StringPtr(table),
+	})
+	return err
+}
+
+// TableDiskSize returns total on disk size of the table in bytes.
+func (c *Client) TableDiskSize(ctx context.Context, host, keyspace, table string) (int64, error) {
+	resp, err := c.scyllaOps.ColumnFamilyMetricsTotalDiskSpaceUsedByNameGet(&operations.ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams{
+		Context: forceHost(ctx, host),
+		Name:    keyspace + ":" + table,
+	})
+	if err != nil {
+		return 0, err
+	}
+	return resp.Payload, nil
+}
+
+// TableExists returns true iff table exists.
+func (c *Client) TableExists(ctx context.Context, host, keyspace, table string) (bool, error) {
+	if host != "" {
+		ctx = forceHost(ctx, host)
+	}
+	resp, err := c.scyllaOps.ColumnFamilyNameGet(&operations.ColumnFamilyNameGetParams{Context: ctx})
+	if err != nil {
+		return false, err
+	}
+	return slice.ContainsString(resp.Payload, keyspace+":"+table), nil
+}
+
+// TotalMemory returns Scylla total memory from a particular host.
+func (c *Client) TotalMemory(ctx context.Context, host string) (int64, error) {
+	const (
+		queryMetricName = "memory_total_memory"
+		metricName      = "scylla_" + queryMetricName
+	)
+
+	metrics, err := c.metrics(ctx, host, queryMetricName)
+	if err != nil {
+		return 0, err
+	}
+
+	if _, ok := metrics[metricName]; !ok {
+		return 0, errors.New("scylla does not expose total memory metric")
+	}
+
+	var totalMemory int64
+	for _, m := range metrics[metricName].Metric {
+		switch {
+		case m.Counter != nil && m.Counter.Value != nil:
+			totalMemory += int64(*m.Counter.Value)
+		case m.Gauge != nil && m.Gauge.Value != nil:
+			totalMemory += int64(*m.Gauge.Value)
+		}
+	}
+
+	return totalMemory, nil
+}
+
+// HostsTotalMemory runs TotalMemory for many hosts.
+func (c *Client) HostsTotalMemory(ctx context.Context, hosts []string) (map[string]int64, error) {
+	memory := make([]int64, len(hosts))
+
+	f := func(i int) error {
+		mem, err := c.TotalMemory(ctx, hosts[i])
+		if err != nil {
+			return parallel.Abort(errors.Wrapf(err, "%s: get total memory", hosts[i]))
+		}
+		memory[i] = mem
+		return nil
+	}
+	if err := parallel.Run(len(hosts), parallel.NoLimit, f, parallel.NopNotify); err != nil {
+		return nil, err
+	}
+
+	out := make(map[string]int64)
+	for i, h := range hosts {
+		out[h] = memory[i]
+	}
+	return out, nil
+}
+
+// HostKeyspaceTable is a tuple of Host and Keyspace and Table names.
+type HostKeyspaceTable struct {
+	Host     string
+	Keyspace string
+	Table    string
+}
+
+// HostKeyspaceTables is a slice of HostKeyspaceTable.
+type HostKeyspaceTables []HostKeyspaceTable
+
+// Hosts returns a slice of unique hosts.
+func (t HostKeyspaceTables) Hosts() []string {
+	s := strset.New()
+	for _, v := range t {
+		s.Add(v.Host)
+	}
+	return s.List()
+}
+
+// TableDiskSizeReport returns total on disk size of tables in bytes.
+func (c *Client) TableDiskSizeReport(ctx context.Context, hostKeyspaceTables HostKeyspaceTables) ([]int64, error) {
+	// Get shard count of the first node to estimate the parallelism limit
+	shards, err := c.ShardCount(ctx, "")
+	if err != nil {
+		return nil, errors.Wrapf(err, "shard count")
+	}
+
+	var (
+		limit  = len(hostKeyspaceTables.Hosts()) * int(shards)
+		report = make([]int64, len(hostKeyspaceTables))
+	)
+
+	f := func(i int) error {
+		v := hostKeyspaceTables[i]
+
+		size, err := c.TableDiskSize(ctx, v.Host, v.Keyspace, v.Table)
+		if err != nil {
+			return parallel.Abort(errors.Wrapf(stdErrors.Join(err, ErrHostInvalidResponse), v.Host))
+		}
+		c.logger.Debug(ctx, "Table disk size",
+			"host", v.Host,
+			"keyspace", v.Keyspace,
+			"table", v.Table,
+			"size", size,
+		)
+
+		report[i] = size
+		return nil
+	}
+
+	notify := func(i int, err error) {
+		v := hostKeyspaceTables[i]
+		c.logger.Error(ctx, "Failed to get table disk size",
+			"host", v.Host,
+			"keyspace", v.Keyspace,
+			"table", v.Table,
+			"error", err,
+		)
+	}
+
+	err = parallel.Run(len(hostKeyspaceTables), limit, f, notify)
+	return report, err
+}
+
+const loadSSTablesTimeout = time.Hour
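+
+// tableSizesOnHosts is an illustrative sketch (a hypothetical helper, not
+// part of the upstream source): the HostKeyspaceTables input for
+// TableDiskSizeReport is just a flat list of (host, keyspace, table) tuples.
+func tableSizesOnHosts(ctx context.Context, c *Client, hosts []string, keyspace, table string) ([]int64, error) {
+	var hkts HostKeyspaceTables
+	for _, h := range hosts {
+		hkts = append(hkts, HostKeyspaceTable{Host: h, Keyspace: keyspace, Table: table})
+	}
+	return c.TableDiskSizeReport(ctx, hkts)
+}
+
+// LoadSSTables loads SSTables that are already downloaded to the host's table
+// upload directory.
+// The used API endpoint has the following properties:
+// - It is synchronous - a response is received only after the loading has finished
+// - It immediately returns an error if called while loading is still happening
+// - It returns nil when called on an empty upload dir
+// Besides the error, LoadSSTables also reports whether the loading of SSTables
+// is still in progress.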
+func (c *Client) LoadSSTables(ctx context.Context, host, keyspace, table string, loadAndStream, primaryReplicaOnly bool) (bool, error) {
+	const WIPError = "Already loading SSTables"
+
+	_, err := c.scyllaOps.StorageServiceSstablesByKeyspacePost(&operations.StorageServiceSstablesByKeyspacePostParams{
+		Context:            customTimeout(forceHost(ctx, host), loadSSTablesTimeout),
+		Keyspace:           keyspace,
+		Cf:                 table,
+		LoadAndStream:      &loadAndStream,
+		PrimaryReplicaOnly: &primaryReplicaOnly,
+	})
+
+	if err != nil && strings.Contains(err.Error(), WIPError) {
+		return true, err
+	}
+	return false, err
+}
+
+// IsAutoCompactionEnabled checks if auto compaction of the given table is enabled on the host.
+func (c *Client) IsAutoCompactionEnabled(ctx context.Context, host, keyspace, table string) (bool, error) {
+	resp, err := c.scyllaOps.ColumnFamilyAutocompactionByNameGet(&operations.ColumnFamilyAutocompactionByNameGetParams{
+		Context: forceHost(ctx, host),
+		Name:    keyspace + ":" + table,
+	})
+	if err != nil {
+		return false, err
+	}
+	return resp.Payload, nil
+}
+
+// EnableAutoCompaction enables auto compaction on the host.
+func (c *Client) EnableAutoCompaction(ctx context.Context, host, keyspace, table string) error {
+	_, err := c.scyllaOps.ColumnFamilyAutocompactionByNamePost(&operations.ColumnFamilyAutocompactionByNamePostParams{
+		Context: forceHost(ctx, host),
+		Name:    keyspace + ":" + table,
+	})
+	return err
+}
+
+// DisableAutoCompaction disables auto compaction on the host.
+func (c *Client) DisableAutoCompaction(ctx context.Context, host, keyspace, table string) error {
+	_, err := c.scyllaOps.ColumnFamilyAutocompactionByNameDelete(&operations.ColumnFamilyAutocompactionByNameDeleteParams{
+		Context: forceHost(ctx, host),
+		Name:    keyspace + ":" + table,
+	})
+	return err
+}
+
+// FlushTable flushes writes stored in MemTable into SSTables stored on disk.
+func (c *Client) FlushTable(ctx context.Context, host, keyspace, table string) error {
+	_, err := c.scyllaOps.StorageServiceKeyspaceFlushByKeyspacePost(&operations.StorageServiceKeyspaceFlushByKeyspacePostParams{
+		Cf:       &table,
+		Keyspace: keyspace,
+		Context:  forceHost(ctx, host),
+	})
+	return err
+}
+
+// ViewBuildStatus returns the earliest (among all nodes) build status for a given view.
+func (c *Client) ViewBuildStatus(ctx context.Context, keyspace, view string) (ViewBuildStatus, error) {
+	resp, err := c.scyllaOps.StorageServiceViewBuildStatusesByKeyspaceAndViewGet(&operations.StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams{
+		Context:  ctx,
+		Keyspace: keyspace,
+		View:     view,
+	})
+	if err != nil {
+		return "", err
+	}
+
+	if len(resp.Payload) == 0 {
+		return StatusUnknown, nil
+	}
+
+	minStatus := StatusSuccess
+	for _, v := range resp.Payload {
+		status := ViewBuildStatus(v.Value)
+		if status.Index() < minStatus.Index() {
+			minStatus = status
+		}
+	}
+	return minStatus, nil
+}
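+
+// loadWithCompactionPaused is an illustrative sketch (a hypothetical flow,
+// not part of the upstream source): restore code commonly disables auto
+// compaction around SSTable loading and re-enables it afterwards.
+func loadWithCompactionPaused(ctx context.Context, c *Client, host, keyspace, table string) error {
+	if err := c.DisableAutoCompaction(ctx, host, keyspace, table); err != nil {
+		return err
+	}
+	defer func() {
+		_ = c.EnableAutoCompaction(ctx, host, keyspace, table)
+	}()
+	_, err := c.LoadSSTables(ctx, host, keyspace, table, true, false)
+	return err
+}
+
+// ToCanonicalIP replaces ":0:0" in IPv6 addresses with "::"
+// ToCanonicalIP("192.168.0.1") -> "192.168.0.1"
+// ToCanonicalIP("100:200:0:0:0:0:0:1") -> "100:200::1".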
+func ToCanonicalIP(host string) string {
+	val := net.ParseIP(host)
+	if val == nil {
+		return host
+	}
+	return val.String()
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/config.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/config.go
new file mode 100644
index 00000000000..0176665e30a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/config.go
@@ -0,0 +1,119 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"net/http"
+	"time"
+
+	"github.com/pkg/errors"
+	"go.uber.org/multierr"
+)
+
+// Config specifies the Client configuration.
+type Config struct {
+	TimeoutConfig
+	// Transport scheme HTTP or HTTPS.
+	Scheme string `yaml:"scheme"`
+	// Hosts specifies all the cluster hosts that form a pool of hosts for
+	// the client.
+	Hosts []string
+	// Port specifies the default Scylla Manager agent port.
+	Port string
+	// AuthToken specifies the authentication token.
+	AuthToken string
+	// Transport allows for setting a custom round tripper to send HTTP
+	// requests over non-standard connections, i.e. over an SSH tunnel.
+	Transport http.RoundTripper
+}
+
+// TimeoutConfig is the configuration of the connection exposed to users.
+type TimeoutConfig struct {
+	// Timeout specifies the time to complete a single request to the Scylla
+	// REST API, possibly including opening a TCP connection.
+	// The timeout may be increased exponentially on retry after a timeout error.
+	Timeout time.Duration `yaml:"timeout"`
+	// MaxTimeout specifies the effective maximal timeout value after increasing Timeout on retry.
+	MaxTimeout time.Duration `yaml:"max_timeout"`
+	// ListTimeout specifies the maximum time to complete an iterative remote
+	// directory listing. The retrieval is performed in batches; this timeout
+	// applies to the time it takes to retrieve a single batch.
+	ListTimeout time.Duration `yaml:"list_timeout"`
+	// Backoff specifies parameters of exponential backoff used when requests
+	// from Scylla Manager to Scylla Agent fail.
+	Backoff BackoffConfig `yaml:"backoff"`
+	// InteractiveBackoff specifies backoff for interactive requests, i.e.
+	// originating from API / sctool.
+	InteractiveBackoff BackoffConfig `yaml:"interactive_backoff"`
+	// PoolDecayDuration specifies the size of the time window used to measure
+	// average request time in the Epsilon-Greedy host pool.
+	PoolDecayDuration time.Duration `yaml:"pool_decay_duration"`
+}
+
+// BackoffConfig specifies request exponential backoff parameters.
+type BackoffConfig struct {
+	WaitMin    time.Duration `yaml:"wait_min"`
+	WaitMax    time.Duration `yaml:"wait_max"`
+	MaxRetries uint64        `yaml:"max_retries"`
+	Multiplier float64       `yaml:"multiplier"`
+	Jitter     float64       `yaml:"jitter"`
+}
+
+// DefaultConfig returns a Config initialized with default values.
+func DefaultConfig() Config {
+	return DefaultConfigWithTimeout(DefaultTimeoutConfig())
+}
+
+// DefaultConfigWithTimeout returns a Config with the given timeout configuration.
+func DefaultConfigWithTimeout(c TimeoutConfig) Config {
+	return Config{
+		Scheme:        "https",
+		Port:          "10001",
+		TimeoutConfig: c,
+	}
+}
+
+// DefaultTimeoutConfig returns a TimeoutConfig initialized with default values.
+func DefaultTimeoutConfig() TimeoutConfig {
+	return TimeoutConfig{
+		Timeout:     30 * time.Second,
+		MaxTimeout:  1 * time.Hour,
+		ListTimeout: 5 * time.Minute,
+		Backoff: BackoffConfig{
+			WaitMin:    1 * time.Second,
+			WaitMax:    30 * time.Second,
+			MaxRetries: 9,
+			Multiplier: 2,
+			Jitter:     0.2,
+		},
+		InteractiveBackoff: BackoffConfig{
+			WaitMin:    time.Second,
+			MaxRetries: 1,
+		},
+		PoolDecayDuration: 30 * time.Minute,
+	}
+}
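+
+// exampleConfig is an illustrative sketch (hypothetical host and token
+// values, not part of the upstream source): a client configuration for a
+// three node cluster that keeps the default timeouts and backoff.
+func exampleConfig() Config {
+	cfg := DefaultConfig()
+	cfg.Hosts = []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"}
+	cfg.AuthToken = "token" // the agent auth token, assumed to be set elsewhere
+	return cfg
+}
+
+// TestConfig is a convenience function equal to calling DefaultConfig and
+// setting hosts and token manually.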
+func TestConfig(hosts []string, token string) Config {
+	config := DefaultConfig()
+	config.Hosts = hosts
+	config.AuthToken = token
+
+	config.Timeout = 5 * time.Second
+	config.ListTimeout = 30 * time.Second
+	config.Backoff.MaxRetries = 2
+	config.Backoff.WaitMin = 200 * time.Millisecond
+
+	return config
+}
+
+// Validate checks that the required configuration fields are set.
+func (c Config) Validate() error {
+	var err error
+	if len(c.Hosts) == 0 {
+		err = multierr.Append(err, errors.New("missing hosts"))
+	}
+	if c.Port == "" {
+		err = multierr.Append(err, errors.New("missing port"))
+	}
+
+	return err
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/config_client.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/config_client.go
new file mode 100644
index 00000000000..13b0639ab20
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/config_client.go
@@ -0,0 +1,304 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+
+	api "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+	"github.com/pkg/errors"
+	scyllaV2Client "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client"
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// ConfigClient provides means to interact with Scylla config API on a given
+// host if it's directly accessible.
+type ConfigClient struct {
+	addr   string
+	client *scyllaV2Client.ScyllaV2
+}
+
+// NewConfigClient returns a ConfigClient for the Scylla config API at addr.
+func NewConfigClient(addr string) *ConfigClient {
+	setOpenAPIGlobals()
+
+	t := http.DefaultTransport
+	t = fixContentType(t)
+	c := &http.Client{
+		Timeout:   30 * time.Second,
+		Transport: t,
+	}
+
+	scyllaV2Runtime := api.NewWithClient(
+		addr, scyllaV2Client.DefaultBasePath, scyllaV2Client.DefaultSchemes, c,
+	)
+
+	return &ConfigClient{
+		addr:   addr,
+		client: scyllaV2Client.New(scyllaV2Runtime, strfmt.Default),
+	}
+}
+
+// ListenAddress returns node listen address.
+func (c *ConfigClient) ListenAddress(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigListenAddress(config.NewFindConfigListenAddressParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return resp.Payload, err
+}
+
+// NativeTransportPort returns node listen port.
+func (c *ConfigClient) NativeTransportPort(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigNativeTransportPort(config.NewFindConfigNativeTransportPortParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprint(resp.Payload), err
+}
+
+// NativeTransportPortSSL returns node listen SSL port.
+func (c *ConfigClient) NativeTransportPortSSL(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigNativeTransportPortSsl(config.NewFindConfigNativeTransportPortSslParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprint(resp.Payload), err
+}
+
+// RPCAddress returns node rpc address.
+func (c *ConfigClient) RPCAddress(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigRPCAddress(config.NewFindConfigRPCAddressParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return resp.Payload, err
+}
+
+// RPCPort returns node rpc port.
+func (c *ConfigClient) RPCPort(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigRPCPort(config.NewFindConfigRPCPortParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprint(resp.Payload), err
+}
+
+// BroadcastAddress returns node broadcast address.
+func (c *ConfigClient) BroadcastAddress(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigBroadcastAddress(config.NewFindConfigBroadcastAddressParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return resp.Payload, err
+}
+
+// BroadcastRPCAddress returns node broadcast rpc address.
+func (c *ConfigClient) BroadcastRPCAddress(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigBroadcastRPCAddress(config.NewFindConfigBroadcastRPCAddressParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return resp.Payload, err
+}
+
+// PrometheusAddress returns node prometheus address.
+func (c *ConfigClient) PrometheusAddress(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigPrometheusAddress(config.NewFindConfigPrometheusAddressParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return resp.Payload, err
+}
+
+// PrometheusPort returns node prometheus port.
+func (c *ConfigClient) PrometheusPort(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigPrometheusPort(config.NewFindConfigPrometheusPortParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprint(resp.Payload), err
+}
+
+// DataDirectory returns node data directory.
+func (c *ConfigClient) DataDirectory(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigDataFileDirectories(config.NewFindConfigDataFileDirectoriesParamsWithContext(ctx))
+	if err != nil {
+		return "", err
+	}
+	if len(resp.Payload) == 0 {
+		return "", nil
+	}
+	return resp.Payload[0], nil
+}
+
+// ClientEncryptionOptions represents Client encryption configuration options.
+type ClientEncryptionOptions = models.ClientEncryptionOptions
+
+// ClientEncryptionOptions returns the client encryption options.
+func (c *ConfigClient) ClientEncryptionOptions(ctx context.Context) (*ClientEncryptionOptions, error) {
+	resp, err := c.client.Config.FindConfigClientEncryptionOptions(config.NewFindConfigClientEncryptionOptionsParamsWithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
+	return resp.Payload, err
+}
+
+const passwordAuthenticator = "PasswordAuthenticator"
+
+// CQLPasswordProtectionEnabled returns whether CQL Username/Password authentication is enabled.
+func (c *ConfigClient) CQLPasswordProtectionEnabled(ctx context.Context) (bool, error) {
+	resp, err := c.client.Config.FindConfigAuthenticator(config.NewFindConfigAuthenticatorParamsWithContext(ctx))
+	if err != nil {
+		return false, err
+	}
+	return strings.Replace(resp.Payload, "org.apache.cassandra.auth.", "", 1) == passwordAuthenticator, err
+}
+
+// AlternatorPort returns node alternator port.
+func (c *ConfigClient) AlternatorPort(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigAlternatorPort(config.NewFindConfigAlternatorPortParamsWithContext(ctx))
+	if isStatusCode400(err) {
+		return "", nil
+	}
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprint(resp.Payload), err
+}
+
+// AlternatorAddress returns node alternator address.
+func (c *ConfigClient) AlternatorAddress(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigAlternatorAddress(config.NewFindConfigAlternatorAddressParamsWithContext(ctx))
+	if isStatusCode400(err) {
+		return "", nil
+	}
+	if err != nil {
+		return "", err
+	}
+	return resp.Payload, err
+}
+
+// AlternatorHTTPSPort returns node alternator HTTPS port.
+func (c *ConfigClient) AlternatorHTTPSPort(ctx context.Context) (string, error) {
+	resp, err := c.client.Config.FindConfigAlternatorHTTPSPort(config.NewFindConfigAlternatorHTTPSPortParamsWithContext(ctx))
+	if isStatusCode400(err) {
+		return "", nil
+	}
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprint(resp.Payload), err
+}
+
+// UUIDSStableIdentifiers returns whether the node is using UUID-like sstable naming.
+func (c *ConfigClient) UUIDSStableIdentifiers(ctx context.Context) (bool, error) {
+	resp, err := c.client.Config.FindConfigUUIDSstableIdentifiersEnabled(config.NewFindConfigUUIDSstableIdentifiersEnabledParamsWithContext(ctx))
+	if isStatusCode400(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return resp.Payload, err
+}
+
+// ConsistentClusterManagement returns true if the node uses RAFT for cluster management and DDL.
+func (c *ConfigClient) ConsistentClusterManagement(ctx context.Context) (bool, error) {
+	resp, err := c.client.Config.FindConfigConsistentClusterManagement(config.NewFindConfigConsistentClusterManagementParamsWithContext(ctx))
+	if isStatusCode400(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return resp.Payload, err
+}
+
+// AlternatorEnforceAuthorization returns whether alternator requires authorization.
+func (c *ConfigClient) AlternatorEnforceAuthorization(ctx context.Context) (bool, error) {
+	resp, err := c.client.Config.FindConfigAlternatorEnforceAuthorization(config.NewFindConfigAlternatorEnforceAuthorizationParamsWithContext(ctx))
+	if isStatusCode400(err) {
+		return false, nil
+	}
+	if err != nil {
+		return false, err
+	}
+	return resp.Payload, err
+}
+
+func isStatusCode400(err error) bool {
+	// Scylla will return 400 when alternator is disabled, for example:
+	// {"message": "No such config entry: alternator_port", "code": 400}
+	return StatusCodeOf(err) == http.StatusBadRequest
+}
+
+// NodeInfo returns aggregated information about the Scylla node.
+func (c *ConfigClient) NodeInfo(ctx context.Context) (*NodeInfo, error) {
+	apiAddress, apiPort, err := net.SplitHostPort(c.addr)
+	if err != nil {
+		return nil, errors.Wrapf(err, "split %s into host port chunks", c.addr)
+	}
+
+	ceo, err := c.ClientEncryptionOptions(ctx)
+	if err != nil {
+		return nil, errors.Wrap(err, "fetch Scylla config client encryption enabled")
+	}
+
+	ni := &NodeInfo{
+		APIAddress:                  apiAddress,
+		APIPort:                     apiPort,
+		ClientEncryptionEnabled:     strings.EqualFold(ceo.Enabled, "true"),
+		ClientEncryptionRequireAuth: strings.EqualFold(ceo.RequireClientAuth, "true"),
+	}
+
+	ffs := []struct {
+		Field   *string
+		Fetcher func(context.Context) (string, error)
+	}{
+		{Field: &ni.BroadcastAddress, Fetcher: c.BroadcastAddress},
+		{Field: &ni.BroadcastRPCAddress, Fetcher: c.BroadcastRPCAddress},
+		{Field: &ni.ListenAddress, Fetcher: c.ListenAddress},
+		{Field: &ni.NativeTransportPort, Fetcher: c.NativeTransportPort},
+		{Field: &ni.NativeTransportPortSsl, Fetcher: c.NativeTransportPortSSL},
+		{Field: &ni.PrometheusAddress, Fetcher: c.PrometheusAddress},
+		{Field: &ni.PrometheusPort, Fetcher: c.PrometheusPort},
+		{Field: &ni.RPCAddress, Fetcher: c.RPCAddress},
+		{Field: &ni.RPCPort, Fetcher: c.RPCPort},
+		{Field: &ni.AlternatorAddress, Fetcher: c.AlternatorAddress},
+		{Field: &ni.AlternatorPort, Fetcher: c.AlternatorPort},
+		{Field: &ni.AlternatorHTTPSPort, Fetcher: c.AlternatorHTTPSPort},
+	}
+
+	for i, ff := range ffs {
+		*ff.Field, err = ff.Fetcher(ctx)
+		if err != nil {
+			return nil, errors.Wrapf(err, "agent: fetch Scylla config %d", i)
+		}
+	}
+
+	ffb := []struct {
+		Field   *bool
+		Fetcher func(context.Context) (bool, error)
+	}{
+		{Field: &ni.CqlPasswordProtected, Fetcher: c.CQLPasswordProtectionEnabled},
+		{Field: &ni.AlternatorEnforceAuthorization, Fetcher: c.AlternatorEnforceAuthorization},
+		{Field: &ni.SstableUUIDFormat, Fetcher: c.UUIDSStableIdentifiers},
+		{Field: &ni.ConsistentClusterManagement, Fetcher: c.ConsistentClusterManagement},
+	}
+
+	for i, ff := range ffb {
+		*ff.Field, err = ff.Fetcher(ctx)
+		if err != nil {
+			return nil, errors.Wrapf(err, "agent: fetch Scylla config %d", i)
+		}
+	}
+
+	return ni, nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/context.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/context.go
new file mode 100644
index 00000000000..a3ae3cec392
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/context.go
@@ -0,0 +1,89 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"context"
+	"time"
+)
+
+// ctxt is a context key type.
+type ctxt byte
+
+// ctxt enumeration.
+const (
+	ctxInteractive ctxt = iota
+	ctxHost
+	ctxNoRetry
+	ctxNoTimeout
+	ctxCustomTimeout
+	ctxShouldRetryHandler
+)
+
+// Interactive context means that it should be processed fast without too much
+// useless waiting.
+func Interactive(ctx context.Context) context.Context {
+	return context.WithValue(ctx, ctxInteractive, true)
+}
+
+func isInteractive(ctx context.Context) bool {
+	_, ok := ctx.Value(ctxInteractive).(bool)
+	return ok
+}
+
+// ClientContextWithSelectedHost is a public method that returns a copy of the
+// given context, extended with the selected host that will be hit with client
+// calls.
+func ClientContextWithSelectedHost(ctx context.Context, host string) context.Context {
+	return forceHost(ctx, host)
+}
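+
+// withForcedHost is an illustrative sketch (a hypothetical helper, not part
+// of the upstream source): the context decorators in this file compose, so a
+// call can be pinned to a host, marked interactive, and given a longer
+// timeout at once.
+func withForcedHost(ctx context.Context, host string) context.Context {
+	ctx = Interactive(ctx)                  // use interactive backoff
+	ctx = customTimeout(ctx, 2*time.Minute) // override the default timeout
+	return ClientContextWithSelectedHost(ctx, host)
+}
+
+// forceHost makes hostPool middleware use the given host instead of selecting
+// one.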
+func forceHost(ctx context.Context, host string) context.Context {
+	return context.WithValue(ctx, ctxHost, host)
+}
+
+func isForceHost(ctx context.Context) bool {
+	_, ok := ctx.Value(ctxHost).(string)
+	return ok
+}
+
+// noRetry disables retries.
+func noRetry(ctx context.Context) context.Context {
+	return context.WithValue(ctx, ctxNoRetry, true)
+}
+
+// noTimeout disables timeouts - if in doubt do not use it.
+// This should only be used by functions that handle timeouts internally.
+func noTimeout(ctx context.Context) context.Context {
+	return context.WithValue(ctx, ctxNoTimeout, true)
+}
+
+// customTimeout allows passing a custom timeout to the timeout middleware.
+//
+// WARNING: Usually this is a workaround for Scylla or other API slowness
+// in field conditions, i.e. with tons of data. It is a last line of defense;
+// please use with care.
+func customTimeout(ctx context.Context, d time.Duration) context.Context {
+	return context.WithValue(ctx, ctxCustomTimeout, d)
+}
+
+func hasCustomTimeout(ctx context.Context) (time.Duration, bool) {
+	v, ok := ctx.Value(ctxCustomTimeout).(time.Duration)
+	return v, ok
+}
+
+// shouldRetryHandlerFunc returns
+// true if the error should be retried,
+// false if the error is permanent,
+// nil if the handler cannot decide.
+type shouldRetryHandlerFunc func(err error) *bool
+
+func withShouldRetryHandler(ctx context.Context, f shouldRetryHandlerFunc) context.Context {
+	return context.WithValue(ctx, ctxShouldRetryHandler, f)
+}
+
+func shouldRetryHandler(ctx context.Context) shouldRetryHandlerFunc {
+	f, _ := ctx.Value(ctxShouldRetryHandler).(shouldRetryHandlerFunc)
+	return f
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/hostpool.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/hostpool.go
new file mode 100644
index 00000000000..af53c8e080d
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/hostpool.go
@@ -0,0 +1,66 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"net"
+	"net/http"
+
+	"github.com/hailocab/go-hostpool"
+	"github.com/pkg/errors"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/httpx"
+)
+
+var errPoolServerError = errors.New("server error")
+
+// hostPool sets request host from a pool.
+func hostPool(next http.RoundTripper, pool hostpool.HostPool, port string) http.RoundTripper {
+	return httpx.RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
+		ctx := req.Context()
+
+		var (
+			h   string
+			hpr hostpool.HostPoolResponse
+		)
+
+		// Get host from context
+		h, ok := ctx.Value(ctxHost).(string)
+
+		// Get host from pool
+		if !ok {
+			hpr = pool.Get()
+			h = hpr.Host()
+		}
+
+		// Clone request
+		r := httpx.CloneRequest(req)
+
+		// Set host and port
+		hp := net.JoinHostPort(h, port)
+		r.Host = hp
+		r.URL.Host = hp
+
+		// RoundTrip shall not modify requests, here we modify it to fix error
+		// messages see https://github.com/scylladb/scylla-manager/pkg/issues/266.
+		// This is legit because we own the whole process. The modified request
+		// is not being sent.
+		req.Host = h
+		req.URL.Host = h
+
+		resp, err := next.RoundTrip(r)
+
+		// Mark response
+		if hpr != nil {
+			switch {
+			case err != nil:
+				hpr.Mark(err)
+			case resp.StatusCode == 401 || resp.StatusCode == 403 || resp.StatusCode >= 500:
+				hpr.Mark(errPoolServerError)
+			default:
+				hpr.Mark(nil)
+			}
+		}
+
+		return resp, err
+	})
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/log.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/log.go
new file mode 100644
index 00000000000..e595739e52f
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/log.go
@@ -0,0 +1,52 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httputil"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/scylladb/go-log"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/httpx"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/timeutc"
+)
+
+// requestLogger logs requests and responses.
+func requestLogger(next http.RoundTripper, logger log.Logger) http.RoundTripper {
+	return httpx.RoundTripperFunc(func(req *http.Request) (resp *http.Response, err error) {
+		start := timeutc.Now()
+		resp, err = next.RoundTrip(req)
+		logReqResp(logger, timeutc.Since(start), req, resp)
+		return
+	})
+}
+
+func logReqResp(logger log.Logger, elapsed time.Duration, req *http.Request, resp *http.Response) {
+	f := []interface{}{
+		"host", req.Host,
+		"method", req.Method,
+		"uri", req.URL.RequestURI(),
+		"duration", fmt.Sprintf("%dms", elapsed.Milliseconds()),
+	}
+	logFn := logger.Debug
+	if resp != nil {
+		f = append(f,
+			"status", resp.StatusCode,
+			"bytes", resp.ContentLength,
+		)
+
+		// Dump body of failed requests, ignore 404s
+		if c := resp.StatusCode; c >= 400 && c != http.StatusNotFound {
+			if b, err := httputil.DumpResponse(resp, true); err != nil {
+				f = append(f, "dump", errors.Wrap(err, "dump response"))
+			} else {
+				f = append(f, "dump", string(b))
+			}
+			logFn = logger.Info
+		}
+	}
+	logFn(req.Context(), "HTTP", f...)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/model.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/model.go
new file mode 100644
index 00000000000..3dc655e783a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/model.go
@@ -0,0 +1,243 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"reflect"
+
+	"github.com/gocql/gocql"
+	"github.com/scylladb/go-set/strset"
+	"github.com/scylladb/gocqlx/v2"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/slice"
+)
+
+// NodeStatus represents nodetool Status=Up/Down.
+type NodeStatus bool
+
+// NodeStatus enumeration.
+const (
+	NodeStatusUp   NodeStatus = true
+	NodeStatusDown NodeStatus = false
+)
+
+func (s NodeStatus) String() string {
+	if s {
+		return "U"
+	}
+	return "D"
+}
+
+// NodeState represents nodetool State=Normal/Leaving/Joining/Moving.
+type NodeState string
+
+// NodeState enumeration.
+const (
+	NodeStateNormal  NodeState = ""
+	NodeStateLeaving NodeState = "LEAVING"
+	NodeStateJoining NodeState = "JOINING"
+	NodeStateMoving  NodeState = "MOVING"
+)
+
+func (s NodeState) String() string {
+	switch s {
+	case NodeStateNormal:
+		return "N"
+	case NodeStateLeaving:
+		return "L"
+	case NodeStateJoining:
+		return "J"
+	case NodeStateMoving:
+		return "M"
+	}
+	return ""
+}
+
+// NodeStatusInfo represents a nodetool status line.
+type NodeStatusInfo struct {
+	Datacenter string
+	HostID     string
+	Addr       string
+	Status     NodeStatus
+	State      NodeState
+}
+
+// IsUN returns true if host is Up and NORMAL meaning it's a fully functional
+// live node.
+func (s NodeStatusInfo) IsUN() bool {
+	return s.Status == NodeStatusUp && s.State == NodeStateNormal
+}
+
+// NodeStatusInfoSlice adds functionality to Status response.
+type NodeStatusInfoSlice []NodeStatusInfo
+
+// Datacenter returns a sub slice containing only nodes from the given datacenters.
+func (s NodeStatusInfoSlice) Datacenter(dcs []string) NodeStatusInfoSlice {
+	m := strset.New(dcs...)
+	return s.filter(func(i int) bool {
+		return m.Has(s[i].Datacenter)
+	})
+}
+
+// DatacenterMap returns dc to nodes mapping.
+func (s NodeStatusInfoSlice) DatacenterMap(dc []string) map[string][]string {
+	dcMap := make(map[string][]string)
+	for _, h := range s {
+		if slice.ContainsString(dc, h.Datacenter) {
+			dcMap[h.Datacenter] = append(dcMap[h.Datacenter], h.Addr)
+		}
+	}
+	return dcMap
+}
+
+// HostDC returns node to dc mapping.
+func (s NodeStatusInfoSlice) HostDC() map[string]string {
+	hostDC := make(map[string]string)
+	for _, h := range s {
+		hostDC[h.Addr] = h.Datacenter
+	}
+	return hostDC
+}
+
+// Up returns a sub slice containing only nodes with status up.
+func (s NodeStatusInfoSlice) Up() NodeStatusInfoSlice {
+	return s.filter(func(i int) bool {
+		return s[i].Status == NodeStatusUp
+	})
+}
+
+// Down returns a sub slice containing only nodes with status down.
+func (s NodeStatusInfoSlice) Down() NodeStatusInfoSlice {
+	return s.filter(func(i int) bool {
+		return s[i].Status == NodeStatusDown
+	})
+}
+
+// State returns a sub slice containing only nodes in a given state.
+func (s NodeStatusInfoSlice) State(state NodeState) NodeStatusInfoSlice {
+	return s.filter(func(i int) bool {
+		return s[i].State == state
+	})
+}
+
+// Live returns a sub slice of nodes in the UN state.
+func (s NodeStatusInfoSlice) Live() NodeStatusInfoSlice {
+	return s.filter(func(i int) bool {
+		return s[i].IsUN()
+	})
+}
+
+func (s NodeStatusInfoSlice) filter(f func(i int) bool) NodeStatusInfoSlice {
+	var filtered NodeStatusInfoSlice
+	for i, h := range s {
+		if f(i) {
+			filtered = append(filtered, h)
+		}
+	}
+	return filtered
+}
+
+// HostIDs returns a slice of IDs of all nodes.
+func (s NodeStatusInfoSlice) HostIDs() []string {
+	var ids []string
+	for _, h := range s {
+		ids = append(ids, h.HostID)
+	}
+	return ids
+}
+
+// Hosts returns a slice of addresses of all nodes.
+func (s NodeStatusInfoSlice) Hosts() []string {
+	var hosts []string
+	for _, h := range s {
+		hosts = append(hosts, h.Addr)
+	}
+	return hosts
+}
+
+// CommandStatus specifies a result of a command.
+type CommandStatus string
+
+// Command statuses.
+const (
+	CommandRunning    CommandStatus = "RUNNING"
+	CommandSuccessful CommandStatus = "SUCCESSFUL"
+	CommandFailed     CommandStatus = "FAILED"
+)
+
+// ReplicationStrategy specifies type of keyspace replication strategy.
+type ReplicationStrategy string
+
+// Replication strategies.
+const (
+	LocalStrategy           = "org.apache.cassandra.locator.LocalStrategy"
+	SimpleStrategy          = "org.apache.cassandra.locator.SimpleStrategy"
+	NetworkTopologyStrategy = "org.apache.cassandra.locator.NetworkTopologyStrategy"
+)
+
+// Ring describes token ring of a keyspace.
+type Ring struct {
+	ReplicaTokens []ReplicaTokenRanges
+	HostDC        map[string]string
+	Replication   ReplicationStrategy
+}
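+
+// liveHostsInDC is an illustrative sketch (a hypothetical helper, not part
+// of the upstream source): the filters above chain, so the live nodes of
+// selected datacenters reduce to a plain host list.
+func liveHostsInDC(status NodeStatusInfoSlice, dcs []string) []string {
+	return status.Datacenter(dcs).Live().Hosts()
+}
+
+// Datacenters returns a list of datacenters the keyspace is replicated in.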
+func (r Ring) Datacenters() []string {
+	v := strset.NewWithSize(len(r.HostDC))
+	for _, dc := range r.HostDC {
+		v.Add(dc)
+	}
+	return v.List()
+}
+
+// TokenRange describes the beginning and end of a token range.
+type TokenRange struct {
+	StartToken int64 `db:"start_token"`
+	EndToken   int64 `db:"end_token"`
+}
+
+// MarshalUDT implements UDT marshalling for gocqlx.
+func (t TokenRange) MarshalUDT(name string, info gocql.TypeInfo) ([]byte, error) {
+	f := gocqlx.DefaultMapper.FieldByName(reflect.ValueOf(t), name)
+	return gocql.Marshal(info, f.Interface())
+}
+
+// UnmarshalUDT implements UDT unmarshalling for gocqlx.
+func (t *TokenRange) UnmarshalUDT(name string, info gocql.TypeInfo, data []byte) error {
+	f := gocqlx.DefaultMapper.FieldByName(reflect.ValueOf(t), name)
+	return gocql.Unmarshal(info, data, f.Addr().Interface())
+}
+
+// ReplicaTokenRanges describes all token ranges belonging to given replica set.
+type ReplicaTokenRanges struct {
+	ReplicaSet []string     // Sorted lexicographically
+	Ranges     []TokenRange // Sorted by start token
+}
+
+// Unit describes a keyspace and some tables in that keyspace.
+type Unit struct {
+	Keyspace string
+	Tables   []string
+}
+
+// ViewBuildStatus defines build status of a view.
+type ViewBuildStatus string
+
+// ViewBuildStatus enumeration.
+const (
+	StatusUnknown ViewBuildStatus = "UNKNOWN"
+	StatusStarted ViewBuildStatus = "STARTED"
+	StatusSuccess ViewBuildStatus = "SUCCESS"
+)
+
+// ViewBuildStatusOrder lists all view build statuses in the order of their execution.
+func ViewBuildStatusOrder() []ViewBuildStatus {
+	return []ViewBuildStatus{
+		StatusUnknown,
+		StatusStarted,
+		StatusSuccess,
+	}
+}
+
+// Index returns status position in ViewBuildStatusOrder.
+func (s ViewBuildStatus) Index() int {
+	return slice.Index(ViewBuildStatusOrder(), s)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/provider.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/provider.go
new file mode 100644
index 00000000000..888ffe5b8c6
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/provider.go
@@ -0,0 +1,95 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/scylladb/go-log"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/timeutc"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/uuid"
+)
+
+// ProviderFunc is a function that returns a Client for a given cluster.
+type ProviderFunc func(ctx context.Context, clusterID uuid.UUID) (*Client, error)
+
+type clientTTL struct {
+	client *Client
+	ttl    time.Time
+}
+
+// CachedProvider is a provider implementation that reuses clients.
+type CachedProvider struct {
+	inner    ProviderFunc
+	validity time.Duration
+	clients  map[uuid.UUID]clientTTL
+	mu       sync.Mutex
+	logger   log.Logger
+}
+
+// NewCachedProvider wraps the given provider function with a client cache.
+func NewCachedProvider(f ProviderFunc, cacheInvalidationTimeout time.Duration, logger log.Logger) *CachedProvider {
+	return &CachedProvider{
+		inner:    f,
+		validity: cacheInvalidationTimeout,
+		clients:  make(map[uuid.UUID]clientTTL),
+		logger:   logger.Named("cache-provider"),
+	}
+}
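+
+// newCachedProvider is an illustrative sketch (hypothetical wiring and
+// validity window, not part of the upstream source): a raw provider function
+// is wrapped so that clients are reused until the validity window passes or
+// the cluster hosts change.
+func newCachedProvider(raw ProviderFunc, logger log.Logger) *CachedProvider {
+	return NewCachedProvider(raw, 15*time.Minute, logger)
+}
+
+// Client is the cached ProviderFunc.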
+func (p *CachedProvider) Client(ctx context.Context, clusterID uuid.UUID) (*Client, error) {
+	p.mu.Lock()
+	c, ok := p.clients[clusterID]
+	p.mu.Unlock()
+
+	// Cache hit
+	if ok {
+		// Check if hosts did not change before returning
+		changed, err := c.client.CheckHostsChanged(ctx)
+		if err != nil {
+			p.logger.Error(ctx, "Cannot check if hosts changed", "error", err)
+		}
+		if c.ttl.After(timeutc.Now()) && !changed && err == nil {
+			return c.client, nil
+		}
+	}
+
+	// If not found or hosts changed create a new one
+	client, err := p.inner(ctx, clusterID)
+	if err != nil {
+		return nil, err
+	}
+
+	c = clientTTL{
+		client: client,
+		ttl:    timeutc.Now().Add(p.validity),
+	}
+
+	p.mu.Lock()
+	p.clients[clusterID] = c
+	p.mu.Unlock()
+
+	return c.client, nil
+}
+
+// Invalidate removes the client for clusterID from the cache.
+func (p *CachedProvider) Invalidate(clusterID uuid.UUID) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	delete(p.clients, clusterID)
+}
+
+// Close removes all clients and closes them to clear up any resources.
+func (p *CachedProvider) Close() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	for clusterID, c := range p.clients {
+		delete(p.clients, clusterID)
+		c.client.Close()
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/retry.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/retry.go
new file mode 100644
index 00000000000..0092c0036d6
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/retry.go
@@ -0,0 +1,281 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"context"
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/go-openapi/runtime"
+	"github.com/pkg/errors"
+	"github.com/scylladb/go-log"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/retry"
+)
+
+type retryConfig struct {
+	normal      BackoffConfig
+	interactive BackoffConfig
+	poolSize    int
+	timeout     time.Duration
+	maxTimeout  time.Duration
+}
+
+func newRetryConfig(config Config) *retryConfig {
+	return &retryConfig{
+		normal:      config.Backoff,
+		interactive: config.InteractiveBackoff,
+		poolSize:    len(config.Hosts),
+		timeout:     config.Timeout,
+		maxTimeout:  config.MaxTimeout,
+	}
+}
+
+func (c *retryConfig) backoff(ctx context.Context) retry.Backoff {
+	if isForceHost(ctx) {
+		if isInteractive(ctx) {
+			return backoff(c.interactive)
+		}
+		return backoff(c.normal)
+	}
+
+	// We want to send the request to every host in the pool once.
+	// The -1 accounts for the first host, which has already failed.
+	maxRetries := c.poolSize - 1
+	return noBackoff(maxRetries)
+}
+
+func backoff(config BackoffConfig) retry.Backoff {
+	return retry.WithMaxRetries(retry.NewExponentialBackoff(
+		config.WaitMin,
+		0,
+		config.WaitMax,
+		config.Multiplier,
+		config.Jitter,
+	), config.MaxRetries)
+}
+
+func noBackoff(maxRetries int) retry.Backoff {
+	return retry.WithMaxRetries(retry.BackoffFunc(func() time.Duration { return 0 }), uint64(maxRetries))
+}
+
+type retryableClient struct {
+	client *http.Client
+	config *retryConfig
+	logger log.Logger
+}
+
+func retryableWrapClient(client *http.Client, config *retryConfig, logger log.Logger) retryableClient {
+	return retryableClient{
+		client: client,
+		config: config,
+		logger: logger,
+	}
+}
+
+func (c retryableClient) Do(id string, req *http.Request) (*http.Response, error) {
+	// GetBody is auto created for common buffer types when constructing a new request.
+ if req.Body != nil && req.Body != http.NoBody && req.GetBody == nil { + panic("retryable requests must provide GetBody") + } + + if _, ok := req.Context().Value(ctxNoRetry).(bool); ok { + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + return resp, makeAgentError(resp) + } + + ct, _ := hasCustomTimeout(req.Context()) + o := &retryableOperation{ + config: c.config, + customTimeout: ct, + ctx: req.Context(), + id: id, + logger: c.logger, + } + o.do = func() (interface{}, error) { + r := req.Clone(o.ctx) + if req.GetBody != nil { + body, err := req.GetBody() + if err != nil { + return nil, errors.Wrap(err, "get body") + } + r.Body = body + } + + resp, err := c.client.Do(r) + if err != nil { + return resp, err + } + return resp, makeAgentError(resp) + } + + resp, err := o.submit() + if resp == nil { + return nil, err + } + return resp.(*http.Response), err +} + +type retryableTransport struct { + transport runtime.ClientTransport + config *retryConfig + logger log.Logger +} + +func retryableWrapTransport(transport runtime.ClientTransport, config *retryConfig, logger log.Logger) runtime.ClientTransport { + return retryableTransport{ + transport: transport, + config: config, + logger: logger, + } +} + +func (t retryableTransport) Submit(operation *runtime.ClientOperation) (interface{}, error) { + if _, ok := operation.Context.Value(ctxNoRetry).(bool); ok { + v, err := t.transport.Submit(operation) + return v, unpackURLError(err) + } + + ct, _ := hasCustomTimeout(operation.Context) + o := &retryableOperation{ + config: t.config, + customTimeout: ct, + ctx: operation.Context, + id: operation.ID, + logger: t.logger, + } + o.do = func() (interface{}, error) { + operation.Context = o.ctx + return t.transport.Submit(operation) + } + return o.submit() +} + +type retryableOperation struct { + config *retryConfig + customTimeout time.Duration + ctx context.Context //nolint:containedctx + id string + result interface{} + attempts int + logger log.Logger + + do func() (interface{}, error) +} + +func (o *retryableOperation) submit() (interface{}, error) { + err := retry.WithNotify(o.ctx, o.op, o.config.backoff(o.ctx), o.notify) + if err != nil { + err = unpackURLError(err) + + // Do not print "giving up after 1 attempts" for permanent errors. + if o.attempts > 1 { + err = errors.Wrapf(err, "giving up after %d attempts", o.attempts) + } + return nil, err + } + return o.result, nil +} + +func (o *retryableOperation) op() (err error) { + o.attempts++ + + o.result, err = o.do() + if err != nil { + if !shouldRetry(o.ctx, err) { + err = retry.Permanent(err) + return + } + if shouldIncreaseTimeout(o.ctx, err) { + timeout := o.nextTimeout() + o.logger.Debug(o.ctx, "HTTP increasing timeout", + "operation", o.id, + "timeout", timeout, + ) + o.ctx = customTimeout(o.ctx, timeout) + } + } + + return +} + +func (o *retryableOperation) nextTimeout() time.Duration { + d, _ := hasCustomTimeout(o.ctx) + + d -= o.customTimeout + if d <= 0 { + d = o.config.timeout + } + d *= 2 + d += o.customTimeout + + if o.config.maxTimeout > 0 && d > o.config.maxTimeout { + d = o.config.maxTimeout + } + + return d +} + +func shouldRetry(ctx context.Context, err error) bool { + if ctx.Err() != nil { + return false + } + + // Check if there is a retry handler attached to the context. + // If handler cannot decide move on to the default handler. + if h := shouldRetryHandler(ctx); h != nil { + if shouldRetry := h(err); shouldRetry != nil { + return *shouldRetry + } + } + + // Check the response code. 
We retry on 500-range responses to allow
+	// the server time to recover, as 500s are typically not permanent
+	// errors and may relate to outages on the server side. This will catch
+	// invalid response codes as well, like 0 and 999.
+	c := StatusCodeOf(err)
+	if c == 0 || (c >= 500 && c != 501) {
+		return true
+	}
+
+	// Additionally, if the request can be resent to a different host, retry
+	// on Unauthorized or Forbidden.
+	if !isForceHost(ctx) {
+		if c == 401 || c == 403 {
+			return true
+		}
+	}
+
+	return false
+}
+
+func shouldIncreaseTimeout(ctx context.Context, err error) bool {
+	return isForceHost(ctx) && !isInteractive(ctx) && errors.Is(err, context.DeadlineExceeded)
+}
+
+func (o *retryableOperation) notify(err error, wait time.Duration) {
+	if wait == 0 {
+		o.logger.Info(o.ctx, "HTTP retry now",
+			"operation", o.id,
+			"error", unpackURLError(err),
+		)
+	} else {
+		o.logger.Info(o.ctx, "HTTP retry backoff",
+			"operation", o.id,
+			"wait", wait,
+			"error", unpackURLError(err),
+		)
+	}
+}
+
+func unpackURLError(err error) error {
+	if e, ok := err.(*url.Error); ok { // nolint: errorlint
+		return e.Err
+	}
+
+	return err
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/status.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/status.go
new file mode 100644
index 00000000000..3454df455d5
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/status.go
@@ -0,0 +1,96 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/go-openapi/runtime"
+	"github.com/pkg/errors"
+	agentModels "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models"
+	scyllaModels "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+	scylla2Models "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// StatusCodeAndMessageOf returns the HTTP status code and its message carried
+// by the error or its cause.
+// If no status can be found it returns 0.
+func StatusCodeAndMessageOf(err error) (status int, message string) {
+	cause := errors.Cause(err)
+	switch v := cause.(type) { // nolint: errorlint
+	case *runtime.APIError:
+		return v.Code, fmt.Sprint(v.Response)
+	case interface {
+		GetPayload() *agentModels.ErrorResponse
+	}:
+		p := v.GetPayload()
+		if p != nil {
+			return int(p.Status), p.Message
+		}
+	case interface {
+		GetPayload() *scylla2Models.ErrorModel
+	}:
+		p := v.GetPayload()
+		if p != nil {
+			return int(p.Code), p.Message
+		}
+	case interface {
+		GetPayload() *scyllaModels.ErrorModel
+	}:
+		p := v.GetPayload()
+		if p != nil {
+			return int(p.Code), p.Message
+		}
+	case interface { // nolint: gofumpt
+		Code() int
+	}:
+		return v.Code(), ""
+	}
+
+	return 0, ""
+}
+
+// StatusCodeOf returns the HTTP status code carried by the error or its cause.
+// If no status can be found it returns 0.
+func StatusCodeOf(err error) int {
+	s, _ := StatusCodeAndMessageOf(err)
+	return s
+}
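+
+// isNotFound is an illustrative sketch (a hypothetical helper, not part of
+// the upstream source): callers typically branch on the extracted status code.
+func isNotFound(err error) bool {
+	return StatusCodeOf(err) == http.StatusNotFound
+}
+
+// agentError replicates OpenAPI behaviour in situations where agent needs to
+// be accessed manually i.e. with custom logic to handle HTTP request and response.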
+type agentError struct {
+	payload *agentModels.ErrorResponse
+}
+
+func makeAgentError(resp *http.Response) error {
+	if resp.StatusCode/100 == 2 {
+		return nil
+	}
+
+	defer resp.Body.Close()
+
+	b, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return errors.Wrap(err, "read body")
+	}
+	ae := agentError{
+		payload: new(agentModels.ErrorResponse),
+	}
+	if err := json.Unmarshal(b, ae.payload); err != nil {
+		return errors.Errorf("agent [HTTP %d] cannot read response: %s", resp.StatusCode, err)
+	}
+
+	return ae
+}
+
+func (ae agentError) GetPayload() *agentModels.ErrorResponse {
+	return ae.payload
+}
+
+func (ae agentError) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", ae.payload.Status, ae.payload.Message)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/timeout.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/timeout.go
new file mode 100644
index 00000000000..7cb341e13b4
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/scyllaclient/timeout.go
@@ -0,0 +1,56 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scyllaclient
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/httpx"
+)
+
+// ErrTimeout is returned when a request times out.
+var ErrTimeout = errors.New("timeout")
+
+// body defers context cancellation until the response body is closed.
+type body struct {
+	io.ReadCloser
+	cancel context.CancelFunc
+}
+
+func (b body) Close() error {
+	defer b.cancel()
+	return b.ReadCloser.Close()
+}
+
+// timeout sets request context timeout for individual requests.
+func timeout(next http.RoundTripper, timeout time.Duration) http.RoundTripper {
+	return httpx.RoundTripperFunc(func(req *http.Request) (resp *http.Response, err error) {
+		if _, ok := req.Context().Value(ctxNoTimeout).(bool); ok {
+			return next.RoundTrip(req)
+		}
+
+		d, ok := hasCustomTimeout(req.Context())
+		if !ok {
+			d = timeout
+		}
+
+		ctx, cancel := context.WithTimeout(req.Context(), d)
+		defer func() {
+			if resp != nil {
+				resp.Body = body{
+					ReadCloser: resp.Body,
+					cancel:     cancel,
+				}
+			}
+
+			if err != nil && errors.Is(ctx.Err(), context.DeadlineExceeded) {
+				err = errors.Wrapf(err, "after %s", d)
+			}
+		}()
+		return next.RoundTrip(req.WithContext(ctx))
+	})
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/flags.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/flags.go
new file mode 100644
index 00000000000..5bfb683646f
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/flags.go
@@ -0,0 +1,48 @@
+// Copyright (C) 2017 ScyllaDB
+
+package backupspec
+
+// LocationValue is a command line flag value wrapper for Location.
+type LocationValue Location
+
+var nilLocation = Location{}
+
+func (v *LocationValue) String() string {
+	if v.Value() == nilLocation {
+		return ""
+	}
+	return Location(*v).String()
+}
+
+func (v *LocationValue) Set(s string) error {
+	return (*Location)(v).UnmarshalText([]byte(s))
+}
+
+func (v *LocationValue) Type() string {
+	return "string"
+}
+
+func (v *LocationValue) Value() Location {
+	return Location(*v)
+}
+
+// SnapshotTagValue is a command line flag value wrapper for a snapshot tag.
+type SnapshotTagValue string
+
+func (v *SnapshotTagValue) String() string {
+	return string(*v)
+}
+
+func (v *SnapshotTagValue) Set(s string) error {
+	if !IsSnapshotTag(s) {
+		return errInvalidSnapshotTag
+	}
+	*v = SnapshotTagValue(s)
+	return nil
+}
+
+func (v *SnapshotTagValue) Type() string {
+	return "string"
+}
+
+func (v *SnapshotTagValue) Value() string {
+	return string(*v)
+}
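+
+// parseLocationFlag is an illustrative sketch (a hypothetical helper, not
+// part of the upstream source): LocationValue and SnapshotTagValue implement
+// the String/Set/Type trio expected by pflag and cobra, so they can back
+// command line flags directly; Set alone also supports manual parsing.
+func parseLocationFlag(s string) (Location, error) {
+	var v LocationValue
+	if err := v.Set(s); err != nil {
+		return Location{}, err
+	}
+	return v.Value(), nil
+}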
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/location.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/location.go
new file mode 100644
index 00000000000..eab03ed3cf0
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/location.go
@@ -0,0 +1,153 @@
+// Copyright (C) 2017 ScyllaDB
+
+package backupspec
+
+import (
+	"path"
+	"regexp"
+	"unsafe"
+
+	"github.com/gocql/gocql"
+	"github.com/pkg/errors"
+)
+
+// Provider specifies the type of remote storage, like S3 etc.
+type Provider string
+
+// Provider enumeration.
+const (
+	S3    = Provider("s3")
+	GCS   = Provider("gcs")
+	Azure = Provider("azure")
+)
+
+var providers = []Provider{S3, GCS, Azure}
+
+// Providers returns a list of all supported providers as a list of strings.
+func Providers() []string {
+	return *(*[]string)(unsafe.Pointer(&providers))
+}
+
+var testProviders = []string{"testdata"}
+
+// AddTestProvider adds a provider for unit testing purposes.
+// The provider is not returned in the Providers() call but you can parse a
+// Location with a test provider.
+func AddTestProvider(name string) {
+	testProviders = append(testProviders, name)
+}
+
+func hasProvider(s string) bool {
+	for i := range providers {
+		if providers[i].String() == s {
+			return true
+		}
+	}
+	for i := range testProviders {
+		if testProviders[i] == s {
+			return true
+		}
+	}
+	return false
+}
+
+func (p Provider) String() string {
+	return string(p)
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (p Provider) MarshalText() (text []byte, err error) {
+	return []byte(p.String()), nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (p *Provider) UnmarshalText(text []byte) error {
+	if s := string(text); !hasProvider(s) {
+		return errors.Errorf("unrecognised provider %q", text)
+	}
+	*p = Provider(text)
+	return nil
+}
+
+// Location specifies the storage provider and container/resource for a DC.
+type Location struct {
+	DC       string   `json:"dc"`
+	Provider Provider `json:"provider"`
+	Path     string   `json:"path"`
+}
+
+// ErrInvalid means that the location does not adhere to the
+// [dc:]<provider>:<path> format required by Scylla Manager.
+var ErrInvalid = errors.Errorf("invalid location, the format is [dc:]<provider>:<path> ex. s3:my-bucket, the bucket name must be DNS compliant")
+
+// Providers require that resource names are DNS compliant.
+// The following is a super simplified DNS (plus provider prefix)
+// matching regexp.
+var pattern = regexp.MustCompile(`^(([a-zA-Z0-9\-\_\.]+):)?([a-z0-9]+):([a-z0-9\-\.]+)$`)
+
+// NewLocation first checks if the location string conforms to the valid
+// pattern. It then returns the location split into its three components:
+// dc, provider, and path.
+func NewLocation(location string) (l Location, err error) {
+	m := pattern.FindStringSubmatch(location)
+	if m == nil {
+		return Location{}, ErrInvalid
+	}
+
+	return Location{
+		DC:       m[2],
+		Provider: Provider(m[3]),
+		Path:     m[4],
+	}, nil
+}
+
+func (l Location) String() string {
+	p := l.Provider.String() + ":" + l.Path
+	if l.DC != "" {
+		p = l.DC + ":" + p
+	}
+	return p
+}
+
+func (l Location) MarshalText() (text []byte, err error) {
+	return []byte(l.String()), nil
+}
+
+func (l *Location) UnmarshalText(text []byte) error {
+	m := pattern.FindSubmatch(text)
+	if m == nil {
+		return errors.Errorf("invalid location %q, the format is [dc:]<provider>:<path> ex. 
s3:my-bucket, the path must be DNS compliant", string(text)) + } + + if err := l.Provider.UnmarshalText(m[3]); err != nil { + return errors.Wrapf(err, "invalid location %q", string(text)) + } + + l.DC = string(m[2]) + l.Path = string(m[4]) + + return nil +} + +func (l Location) MarshalCQL(_ gocql.TypeInfo) ([]byte, error) { + return l.MarshalText() +} + +func (l *Location) UnmarshalCQL(_ gocql.TypeInfo, data []byte) error { + return l.UnmarshalText(data) +} + +// RemoteName returns the rclone remote name for that location. +func (l Location) RemoteName() string { + return l.Provider.String() +} + +// RemotePath returns string that can be used with rclone to specify a path in +// the given location. +func (l Location) RemotePath(p string) string { + r := l.RemoteName() + if r != "" { + r += ":" + } + return path.Join(r+l.Path, p) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/manifest.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/manifest.go new file mode 100644 index 00000000000..14aab10c69d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/manifest.go @@ -0,0 +1,307 @@ +// Copyright (C) 2017 ScyllaDB + +package backupspec + +import ( + "compress/gzip" + "encoding/json" + "io" + "os" + "path" + "runtime" + "strings" + + jsoniter "github.com/json-iterator/go" + "github.com/pkg/errors" + "github.com/scylladb/scylla-manager/v3/pkg/util/inexlist/ksfilter" + "github.com/scylladb/scylla-manager/v3/pkg/util/pathparser" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" + "go.uber.org/multierr" +) + +// ManifestInfo represents manifest on remote location. +type ManifestInfo struct { + Location Location + DC string + ClusterID uuid.UUID + NodeID string + TaskID uuid.UUID + SnapshotTag string + Temporary bool +} + +// Path returns path to the file that manifest points to. +func (m *ManifestInfo) Path() string { + f := RemoteManifestFile(m.ClusterID, m.TaskID, m.SnapshotTag, m.DC, m.NodeID) + if m.Temporary { + f = TempFile(f) + } + return f +} + +// SchemaPath returns path to the schema file that manifest points to. +func (m *ManifestInfo) SchemaPath() string { + return RemoteSchemaFile(m.ClusterID, m.TaskID, m.SnapshotTag) +} + +// SSTableVersionDir returns path to the sstable version directory. +func (m *ManifestInfo) SSTableVersionDir(keyspace, table, version string) string { + return RemoteSSTableVersionDir(m.ClusterID, m.DC, m.NodeID, keyspace, table, version) +} + +// LocationSSTableVersionDir returns path to the sstable version directory with location remote path prefix. +func (m *ManifestInfo) LocationSSTableVersionDir(keyspace, table, version string) string { + return m.Location.RemotePath(RemoteSSTableVersionDir(m.ClusterID, m.DC, m.NodeID, keyspace, table, version)) +} + +// ParsePath extracts properties from full remote path to manifest. +func (m *ManifestInfo) ParsePath(s string) error { + // Clear values + *m = ManifestInfo{} + + // Clean path for usage with strings.Split + s = strings.TrimPrefix(path.Clean(s), sep) + + parsers := []pathparser.Parser{ + pathparser.Static("backup"), + pathparser.Static(string(MetaDirKind)), + pathparser.Static("cluster"), + pathparser.ID(&m.ClusterID), + pathparser.Static("dc"), + pathparser.String(&m.DC), + pathparser.Static("node"), + pathparser.String(&m.NodeID), + m.fileNameParser, + } + n, err := pathparser.New(s, sep).Parse(parsers...) 
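+	// For reference, a full manifest path parsed here looks like (hypothetical IDs):
+	// backup/meta/cluster/<cluster_id>/dc/<dc>/node/<node_id>/task_<task_id>_tag_sm_20230114183231UTC_manifest.json.gz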
+ if err != nil { + return err + } + if n < len(parsers) { + return errors.Errorf("no input at position %d", n) + } + + m.Temporary = strings.HasSuffix(s, TempFileExt) + + return nil +} + +func (m *ManifestInfo) fileNameParser(v string) error { + parsers := []pathparser.Parser{ + pathparser.Static("task"), + pathparser.ID(&m.TaskID), + pathparser.Static("tag"), + pathparser.Static("sm"), + func(v string) error { + tag := "sm_" + v + if !IsSnapshotTag(tag) { + return errors.Errorf("invalid snapshot tag %s", tag) + } + m.SnapshotTag = tag + return nil + }, + pathparser.Static(Manifest, TempFile(Manifest)), + } + + n, err := pathparser.New(v, "_").Parse(parsers...) + if err != nil { + return err + } + if n < len(parsers) { + return errors.Errorf("input too short") + } + return nil +} + +// ManifestContent is structure containing information about the backup. +type ManifestContent struct { + Version string `json:"version"` + ClusterName string `json:"cluster_name"` + IP string `json:"ip"` + Size int64 `json:"size"` + Tokens []int64 `json:"tokens"` + Schema string `json:"schema"` +} + +// ManifestContentWithIndex is structure containing information about the backup +// and the index. +type ManifestContentWithIndex struct { + ManifestContent + Index []FilesMeta `json:"index"` + + indexFile string +} + +// Read loads the ManifestContent from JSON and tees the Index to a file. +func (m *ManifestContentWithIndex) Read(r io.Reader) error { + f, err := os.CreateTemp(os.TempDir(), "manifestIndex") + if err != nil { + return err + } + + defer f.Close() + + m.indexFile = f.Name() + + runtime.SetFinalizer(m, func(m *ManifestContentWithIndex) { + os.Remove(m.indexFile) // nolint: errcheck + }) + + gr, err := gzip.NewReader(io.TeeReader(r, f)) + if err != nil { + return err + } + + if err := json.NewDecoder(gr).Decode(&m.ManifestContent); err != nil { + return err + } + return gr.Close() +} + +// Write writes the ManifestContentWithIndex as compressed JSON. +func (m *ManifestContentWithIndex) Write(w io.Writer) error { + gw := gzip.NewWriter(w) + + if err := json.NewEncoder(gw).Encode(m); err != nil { + return err + } + + return gw.Close() +} + +// ReadIndex loads the index from the indexfile into the struct. +func (m *ManifestContentWithIndex) ReadIndex() ([]FilesMeta, error) { + if m.indexFile == "" { + return nil, errors.New("index file not set, did not perform a successful Read") + } + + f, err := os.Open(m.indexFile) + if err != nil { + return nil, err + } + defer f.Close() + + gr, err := gzip.NewReader(f) + if err != nil { + return nil, err + } + + tempM := new(ManifestContentWithIndex) + dec := json.NewDecoder(gr) + err = dec.Decode(&tempM) + + return tempM.Index, err +} + +// LoadIndex loads the entire index into memory so that it can be filtered or marshalled. +func (m *ManifestContentWithIndex) LoadIndex() (err error) { + m.Index, err = m.ReadIndex() + return +} + +// IndexLength reads the indexes from the Indexfile and returns the length. +func (m *ManifestContentWithIndex) IndexLength() (n int, err error) { + if m.Index != nil { + n = len(m.Index) + return + } + + err = m.ForEachIndexIter(nil, func(fm FilesMeta) { + n++ + }) + return +} + +// ForEachIndexIterWithError streams the indexes from the Manifest JSON, filters them and performs a +// callback on each as they are read in. It stops iteration after callback returns an error. 
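+// A hypothetical usage sketch (assumes a prior successful Read; sums the sizes
+// of all files in keyspaces matching "ks1.*"):
+//
+//	var total int64
+//	err := m.ForEachIndexIterWithError([]string{"ks1.*"}, func(fm FilesMeta) error {
+//		total += fm.Size
+//		return nil
+//	})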
+func (m *ManifestContentWithIndex) ForEachIndexIterWithError(keyspace []string, cb func(fm FilesMeta) error) (err error) { + f, err := os.Open(m.indexFile) + if err != nil { + return err + } + defer func() { + err = multierr.Append(err, f.Close()) + }() + + gr, err := gzip.NewReader(f) + if err != nil { + return err + } + + filter, err := ksfilter.NewFilter(keyspace) + if err != nil { + return errors.Wrap(err, "create filter") + } + + iter := jsoniter.Parse(jsoniter.ConfigDefault, gr, 1024) + + for k := iter.ReadObject(); iter.Error == nil; k = iter.ReadObject() { + if k != "index" { + iter.Skip() + continue + } + + iter.ReadArrayCB(func(it *jsoniter.Iterator) bool { + var m FilesMeta + it.ReadVal(&m) + if filter.Check(m.Keyspace, m.Table) { + err = cb(m) + } + return err == nil + }) + break + } + + return multierr.Append(iter.Error, err) +} + +// ForEachIndexIter is a wrapper for ForEachIndexIterWithError +// that takes callback which doesn't return an error. +func (m *ManifestContentWithIndex) ForEachIndexIter(keyspace []string, cb func(fm FilesMeta)) error { + return m.ForEachIndexIterWithError(keyspace, func(fm FilesMeta) error { + cb(fm) + return nil + }) +} + +// ForEachIndexIterFiles performs an action for each filtered file in the index. +func (m *ManifestContentWithIndex) ForEachIndexIterFiles(keyspace []string, mi *ManifestInfo, cb func(dir string, files []string)) error { + return m.ForEachIndexIter(keyspace, func(fm FilesMeta) { + dir := RemoteSSTableVersionDir(mi.ClusterID, mi.DC, mi.NodeID, fm.Keyspace, fm.Table, fm.Version) + cb(dir, fm.Files) + }) +} + +// ManifestInfoWithContent is intended for passing manifest with its content. +type ManifestInfoWithContent struct { + *ManifestInfo + *ManifestContentWithIndex +} + +func NewManifestInfoWithContent() ManifestInfoWithContent { + return ManifestInfoWithContent{ + ManifestInfo: new(ManifestInfo), + ManifestContentWithIndex: new(ManifestContentWithIndex), + } +} + +// FilesInfo specifies paths to files backed up for a table (and node) within +// a location. +// Note that a backup for a table usually consists of multiple instances of +// FilesInfo since data is replicated across many nodes. +type FilesInfo struct { + Location Location `json:"location"` + Schema string `json:"schema"` + Files []FilesMeta `json:"files"` +} + +// FilesMeta contains information about SST files of particular keyspace/table. +type FilesMeta struct { + Keyspace string `json:"keyspace"` + Table string `json:"table"` + Version string `json:"version"` + Files []string `json:"files"` + Size int64 `json:"size"` + + Path string `json:"path,omitempty"` +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/paths.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/paths.go new file mode 100644 index 00000000000..2295613c8cf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/paths.go @@ -0,0 +1,168 @@ +// Copyright (C) 2017 ScyllaDB + +package backupspec + +import ( + "os" + "path" + "strings" + + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +const ( + // MetadataVersion is the suffix for version file. + MetadataVersion = ".version" + // Manifest is name of the manifest file. + Manifest = "manifest.json.gz" + // Schema is the name of the schema file. + Schema = "schema.tar.gz" + // TempFileExt is suffix for the temporary files. + TempFileExt = ".tmp" + + sep = string(os.PathSeparator) +) + +type dirKind string + +// Enumeration of dirKinds. 
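+// Each kind corresponds to a top-level "backup/<kind>" directory in the
+// remote location (see the Remote*Dir helpers in this file).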
+const ( + SchemaDirKind = dirKind("schema") + SSTDirKind = dirKind("sst") + MetaDirKind = dirKind("meta") +) + +// RemoteManifestLevel calculates maximal depth of recursive listing starting at +// baseDir to list all manifests. +func RemoteManifestLevel(baseDir string) int { + a := len(strings.Split(remoteManifestDir(uuid.Nil, "a", "b"), sep)) + b := len(strings.Split(baseDir, sep)) + return a - b +} + +// RemoteManifestFile returns path to the manifest file. +func RemoteManifestFile(clusterID, taskID uuid.UUID, snapshotTag, dc, nodeID string) string { + manifestName := strings.Join([]string{ + "task", + taskID.String(), + "tag", + snapshotTag, + Manifest, + }, "_") + + return path.Join( + remoteManifestDir(clusterID, dc, nodeID), + manifestName, + ) +} + +func remoteManifestDir(clusterID uuid.UUID, dc, nodeID string) string { + return path.Join( + "backup", + string(MetaDirKind), + "cluster", + clusterID.String(), + "dc", + dc, + "node", + nodeID, + ) +} + +// RemoteMetaClusterDCDir returns path to DC dir for the provided cluster. +func RemoteMetaClusterDCDir(clusterID uuid.UUID) string { + return path.Join( + "backup", + string(MetaDirKind), + "cluster", + clusterID.String(), + "dc", + ) +} + +// RemoteSchemaFile returns path to the schema file. +func RemoteSchemaFile(clusterID, taskID uuid.UUID, snapshotTag string) string { + manifestName := strings.Join([]string{ + "task", + taskID.String(), + "tag", + snapshotTag, + Schema, + }, "_") + + return path.Join( + remoteSchemaDir(clusterID), + manifestName, + ) +} + +func remoteSchemaDir(clusterID uuid.UUID) string { + return path.Join( + "backup", + string(SchemaDirKind), + "cluster", + clusterID.String(), + ) +} + +// RemoteSSTableVersionDir returns path to the sstable version directory. +func RemoteSSTableVersionDir(clusterID uuid.UUID, dc, nodeID, keyspace, table, version string) string { + return path.Join( + RemoteSSTableDir(clusterID, dc, nodeID, keyspace, table), + version, + ) +} + +// RemoteSSTableBaseDir returns path to the sstable base directory. +func RemoteSSTableBaseDir(clusterID uuid.UUID, dc, nodeID string) string { + return path.Join( + "backup", + string(SSTDirKind), + "cluster", + clusterID.String(), + "dc", + dc, + "node", + nodeID, + ) +} + +// RemoteSSTableDir returns path to given table's sstable directory. +func RemoteSSTableDir(clusterID uuid.UUID, dc, nodeID, keyspace, table string) string { + return path.Join( + RemoteSSTableBaseDir(clusterID, dc, nodeID), + "keyspace", + keyspace, + "table", + table, + ) +} + +// TempFile returns temporary path for the provided file. +func TempFile(f string) string { + return f + TempFileExt +} + +const ( + // DataDir is the data dir prefix. + DataDir = "data:" + + // ScyllaManifest defines the name of backup manifest file. + ScyllaManifest = "manifest.json" + // ScyllaSchema defines the name of backup CQL schema file. + ScyllaSchema = "schema.cql" +) + +// KeyspaceDir return keyspace directory. +func KeyspaceDir(keyspace string) string { + return DataDir + keyspace +} + +// UploadTableDir returns table upload directory. 
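+// For example (hypothetical names):
+//
+//	UploadTableDir("ks1", "users", "ver1") // "data:ks1/users-ver1/upload"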
+func UploadTableDir(keyspace, table, version string) string { + return path.Join( + KeyspaceDir(keyspace), + table+"-"+version, + "upload", + ) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/tags.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/tags.go new file mode 100644 index 00000000000..177a801c646 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/tags.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 ScyllaDB + +package backupspec + +import ( + "regexp" + "time" + + "github.com/pkg/errors" + "github.com/scylladb/scylla-manager/v3/pkg/util/timeutc" +) + +var ( + tagDateFormat = "20060102150405" + tagRegexp = regexp.MustCompile("^sm_([0-9]{14})UTC$") + errInvalidSnapshotTag = errors.New("not a Scylla Manager snapshot tag, expected format is sm_20060102150405UTC") +) + +// NewSnapshotTag creates new snapshot tag for the current time. +func NewSnapshotTag() string { + return SnapshotTagAt(timeutc.Now()) +} + +// SnapshotTagAt creates new snapshot tag for specified time. +func SnapshotTagAt(t time.Time) string { + return "sm_" + t.UTC().Format(tagDateFormat) + "UTC" +} + +// IsSnapshotTag returns true if provided string has valid snapshot tag format. +func IsSnapshotTag(tag string) bool { + return tagRegexp.MatchString(tag) +} + +// SnapshotTagTime returns time of the provided snapshot tag. +func SnapshotTagTime(tag string) (time.Time, error) { + m := tagRegexp.FindStringSubmatch(tag) + if m == nil { + return time.Time{}, errInvalidSnapshotTag + } + return timeutc.Parse(tagDateFormat, m[1]) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/versioning.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/versioning.go new file mode 100644 index 00000000000..103502d130a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec/versioning.go @@ -0,0 +1,120 @@ +// Copyright (C) 2023 ScyllaDB + +package backupspec + +import ( + "context" + "path" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/scylladb/scylla-manager/v3/pkg/scyllaclient" +) + +// Issue #3288 showed that we need to be able to store multiple different SSTables +// with the same name and from the same node ID. In order to do that, we use rclone +// 'suffix' option to rename otherwise overwritten files during upload. +// Choosing snapshot tag as the suffix allows us to determine when to purge/restore versioned files. + +// VersionedSSTable represents older version of SSTable that we still need to store in a backup. +// (e.g. older version of 'md-2-big-Data.db' could be 'md-2-big-Data.db.sm_20230114183231UTC') +// Note, that the newest version of SSTable does not have snapshot tag extension. +type VersionedSSTable struct { + Name string // Original SSTable name (e.g. md-2-big-Data.db) + Version string // Snapshot tag extension representing backup that introduced newer version of this SSTable (e.g. sm_20230114183231UTC) + Size int64 +} + +// FullName returns versioned file name. +func (vt VersionedSSTable) FullName() string { + return vt.Name + "." + vt.Version +} + +// VersionedMap maps SSTable name to its versions with respect to currently restored snapshot tag. +type VersionedMap map[string]VersionedSSTable + +// VersionedFileExt returns the snapshot tag extension of versioned file. 
+// If using alongside with RcloneMoveDir or RcloneCopyDir as suffix option, +// this extension will be added to files otherwise overwritten or deleted in the process. +func VersionedFileExt(snapshotTag string) string { + return "." + snapshotTag +} + +// VersionedFileCreationTime returns the time of versioned file creation +// (the time when the newer version of the file has been uploaded to the backup location). +func VersionedFileCreationTime(versioned string) (time.Time, error) { + snapshotExt := path.Ext(versioned)[1:] + return SnapshotTagTime(snapshotExt) +} + +// IsVersionedFileRemovable checks if versioned file can be safely purged. +// In order to decide that, the time of the oldest stored backup is required. +func IsVersionedFileRemovable(oldest time.Time, versioned string) (bool, error) { + t, err := VersionedFileCreationTime(versioned) + if err != nil { + return false, err + } + // Versioned file can only belong to backups STRICTLY older than itself. + // If it is older (or equally old) to the oldest, currently stored backup in remote location, it can be deleted. + if !t.After(oldest) { + return true, nil + } + return false, nil +} + +// SplitNameAndVersion splits versioned file name into its original name and its version. +func SplitNameAndVersion(versioned string) (name, version string) { + versionExt := path.Ext(versioned) + baseName := strings.TrimSuffix(versioned, versionExt) + return baseName, versionExt[1:] +} + +// ListVersionedFiles gathers information about versioned files from specified dir. +func ListVersionedFiles(ctx context.Context, client *scyllaclient.Client, snapshotTag, host, dir string) (VersionedMap, error) { + versionedFiles := make(VersionedMap) + allVersions := make(map[string][]VersionedSSTable) + + opts := &scyllaclient.RcloneListDirOpts{ + FilesOnly: true, + VersionedOnly: true, + } + f := func(item *scyllaclient.RcloneListDirItem) { + name, version := SplitNameAndVersion(item.Name) + allVersions[name] = append(allVersions[name], VersionedSSTable{ + Name: name, + Version: version, + Size: item.Size, + }) + } + + if err := client.RcloneListDirIter(ctx, host, dir, opts, f); err != nil { + return nil, errors.Wrapf(err, "host %s: listing versioned files", host) + } + + restoreT, err := SnapshotTagTime(snapshotTag) + if err != nil { + return nil, err + } + // Chose correct version with respect to currently restored snapshot tag + for _, versions := range allVersions { + var candidate VersionedSSTable + for _, v := range versions { + tagT, err := SnapshotTagTime(v.Version) + if err != nil { + return nil, err + } + if tagT.After(restoreT) { + if candidate.Version == "" || v.Version < candidate.Version { + candidate = v + } + } + } + + if candidate.Version != "" { + versionedFiles[candidate.Name] = candidate + } + } + + return versionedFiles, nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/README.md b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/README.md new file mode 100644 index 00000000000..397540f698d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/README.md @@ -0,0 +1,16 @@ +# Scheduler service + +### Useful links + +- `sctool tasks` command to list all scheduled tasks https://manager.docs.scylladb.com/stable/sctool/task.html +- `sctool info` to see the details about particular task https://manager.docs.scylladb.com/stable/sctool/info.html +- `sctool progress` to see the task execution progress https://manager.docs.scylladb.com/stable/sctool/progress.html 
+
+### General picture
+
+![Scheduling](scylla-manager-scheduler.drawio.svg)
+
+Scylla Manager's scheduler service is responsible for scheduling all of the maintenance tasks defined for a cluster managed by scylla-manager.
+Tasks are created asynchronously: first, a manager REST API consumer defines the task and sends it to scylla-manager; later, the scheduler service triggers its execution according to the cron definition.
+
+`sctool` is one of the manager's API consumers.
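As a rough illustration of that flow, a client can create a task with a single POST and let the scheduler take it from there. A hypothetical sketch only: the endpoint shape comes from the diagram, the JSON field names from the `Task` and `Schedule` structs in `model.go`, and the server address and cluster ID are made up:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Hypothetical server address and cluster ID.
	const server = "http://127.0.0.1:5080"
	const cluster = "4c8b92f6-0000-0000-0000-000000000000"

	// Field names follow the JSON tags in model.go; the plain-string cron
	// form is accepted via the Cron.UnmarshalText fallback.
	task := `{
	  "type": "repair",
	  "enabled": true,
	  "schedule": {"cron": "0 23 * * SAT", "start_date": "2024-06-01T00:00:00Z"},
	  "properties": {}
	}`

	resp, err := http.Post(server+"/api/v1/cluster/"+cluster+"/tasks", "application/json", strings.NewReader(task))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // on success the server reports the new task
}
```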
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/details.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/details.go new file mode 100644 index 00000000000..869c367b64c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/details.go @@ -0,0 +1,17 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" +) + +func details(t *Task) scheduler.Details { + return scheduler.Details{ + Properties: t.Properties, + Trigger: t.Sched.trigger(), + Backoff: t.Sched.backoff(), + Window: t.Sched.Window.Window(), + Location: t.Sched.Timezone.Location(), + } +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/listener.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/listener.go new file mode 100644 index 00000000000..4565808ae2d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/listener.go @@ -0,0 +1,89 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "context" + "time" + + "github.com/scylladb/go-log" + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" +) + +// Listener instantiated with Key type. +type Listener = scheduler.Listener[Key] + +type schedulerListener struct { + Listener + find func(key Key) (taskInfo, bool) + logger log.Logger +} + +func newSchedulerListener(find func(key Key) (taskInfo, bool), logger log.Logger) schedulerListener { + return schedulerListener{ + Listener: scheduler.ErrorLogListener[Key](logger), + find: find, + logger: logger, + } +} + +func (l schedulerListener) OnSchedule(ctx context.Context, key Key, begin, end time.Time, retno int8) { + in := begin.Sub(now()).Truncate(time.Second) + if end.IsZero() { + l.logKey(ctx, key, "Schedule", + "in", in, + "begin", begin, + "retry", retno, + ) + } else { + l.logKey(ctx, key, "Schedule in window", + "in", in, + "begin", begin, + "end", end, + "retry", retno, + ) + } +} + +func (l schedulerListener) OnUnschedule(ctx context.Context, key Key) { + l.logKey(ctx, key, "Unschedule") +} + +func (l schedulerListener) Trigger(ctx context.Context, key Key, success bool) { + l.logKey(ctx, key, "Trigger", "success", success) +} + +func (l schedulerListener) OnStop(ctx context.Context, key Key) { + l.logKey(ctx, key, "Stop") +} + +func (l schedulerListener) OnRetryBackoff(ctx context.Context, key Key, backoff time.Duration, retno int8) { + l.logKey(ctx, key, "Retry backoff", "backoff", backoff, "retry", retno) +} + +func (l schedulerListener) OnNoTrigger(ctx context.Context, key Key) { + l.logKey(ctx, key, "No trigger") +} + +func (l schedulerListener) OnSleep(ctx context.Context, key Key, d time.Duration) { + l.logger.Debug(ctx, "OnSleep", "task_id", key, "duration", d) +} + +func (l schedulerListener) logKey(ctx context.Context, key Key, msg string, keyvals ...interface{}) { + ti, ok := l.find(key) + if !ok { + return + } + if ti.TaskType == HealthCheckTask { + l.logger.Debug(ctx, msg, prependTaskInfo(ti, keyvals)...) + } else { + l.logger.Info(ctx, msg, prependTaskInfo(ti, keyvals)...) 
+ } +} + +func prependTaskInfo(ti taskInfo, i []interface{}) []interface{} { + v := make([]interface{}, len(i)+2) + v[0], v[1] = "task", ti + copy(v[2:], i) + return v +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/model.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/model.go new file mode 100644 index 00000000000..c33cdc9d32f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/model.go @@ -0,0 +1,462 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/gocql/gocql" + "github.com/pkg/errors" + "github.com/scylladb/gocqlx/v2" + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" + "github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger" + "github.com/scylladb/scylla-manager/v3/pkg/service" + "github.com/scylladb/scylla-manager/v3/pkg/util/duration" + "github.com/scylladb/scylla-manager/v3/pkg/util/retry" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" + "go.uber.org/multierr" +) + +// TaskType specifies the type of Task. +type TaskType string + +// TaskType enumeration. +const ( + UnknownTask TaskType = "unknown" + BackupTask TaskType = "backup" + RestoreTask TaskType = "restore" + HealthCheckTask TaskType = "healthcheck" + RepairTask TaskType = "repair" + SuspendTask TaskType = "suspend" + ValidateBackupTask TaskType = "validate_backup" + + mockTask TaskType = "mock" +) + +func (t TaskType) String() string { + return string(t) +} + +func (t TaskType) MarshalText() (text []byte, err error) { + return []byte(t.String()), nil +} + +func (t *TaskType) UnmarshalText(text []byte) error { + switch TaskType(text) { + case UnknownTask: + *t = UnknownTask + case BackupTask: + *t = BackupTask + case RestoreTask: + *t = RestoreTask + case HealthCheckTask: + *t = HealthCheckTask + case RepairTask: + *t = RepairTask + case SuspendTask: + *t = SuspendTask + case ValidateBackupTask: + *t = ValidateBackupTask + case mockTask: + *t = mockTask + default: + return fmt.Errorf("unrecognized TaskType %q", text) + } + return nil +} + +// Cron implements a trigger based on cron expression. +// It supports the extended syntax including @monthly, @weekly, @daily, @midnight, @hourly, @every . +type Cron struct { + CronSpecification + inner scheduler.Trigger +} + +// CronSpecification combines specification for cron together with the start dates +// that defines the moment when the cron is being started. +type CronSpecification struct { + Spec string `json:"spec"` + StartDate time.Time `json:"start_date"` +} + +func NewCron(spec string, startDate time.Time) (Cron, error) { + t, err := trigger.NewCron(spec) + if err != nil { + return Cron{}, err + } + + return Cron{ + CronSpecification: CronSpecification{ + Spec: spec, + StartDate: startDate, + }, + inner: t, + }, nil +} + +func NewCronEvery(d time.Duration, startDate time.Time) Cron { + c, _ := NewCron("@every "+d.String(), startDate) // nolint: errcheck + return c +} + +// MustCron calls NewCron and panics on error. +func MustCron(spec string, startDate time.Time) Cron { + c, err := NewCron(spec, startDate) + if err != nil { + panic(err) + } + return c +} + +// Next implements scheduler.Trigger. 
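+// For example, with spec "@midnight" and a StartDate of 2024-06-01 (hypothetical
+// values), no activation is ever returned before 2024-06-01.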
+func (c Cron) Next(now time.Time) time.Time { + if c.inner == nil { + return time.Time{} + } + if c.StartDate.After(now) { + return c.inner.Next(c.StartDate) + } + return c.inner.Next(now) +} + +func (c Cron) MarshalText() (text []byte, err error) { + bytes, err := json.Marshal(c.CronSpecification) + if err != nil { + return nil, errors.Wrapf(err, "cannot json marshal {%v}", c.CronSpecification) + } + return bytes, nil +} + +func (c *Cron) UnmarshalText(text []byte) error { + if len(text) == 0 { + return nil + } + + var cronSpec CronSpecification + err := json.Unmarshal(text, &cronSpec) + if err != nil { + // fallback to the < 3.2.6 approach where cron was not coupled with start date + cronSpec = CronSpecification{ + Spec: string(text), + } + } + + if cronSpec.Spec == "" { + return nil + } + v, err2 := NewCron(cronSpec.Spec, cronSpec.StartDate) + if err2 != nil { + return errors.Wrap(multierr.Combine(err, err2), "cron") + } + + *c = v + return nil +} + +func (c Cron) MarshalCQL(info gocql.TypeInfo) ([]byte, error) { + if i := info.Type(); i != gocql.TypeText && i != gocql.TypeVarchar { + return nil, errors.Errorf("invalid gocql type %s expected %s", info.Type(), gocql.TypeText) + } + return c.MarshalText() +} + +func (c *Cron) UnmarshalCQL(info gocql.TypeInfo, data []byte) error { + if i := info.Type(); i != gocql.TypeText && i != gocql.TypeVarchar { + return errors.Errorf("invalid gocql type %s expected %s", info.Type(), gocql.TypeText) + } + return c.UnmarshalText(data) +} + +func (c Cron) IsZero() bool { + return c.inner == nil +} + +// WeekdayTime adds CQL capabilities to scheduler.WeekdayTime. +type WeekdayTime struct { + scheduler.WeekdayTime +} + +func (w WeekdayTime) MarshalCQL(info gocql.TypeInfo) ([]byte, error) { + if i := info.Type(); i != gocql.TypeText && i != gocql.TypeVarchar { + return nil, errors.Errorf("invalid gocql type %s expected %s", info.Type(), gocql.TypeText) + } + return w.MarshalText() +} + +func (w *WeekdayTime) UnmarshalCQL(info gocql.TypeInfo, data []byte) error { + if i := info.Type(); i != gocql.TypeText && i != gocql.TypeVarchar { + return errors.Errorf("invalid gocql type %s expected %s", info.Type(), gocql.TypeText) + } + return w.UnmarshalText(data) +} + +// Window adds JSON validation to scheduler.Window. +type Window []WeekdayTime + +func (w *Window) UnmarshalJSON(data []byte) error { + var wdt []scheduler.WeekdayTime + if err := json.Unmarshal(data, &wdt); err != nil { + return errors.Wrap(err, "window") + } + if len(wdt) == 0 { + return nil + } + + if _, err := scheduler.NewWindow(wdt...); err != nil { + return errors.Wrap(err, "window") + } + + s := make([]WeekdayTime, len(wdt)) + for i := range wdt { + s[i] = WeekdayTime{wdt[i]} + } + *w = s + + return nil +} + +// Window returns this window as scheduler.Window. +func (w Window) Window() scheduler.Window { + if len(w) == 0 { + return nil + } + wdt := make([]scheduler.WeekdayTime, len(w)) + for i := range w { + wdt[i] = w[i].WeekdayTime + } + sw, _ := scheduler.NewWindow(wdt...) // nolint: errcheck + return sw +} + +// location adds CQL capabilities and validation to time.Location. 
+type location struct { + *time.Location +} + +func (l location) MarshalText() (text []byte, err error) { + if l.Location == nil { + return nil, nil + } + return []byte(l.Location.String()), nil +} + +func (l *location) UnmarshalText(text []byte) error { + if len(text) == 0 { + return nil + } + t, err := time.LoadLocation(string(text)) + if err != nil { + return err + } + l.Location = t + return nil +} + +func (l location) MarshalCQL(info gocql.TypeInfo) ([]byte, error) { + if i := info.Type(); i != gocql.TypeText && i != gocql.TypeVarchar { + return nil, errors.Errorf("invalid gocql type %s expected %s", info.Type(), gocql.TypeText) + } + return l.MarshalText() +} + +func (l *location) UnmarshalCQL(info gocql.TypeInfo, data []byte) error { + if i := info.Type(); i != gocql.TypeText && i != gocql.TypeVarchar { + return errors.Errorf("invalid gocql type %s expected %s", info.Type(), gocql.TypeText) + } + return l.UnmarshalText(data) +} + +// Timezone adds JSON validation to time.Location. +type Timezone struct { + location +} + +func NewTimezone(tz *time.Location) Timezone { + return Timezone{location{tz}} +} + +func (tz *Timezone) UnmarshalJSON(data []byte) error { + return errors.Wrap(json.Unmarshal(data, &tz.location), "timezone") +} + +// Location returns this timezone as time.Location pointer. +func (tz Timezone) Location() *time.Location { + return tz.location.Location +} + +// Schedule specify task schedule. +type Schedule struct { + gocqlx.UDT `json:"-"` + + Cron Cron `json:"cron"` + Window Window `json:"window"` + Timezone Timezone `json:"timezone"` + StartDate time.Time `json:"start_date"` + // Deprecated: use cron instead + Interval duration.Duration `json:"interval" db:"interval_seconds"` + NumRetries int `json:"num_retries"` + RetryWait duration.Duration `json:"retry_wait"` +} + +func (s Schedule) trigger() scheduler.Trigger { + if !s.Cron.IsZero() { + return s.Cron + } + return trigger.NewLegacy(s.StartDate, s.Interval.Duration()) +} + +func (s Schedule) backoff() retry.Backoff { + if s.NumRetries == 0 { + return nil + } + w := s.RetryWait + if w == 0 { + w = duration.Duration(10 * time.Minute) + } + + b := retry.NewExponentialBackoff(w.Duration(), 0, 3*time.Hour, 2, 0) + b = retry.WithMaxRetries(b, uint64(s.NumRetries)) + return b +} + +// Task specify task type, properties and schedule. 
+type Task struct { + ClusterID uuid.UUID `json:"cluster_id"` + Type TaskType `json:"type"` + ID uuid.UUID `json:"id"` + Name string `json:"name"` + Tags []string `json:"tags,omitempty"` + Enabled bool `json:"enabled,omitempty"` + Deleted bool `json:"deleted,omitempty"` + Sched Schedule `json:"schedule,omitempty"` + Properties json.RawMessage `json:"properties,omitempty"` + + Status Status `json:"status"` + SuccessCount int `json:"success_count"` + ErrorCount int `json:"error_count"` + LastSuccess *time.Time `json:"last_success"` + LastError *time.Time `json:"last_error"` +} + +func (t *Task) String() string { + return fmt.Sprintf("%s/%s", t.Type, t.ID) +} + +func (t *Task) Validate() error { + if t == nil { + return service.ErrNilPtr + } + + var errs error + if t.ID == uuid.Nil { + errs = multierr.Append(errs, errors.New("missing ID")) + } + if t.ClusterID == uuid.Nil { + errs = multierr.Append(errs, errors.New("missing ClusterID")) + } + if _, e := uuid.Parse(t.Name); e == nil { + errs = multierr.Append(errs, errors.New("name cannot be an UUID")) + } + switch t.Type { + case "", UnknownTask: + errs = multierr.Append(errs, errors.New("no TaskType specified")) + default: + var tp TaskType + errs = multierr.Append(errs, tp.UnmarshalText([]byte(t.Type))) + } + + return service.ErrValidate(errors.Wrap(errs, "invalid task")) +} + +// Status specifies the status of a Task. +type Status string + +// Status enumeration. +const ( + StatusNew Status = "NEW" + StatusRunning Status = "RUNNING" + StatusStopping Status = "STOPPING" + StatusStopped Status = "STOPPED" + StatusWaiting Status = "WAITING" + StatusDone Status = "DONE" + StatusError Status = "ERROR" + StatusAborted Status = "ABORTED" +) + +var allStatuses = []Status{ + StatusNew, + StatusRunning, + StatusStopping, + StatusStopped, + StatusWaiting, + StatusDone, + StatusError, + StatusAborted, +} + +func (s Status) String() string { + return string(s) +} + +func (s Status) MarshalText() (text []byte, err error) { + return []byte(s.String()), nil +} + +func (s *Status) UnmarshalText(text []byte) error { + switch Status(text) { + case StatusNew: + *s = StatusNew + case StatusRunning: + *s = StatusRunning + case StatusStopping: + *s = StatusStopping + case StatusStopped: + *s = StatusStopped + case StatusWaiting: + *s = StatusWaiting + case StatusDone: + *s = StatusDone + case StatusError: + *s = StatusError + case StatusAborted: + *s = StatusAborted + default: + return fmt.Errorf("unrecognized Status %q", text) + } + return nil +} + +var healthCheckActiveRunID = uuid.NewFromTime(time.Unix(0, 0)) + +// Run describes a running instance of a Task. 
+type Run struct { + ClusterID uuid.UUID `json:"cluster_id"` + Type TaskType `json:"type"` + TaskID uuid.UUID `json:"task_id"` + ID uuid.UUID `json:"id"` + Status Status `json:"status"` + Cause string `json:"cause,omitempty"` + Owner string `json:"owner"` + StartTime time.Time `json:"start_time"` + EndTime *time.Time `json:"end_time,omitempty"` +} + +func newRunFromTaskInfo(ti taskInfo) *Run { + var id uuid.UUID + if ti.TaskType == HealthCheckTask { + id = healthCheckActiveRunID + } else { + id = uuid.NewTime() + } + + return &Run{ + ClusterID: ti.ClusterID, + Type: ti.TaskType, + TaskID: ti.TaskID, + ID: id, + StartTime: now(), + Status: StatusRunning, + } +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/now.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/now.go new file mode 100644 index 00000000000..de24bac5f07 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/now.go @@ -0,0 +1,7 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import "github.com/scylladb/scylla-manager/v3/pkg/util/timeutc" + +var now = timeutc.Now diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/policy.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/policy.go new file mode 100644 index 00000000000..4dca9b5b9ee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/policy.go @@ -0,0 +1,67 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "context" + "encoding/json" + "sync" + + "github.com/pkg/errors" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +// Policy decides if given task can be run. +type Policy interface { + PreRun(clusterID, taskID, runID uuid.UUID) error + PostRun(clusterID, taskID, runID uuid.UUID) +} + +// PolicyRunner is a runner that uses policy to check if a task can be run. +type PolicyRunner struct { + Policy Policy + Runner Runner +} + +// Run implements Runner. +func (pr PolicyRunner) Run(ctx context.Context, clusterID, taskID, runID uuid.UUID, properties json.RawMessage) error { + if err := pr.Policy.PreRun(clusterID, taskID, runID); err != nil { + return err + } + defer pr.Policy.PostRun(clusterID, taskID, runID) + return pr.Runner.Run(ctx, clusterID, taskID, runID, properties) +} + +var errClusterBusy = errors.New("another task is running") + +// LockClusterPolicy is a policy that can execute only one task at a time. +type LockClusterPolicy struct { + mu sync.Mutex + busy map[uuid.UUID]struct{} +} + +func NewLockClusterPolicy() *LockClusterPolicy { + return &LockClusterPolicy{ + busy: make(map[uuid.UUID]struct{}), + } +} + +// PreRun implements Policy. +func (p *LockClusterPolicy) PreRun(clusterID, _, _ uuid.UUID) error { + p.mu.Lock() + defer p.mu.Unlock() + + if _, ok := p.busy[clusterID]; ok { + return errClusterBusy + } + + p.busy[clusterID] = struct{}{} + return nil +} + +// PostRun implements Policy. 
+func (p *LockClusterPolicy) PostRun(clusterID, _, _ uuid.UUID) { + p.mu.Lock() + defer p.mu.Unlock() + delete(p.busy, clusterID) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/resolver.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/resolver.go new file mode 100644 index 00000000000..42eb98ecca6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/resolver.go @@ -0,0 +1,81 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "fmt" + + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +type taskInfo struct { + ClusterID uuid.UUID + TaskType TaskType + TaskID uuid.UUID + TaskName string +} + +func newTaskInfoFromTask(t *Task) taskInfo { + return taskInfo{ + ClusterID: t.ClusterID, + TaskType: t.Type, + TaskID: t.ID, + TaskName: t.Name, + } +} + +func (ti taskInfo) idKey() taskInfo { + ti.TaskID = uuid.Nil + return ti +} + +func (ti taskInfo) String() string { + return fmt.Sprintf("%s/%s", ti.TaskType, ti.TaskID) +} + +type resolver struct { + taskInfo map[uuid.UUID]taskInfo + id map[taskInfo]uuid.UUID +} + +func newResolver() resolver { + return resolver{ + taskInfo: make(map[uuid.UUID]taskInfo), + id: make(map[taskInfo]uuid.UUID), + } +} + +func (r resolver) Put(ti taskInfo) { + old := r.taskInfo[ti.TaskID] + delete(r.id, old.idKey()) + + r.taskInfo[ti.TaskID] = ti + if ti.TaskName != "" { + r.id[ti.idKey()] = ti.TaskID + } +} + +func (r resolver) Remove(taskID uuid.UUID) { + ti, ok := r.taskInfo[taskID] + if !ok { + return + } + delete(r.taskInfo, taskID) + delete(r.id, ti.idKey()) +} + +func (r resolver) FindByID(taskID uuid.UUID) (taskInfo, bool) { + ti, ok := r.taskInfo[taskID] + return ti, ok +} + +func (r resolver) FillTaskID(ti *taskInfo) bool { + v := *ti + v.TaskID = uuid.Nil + + taskID, ok := r.id[v.idKey()] + if ok { + ti.TaskID = taskID + } + return ok +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/runner.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/runner.go new file mode 100644 index 00000000000..a258d2ec0dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/runner.go @@ -0,0 +1,19 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "context" + "encoding/json" + + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +// Runner is a glue between scheduler and agents doing work. +// There can be one Runner per TaskType registered in Service. +// Run ID needs to be preserved whenever agent wants to persist the running state. +// The run can be stopped by cancelling the context. +// In that case runner must return error reported by the context. 
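+// A minimal hypothetical implementation that just honours cancellation:
+//
+//	type sleepRunner struct{}
+//
+//	func (sleepRunner) Run(ctx context.Context, clusterID, taskID, runID uuid.UUID, properties json.RawMessage) error {
+//		<-ctx.Done()     // the run is stopped by cancelling the context
+//		return ctx.Err() // report the error carried by the context
+//	}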
+type Runner interface {
+	Run(ctx context.Context, clusterID, taskID, runID uuid.UUID, properties json.RawMessage) error
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/scylla-manager-scheduler.drawio b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/scylla-manager-scheduler.drawio
new file mode 100644
index 00000000000..d2d3a8c0de8
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/scylla-manager-scheduler.drawio
@@ -0,0 +1,217 @@
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/scylla-manager-scheduler.drawio.svg b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/scylla-manager-scheduler.drawio.svg
new file mode 100644
index 00000000000..e53056dbbe6
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/scylla-manager-scheduler.drawio.svg
@@ -0,0 +1,4 @@
+[diagram text: the sctool CLI / manager API client talks to the manager REST API server, which is backed by the DB, the Scheduler SVC, and the per-type SVC (backup, restore, repair, healthcheck); "Schedule the backup task": POST /api/v1/cluster/{ID}/tasks -> new task -> validate & save to scheduler_task -> task id, then the Scheduler SVC starts the scheduled task and the SVC repeatedly updates run_progress until DONE; "Check the progress": GET /api/v1/cluster/{ID}/task/{type}/{task_ID}/{run_ID} -> query run_progress]
\ No newline at end of file diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service.go new file mode 100644 index 00000000000..e572d9a319f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service.go @@ -0,0 +1,680 @@ +// Copyright (C) 2017 ScyllaDB + +package scheduler + +import ( + "context" + "encoding/json" + "sync" + "time" + "unsafe" + + "github.com/pkg/errors" + "github.com/scylladb/go-log" + "github.com/scylladb/go-set/b16set" + "github.com/scylladb/gocqlx/v2" + "github.com/scylladb/gocqlx/v2/qb" + "github.com/scylladb/scylla-manager/v3/pkg/metrics" + "github.com/scylladb/scylla-manager/v3/pkg/scheduler" + "github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger" + "github.com/scylladb/scylla-manager/v3/pkg/schema/table" + "github.com/scylladb/scylla-manager/v3/pkg/service" + "github.com/scylladb/scylla-manager/v3/pkg/store" + "github.com/scylladb/scylla-manager/v3/pkg/util/jsonutil" + "github.com/scylladb/scylla-manager/v3/pkg/util/pointer" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +type ( + // Key is unique identifier of a task in scheduler. + Key = uuid.UUID + + // Properties are defined task parameters. + // They are JSON encoded. + Properties = json.RawMessage + + // Scheduler instantiated with Key type. + Scheduler = scheduler.Scheduler[Key] + + // RunContext instantiated with Key type. + RunContext = scheduler.RunContext[Key] +) + +// PropertiesDecorator modifies task properties before running. +type PropertiesDecorator func(ctx context.Context, clusterID, taskID uuid.UUID, properties json.RawMessage) (json.RawMessage, error) + +type Service struct { + session gocqlx.Session + metrics metrics.SchedulerMetrics + drawer store.Store + logger log.Logger + + decorators map[TaskType]PropertiesDecorator + runners map[TaskType]Runner + runs map[uuid.UUID]Run + resolver resolver + scheduler map[uuid.UUID]*Scheduler + suspended *b16set.Set + noContinue map[uuid.UUID]time.Time + closed bool + mu sync.Mutex +} + +func NewService(session gocqlx.Session, metrics metrics.SchedulerMetrics, drawer store.Store, logger log.Logger) (*Service, error) { + s := &Service{ + session: session, + metrics: metrics, + drawer: drawer, + logger: logger, + + decorators: make(map[TaskType]PropertiesDecorator), + runners: make(map[TaskType]Runner), + runs: make(map[uuid.UUID]Run), + resolver: newResolver(), + scheduler: make(map[uuid.UUID]*Scheduler), + suspended: b16set.New(), + noContinue: make(map[uuid.UUID]time.Time), + } + s.runners[SuspendTask] = suspendRunner{service: s} + + if err := s.initSuspended(); err != nil { + return nil, errors.Wrap(err, "init suspended") + } + + return s, nil +} + +// SetPropertiesDecorator sets optional decorator of properties for a given +// task type. +func (s *Service) SetPropertiesDecorator(tp TaskType, d PropertiesDecorator) { + s.mu.Lock() + s.decorators[tp] = d + s.mu.Unlock() +} + +// PropertiesDecorator returns the PropertiesDecorator for a task type. +func (s *Service) PropertiesDecorator(tp TaskType) PropertiesDecorator { + s.mu.Lock() + defer s.mu.Unlock() + return s.decorators[tp] +} + +// SetRunner assigns runner for a given task type. +// All runners need to be registered prior to running the service. +// The registration is separated from constructor to loosen coupling between services. 
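+// For example (hypothetical wiring during server startup):
+//
+//	schedSvc.SetRunner(scheduler.RepairTask, repairRunner)
+//	schedSvc.SetRunner(scheduler.BackupTask, backupRunner)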
+func (s *Service) SetRunner(tp TaskType, r Runner) { + s.mu.Lock() + s.runners[tp] = r + s.mu.Unlock() +} + +func (s *Service) mustRunner(tp TaskType) Runner { + s.mu.Lock() + r, ok := s.runners[tp] + s.mu.Unlock() + + if !ok { + panic("no runner") + } + return r +} + +// LoadTasks should be called on start it loads and schedules task from database. +func (s *Service) LoadTasks(ctx context.Context) error { + s.logger.Info(ctx, "Loading tasks from database") + + endTime := now() + err := s.forEachTask(func(t *Task) error { + s.initMetrics(t) + r, err := s.markRunningAsAborted(t, endTime) + if err != nil { + return errors.Wrap(err, "fix last run status") + } + if needsOneShotRun(t) { + r = true + } + s.schedule(ctx, t, r) + return nil + }) + if err != nil { + s.logger.Info(ctx, "Failed to load task from database") + } else { + s.logger.Info(ctx, "All tasks scheduled") + } + return err +} + +func (s *Service) forEachTask(f func(t *Task) error) error { + q := qb.Select(table.SchedulerTask.Name()).Query(s.session) + defer q.Release() + return forEachTaskWithQuery(q, f) +} + +func (s *Service) markRunningAsAborted(t *Task, endTime time.Time) (bool, error) { + r, err := s.getLastRun(t) + if err != nil { + if errors.Is(err, service.ErrNotFound) { + return false, nil + } + return false, err + } + + if r.Status == StatusAborted { + return true, nil + } + if r.Status == StatusRunning { + r.Status = StatusAborted + r.Cause = "service stopped" + r.EndTime = &endTime + return true, s.putRunAndUpdateTask(r) + } + + return false, nil +} + +func (s *Service) getLastRun(t *Task) (*Run, error) { + q := s.getLastRunQuery(t, 1) + var run Run + return &run, q.GetRelease(&run) +} + +func (s *Service) getLastRunQuery(t *Task, n int, columns ...string) *gocqlx.Queryx { + return table.SchedulerTaskRun.SelectBuilder(columns...). + Limit(uint(n)). + Query(s.session). + BindMap(qb.M{ + "cluster_id": t.ClusterID, + "type": t.Type, + "task_id": t.ID, + }) +} + +// GetLastRuns returns n last runs of a task. +func (s *Service) GetLastRuns(ctx context.Context, t *Task, n int) ([]*Run, error) { + s.logger.Debug(ctx, "GetLastRuns", "task", t, "n", n) + q := s.getLastRunQuery(t, n) + var runs []*Run + return runs, q.SelectRelease(&runs) +} + +// GetNthLastRun returns the n-th last task run, 0 is the last run, 1 is one run before that. +func (s *Service) GetNthLastRun(ctx context.Context, t *Task, n int) (*Run, error) { + s.logger.Debug(ctx, "GetNthLastRun", "task", t, "n", n) + + if n < 0 { + return nil, errors.New("index out of bounds") + } + if n == 0 { + return s.getLastRun(t) + } + + runID, err := s.nthRunID(t, n) + if err != nil { + return nil, err + } + return s.GetRun(ctx, t, runID) +} + +func (s *Service) nthRunID(t *Task, n int) (uuid.UUID, error) { + q := s.getLastRunQuery(t, n+1, "id") + defer q.Release() + + var ( + id uuid.UUID + i int + ) + iter := q.Iter() + for iter.Scan(&id) { + if i == n { + return id, iter.Close() + } + i++ + } + if err := iter.Close(); err != nil { + return uuid.Nil, err + } + + return uuid.Nil, service.ErrNotFound +} + +// GetRun returns a run based on ID. If nothing was found ErrNotFound is returned. 
+func (s *Service) GetRun(ctx context.Context, t *Task, runID uuid.UUID) (*Run, error) { + s.logger.Debug(ctx, "GetRun", "task", t, "run_id", runID) + + if err := t.Validate(); err != nil { + return nil, err + } + + r := &Run{ + ClusterID: t.ClusterID, + Type: t.Type, + TaskID: t.ID, + ID: runID, + } + q := table.SchedulerTaskRun.GetQuery(s.session).BindStruct(r) + return r, q.GetRelease(r) +} + +// PutTask upserts a task. +func (s *Service) PutTask(ctx context.Context, t *Task) error { + create := false + + if t != nil && t.ID == uuid.Nil { + id, err := uuid.NewRandom() + if err != nil { + return errors.Wrap(err, "couldn't generate random UUID for task") + } + t.ID = id + t.Status = StatusNew + create = true + } + s.logger.Info(ctx, "PutTask", "task", t, "schedule", t.Sched, "properties", t.Properties, "create", create) + + if err := t.Validate(); err != nil { + return err + } + if err := s.shouldPutTask(create, t); err != nil { + return err + } + + if create { // nolint: nestif + // Force run if there is no start date and cron. + // Note that tasks with '--start-date now' have StartDate set to zero value. + run := false + if t.Sched.StartDate.IsZero() { + t.Sched.StartDate = now() + if t.Sched.Cron.IsZero() { + run = true + } + } else if t.Sched.StartDate.Before(now()) && t.Sched.Interval != 0 { + return errors.New("start date of scheduled task cannot be in the past") + } + + if err := table.SchedulerTask.InsertQuery(s.session).BindStruct(t).ExecRelease(); err != nil { + return err + } + s.initMetrics(t) + + s.schedule(ctx, t, run) + } else { + if err := table.SchedulerTaskUpdate.InsertQuery(s.session).BindStruct(t).ExecRelease(); err != nil { + return err + } + s.schedule(ctx, t, false) + } + + return nil +} + +func (s *Service) shouldPutTask(create bool, t *Task) error { + s.mu.Lock() + defer s.mu.Unlock() + + if create && s.isSuspendedLocked(t.ClusterID) { + return service.ErrValidate(errors.New("cluster is suspended, scheduling tasks is not allowed")) + } + + if t.Name != "" { + ti := newTaskInfoFromTask(t) + if s.resolver.FillTaskID(&ti) && ti.TaskID != t.ID { + return errors.Errorf("task name %s is already used", t.Name) + } + } + + return nil +} + +func (s *Service) initMetrics(t *Task) { + s.metrics.Init(t.ClusterID, t.Type.String(), t.ID, *(*[]string)(unsafe.Pointer(&allStatuses))...) 
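+	// The unsafe cast above reinterprets []Status as []string without copying;
+	// it relies on Status having underlying type string.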
+} + +func (s *Service) schedule(ctx context.Context, t *Task, run bool) { + s.mu.Lock() + if s.isSuspendedLocked(t.ClusterID) && t.Type != HealthCheckTask && t.Type != SuspendTask { + s.mu.Unlock() + return + } + + s.resolver.Put(newTaskInfoFromTask(t)) + l, lok := s.scheduler[t.ClusterID] + if !lok { + l = s.newScheduler(t.ClusterID) + s.scheduler[t.ClusterID] = l + } + s.mu.Unlock() + + if t.Enabled { + d := details(t) + if run { + d.Trigger = trigger.NewMulti(trigger.NewOnce(), d.Trigger) + } + l.Schedule(ctx, t.ID, d) + } else { + l.Unschedule(ctx, t.ID) + } +} + +func (s *Service) newScheduler(clusterID uuid.UUID) *Scheduler { + l := scheduler.NewScheduler[Key](now, s.run, newSchedulerListener(s.findTaskByID, s.logger.Named(clusterID.String()[0:8]))) + go l.Start(context.Background()) + return l +} + +const noContinueThreshold = 500 * time.Millisecond + +func (s *Service) run(ctx RunContext) (runErr error) { + s.mu.Lock() + ti, ok := s.resolver.FindByID(ctx.Key) + c, cok := s.noContinue[ti.TaskID] + if cok { + delete(s.noContinue, ti.TaskID) + } + d := s.decorators[ti.TaskType] + r := newRunFromTaskInfo(ti) + s.runs[ti.TaskID] = *r + s.mu.Unlock() + + defer func() { + s.mu.Lock() + delete(s.runs, ti.TaskID) + s.mu.Unlock() + }() + + runCtx := log.WithTraceID(ctx) + logger := s.logger.Named(ti.ClusterID.String()[0:8]) + + if ti.TaskType != HealthCheckTask { + logger.Info(runCtx, "Run started", + "task", ti, + "retry", ctx.Retry, + ) + defer func() { + if r.Status == StatusError { + logger.Info(runCtx, "Run ended with ERROR", + "task", ti, + "status", r.Status, + "cause", r.Cause, + "duration", r.EndTime.Sub(r.StartTime), + ) + } else { + logger.Info(runCtx, "Run ended", + "task", ti, + "status", r.Status, + "duration", r.EndTime.Sub(r.StartTime), + ) + } + }() + } + + if !ok { + return service.ErrNotFound + } + if err := s.putRunAndUpdateTask(r); err != nil { + return errors.Wrap(err, "put run") + } + s.metrics.BeginRun(ti.ClusterID, ti.TaskType.String(), ti.TaskID) + + defer func() { + r.Status = statusFromError(runErr) + if r.Status == StatusError { + r.Cause = runErr.Error() + } + if r.Status == StatusStopped && s.isClosed() { + r.Status = StatusAborted + } + r.EndTime = pointer.TimePtr(now()) + + if ti.TaskType == HealthCheckTask { + if r.Status != StatusDone { + r.ID = uuid.NewTime() + } + } + if err := s.putRunAndUpdateTask(r); err != nil { + logger.Error(runCtx, "Cannot update the run", "task", ti, "run", r, "error", err) + } + s.metrics.EndRun(ti.ClusterID, ti.TaskType.String(), ti.TaskID, r.Status.String(), r.StartTime.Unix()) + }() + + if ctx.Properties.(Properties) == nil { + ctx.Properties = json.RawMessage("{}") + } + if ctx.Retry == 0 && now().Sub(c) < noContinueThreshold { + ctx.Properties = jsonutil.Set(ctx.Properties.(Properties), "continue", false) + } + if d != nil { + p, err := d(runCtx, ti.ClusterID, ti.TaskID, ctx.Properties.(Properties)) + if err != nil { + return errors.Wrap(err, "decorate properties") + } + ctx.Properties = p + } + return s.mustRunner(ti.TaskType).Run(runCtx, ti.ClusterID, ti.TaskID, r.ID, ctx.Properties.(Properties)) +} + +func (s *Service) putRunAndUpdateTask(r *Run) error { + if err := s.putRun(r); err != nil { + return err + } + return s.updateTaskWithRun(r) +} + +func (s *Service) putRun(r *Run) error { + return table.SchedulerTaskRun.InsertQuery(s.session).BindStruct(r).ExecRelease() +} + +func (s *Service) updateTaskWithRun(r *Run) error { + t := Task{ + ClusterID: r.ClusterID, + Type: r.Type, + ID: r.TaskID, + Status: r.Status, + } + b 
:= table.SchedulerTask.UpdateBuilder("status") + + var u *gocqlx.Queryx + switch r.Status { + case StatusDone: + q := table.SchedulerTask.GetQuery(s.session, "success_count").BindStruct(&t) + if err := q.GetRelease(&t); err != nil { + return err + } + + u = b.Set("success_count", "last_success").Query(s.session) + t.SuccessCount++ + t.LastSuccess = r.EndTime + case StatusError: + q := table.SchedulerTask.GetQuery(s.session, "error_count").BindStruct(&t) + if err := q.GetRelease(&t); err != nil { + return err + } + + u = b.Set("error_count", "last_error").Query(s.session) + t.ErrorCount++ + t.LastError = r.EndTime + default: + u = b.Query(s.session) + } + + return u.BindStruct(&t).ExecRelease() +} + +func statusFromError(err error) Status { + switch { + case err == nil: + return StatusDone + case errors.Is(err, context.Canceled): + return StatusStopped + case errors.Is(err, context.DeadlineExceeded): + return StatusWaiting + default: + return StatusError + } +} + +// GetTaskByID returns a task based on ID and type. If nothing was found +// scylla-manager.ErrNotFound is returned. +func (s *Service) GetTaskByID(ctx context.Context, clusterID uuid.UUID, tp TaskType, id uuid.UUID) (*Task, error) { + s.logger.Debug(ctx, "GetTaskByID", "cluster_id", clusterID, "id", id) + t := &Task{ + ClusterID: clusterID, + Type: tp, + ID: id, + } + q := table.SchedulerTask.GetQuery(s.session).BindStruct(t) + return t, q.GetRelease(t) +} + +func (s *Service) findTaskByID(key Key) (taskInfo, bool) { + s.mu.Lock() + ti, ok := s.resolver.FindByID(key) + s.mu.Unlock() + return ti, ok +} + +// DeleteTask removes and stops task based on ID. +func (s *Service) DeleteTask(ctx context.Context, t *Task) error { + s.logger.Debug(ctx, "DeleteTask", "task", t) + + t.Deleted = true + t.Enabled = false + + // Remove the deleted task's name so that new tasks can use it + t.Name = "" + + q := table.SchedulerTask.UpdateQuery(s.session, "deleted", "enabled", "name").BindStruct(t) + + if err := q.ExecRelease(); err != nil { + return err + } + + s.mu.Lock() + l, lok := s.scheduler[t.ClusterID] + s.resolver.Remove(t.ID) + s.mu.Unlock() + if lok { + l.Unschedule(ctx, t.ID) + } + + s.logger.Info(ctx, "Task deleted", + "cluster_id", t.ClusterID, + "task_type", t.Type, + "task_id", t.ID, + ) + return nil +} + +// StartTask starts execution of a task immediately. +func (s *Service) StartTask(ctx context.Context, t *Task) error { + return s.startTask(ctx, t, false) +} + +// StartTaskNoContinue starts execution of a task immediately and adds the +// "no_continue" flag to properties of the next run. +// The possible retries would not have the flag enabled. +func (s *Service) StartTaskNoContinue(ctx context.Context, t *Task) error { + return s.startTask(ctx, t, true) +} + +func (s *Service) startTask(ctx context.Context, t *Task, noContinue bool) error { + s.logger.Debug(ctx, "StartTask", "task", t, "no_continue", noContinue) + + s.mu.Lock() + if s.isSuspendedLocked(t.ClusterID) { + s.mu.Unlock() + return service.ErrValidate(errors.New("cluster is suspended")) + } + l, lok := s.scheduler[t.ClusterID] + if !lok { + l = s.newScheduler(t.ClusterID) + s.scheduler[t.ClusterID] = l + } + if noContinue { + s.noContinue[t.ID] = now() + } + s.mu.Unlock() + + // For regular tasks trigger will be enough but for one shot or disabled + // tasks we need to reschedule them to run once. 
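+	// Trigger returns false when the scheduler has no pending activation
+	// for the task (e.g. a disabled or already-completed one-shot task),
+	// so we fall back to scheduling a single immediate run.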
+	if !l.Trigger(ctx, t.ID) {
+		d := details(t)
+		d.Trigger = trigger.NewOnce()
+		l.Schedule(ctx, t.ID, d)
+	}
+	return nil
+}
+
+// StopTask stops task execution immediately; the task is rescheduled
+// according to its run interval.
+func (s *Service) StopTask(ctx context.Context, t *Task) error {
+	s.logger.Debug(ctx, "StopTask", "task", t)
+
+	s.mu.Lock()
+	l, lok := s.scheduler[t.ClusterID]
+	r, rok := s.runs[t.ID]
+	s.mu.Unlock()
+
+	if !lok || !rok {
+		return nil
+	}
+
+	r.Status = StatusStopping
+	if err := s.updateRunStatus(&r); err != nil {
+		return err
+	}
+	l.Stop(ctx, t.ID)
+
+	return nil
+}
+
+func (s *Service) updateRunStatus(r *Run) error {
+	// Only update if running as there is a race between manually stopping
+	// a run and the run returning normally.
+	return table.SchedulerTaskRun.
+		UpdateBuilder("status").
+		If(qb.EqNamed("status", "from_status")).
+		Query(s.session).
+		BindStructMap(r, qb.M{"from_status": StatusRunning}).
+		ExecRelease()
+}
+
+func (s *Service) isClosed() bool {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	return s.closed
+}
+
+// Close cancels all tasks and waits for them to terminate.
+func (s *Service) Close() {
+	s.mu.Lock()
+	s.closed = true
+	v := make([]*Scheduler, 0, len(s.scheduler))
+	for _, l := range s.scheduler {
+		v = append(v, l)
+		l.Close()
+	}
+	s.mu.Unlock()
+
+	for _, l := range v {
+		l.Wait()
+	}
+}
+
+func forEachTaskWithQuery(q *gocqlx.Queryx, f func(t *Task) error) error {
+	var t Task
+	iter := q.Iter()
+	for iter.StructScan(&t) {
+		if err := f(&t); err != nil {
+			iter.Close()
+			return err
+		}
+		t = Task{}
+	}
+	return iter.Close()
+}
+
+func needsOneShotRun(t *Task) bool {
+	return t.Sched.Cron.IsZero() &&
+		t.Sched.Interval == 0 &&
+		t.Sched.Window != nil &&
+		t.SuccessCount+t.ErrorCount == 0
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service_list.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service_list.go
new file mode 100644
index 00000000000..a8aeb8323d3
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service_list.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2017 ScyllaDB
+
+package scheduler
+
+import (
+	"context"
+	"time"
+
+	"github.com/scylladb/gocqlx/v2/qb"
+	"github.com/scylladb/scylla-manager/v3/pkg/schema/table"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/uuid"
+)
+
+// TaskListItem decorates Task with information about task runs and scheduled
+// activations.
+type TaskListItem struct {
+	Task
+	Suspended      bool       `json:"suspended"`
+	NextActivation *time.Time `json:"next_activation"`
+	Retry          int        `json:"retry"`
+}
+
+// ListFilter specifies filtering parameters to ListTasks.
+type ListFilter struct {
+	TaskType []TaskType
+	Status   []Status
+	Disabled bool
+	Deleted  bool
+	Short    bool
+	TaskID   uuid.UUID
+}
+
+// ListTasks returns cluster tasks given the filtering criteria.
+func (s *Service) ListTasks(ctx context.Context, clusterID uuid.UUID, filter ListFilter) ([]*TaskListItem, error) {
+	s.logger.Debug(ctx, "ListTasks", "filter", filter)
+
+	b := qb.Select(table.SchedulerTask.Name())
+	b.Where(qb.Eq("cluster_id"))
+	m := qb.M{
+		"cluster_id": clusterID,
+	}
+	if len(filter.TaskType) > 0 {
+		b.Where(qb.Eq("type"))
+	}
+	if !filter.Disabled {
+		b.Where(qb.EqLit("enabled", "true"))
+		b.AllowFiltering()
+	}
+	if !filter.Deleted {
+		b.Where(qb.EqLit("deleted", "false"))
+		b.AllowFiltering()
+	}
+	if filter.Short {
+		m := table.SchedulerTask.Metadata()
+		var cols []string
+		cols = append(cols, m.PartKey...)
+ cols = append(cols, m.SortKey...) + cols = append(cols, "name") + b.Columns(cols...) + } + if len(filter.Status) > 0 { + b.Where(qb.In("status")) + b.AllowFiltering() + m["status"] = filter.Status + } + if filter.TaskID != uuid.Nil { + b.Where(qb.EqLit("id", filter.TaskID.String())) + } + + q := b.Query(s.session) + defer q.Release() + + // This is workaround for the following error using IN keyword + // Cannot restrict clustering columns by IN relations when a collection is selected by the query + var tasks []*TaskListItem + if len(filter.TaskType) == 0 { + q.BindMap(m) + if err := q.Select(&tasks); err != nil { + return nil, err + } + } else { + for _, tt := range filter.TaskType { + m["type"] = tt + q.BindMap(m) + if err := q.Select(&tasks); err != nil { + return nil, err + } + } + } + + if filter.Short { + return tasks, nil + } + + s.decorateTaskListItems(clusterID, tasks) + + return tasks, nil +} + +func (s *Service) decorateTaskListItems(clusterID uuid.UUID, tasks []*TaskListItem) { + s.mu.Lock() + l, lok := s.scheduler[clusterID] + suspended := s.isSuspendedLocked(clusterID) + s.mu.Unlock() + + if !lok { + return + } + + keys := make([]uuid.UUID, len(tasks)) + for i := range tasks { + keys[i] = tasks[i].ID + } + a := l.Activations(keys...) + for i, t := range tasks { + if a[i].IsZero() { + t.Suspended = suspended + } else { + t.NextActivation = &a[i].Time + } + t.Retry = int(a[i].Retry) + } +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service_suspend.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service_suspend.go new file mode 100644 index 00000000000..39028f2e838 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/service/scheduler/service_suspend.go @@ -0,0 +1,303 @@ +// Copyright (C) 2022 ScyllaDB + +package scheduler + +import ( + "context" + "encoding/json" + "time" + + "github.com/pkg/errors" + "github.com/scylladb/go-log" + "github.com/scylladb/go-set/b16set" + "github.com/scylladb/gocqlx/v2/qb" + "github.com/scylladb/scylla-manager/v3/pkg/schema/table" + "github.com/scylladb/scylla-manager/v3/pkg/service" + "github.com/scylladb/scylla-manager/v3/pkg/store" + "github.com/scylladb/scylla-manager/v3/pkg/util/duration" + "github.com/scylladb/scylla-manager/v3/pkg/util/timeutc" + "github.com/scylladb/scylla-manager/v3/pkg/util/uuid" +) + +type suspendInfo struct { + ClusterID uuid.UUID `json:"-"` + StartedAt time.Time `json:"started_at"` + PendingTasks []uuid.UUID `json:"pending_tasks"` + RunningTask []uuid.UUID `json:"running_tasks"` +} + +var _ store.Entry = &suspendInfo{} + +func (v *suspendInfo) Key() (clusterID uuid.UUID, key string) { + return v.ClusterID, "scheduler_suspended" +} + +func (v *suspendInfo) MarshalBinary() (data []byte, err error) { + return json.Marshal(v) +} + +func (v *suspendInfo) UnmarshalBinary(data []byte) error { + return json.Unmarshal(data, v) +} + +// SuspendProperties specify properties of Suspend task. +type SuspendProperties struct { + Resume bool `json:"resume"` + Duration duration.Duration `json:"duration"` + StartTasks bool `json:"start_tasks"` +} + +// GetSuspendProperties unmarshals suspend properties and validates them. 
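+//
+// Illustrative example (editor's sketch, not part of the upstream file):
+//
+//	p, err := GetSuspendProperties([]byte(`{"duration": "2h", "start_tasks": true}`))
+//	// on success: p.StartTasks == true, p.Duration.Duration() == 2*time.Hour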
+func GetSuspendProperties(data []byte) (SuspendProperties, error) { + properties := SuspendProperties{} + if err := json.Unmarshal(data, &properties); err != nil { + return properties, err + } + + if properties.StartTasks { + if properties.Duration == 0 { + return properties, errors.New("can't use startTasks without a duration") + } + } + + return properties, nil +} + +func (s *Service) initSuspended() error { + var clusters []uuid.UUID + if err := qb.Select(table.SchedulerTask.Name()).Distinct("cluster_id").Query(s.session).SelectRelease(&clusters); err != nil { + return errors.Wrap(err, "list clusters") + } + + for _, c := range clusters { + si := &suspendInfo{ClusterID: c} + if err := s.drawer.Get(si); err != nil { + if !errors.Is(err, service.ErrNotFound) { + return err + } + } else { + s.suspended.Add(c.Bytes16()) + s.metrics.Suspend(c) + } + } + + return nil +} + +// IsSuspended returns true iff cluster is suspended. +func (s *Service) IsSuspended(ctx context.Context, clusterID uuid.UUID) bool { + s.logger.Debug(ctx, "IsSuspended", "clusterID", clusterID) + s.mu.Lock() + defer s.mu.Unlock() + return s.isSuspendedLocked(clusterID) +} + +func (s *Service) isSuspendedLocked(clusterID uuid.UUID) bool { + return s.suspended.Has(clusterID.Bytes16()) +} + +// Suspend stops scheduler for a given cluster. +// Running tasks will be stopped. +// Scheduled task executions will be canceled. +// Scheduler can be later resumed, see `Resume` function. +func (s *Service) Suspend(ctx context.Context, clusterID uuid.UUID) error { + wait, err := s.suspend(ctx, clusterID, SuspendProperties{}) + if wait != nil { + wait() + } + return err +} + +func (s *Service) suspend(ctx context.Context, clusterID uuid.UUID, p SuspendProperties) (func(), error) { + if p.Duration > 0 { + s.logger.Info(ctx, "Suspending cluster", "cluster_id", clusterID, "target", p) + } else { + s.logger.Info(ctx, "Suspending cluster", "cluster_id", clusterID) + } + + si := &suspendInfo{ + ClusterID: clusterID, + StartedAt: timeutc.Now(), + } + + s.mu.Lock() + if s.isSuspendedLocked(clusterID) { + s.logger.Info(ctx, "Cluster already suspended", "cluster_id", clusterID) + s.mu.Unlock() + return nil, nil // nolint: nilnil + } + s.suspended.Add(clusterID.Bytes16()) + s.metrics.Suspend(clusterID) + l := s.resetSchedulerLocked(si) + s.mu.Unlock() + + if err := s.forEachClusterHealthCheckTask(clusterID, func(t *Task) error { + s.schedule(ctx, t, false) + return nil + }); err != nil { + return nil, errors.Wrap(err, "schedule") + } + + if p.Duration > 0 { + rt, err := newResumeTask(si, p) + if err != nil { + return nil, errors.Wrap(err, "new resume task") + } + if err := table.SchedulerTask.InsertQuery(s.session).BindStruct(rt).ExecRelease(); err != nil { + return nil, errors.Wrap(err, "put task") + } + s.schedule(ctx, rt, false) + } + + if err := s.drawer.Put(si); err != nil { + return nil, errors.Wrap(err, "save canceled tasks") + } + + var wait func() + if l != nil { + wait = l.Wait + } + return wait, nil +} + +// resetSchedulerLocked closes the current scheduler, records the information on running tasks, and creates a new empty scheduler. +// It returns the old closed scheduler. +func (s *Service) resetSchedulerLocked(si *suspendInfo) *Scheduler { + cid := si.ClusterID + l := s.scheduler[cid] + if l != nil { + si.RunningTask, si.PendingTasks = l.Close() + } + s.scheduler[cid] = s.newScheduler(cid) + return l +} + +// ResumeTaskID is a special task ID reserved for scheduled resume of suspended cluster. 
+// It can be reused for different suspend tasks at different times. +// Note that a suspended cluster cannot be suspended. +var ResumeTaskID = uuid.MustParse("805E43B0-2C0A-481E-BAB8-9C2418940D67") + +func newResumeTask(si *suspendInfo, p SuspendProperties) (*Task, error) { + p.Resume = true + + b, err := json.Marshal(p) + if err != nil { + return nil, err + } + + return &Task{ + ClusterID: si.ClusterID, + Type: SuspendTask, + ID: ResumeTaskID, + Name: "resume", + Enabled: true, + Sched: Schedule{ + StartDate: si.StartedAt.Add(p.Duration.Duration()), + NumRetries: 3, + RetryWait: duration.Duration(5 * time.Second), + }, + Status: StatusNew, + Properties: b, + }, nil +} + +func newDisabledResumeTask(clusterID uuid.UUID) *Task { + return &Task{ + ClusterID: clusterID, + Type: SuspendTask, + ID: ResumeTaskID, + Name: "resume", + } +} + +// Resume resumes scheduler for a suspended cluster. +func (s *Service) Resume(ctx context.Context, clusterID uuid.UUID, startTasks bool) error { + s.logger.Info(ctx, "Resuming cluster", "cluster_id", clusterID) + + s.mu.Lock() + if !s.suspended.Has(clusterID.Bytes16()) { + s.mu.Unlock() + s.logger.Info(ctx, "Cluster not suspended", "cluster_id", clusterID) + return nil + } + si := &suspendInfo{ClusterID: clusterID} + if err := s.drawer.Get(si); err != nil { + if errors.Is(err, service.ErrNotFound) { + s.logger.Error(ctx, "Expected canceled tasks got none") + } else { + s.mu.Unlock() + return errors.Wrap(err, "get canceled tasks") + } + } + if err := s.drawer.Delete(si); err != nil { + s.logger.Error(ctx, "Failed to delete canceled tasks", "error", err) + } + s.suspended.Remove(clusterID.Bytes16()) + s.metrics.Resume(clusterID) + s.mu.Unlock() + + running := b16set.New() + if startTasks { + for _, u := range si.RunningTask { + running.Add(u.Bytes16()) + } + } + if err := s.forEachClusterTask(clusterID, func(t *Task) error { + r := running.Has(t.ID.Bytes16()) + if needsOneShotRun(t) { + r = true + } + if t.Type == SuspendTask { + r = false + } + s.schedule(ctx, t, r) + return nil + }); err != nil { + return errors.Wrap(err, "schedule") + } + + if err := s.PutTask(ctx, newDisabledResumeTask(clusterID)); err != nil { + return errors.Wrap(err, "disable resume task") + } + + return nil +} + +func (s *Service) forEachClusterHealthCheckTask(clusterID uuid.UUID, f func(t *Task) error) error { + q := qb.Select(table.SchedulerTask.Name()). + Where(qb.Eq("cluster_id"), qb.Eq("type")). + Query(s.session). 
+		Bind(clusterID, HealthCheckTask)
+	defer q.Release()
+
+	return forEachTaskWithQuery(q, f)
+}
+
+func (s *Service) forEachClusterTask(clusterID uuid.UUID, f func(t *Task) error) error {
+	q := qb.Select(table.SchedulerTask.Name()).Where(qb.Eq("cluster_id")).Query(s.session).Bind(clusterID)
+	defer q.Release()
+	return forEachTaskWithQuery(q, f)
+}
+
+type suspendRunner struct {
+	service *Service
+}
+
+func (s suspendRunner) Run(ctx context.Context, clusterID, _, _ uuid.UUID, properties json.RawMessage) error {
+	p, err := GetSuspendProperties(properties)
+	if err != nil {
+		return service.ErrValidate(err)
+	}
+
+	if p.Resume {
+		err = s.service.Resume(ctx, clusterID, p.StartTasks)
+	} else {
+		// Suspend closes the scheduler it is running on; for this reason we need to:
+		// - detach from the context,
+		// - skip waiting for task completion.
+		ctx = log.CopyTraceID(context.Background(), ctx)
+		_, err = s.service.suspend(ctx, clusterID, p)
+	}
+
+	return err
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/store/rollback.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/store/rollback.go
new file mode 100644
index 00000000000..7e0c1a6ea1f
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/store/rollback.go
@@ -0,0 +1,43 @@
+// Copyright (C) 2017 ScyllaDB
+
+package store
+
+import (
+	"github.com/pkg/errors"
+	"github.com/scylladb/scylla-manager/v3/pkg/service"
+)
+
+type entryHolder struct {
+	Entry
+	data []byte
+}
+
+func (v *entryHolder) MarshalBinary() (data []byte, err error) {
+	return v.data, nil
+}
+
+func (v *entryHolder) UnmarshalBinary(data []byte) error {
+	v.data = data
+	return nil
+}
+
+// PutWithRollback saves the new value of an entry and returns a function
+// that restores the former value.
+func PutWithRollback(store Store, e Entry) (func(), error) {
+	old := &entryHolder{
+		Entry: e,
+	}
+
+	if err := store.Get(old); err != nil && !errors.Is(err, service.ErrNotFound) {
+		return nil, errors.Wrap(err, "get")
+	}
+	if err := store.Put(e); err != nil {
+		return nil, errors.Wrap(err, "put")
+	}
+	return func() {
+		if old.data == nil {
+			store.Delete(old) // nolint: errcheck
+		} else {
+			store.Put(old) // nolint: errcheck
+		}
+	}, nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/store/store.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/store/store.go
new file mode 100644
index 00000000000..59b18876550
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/store/store.go
@@ -0,0 +1,123 @@
+// Copyright (C) 2017 ScyllaDB
+
+package store
+
+import (
+	"encoding"
+
+	"github.com/pkg/errors"
+	"github.com/scylladb/gocqlx/v2"
+	"github.com/scylladb/gocqlx/v2/qb"
+	"github.com/scylladb/gocqlx/v2/table"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/uuid"
+)
+
+// ErrInvalidKey is returned when an Entry has a nil clusterID or an empty key.
+var ErrInvalidKey = errors.New("Missing clusterID or key")
+
+// Entry is a key value pair that can be persisted in Store.
+// The key is obtained with the Key method, the value by marshalling.
+type Entry interface {
+	Key() (clusterID uuid.UUID, key string)
+	encoding.BinaryMarshaler
+	encoding.BinaryUnmarshaler
+}
+
+// Store specifies the interface used for storing arbitrary entries.
+// In order to store a value, the provided type must implement the Entry
+// interface.
+type Store interface {
+	Put(v Entry) error
+	Get(v Entry) error
+	Check(v Entry) (bool, error)
+	Delete(v Entry) error
+	DeleteAll(clusterID uuid.UUID) error
+}
+
+// TableStore is a Store implementation that keeps entries in a database
+// table; the table must have cluster_id, key, and value columns.
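+//
+// Illustrative usage (editor's sketch; the drawer table name is an assumption):
+//
+//	s := NewTableStore(session, table.Drawer)
+//	if err := s.Put(entry); err != nil {
+//		// handle error
+//	}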
+type TableStore struct {
+	session gocqlx.Session
+	table   *table.Table
+}
+
+var _ Store = &TableStore{}
+
+func NewTableStore(session gocqlx.Session, table *table.Table) *TableStore {
+	return &TableStore{
+		session: session,
+		table:   table,
+	}
+}
+
+// Put saves the provided entry into the table.
+func (s *TableStore) Put(e Entry) error {
+	clusterID, key := e.Key()
+	if clusterID == uuid.Nil || key == "" {
+		return ErrInvalidKey
+	}
+	value, err := e.MarshalBinary()
+	if err != nil {
+		return errors.Wrap(err, "marshal")
+	}
+
+	return s.table.InsertQuery(s.session).BindMap(qb.M{
+		"cluster_id": clusterID,
+		"key":        key,
+		"value":      value,
+	}).ExecRelease()
+}
+
+// Get unmarshals the stored value into an existing entry based on its key.
+func (s *TableStore) Get(e Entry) error {
+	clusterID, key := e.Key()
+	if clusterID == uuid.Nil || key == "" {
+		return ErrInvalidKey
+	}
+
+	var v []byte
+	err := s.table.GetQuery(s.session, "value").BindMap(qb.M{
+		"cluster_id": clusterID,
+		"key":        key,
+	}).GetRelease(&v)
+	if err != nil {
+		return err
+	}
+	return e.UnmarshalBinary(v)
+}
+
+// Check reports whether an entry with the given key exists.
+func (s *TableStore) Check(e Entry) (bool, error) {
+	clusterID, key := e.Key()
+	if clusterID == uuid.Nil || key == "" {
+		return false, ErrInvalidKey
+	}
+
+	var v int
+	err := s.table.GetQuery(s.session, "COUNT(value)").BindMap(qb.M{
+		"cluster_id": clusterID,
+		"key":        key,
+	}).GetRelease(&v)
+	if err != nil {
+		return false, err
+	}
+	return v > 0, nil
+}
+
+// Delete removes the entry for a given cluster and key.
+func (s *TableStore) Delete(e Entry) error {
+	clusterID, key := e.Key()
+	if clusterID == uuid.Nil || key == "" {
+		return ErrInvalidKey
+	}
+
+	return s.table.DeleteQuery(s.session).BindMap(qb.M{
+		"cluster_id": clusterID,
+		"key":        key,
+	}).ExecRelease()
+}
+
+// DeleteAll removes all entries for a cluster.
+func (s *TableStore) DeleteAll(clusterID uuid.UUID) error {
+	return qb.Delete(s.table.Name()).Where(qb.Eq("cluster_id")).Query(s.session).BindMap(qb.M{
+		"cluster_id": clusterID,
+	}).ExecRelease()
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/duration.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/duration.go
new file mode 100644
index 00000000000..7a65e3117ea
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/duration.go
@@ -0,0 +1,55 @@
+// Copyright (C) 2017 ScyllaDB
+
+package duration
+
+import (
+	"time"
+
+	"github.com/gocql/gocql"
+)
+
+// Duration adds marshalling to time.Duration.
+type Duration time.Duration
+
+// MarshalCQL implements gocql.Marshaler.
+func (d Duration) MarshalCQL(info gocql.TypeInfo) ([]byte, error) {
+	return gocql.Marshal(info, int(time.Duration(d).Seconds()))
+}
+
+// UnmarshalCQL implements gocql.Unmarshaler.
+func (d *Duration) UnmarshalCQL(info gocql.TypeInfo, data []byte) error {
+	var t time.Duration
+	if err := gocql.Unmarshal(info, data, &t); err != nil {
+		return err
+	}
+	*d = Duration(t * time.Second)
+	return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (d Duration) MarshalText() ([]byte, error) {
+	if d == 0 {
+		return []byte{}, nil
+	}
+	return []byte(d.String()), nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (d *Duration) UnmarshalText(b []byte) error {
+	if len(b) == 0 {
+		*d = 0
+		return nil
+	}
+
+	t, err := ParseDuration(string(b))
+	if err != nil {
+		return err
+	}
+	*d = t
+	return nil
+}
+
+// Duration returns this duration as time.Duration.
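+//
+// For example (illustrative):
+//
+//	d, _ := ParseDuration("90m")
+//	_ = d.Duration() == 90*time.Minute // true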
+func (d Duration) Duration() time.Duration { + return time.Duration(d) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/duration_string.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/duration_string.go new file mode 100644 index 00000000000..ded50076174 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/duration_string.go @@ -0,0 +1,153 @@ +// Copyright (C) 2017 ScyllaDB + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains a modified version of time.Duration String function from +// go stdlib src/time/duration.go file together with all required helper functions. + +package duration + +import "time" + +// String is a modified time.Duration String function that supports printing +// days with d suffix, note that day is equal to 24h. This is not supported by +// time.Duration because of issues with leap year and different time zones. We +// decide to ignore that as we are working with UTC. +// +// The implementation is changed not to print units if value is 0 to keep the +// representation shorter i.e. instead of 7d0h0m0s we print just 7d. +// +// String returns a string representing the duration in the form "72h3m0.5s". +// Leading zero units are omitted. As a special case, durations less than one +// second format use a smaller unit (milli-, micro-, or nanoseconds) to ensure +// that the leading digit is non-zero. The zero duration formats as 0s. +func (d Duration) String() string { + // Largest time is 2540400h10m10.000000000s + var buf [32]byte + w := len(buf) + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + if u < uint64(time.Second) { + // Special case: if duration is smaller than a second, + // use smaller units, like 1.2ms + var prec int + w-- + buf[w] = 's' + w-- + switch { + case u == 0: + return "0s" + case u < uint64(time.Microsecond): + // print nanoseconds + prec = 0 + buf[w] = 'n' + case u < uint64(time.Millisecond): + // print microseconds + prec = 3 + // U+00B5 'µ' micro sign == 0xC2 0xB5 + w-- // Need room for two bytes. + copy(buf[w:], "µ") + default: + // print milliseconds + prec = 6 + buf[w] = 'm' + } + w, u = fmtFrac(buf[:w], u, prec) + w = fmtInt(buf[:w], u) + } else { + w-- + buf[w] = 's' + + w, u = fmtFrac(buf[:w], u, 9) + + // u is now integer seconds + if u%60 != 0 || w != len(buf)-1 { + w = fmtInt(buf[:w], u%60) + } else { + w = len(buf) + } + u /= 60 + + // u is now integer minutes + if u > 0 { + if u%60 != 0 { + w-- + buf[w] = 'm' + w = fmtInt(buf[:w], u%60) + } + u /= 60 + + // u is now integer hours + if u > 0 { + if u%24 != 0 { + w-- + buf[w] = 'h' + w = fmtInt(buf[:w], u%24) + } + u /= 24 + + // u is now integer days + if u > 0 { + w-- + buf[w] = 'd' + w = fmtInt(buf[:w], u) + } + } + } + } + + if neg { + w-- + buf[w] = '-' + } + + return string(buf[w:]) +} + +// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the +// tail of buf, omitting trailing zeros. It omits the decimal +// point too when the fraction is 0. It returns the index where the +// output bytes begin and the value v/10**prec. +func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) { + // Omit trailing zeros up to and including decimal point. 
+	w := len(buf)
+	print := false
+	for i := 0; i < prec; i++ {
+		digit := v % 10
+		print = print || digit != 0
+		if print {
+			w--
+			buf[w] = byte(digit) + '0'
+		}
+		v /= 10
+	}
+	if print {
+		w--
+		buf[w] = '.'
+	}
+	return w, v
+}
+
+// fmtInt formats v into the tail of buf.
+// It returns the index where the output begins.
+func fmtInt(buf []byte, v uint64) int {
+	w := len(buf)
+	if v == 0 {
+		w--
+		buf[w] = '0'
+	} else {
+		for v > 0 {
+			w--
+			buf[w] = byte(v%10) + '0'
+			v /= 10
+		}
+	}
+	return w
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/format.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/format.go
new file mode 100644
index 00000000000..c84ebf07fdd
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/duration/format.go
@@ -0,0 +1,181 @@
+// Copyright (C) 2017 ScyllaDB
+
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains a modified version of the ParseDuration function from
+// go stdlib src/time/format.go file together with all required helper functions.
+
+package duration
+
+import (
+	"errors"
+	"time"
+)
+
+// unitMap is modified by eliminating ms and below units and by adding d that
+// denotes 24 hours.
+var unitMap = map[string]int64{
+	"s": int64(time.Second),
+	"m": int64(time.Minute),
+	"h": int64(time.Hour),
+	"d": int64(24 * time.Hour),
+}
+
+// ParseDuration parses a duration string.
+// A duration string is a possibly signed sequence of
+// decimal numbers, each with optional fraction and a unit suffix,
+// such as "-1.5h", "2h45m" or "7d".
+// Valid time units are "s", "m", "h" and "d".
+func ParseDuration(s string) (Duration, error) { // nolint: gocognit
+	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
+	orig := s
+	var d int64
+	neg := false
+
+	// Consume [-+]?
+	if s != "" {
+		c := s[0]
+		if c == '-' || c == '+' {
+			neg = c == '-'
+			s = s[1:]
+		}
+	}
+	// Special case: if all that is left is "0", this is zero.
+	if s == "0" {
+		return 0, nil
+	}
+	if s == "" {
+		return 0, errors.New("time: invalid duration " + orig)
+	}
+	for s != "" {
+		var (
+			v, f  int64       // integers before, after decimal point
+			scale float64 = 1 // value = v + f/scale
+		)
+
+		var err error
+
+		// The next character must be [0-9.]
+		if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
+			return 0, errors.New("time: invalid duration " + orig)
+		}
+		// Consume [0-9]*
+		pl := len(s)
+		v, s, err = leadingInt(s)
+		if err != nil {
+			return 0, errors.New("time: invalid duration " + orig)
+		}
+		pre := pl != len(s) // whether we consumed anything before a period
+
+		// Consume (\.[0-9]*)?
+		post := false
+		if s != "" && s[0] == '.' {
+			s = s[1:]
+			pl := len(s)
+			f, scale, s = leadingFraction(s)
+			post = pl != len(s)
+		}
+		if !pre && !post {
+			// no digits (e.g. ".s" or "-.s")
+			return 0, errors.New("time: invalid duration " + orig)
+		}
+
+		// Consume unit.
+		i := 0
+		for ; i < len(s); i++ {
+			c := s[i]
+			if c == '.' || '0' <= c && c <= '9' {
+				break
+			}
+		}
+		if i == 0 {
+			return 0, errors.New("time: missing unit in duration " + orig)
+		}
+		u := s[:i]
+		s = s[i:]
+		unit, ok := unitMap[u]
+		if !ok {
+			return 0, errors.New("time: unknown unit " + u + " in duration " + orig)
+		}
+		if v > (1<<63-1)/unit {
+			// overflow
+			return 0, errors.New("time: invalid duration " + orig)
+		}
+		v *= unit
+		if f > 0 {
+			// float64 is needed to be nanosecond accurate for fractions of hours.
+ // v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit) + v += int64(float64(f) * (float64(unit) / scale)) + if v < 0 { + // overflow + return 0, errors.New("time: invalid duration " + orig) + } + } + d += v + if d < 0 { + // overflow + return 0, errors.New("time: invalid duration " + orig) + } + } + + if neg { + d = -d + } + return Duration(d), nil +} + +// leadingFraction consumes the leading [0-9]* from s. +// It is used only for fractions, so does not return an error on overflow, +// it just stops accumulating precision. +func leadingFraction(s string) (x int64, scale float64, rem string) { + i := 0 + scale = 1 + overflow := false + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if overflow { + continue + } + if x > (1<<63-1)/10 { + // It's possible for overflow to give a positive number, so take care. + overflow = true + continue + } + y := x*10 + int64(c) - '0' + if y < 0 { + overflow = true + continue + } + x = y + scale *= 10 + } + return x, scale, s[i:] +} + +var errLeadingInt = errors.New("time: bad [0-9]*") // never printed + +// leadingInt consumes the leading [0-9]* from s. +func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x > (1<<63-1)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + if x < 0 { + // overflow + return 0, "", errLeadingInt + } + } + return x, s[i:], nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/clone.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/clone.go new file mode 100644 index 00000000000..4f8a54588f7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/clone.go @@ -0,0 +1,40 @@ +// Copyright (C) 2017 ScyllaDB + +package httpx + +import ( + "net/http" + "net/url" +) + +// CloneRequest creates a shallow copy of the request along with a deep copy +// of the Headers and URL. +func CloneRequest(req *http.Request) *http.Request { + r := new(http.Request) + + // Shallow clone + *r = *req + + // Copy ctx + r = r.WithContext(req.Context()) + + // Deep copy headers + r.Header = CloneHeader(req.Header) + + // Deep copy URL + r.URL = new(url.URL) + *r.URL = *req.URL + + return r +} + +// CloneHeader creates a deep copy of an http.Header. +func CloneHeader(in http.Header) http.Header { + out := make(http.Header, len(in)) + for key, values := range in { + newValues := make([]string, len(values)) + copy(newValues, values) + out[key] = newValues + } + return out +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/resp.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/resp.go new file mode 100644 index 00000000000..b6f56f384b5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/resp.go @@ -0,0 +1,32 @@ +// Copyright (C) 2017 ScyllaDB + +package httpx + +import ( + "bytes" + "fmt" + "io" + "net/http" +) + +// MakeResponse creates a minimal response for a request and status code. +func MakeResponse(req *http.Request, statusCode int) *http.Response { + return &http.Response{ + Status: http.StatusText(statusCode), + StatusCode: statusCode, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Request: req, + Header: make(http.Header), + Body: http.NoBody, + } +} + +// MakeAgentErrorResponse creates an agent error response that can be used in testing. 
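+//
+// Illustrative use in a test transport (editor's sketch):
+//
+//	rt := RoundTripperFunc(func(req *http.Request) (*http.Response, error) {
+//		return MakeAgentErrorResponse(req, 503, "agent unavailable"), nil
+//	})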
+func MakeAgentErrorResponse(req *http.Request, statusCode int, msg string) *http.Response {
+	resp := MakeResponse(req, statusCode)
+	resp.Header.Set("Content-type", "application/json")
+	resp.Body = io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"status": %d, "message": "%s"}`, statusCode, msg))) // nolint: gocritic
+	return resp
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/rt.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/rt.go
new file mode 100644
index 00000000000..1cd0aaf06ab
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/httpx/rt.go
@@ -0,0 +1,15 @@
+// Copyright (C) 2017 ScyllaDB
+
+package httpx
+
+import "net/http"
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/inexlist/ksfilter/ksfilter.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/inexlist/ksfilter/ksfilter.go
new file mode 100644
index 00000000000..fd3b23bb8de
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/inexlist/ksfilter/ksfilter.go
@@ -0,0 +1,182 @@
+// Copyright (C) 2017 ScyllaDB
+
+package ksfilter
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/scylladb/scylla-manager/v3/pkg/service"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/inexlist"
+	"go.uber.org/multierr"
+)
+
+// Unit specifies a keyspace and its tables.
+type Unit struct {
+	Keyspace  string
+	Tables    []string
+	AllTables bool
+}
+
+// Filter is a builder that lets you filter keyspaces and tables by adding
+// them on a keyspace-by-keyspace basis.
+type Filter struct {
+	filters   []string
+	inex      inexlist.InExList
+	units     []Unit
+	keyspaces []string
+}
+
+func NewFilter(filters []string) (*Filter, error) {
+	// Validate filters
+	var errs error
+	for i, f := range filters {
+		err := validate(filters[i])
+		if err != nil {
+			errs = multierr.Append(errs, errors.Wrapf(err, "%q on position %d", f, i))
+			continue
+		}
+	}
+	if errs != nil {
+		return nil, service.ErrValidate(errors.Wrap(errs, "invalid filters"))
+	}
+
+	// Decorate filters and create inexlist
+	inex, err := inexlist.ParseInExList(decorate(filters))
+	if err != nil {
+		return nil, err
+	}
+
+	return &Filter{
+		filters: filters,
+		inex:    inex,
+	}, nil
+}
+
+func validate(filter string) error {
+	if filter == "*" || filter == "!*" {
+		return nil
+	}
+	if strings.HasPrefix(filter, ".") {
+		return errors.New("missing keyspace")
+	}
+	return nil
+}
+
+func decorate(filters []string) []string {
+	if len(filters) == 0 {
+		filters = append(filters, "*.*")
+	}
+
+	for i, f := range filters {
+		if strings.Contains(f, ".") {
+			continue
+		}
+		if strings.HasSuffix(f, "*") {
+			filters[i] = strings.TrimSuffix(f, "*") + "*.*"
+		} else {
+			filters[i] += ".*"
+		}
+	}
+
+	return filters
+}
+
+// Add filters the keyspace and tables; if they match, it adds a new unit to
+// the Filter.
+func (f *Filter) Add(keyspace string, tables []string) {
+	// Add prefix
+	prefix := keyspace + "."
+ for i := 0; i < len(tables); i++ { + tables[i] = prefix + tables[i] + } + + // Filter + filtered := f.inex.Filter(tables) + + // No data, skip the keyspace + if len(filtered) == 0 { + return + } + + // Remove prefix + for i := 0; i < len(filtered); i++ { + filtered[i] = strings.TrimPrefix(filtered[i], prefix) + } + + // Add unit + u := Unit{ + Keyspace: keyspace, + Tables: filtered, + AllTables: len(filtered) == len(tables), + } + f.units = append(f.units, u) + + f.keyspaces = append(f.keyspaces, keyspace) +} + +// Check returns true iff table matches filter. +func (f *Filter) Check(keyspace, table string) bool { + key := keyspace + "." + table + return len(f.inex.Filter([]string{key})) > 0 +} + +// Apply returns the resulting units. The units are sorted by position of a +// first match in the filters. +// If no units are found or a filter is invalid a validation error is returned. +// The validation error may be disabled by providing the force=true. +func (f *Filter) Apply(force bool) ([]Unit, error) { + if len(f.units) == 0 && !force { + return nil, service.ErrValidate(errors.Errorf("no keyspace matched criteria %s - available keyspaces are: %s", f.filters, f.keyspaces)) + } + + // Sort units by the presence + sortUnits(f.units, f.inex) + + return f.units, nil +} + +func sortUnits(units []Unit, inclExcl inexlist.InExList) { + positions := make(map[string]int) + for _, u := range units { + min := inclExcl.Size() + for _, t := range u.Tables { + if p := inclExcl.FirstMatch(u.Keyspace + "." + t); p >= 0 && p < min { + min = p + } + } + positions[u.Keyspace] = min + } + + sort.Slice(units, func(i, j int) bool { + // order by position + switch { + case positions[units[i].Keyspace] < positions[units[j].Keyspace]: + return true + case positions[units[i].Keyspace] > positions[units[j].Keyspace]: + return false + default: + // promote system keyspaces + l := strings.HasPrefix(units[i].Keyspace, "system") + r := strings.HasPrefix(units[j].Keyspace, "system") + switch { + case l && !r: + return true + case !l && r: + return false + default: + // order by name + return units[i].Keyspace < units[j].Keyspace + } + } + }) +} + +// Filters returns the original filters used to create the instance. +func (f *Filter) Filters() []string { + if f == nil { + return nil + } + return f.filters +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/jsonutil/set.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/jsonutil/set.go new file mode 100644 index 00000000000..cf0eb299f29 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/jsonutil/set.go @@ -0,0 +1,21 @@ +// Copyright (C) 2017 ScyllaDB + +package jsonutil + +import ( + "encoding/json" +) + +// Set returns a copy of the message where key=value. +func Set(msg json.RawMessage, key string, value interface{}) json.RawMessage { + m := map[string]interface{}{} + if err := json.Unmarshal(msg, &m); err != nil { + panic(err) + } + m[key] = value + v, err := json.Marshal(m) + if err != nil { + panic(err) + } + return v +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/parallel/parallel.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/parallel/parallel.go new file mode 100644 index 00000000000..27548651788 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/parallel/parallel.go @@ -0,0 +1,95 @@ +// Copyright (C) 2017 ScyllaDB + +package parallel + +import ( + "go.uber.org/atomic" +) + +// NoLimit means full parallelism mode. 
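+//
+// Illustrative use with Run (editor's sketch; hosts and ping are hypothetical):
+//
+//	err := Run(len(hosts), NoLimit, func(i int) error {
+//		return ping(hosts[i])
+//	}, NopNotify)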
+const NoLimit = 0
+
+// ErrAbort is a special kind of error that aborts all further execution.
+// Function calls that are in progress will continue to execute but no new
+// functions will be called.
+type ErrAbort struct {
+	error
+}
+
+// Abort wraps err into an ErrAbort that aborts all further execution.
+func Abort(err error) ErrAbort {
+	return ErrAbort{error: err}
+}
+
+func isErrAbort(err error) (bool, error) {
+	a, ok := err.(ErrAbort) // nolint: errorlint
+	if !ok {
+		return false, nil
+	}
+	return true, a.error
+}
+
+// NopNotify does not perform any operation when encountering an error during Run.
+func NopNotify(int, error) {}
+
+// Run executes function f with arguments ranging from 0 to n-1, executing at
+// most limit calls in parallel.
+// If limit is 0 it runs f(0),f(1),...,f(n-1) in parallel.
+// Notify is called when worker i encounters error err.
+func Run(n, limit int, f func(i int) error, notify func(i int, err error)) error {
+	if n == 0 {
+		return nil
+	}
+	if n == 1 {
+		return f(0)
+	}
+
+	if limit <= 0 || limit > n {
+		limit = n
+	}
+
+	var (
+		idx  = atomic.NewInt32(0)
+		out  = make(chan error)
+		abrt = atomic.NewBool(false)
+	)
+	for j := 0; j < limit; j++ {
+		go func() {
+			for {
+				// Exit when there is nothing to do
+				i := int(idx.Inc()) - 1
+				if i >= n {
+					return
+				}
+
+				// Exit if aborted
+				if abrt.Load() {
+					out <- nil
+					continue
+				}
+
+				// Execute
+				err := f(i)
+				if ok, inner := isErrAbort(err); ok {
+					abrt.Store(true)
+					err = inner
+				}
+				out <- err
+				if err != nil {
+					notify(i, err)
+				}
+			}
+		}()
+	}
+
+	var retErr error
+	for i := 0; i < n; i++ {
+		err := <-out
+		// Appending all errors reduces readability, so it's enough to return
+		// only one of them (the other errors have already been logged via
+		// notify).
+		if retErr == nil {
+			retErr = err
+		}
+	}
+	return retErr
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/pathparser/parser.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/pathparser/parser.go
new file mode 100644
index 00000000000..c3d25f26f7a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/pathparser/parser.go
@@ -0,0 +1,83 @@
+// Copyright (C) 2017 ScyllaDB
+
+package pathparser
+
+import (
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/scylladb/scylla-manager/v3/pkg/util/uuid"
+)
+
+// PathParser can be used to parse parts of a string separated by the provided sep.
+type PathParser struct {
+	value string
+	sep   string
+}
+
+// New returns an instance of PathParser.
+func New(v, sep string) *PathParser {
+	return &PathParser{
+		value: v,
+		sep:   sep,
+	}
+}
+
+// Parser describes the interface of a part parser; users can implement their
+// own parsers. Each parser consumes a single part of the parsed value.
+type Parser func(string) error
+
+// Parse iterates over the provided parsers, each consuming one string part.
+func (p PathParser) Parse(parsers ...Parser) (int, error) {
+	parts := strings.Split(p.value, p.sep)
+	for i, p := range parsers {
+		if i >= len(parts) {
+			return i, nil
+		}
+		if err := p(parts[i]); err != nil {
+			return i, errors.Wrapf(err, "invalid path element at position %d", i)
+		}
+	}
+	return len(parsers), nil
+}
+
+// ID parser saves a UUID value under the given ptr.
+func ID(ptr *uuid.UUID) Parser {
+	return func(v string) error {
+		return ptr.UnmarshalText([]byte(v))
+	}
+}
+
+// String parser saves a string under the given ptr.
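+//
+// Illustrative example parsing "<cluster_id>/<name>" (editor's sketch):
+//
+//	var id uuid.UUID
+//	var name string
+//	_, err := New(path, "/").Parse(ID(&id), String(&name))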
+func String(ptr *string) Parser { + return func(v string) error { + *ptr = v + return nil + } +} + +// Static validates a path part is one of the provided values. +func Static(s ...string) Parser { + if len(s) == 0 { + panic("Expected list of values") + } + return func(v string) error { + if len(s) == 1 { + if v != s[0] { + return errors.Errorf("expected %s got %s", s[0], v) + } + } else { + var ok bool + for i := range s { + if v == s[i] { + ok = true + break + } + } + if !ok { + return errors.Errorf("expected one of %s got %s", s, v) + } + } + return nil + } +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/prom/parse.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/prom/parse.go new file mode 100644 index 00000000000..c919a7eb4c0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/prom/parse.go @@ -0,0 +1,19 @@ +// Copyright (C) 2017 ScyllaDB + +package prom + +import ( + "io" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" +) + +// MetricFamily represent single prometheus metric. +type MetricFamily = dto.MetricFamily + +// ParseText parses text format of prometheus metrics from input reader. +func ParseText(r io.Reader) (map[string]*MetricFamily, error) { + var p expfmt.TextParser + return p.TextToMetricFamilies(r) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/retry/backoff.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/retry/backoff.go new file mode 100644 index 00000000000..30982ba4f96 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/retry/backoff.go @@ -0,0 +1,55 @@ +// Copyright (C) 2017 ScyllaDB + +package retry + +import ( + "time" + + "github.com/cenkalti/backoff/v4" +) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request, to determine the amount of time +// that should pass before trying again. +type Backoff = backoff.BackOff + +// Stop indicates that no more retries should be made. +const Stop time.Duration = -1 + +// NewExponentialBackoff returns Backoff implementation that increases each +// wait period exponentially. +// Multiplier controls how fast each wait period grows, and randomizationFactor +// allows to inject some jitter between periods. +func NewExponentialBackoff(initialInterval, maxElapsedTime, maxInterval time.Duration, multiplier, randomizationFactor float64) Backoff { + b := backoff.NewExponentialBackOff() + b.InitialInterval = initialInterval + b.MaxElapsedTime = maxElapsedTime + b.MaxInterval = maxInterval + + b.Multiplier = multiplier + b.RandomizationFactor = randomizationFactor + b.Reset() + return b +} + +// WithMaxRetries allows to set maximum number of retries for given backoff strategy. +func WithMaxRetries(b Backoff, maxRetries uint64) Backoff { + return backoff.WithMaxRetries(b, maxRetries) +} + +// BackoffFunc type is an adapter to allow the use of ordinary +// functions as Backoff. +type BackoffFunc func() time.Duration + +// NextBackOff returns the duration to wait before retrying the operation. +func (b BackoffFunc) NextBackOff() time.Duration { + return b() +} + +// Reset to initial state. +func (b BackoffFunc) Reset() {} + +// Clone returns a copy of BackoffFunc. 
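+//
+// Illustrative example of a constant backoff built from a function
+// (editor's sketch):
+//
+//	b := BackoffFunc(func() time.Duration { return 5 * time.Second })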
+func (b BackoffFunc) Clone() Backoff {
+	return b
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/retry/retry.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/retry/retry.go
new file mode 100644
index 00000000000..2ca25e14f47
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/retry/retry.go
@@ -0,0 +1,36 @@
+// Copyright (C) 2017 ScyllaDB
+
+package retry
+
+import (
+	"context"
+
+	"github.com/cenkalti/backoff/v4"
+	"github.com/pkg/errors"
+)
+
+// An Operation is executed by WithNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation = backoff.Operation
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+type Notify = backoff.Notify
+
+// WithNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleep.
+func WithNotify(ctx context.Context, op Operation, b Backoff, n Notify) error {
+	return backoff.RetryNotify(op, backoff.WithContext(b, ctx), n)
+}
+
+// Permanent wraps the given err in a *backoff.PermanentError.
+// This error interrupts further retries and causes the retrying mechanism to
+// stop.
+func Permanent(err error) error {
+	return backoff.Permanent(err)
+}
+
+// IsPermanent checks if an error is a permanent error created with Permanent.
+func IsPermanent(err error) bool {
+	var perr *backoff.PermanentError
+	return errors.As(err, &perr)
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/slice/contains.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/slice/contains.go
new file mode 100644
index 00000000000..b93c6c8ce00
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/util/slice/contains.go
@@ -0,0 +1,35 @@
+// Copyright (C) 2017 ScyllaDB
+
+package slice
+
+import "fmt"
+
+// Contains is a general purpose function to check if a slice contains an
+// element. It has linear complexity and does not assume any structure of the
+// data. Most likely you want to use one of the typed functions `ContainsX`
+// from this package instead of this function.
+func Contains(n int, match func(i int) bool) bool {
+	for i := 0; i < n; i++ {
+		if match(i) {
+			return true
+		}
+	}
+	return false
+}
+
+// ContainsString returns true iff one of the elements of a is s.
+func ContainsString(a []string, s string) bool {
+	return Contains(len(a), func(i int) bool {
+		return a[i] == s
+	})
+}
+
+// Index returns the position of e in s or panics if e can't be found in s.
+func Index[T comparable](s []T, e T) int {
+	for i := range s {
+		if e == s[i] {
+			return i
+		}
+	}
+	panic(fmt.Sprintf("unknown element: %v in slice: %v", e, s))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/pkg/version.go b/vendor/github.com/scylladb/scylla-manager/v3/pkg/version.go
new file mode 100644
index 00000000000..ac0af4973ed
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/pkg/version.go
@@ -0,0 +1,10 @@
+// Copyright (C) 2017 ScyllaDB
+
+package pkg
+
+var version = "Snapshot"
+
+// Version returns the version of the build.
+func Version() string { + return version +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/agent_client.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/agent_client.go new file mode 100644 index 00000000000..c1fadfbb1e1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/agent_client.go @@ -0,0 +1,111 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package client + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations" +) + +// Default agent HTTP client. +var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "scylla-manager.magic.host" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/agent" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http"} + +// NewHTTPClient creates a new agent HTTP client. +func NewHTTPClient(formats strfmt.Registry) *Agent { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new agent HTTP client, +// using a customizable transport config. +func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *Agent { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new agent client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *Agent { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(Agent) + cli.Transport = transport + cli.Operations = operations.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. +type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. 
+func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// Agent is a client for agent +type Agent struct { + Operations operations.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *Agent) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.Operations.SetTransport(transport) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_bwlimit_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_bwlimit_parameters.go new file mode 100644 index 00000000000..62ac422409d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_bwlimit_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewCoreBwlimitParams creates a new CoreBwlimitParams object +// with the default values initialized. +func NewCoreBwlimitParams() *CoreBwlimitParams { + var () + return &CoreBwlimitParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCoreBwlimitParamsWithTimeout creates a new CoreBwlimitParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCoreBwlimitParamsWithTimeout(timeout time.Duration) *CoreBwlimitParams { + var () + return &CoreBwlimitParams{ + + timeout: timeout, + } +} + +// NewCoreBwlimitParamsWithContext creates a new CoreBwlimitParams object +// with the default values initialized, and the ability to set a context for a request +func NewCoreBwlimitParamsWithContext(ctx context.Context) *CoreBwlimitParams { + var () + return &CoreBwlimitParams{ + + Context: ctx, + } +} + +// NewCoreBwlimitParamsWithHTTPClient creates a new CoreBwlimitParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCoreBwlimitParamsWithHTTPClient(client *http.Client) *CoreBwlimitParams { + var () + return &CoreBwlimitParams{ + HTTPClient: client, + } +} + +/* +CoreBwlimitParams contains all the parameters to send to the API endpoint +for the core bwlimit operation typically these are written to a http.Request +*/ +type CoreBwlimitParams struct { + + /*BandwidthRate + bandwidth rate + + */ + BandwidthRate *models.Bandwidth + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the core bwlimit params +func (o *CoreBwlimitParams) WithTimeout(timeout time.Duration) *CoreBwlimitParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the core bwlimit params +func (o *CoreBwlimitParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the core bwlimit params +func (o *CoreBwlimitParams) WithContext(ctx context.Context) *CoreBwlimitParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the core bwlimit params +func (o *CoreBwlimitParams) SetContext(ctx context.Context) { + o.Context 
= ctx +} + +// WithHTTPClient adds the HTTPClient to the core bwlimit params +func (o *CoreBwlimitParams) WithHTTPClient(client *http.Client) *CoreBwlimitParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the core bwlimit params +func (o *CoreBwlimitParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBandwidthRate adds the bandwidthRate to the core bwlimit params +func (o *CoreBwlimitParams) WithBandwidthRate(bandwidthRate *models.Bandwidth) *CoreBwlimitParams { + o.SetBandwidthRate(bandwidthRate) + return o +} + +// SetBandwidthRate adds the bandwidthRate to the core bwlimit params +func (o *CoreBwlimitParams) SetBandwidthRate(bandwidthRate *models.Bandwidth) { + o.BandwidthRate = bandwidthRate +} + +// WriteToRequest writes these params to a swagger request +func (o *CoreBwlimitParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.BandwidthRate != nil { + if err := r.SetBodyParam(o.BandwidthRate); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_bwlimit_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_bwlimit_responses.go new file mode 100644 index 00000000000..d2b5c33a648 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_bwlimit_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// CoreBwlimitReader is a Reader for the CoreBwlimit structure. +type CoreBwlimitReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CoreBwlimitReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCoreBwlimitOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCoreBwlimitDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCoreBwlimitOK creates a CoreBwlimitOK with default headers values +func NewCoreBwlimitOK() *CoreBwlimitOK { + return &CoreBwlimitOK{} +} + +/* +CoreBwlimitOK handles this case with default header values. 
+ +bandwidth rate +*/ +type CoreBwlimitOK struct { + Payload *models.Bandwidth + JobID int64 +} + +func (o *CoreBwlimitOK) GetPayload() *models.Bandwidth { + return o.Payload +} + +func (o *CoreBwlimitOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Bandwidth) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewCoreBwlimitDefault creates a CoreBwlimitDefault with default headers values +func NewCoreBwlimitDefault(code int) *CoreBwlimitDefault { + return &CoreBwlimitDefault{ + _statusCode: code, + } +} + +/* +CoreBwlimitDefault handles this case with default header values. + +Server error +*/ +type CoreBwlimitDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the core bwlimit default response +func (o *CoreBwlimitDefault) Code() int { + return o._statusCode +} + +func (o *CoreBwlimitDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CoreBwlimitDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *CoreBwlimitDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_delete_parameters.go new file mode 100644 index 00000000000..e7291504759 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_delete_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewCoreStatsDeleteParams creates a new CoreStatsDeleteParams object +// with the default values initialized. 
+func NewCoreStatsDeleteParams() *CoreStatsDeleteParams { + var () + return &CoreStatsDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCoreStatsDeleteParamsWithTimeout creates a new CoreStatsDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCoreStatsDeleteParamsWithTimeout(timeout time.Duration) *CoreStatsDeleteParams { + var () + return &CoreStatsDeleteParams{ + + timeout: timeout, + } +} + +// NewCoreStatsDeleteParamsWithContext creates a new CoreStatsDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewCoreStatsDeleteParamsWithContext(ctx context.Context) *CoreStatsDeleteParams { + var () + return &CoreStatsDeleteParams{ + + Context: ctx, + } +} + +// NewCoreStatsDeleteParamsWithHTTPClient creates a new CoreStatsDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCoreStatsDeleteParamsWithHTTPClient(client *http.Client) *CoreStatsDeleteParams { + var () + return &CoreStatsDeleteParams{ + HTTPClient: client, + } +} + +/* +CoreStatsDeleteParams contains all the parameters to send to the API endpoint +for the core stats delete operation typically these are written to a http.Request +*/ +type CoreStatsDeleteParams struct { + + /*StatsParams + Stats parameters + + */ + StatsParams *models.StatsParams + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the core stats delete params +func (o *CoreStatsDeleteParams) WithTimeout(timeout time.Duration) *CoreStatsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the core stats delete params +func (o *CoreStatsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the core stats delete params +func (o *CoreStatsDeleteParams) WithContext(ctx context.Context) *CoreStatsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the core stats delete params +func (o *CoreStatsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the core stats delete params +func (o *CoreStatsDeleteParams) WithHTTPClient(client *http.Client) *CoreStatsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the core stats delete params +func (o *CoreStatsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithStatsParams adds the statsParams to the core stats delete params +func (o *CoreStatsDeleteParams) WithStatsParams(statsParams *models.StatsParams) *CoreStatsDeleteParams { + o.SetStatsParams(statsParams) + return o +} + +// SetStatsParams adds the statsParams to the core stats delete params +func (o *CoreStatsDeleteParams) SetStatsParams(statsParams *models.StatsParams) { + o.StatsParams = statsParams +} + +// WriteToRequest writes these params to a swagger request +func (o *CoreStatsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.StatsParams != nil { + if err := r.SetBodyParam(o.StatsParams); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_delete_responses.go new file mode 100644 index 00000000000..ada73d24a01 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_delete_responses.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// CoreStatsDeleteReader is a Reader for the CoreStatsDelete structure. +type CoreStatsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CoreStatsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCoreStatsDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCoreStatsDeleteDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCoreStatsDeleteOK creates a CoreStatsDeleteOK with default headers values +func NewCoreStatsDeleteOK() *CoreStatsDeleteOK { + return &CoreStatsDeleteOK{} +} + +/* +CoreStatsDeleteOK handles this case with default header values. + +Empty object +*/ +type CoreStatsDeleteOK struct { + Payload interface{} + JobID int64 +} + +func (o *CoreStatsDeleteOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CoreStatsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewCoreStatsDeleteDefault creates a CoreStatsDeleteDefault with default headers values +func NewCoreStatsDeleteDefault(code int) *CoreStatsDeleteDefault { + return &CoreStatsDeleteDefault{ + _statusCode: code, + } +} + +/* +CoreStatsDeleteDefault handles this case with default header values. 
+ +Server error +*/ +type CoreStatsDeleteDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the core stats delete default response +func (o *CoreStatsDeleteDefault) Code() int { + return o._statusCode +} + +func (o *CoreStatsDeleteDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CoreStatsDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *CoreStatsDeleteDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_reset_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_reset_parameters.go new file mode 100644 index 00000000000..8a0fcc09eb8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_reset_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewCoreStatsResetParams creates a new CoreStatsResetParams object +// with the default values initialized. 
+func NewCoreStatsResetParams() *CoreStatsResetParams { + var () + return &CoreStatsResetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCoreStatsResetParamsWithTimeout creates a new CoreStatsResetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCoreStatsResetParamsWithTimeout(timeout time.Duration) *CoreStatsResetParams { + var () + return &CoreStatsResetParams{ + + timeout: timeout, + } +} + +// NewCoreStatsResetParamsWithContext creates a new CoreStatsResetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCoreStatsResetParamsWithContext(ctx context.Context) *CoreStatsResetParams { + var () + return &CoreStatsResetParams{ + + Context: ctx, + } +} + +// NewCoreStatsResetParamsWithHTTPClient creates a new CoreStatsResetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCoreStatsResetParamsWithHTTPClient(client *http.Client) *CoreStatsResetParams { + var () + return &CoreStatsResetParams{ + HTTPClient: client, + } +} + +/* +CoreStatsResetParams contains all the parameters to send to the API endpoint +for the core stats reset operation typically these are written to a http.Request +*/ +type CoreStatsResetParams struct { + + /*StatsParams + Stats parameters + + */ + StatsParams *models.StatsParams + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the core stats reset params +func (o *CoreStatsResetParams) WithTimeout(timeout time.Duration) *CoreStatsResetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the core stats reset params +func (o *CoreStatsResetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the core stats reset params +func (o *CoreStatsResetParams) WithContext(ctx context.Context) *CoreStatsResetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the core stats reset params +func (o *CoreStatsResetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the core stats reset params +func (o *CoreStatsResetParams) WithHTTPClient(client *http.Client) *CoreStatsResetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the core stats reset params +func (o *CoreStatsResetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithStatsParams adds the statsParams to the core stats reset params +func (o *CoreStatsResetParams) WithStatsParams(statsParams *models.StatsParams) *CoreStatsResetParams { + o.SetStatsParams(statsParams) + return o +} + +// SetStatsParams adds the statsParams to the core stats reset params +func (o *CoreStatsResetParams) SetStatsParams(statsParams *models.StatsParams) { + o.StatsParams = statsParams +} + +// WriteToRequest writes these params to a swagger request +func (o *CoreStatsResetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.StatsParams != nil { + if err := r.SetBodyParam(o.StatsParams); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_reset_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_reset_responses.go new file mode 100644 index 00000000000..a954eea6540 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/core_stats_reset_responses.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// CoreStatsResetReader is a Reader for the CoreStatsReset structure. +type CoreStatsResetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CoreStatsResetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCoreStatsResetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCoreStatsResetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCoreStatsResetOK creates a CoreStatsResetOK with default headers values +func NewCoreStatsResetOK() *CoreStatsResetOK { + return &CoreStatsResetOK{} +} + +/* +CoreStatsResetOK handles this case with default header values. + +Empty object +*/ +type CoreStatsResetOK struct { + Payload interface{} + JobID int64 +} + +func (o *CoreStatsResetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CoreStatsResetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewCoreStatsResetDefault creates a CoreStatsResetDefault with default headers values +func NewCoreStatsResetDefault(code int) *CoreStatsResetDefault { + return &CoreStatsResetDefault{ + _statusCode: code, + } +} + +/* +CoreStatsResetDefault handles this case with default header values. 
+ +Server error +*/ +type CoreStatsResetDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the core stats reset default response +func (o *CoreStatsResetDefault) Code() int { + return o._statusCode +} + +func (o *CoreStatsResetDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *CoreStatsResetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *CoreStatsResetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/free_o_s_memory_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/free_o_s_memory_parameters.go new file mode 100644 index 00000000000..587403775c4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/free_o_s_memory_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFreeOSMemoryParams creates a new FreeOSMemoryParams object +// with the default values initialized. 
+func NewFreeOSMemoryParams() *FreeOSMemoryParams { + + return &FreeOSMemoryParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFreeOSMemoryParamsWithTimeout creates a new FreeOSMemoryParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFreeOSMemoryParamsWithTimeout(timeout time.Duration) *FreeOSMemoryParams { + + return &FreeOSMemoryParams{ + + timeout: timeout, + } +} + +// NewFreeOSMemoryParamsWithContext creates a new FreeOSMemoryParams object +// with the default values initialized, and the ability to set a context for a request +func NewFreeOSMemoryParamsWithContext(ctx context.Context) *FreeOSMemoryParams { + + return &FreeOSMemoryParams{ + + Context: ctx, + } +} + +// NewFreeOSMemoryParamsWithHTTPClient creates a new FreeOSMemoryParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFreeOSMemoryParamsWithHTTPClient(client *http.Client) *FreeOSMemoryParams { + + return &FreeOSMemoryParams{ + HTTPClient: client, + } +} + +/* +FreeOSMemoryParams contains all the parameters to send to the API endpoint +for the free o s memory operation typically these are written to a http.Request +*/ +type FreeOSMemoryParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the free o s memory params +func (o *FreeOSMemoryParams) WithTimeout(timeout time.Duration) *FreeOSMemoryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the free o s memory params +func (o *FreeOSMemoryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the free o s memory params +func (o *FreeOSMemoryParams) WithContext(ctx context.Context) *FreeOSMemoryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the free o s memory params +func (o *FreeOSMemoryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the free o s memory params +func (o *FreeOSMemoryParams) WithHTTPClient(client *http.Client) *FreeOSMemoryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the free o s memory params +func (o *FreeOSMemoryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FreeOSMemoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/free_o_s_memory_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/free_o_s_memory_responses.go new file mode 100644 index 00000000000..51313ddac85 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/free_o_s_memory_responses.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// FreeOSMemoryReader is a Reader for the FreeOSMemory structure. +type FreeOSMemoryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FreeOSMemoryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFreeOSMemoryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFreeOSMemoryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFreeOSMemoryOK creates a FreeOSMemoryOK with default headers values +func NewFreeOSMemoryOK() *FreeOSMemoryOK { + return &FreeOSMemoryOK{} +} + +/* +FreeOSMemoryOK handles this case with default header values. + +Empty object +*/ +type FreeOSMemoryOK struct { + Payload interface{} + JobID int64 +} + +func (o *FreeOSMemoryOK) GetPayload() interface{} { + return o.Payload +} + +func (o *FreeOSMemoryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewFreeOSMemoryDefault creates a FreeOSMemoryDefault with default headers values +func NewFreeOSMemoryDefault(code int) *FreeOSMemoryDefault { + return &FreeOSMemoryDefault{ + _statusCode: code, + } +} + +/* +FreeOSMemoryDefault handles this case with default header values. 
+ +Server error +*/ +type FreeOSMemoryDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the free o s memory default response +func (o *FreeOSMemoryDefault) Code() int { + return o._statusCode +} + +func (o *FreeOSMemoryDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *FreeOSMemoryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *FreeOSMemoryDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_info_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_info_parameters.go new file mode 100644 index 00000000000..294e47e8eef --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_info_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewJobInfoParams creates a new JobInfoParams object +// with the default values initialized. 
+func NewJobInfoParams() *JobInfoParams { + var () + return &JobInfoParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewJobInfoParamsWithTimeout creates a new JobInfoParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewJobInfoParamsWithTimeout(timeout time.Duration) *JobInfoParams { + var () + return &JobInfoParams{ + + timeout: timeout, + } +} + +// NewJobInfoParamsWithContext creates a new JobInfoParams object +// with the default values initialized, and the ability to set a context for a request +func NewJobInfoParamsWithContext(ctx context.Context) *JobInfoParams { + var () + return &JobInfoParams{ + + Context: ctx, + } +} + +// NewJobInfoParamsWithHTTPClient creates a new JobInfoParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewJobInfoParamsWithHTTPClient(client *http.Client) *JobInfoParams { + var () + return &JobInfoParams{ + HTTPClient: client, + } +} + +/* +JobInfoParams contains all the parameters to send to the API endpoint +for the job info operation typically these are written to a http.Request +*/ +type JobInfoParams struct { + + /*Jobinfo + Job info params with id and long polling + + */ + Jobinfo *models.JobInfoParams + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the job info params +func (o *JobInfoParams) WithTimeout(timeout time.Duration) *JobInfoParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the job info params +func (o *JobInfoParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the job info params +func (o *JobInfoParams) WithContext(ctx context.Context) *JobInfoParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the job info params +func (o *JobInfoParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the job info params +func (o *JobInfoParams) WithHTTPClient(client *http.Client) *JobInfoParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the job info params +func (o *JobInfoParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithJobinfo adds the jobinfo to the job info params +func (o *JobInfoParams) WithJobinfo(jobinfo *models.JobInfoParams) *JobInfoParams { + o.SetJobinfo(jobinfo) + return o +} + +// SetJobinfo adds the jobinfo to the job info params +func (o *JobInfoParams) SetJobinfo(jobinfo *models.JobInfoParams) { + o.Jobinfo = jobinfo +} + +// WriteToRequest writes these params to a swagger request +func (o *JobInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Jobinfo != nil { + if err := r.SetBodyParam(o.Jobinfo); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_info_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_info_responses.go new file mode 100644 index 00000000000..06df084dc85 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_info_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// JobInfoReader is a Reader for the JobInfo structure. +type JobInfoReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *JobInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewJobInfoOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewJobInfoDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewJobInfoOK creates a JobInfoOK with default headers values +func NewJobInfoOK() *JobInfoOK { + return &JobInfoOK{} +} + +/* +JobInfoOK handles this case with default header values. + +Aggregated info about job transfers +*/ +type JobInfoOK struct { + Payload *models.JobInfo + JobID int64 +} + +func (o *JobInfoOK) GetPayload() *models.JobInfo { + return o.Payload +} + +func (o *JobInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.JobInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewJobInfoDefault creates a JobInfoDefault with default headers values +func NewJobInfoDefault(code int) *JobInfoDefault { + return &JobInfoDefault{ + _statusCode: code, + } +} + +/* +JobInfoDefault handles this case with default header values. 
+ +Server error +*/ +type JobInfoDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the job info default response +func (o *JobInfoDefault) Code() int { + return o._statusCode +} + +func (o *JobInfoDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *JobInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *JobInfoDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_progress_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_progress_parameters.go new file mode 100644 index 00000000000..e50bcfe3818 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_progress_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewJobProgressParams creates a new JobProgressParams object +// with the default values initialized. 
+func NewJobProgressParams() *JobProgressParams { + var () + return &JobProgressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewJobProgressParamsWithTimeout creates a new JobProgressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewJobProgressParamsWithTimeout(timeout time.Duration) *JobProgressParams { + var () + return &JobProgressParams{ + + timeout: timeout, + } +} + +// NewJobProgressParamsWithContext creates a new JobProgressParams object +// with the default values initialized, and the ability to set a context for a request +func NewJobProgressParamsWithContext(ctx context.Context) *JobProgressParams { + var () + return &JobProgressParams{ + + Context: ctx, + } +} + +// NewJobProgressParamsWithHTTPClient creates a new JobProgressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewJobProgressParamsWithHTTPClient(client *http.Client) *JobProgressParams { + var () + return &JobProgressParams{ + HTTPClient: client, + } +} + +/* +JobProgressParams contains all the parameters to send to the API endpoint +for the job progress operation typically these are written to a http.Request +*/ +type JobProgressParams struct { + + /*Jobinfo + Job info params with id and long polling + + */ + Jobinfo *models.JobInfoParams + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the job progress params +func (o *JobProgressParams) WithTimeout(timeout time.Duration) *JobProgressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the job progress params +func (o *JobProgressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the job progress params +func (o *JobProgressParams) WithContext(ctx context.Context) *JobProgressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the job progress params +func (o *JobProgressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the job progress params +func (o *JobProgressParams) WithHTTPClient(client *http.Client) *JobProgressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the job progress params +func (o *JobProgressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithJobinfo adds the jobinfo to the job progress params +func (o *JobProgressParams) WithJobinfo(jobinfo *models.JobInfoParams) *JobProgressParams { + o.SetJobinfo(jobinfo) + return o +} + +// SetJobinfo adds the jobinfo to the job progress params +func (o *JobProgressParams) SetJobinfo(jobinfo *models.JobInfoParams) { + o.Jobinfo = jobinfo +} + +// WriteToRequest writes these params to a swagger request +func (o *JobProgressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Jobinfo != nil { + if err := r.SetBodyParam(o.Jobinfo); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_progress_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_progress_responses.go new file mode 100644 index 00000000000..a3f2ee6c6ce --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_progress_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// JobProgressReader is a Reader for the JobProgress structure. +type JobProgressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *JobProgressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewJobProgressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewJobProgressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewJobProgressOK creates a JobProgressOK with default headers values +func NewJobProgressOK() *JobProgressOK { + return &JobProgressOK{} +} + +/* +JobProgressOK handles this case with default header values. + +Aggregated info about job transfers +*/ +type JobProgressOK struct { + Payload *models.JobProgress + JobID int64 +} + +func (o *JobProgressOK) GetPayload() *models.JobProgress { + return o.Payload +} + +func (o *JobProgressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.JobProgress) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewJobProgressDefault creates a JobProgressDefault with default headers values +func NewJobProgressDefault(code int) *JobProgressDefault { + return &JobProgressDefault{ + _statusCode: code, + } +} + +/* +JobProgressDefault handles this case with default header values. 
+ +Server error +*/ +type JobProgressDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the job progress default response +func (o *JobProgressDefault) Code() int { + return o._statusCode +} + +func (o *JobProgressDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *JobProgressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *JobProgressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_stop_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_stop_parameters.go new file mode 100644 index 00000000000..ade24bf1c2a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_stop_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewJobStopParams creates a new JobStopParams object +// with the default values initialized. 
+func NewJobStopParams() *JobStopParams { + var () + return &JobStopParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewJobStopParamsWithTimeout creates a new JobStopParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewJobStopParamsWithTimeout(timeout time.Duration) *JobStopParams { + var () + return &JobStopParams{ + + timeout: timeout, + } +} + +// NewJobStopParamsWithContext creates a new JobStopParams object +// with the default values initialized, and the ability to set a context for a request +func NewJobStopParamsWithContext(ctx context.Context) *JobStopParams { + var () + return &JobStopParams{ + + Context: ctx, + } +} + +// NewJobStopParamsWithHTTPClient creates a new JobStopParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewJobStopParamsWithHTTPClient(client *http.Client) *JobStopParams { + var () + return &JobStopParams{ + HTTPClient: client, + } +} + +/* +JobStopParams contains all the parameters to send to the API endpoint +for the job stop operation typically these are written to a http.Request +*/ +type JobStopParams struct { + + /*Jobid + jobid + + */ + Jobid *models.Jobid + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the job stop params +func (o *JobStopParams) WithTimeout(timeout time.Duration) *JobStopParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the job stop params +func (o *JobStopParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the job stop params +func (o *JobStopParams) WithContext(ctx context.Context) *JobStopParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the job stop params +func (o *JobStopParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the job stop params +func (o *JobStopParams) WithHTTPClient(client *http.Client) *JobStopParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the job stop params +func (o *JobStopParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithJobid adds the jobid to the job stop params +func (o *JobStopParams) WithJobid(jobid *models.Jobid) *JobStopParams { + o.SetJobid(jobid) + return o +} + +// SetJobid adds the jobid to the job stop params +func (o *JobStopParams) SetJobid(jobid *models.Jobid) { + o.Jobid = jobid +} + +// WriteToRequest writes these params to a swagger request +func (o *JobStopParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Jobid != nil { + if err := r.SetBodyParam(o.Jobid); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_stop_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_stop_responses.go new file mode 100644 index 00000000000..7dd272a47f9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/job_stop_responses.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// JobStopReader is a Reader for the JobStop structure. +type JobStopReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *JobStopReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewJobStopOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewJobStopDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewJobStopOK creates a JobStopOK with default headers values +func NewJobStopOK() *JobStopOK { + return &JobStopOK{} +} + +/* +JobStopOK handles this case with default header values. + +Empty object +*/ +type JobStopOK struct { + Payload interface{} + JobID int64 +} + +func (o *JobStopOK) GetPayload() interface{} { + return o.Payload +} + +func (o *JobStopOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewJobStopDefault creates a JobStopDefault with default headers values +func NewJobStopDefault(code int) *JobStopDefault { + return &JobStopDefault{ + _statusCode: code, + } +} + +/* +JobStopDefault handles this case with default header values. + +Server error +*/ +type JobStopDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the job stop default response +func (o *JobStopDefault) Code() int { + return o._statusCode +} + +func (o *JobStopDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *JobStopDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *JobStopDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/node_info_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/node_info_parameters.go new file mode 100644 index 00000000000..22b2ccb83c8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/node_info_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewNodeInfoParams creates a new NodeInfoParams object +// with the default values initialized. +func NewNodeInfoParams() *NodeInfoParams { + + return &NodeInfoParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewNodeInfoParamsWithTimeout creates a new NodeInfoParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewNodeInfoParamsWithTimeout(timeout time.Duration) *NodeInfoParams { + + return &NodeInfoParams{ + + timeout: timeout, + } +} + +// NewNodeInfoParamsWithContext creates a new NodeInfoParams object +// with the default values initialized, and the ability to set a context for a request +func NewNodeInfoParamsWithContext(ctx context.Context) *NodeInfoParams { + + return &NodeInfoParams{ + + Context: ctx, + } +} + +// NewNodeInfoParamsWithHTTPClient creates a new NodeInfoParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewNodeInfoParamsWithHTTPClient(client *http.Client) *NodeInfoParams { + + return &NodeInfoParams{ + HTTPClient: client, + } +} + +/* +NodeInfoParams contains all the parameters to send to the API endpoint +for the node info operation typically these are written to a http.Request +*/ +type NodeInfoParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the node info params +func (o *NodeInfoParams) WithTimeout(timeout time.Duration) *NodeInfoParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the node info params +func (o *NodeInfoParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the node info params +func (o *NodeInfoParams) WithContext(ctx context.Context) *NodeInfoParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the node info params +func (o *NodeInfoParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the node info params +func (o *NodeInfoParams) WithHTTPClient(client *http.Client) *NodeInfoParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the node info params +func (o *NodeInfoParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *NodeInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/node_info_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/node_info_responses.go new file mode 100644 index 00000000000..48671cc35f6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/node_info_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NodeInfoReader is a Reader for the NodeInfo structure. +type NodeInfoReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *NodeInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewNodeInfoOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewNodeInfoDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewNodeInfoOK creates a NodeInfoOK with default headers values +func NewNodeInfoOK() *NodeInfoOK { + return &NodeInfoOK{} +} + +/* +NodeInfoOK handles this case with default header values. + +node information +*/ +type NodeInfoOK struct { + Payload *models.NodeInfo + JobID int64 +} + +func (o *NodeInfoOK) GetPayload() *models.NodeInfo { + return o.Payload +} + +func (o *NodeInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.NodeInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewNodeInfoDefault creates a NodeInfoDefault with default headers values +func NewNodeInfoDefault(code int) *NodeInfoDefault { + return &NodeInfoDefault{ + _statusCode: code, + } +} + +/* +NodeInfoDefault handles this case with default header values. 
+ +Server error +*/ +type NodeInfoDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the node info default response +func (o *NodeInfoDefault) Code() int { + return o._statusCode +} + +func (o *NodeInfoDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *NodeInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *NodeInfoDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_about_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_about_parameters.go new file mode 100644 index 00000000000..4b2d93957f3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_about_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsAboutParams creates a new OperationsAboutParams object +// with the default values initialized. 
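
Every generated *Params type in this package follows the same template: four constructors that differ only in which field they pre-populate, plus chainable With*/Set* pairs for everything else. A minimal caller-side sketch, assuming the vendored import path shown in the diff headers:

package main

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Any of the four constructors works as a starting point; the fluent
	// With* setters fill in whatever the constructor did not.
	params := operations.NewNodeInfoParams().
		WithContext(ctx).
		WithTimeout(10 * time.Second)
	_ = params // later passed to ClientService.NodeInfo (defined below)
}
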
+func NewOperationsAboutParams() *OperationsAboutParams { + var () + return &OperationsAboutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsAboutParamsWithTimeout creates a new OperationsAboutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsAboutParamsWithTimeout(timeout time.Duration) *OperationsAboutParams { + var () + return &OperationsAboutParams{ + + timeout: timeout, + } +} + +// NewOperationsAboutParamsWithContext creates a new OperationsAboutParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsAboutParamsWithContext(ctx context.Context) *OperationsAboutParams { + var () + return &OperationsAboutParams{ + + Context: ctx, + } +} + +// NewOperationsAboutParamsWithHTTPClient creates a new OperationsAboutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsAboutParamsWithHTTPClient(client *http.Client) *OperationsAboutParams { + var () + return &OperationsAboutParams{ + HTTPClient: client, + } +} + +/* +OperationsAboutParams contains all the parameters to send to the API endpoint +for the operations about operation typically these are written to a http.Request +*/ +type OperationsAboutParams struct { + + /*RemotePath + Remote path + + */ + RemotePath *models.RemotePath + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations about params +func (o *OperationsAboutParams) WithTimeout(timeout time.Duration) *OperationsAboutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations about params +func (o *OperationsAboutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations about params +func (o *OperationsAboutParams) WithContext(ctx context.Context) *OperationsAboutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations about params +func (o *OperationsAboutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations about params +func (o *OperationsAboutParams) WithHTTPClient(client *http.Client) *OperationsAboutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations about params +func (o *OperationsAboutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRemotePath adds the remotePath to the operations about params +func (o *OperationsAboutParams) WithRemotePath(remotePath *models.RemotePath) *OperationsAboutParams { + o.SetRemotePath(remotePath) + return o +} + +// SetRemotePath adds the remotePath to the operations about params +func (o *OperationsAboutParams) SetRemotePath(remotePath *models.RemotePath) { + o.RemotePath = remotePath +} + +// WriteToRequest writes these params to a swagger request +func (o *OperationsAboutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RemotePath != nil { + if err := r.SetBodyParam(o.RemotePath); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_about_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_about_responses.go new file mode 100644 index 00000000000..b9a45732922 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_about_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsAboutReader is a Reader for the OperationsAbout structure. +type OperationsAboutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsAboutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsAboutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsAboutDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsAboutOK creates a OperationsAboutOK with default headers values +func NewOperationsAboutOK() *OperationsAboutOK { + return &OperationsAboutOK{} +} + +/* +OperationsAboutOK handles this case with default header values. + +File system details +*/ +type OperationsAboutOK struct { + Payload *models.FileSystemDetails + JobID int64 +} + +func (o *OperationsAboutOK) GetPayload() *models.FileSystemDetails { + return o.Payload +} + +func (o *OperationsAboutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.FileSystemDetails) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsAboutDefault creates a OperationsAboutDefault with default headers values +func NewOperationsAboutDefault(code int) *OperationsAboutDefault { + return &OperationsAboutDefault{ + _statusCode: code, + } +} + +/* +OperationsAboutDefault handles this case with default header values. 
+ +Server error +*/ +type OperationsAboutDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations about default response +func (o *OperationsAboutDefault) Code() int { + return o._statusCode +} + +func (o *OperationsAboutDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsAboutDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsAboutDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_check_permissions_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_check_permissions_parameters.go new file mode 100644 index 00000000000..ca746a02705 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_check_permissions_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsCheckPermissionsParams creates a new OperationsCheckPermissionsParams object +// with the default values initialized. 
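
The generated readers all share one routing rule: HTTP 200 decodes into the typed *OK result, anything else into the *Default type, which implements error and is what callers receive. A sketch of unwrapping that error to reach the agent's ErrorResponse payload; svc is any ClientService (the interface appears later in this diff):

package example

import (
	"errors"
	"log"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations"
)

// logAgentError demonstrates recovering the structured error payload from a
// failed call; transport-level errors simply fail the errors.As check.
func logAgentError(svc operations.ClientService) {
	if _, err := svc.NodeInfo(operations.NewNodeInfoParams()); err != nil {
		var def *operations.NodeInfoDefault
		if errors.As(err, &def) {
			log.Printf("agent [HTTP %d]: %s", def.Code(), def.GetPayload().Message)
		}
	}
}
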
+func NewOperationsCheckPermissionsParams() *OperationsCheckPermissionsParams { + var () + return &OperationsCheckPermissionsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsCheckPermissionsParamsWithTimeout creates a new OperationsCheckPermissionsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsCheckPermissionsParamsWithTimeout(timeout time.Duration) *OperationsCheckPermissionsParams { + var () + return &OperationsCheckPermissionsParams{ + + timeout: timeout, + } +} + +// NewOperationsCheckPermissionsParamsWithContext creates a new OperationsCheckPermissionsParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsCheckPermissionsParamsWithContext(ctx context.Context) *OperationsCheckPermissionsParams { + var () + return &OperationsCheckPermissionsParams{ + + Context: ctx, + } +} + +// NewOperationsCheckPermissionsParamsWithHTTPClient creates a new OperationsCheckPermissionsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsCheckPermissionsParamsWithHTTPClient(client *http.Client) *OperationsCheckPermissionsParams { + var () + return &OperationsCheckPermissionsParams{ + HTTPClient: client, + } +} + +/* +OperationsCheckPermissionsParams contains all the parameters to send to the API endpoint +for the operations check permissions operation typically these are written to a http.Request +*/ +type OperationsCheckPermissionsParams struct { + + /*RemotePath + Remote path + + */ + RemotePath *models.RemotePath + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations check permissions params +func (o *OperationsCheckPermissionsParams) WithTimeout(timeout time.Duration) *OperationsCheckPermissionsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations check permissions params +func (o *OperationsCheckPermissionsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations check permissions params +func (o *OperationsCheckPermissionsParams) WithContext(ctx context.Context) *OperationsCheckPermissionsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations check permissions params +func (o *OperationsCheckPermissionsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations check permissions params +func (o *OperationsCheckPermissionsParams) WithHTTPClient(client *http.Client) *OperationsCheckPermissionsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations check permissions params +func (o *OperationsCheckPermissionsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRemotePath adds the remotePath to the operations check permissions params +func (o *OperationsCheckPermissionsParams) WithRemotePath(remotePath *models.RemotePath) *OperationsCheckPermissionsParams { + o.SetRemotePath(remotePath) + return o +} + +// SetRemotePath adds the remotePath to the operations check permissions params +func (o *OperationsCheckPermissionsParams) SetRemotePath(remotePath *models.RemotePath) { + o.RemotePath = remotePath +} + +// WriteToRequest writes these params to a swagger request +func (o *OperationsCheckPermissionsParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RemotePath != nil { + if err := r.SetBodyParam(o.RemotePath); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_check_permissions_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_check_permissions_responses.go new file mode 100644 index 00000000000..abaf73fb8a4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_check_permissions_responses.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsCheckPermissionsReader is a Reader for the OperationsCheckPermissions structure. +type OperationsCheckPermissionsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsCheckPermissionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsCheckPermissionsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsCheckPermissionsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsCheckPermissionsOK creates a OperationsCheckPermissionsOK with default headers values +func NewOperationsCheckPermissionsOK() *OperationsCheckPermissionsOK { + return &OperationsCheckPermissionsOK{} +} + +/* +OperationsCheckPermissionsOK handles this case with default header values. + +Empty object +*/ +type OperationsCheckPermissionsOK struct { + Payload interface{} + JobID int64 +} + +func (o *OperationsCheckPermissionsOK) GetPayload() interface{} { + return o.Payload +} + +func (o *OperationsCheckPermissionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsCheckPermissionsDefault creates a OperationsCheckPermissionsDefault with default headers values +func NewOperationsCheckPermissionsDefault(code int) *OperationsCheckPermissionsDefault { + return &OperationsCheckPermissionsDefault{ + _statusCode: code, + } +} + +/* +OperationsCheckPermissionsDefault handles this case with default header values. 
+ +Server error +*/ +type OperationsCheckPermissionsDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations check permissions default response +func (o *OperationsCheckPermissionsDefault) Code() int { + return o._statusCode +} + +func (o *OperationsCheckPermissionsDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsCheckPermissionsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsCheckPermissionsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_client.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_client.go new file mode 100644 index 00000000000..98a561c4daf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_client.go @@ -0,0 +1,774 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new operations API client. 
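
New needs only a runtime.ClientTransport and a format registry. A hedged wiring sketch using go-openapi's stock HTTP transport; the host, base path, and scheme below are illustrative placeholders, not values taken from this diff:

package example

import (
	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations"
)

// newAgentOperations builds the generated client over plain HTTP, matching
// the Schemes: []string{"http"} declared on every operation below.
func newAgentOperations(host string) operations.ClientService {
	t := httptransport.New(host, "/", []string{"http"})
	return operations.New(t, strfmt.Default)
}
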
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for operations API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientService is the interface for Client methods +type ClientService interface { + CoreBwlimit(params *CoreBwlimitParams) (*CoreBwlimitOK, error) + + CoreStatsDelete(params *CoreStatsDeleteParams) (*CoreStatsDeleteOK, error) + + CoreStatsReset(params *CoreStatsResetParams) (*CoreStatsResetOK, error) + + FreeOSMemory(params *FreeOSMemoryParams) (*FreeOSMemoryOK, error) + + JobInfo(params *JobInfoParams) (*JobInfoOK, error) + + JobProgress(params *JobProgressParams) (*JobProgressOK, error) + + JobStop(params *JobStopParams) (*JobStopOK, error) + + NodeInfo(params *NodeInfoParams) (*NodeInfoOK, error) + + OperationsAbout(params *OperationsAboutParams) (*OperationsAboutOK, error) + + OperationsCheckPermissions(params *OperationsCheckPermissionsParams) (*OperationsCheckPermissionsOK, error) + + OperationsCopyfile(params *OperationsCopyfileParams) (*OperationsCopyfileOK, error) + + OperationsDeletefile(params *OperationsDeletefileParams) (*OperationsDeletefileOK, error) + + OperationsFileInfo(params *OperationsFileInfoParams) (*OperationsFileInfoOK, error) + + OperationsList(params *OperationsListParams) (*OperationsListOK, error) + + OperationsMovefile(params *OperationsMovefileParams) (*OperationsMovefileOK, error) + + OperationsPurge(params *OperationsPurgeParams) (*OperationsPurgeOK, error) + + Reload(params *ReloadParams) (*ReloadOK, error) + + SyncCopyDir(params *SyncCopyDirParams) (*SyncCopyDirOK, error) + + SyncCopyPaths(params *SyncCopyPathsParams) (*SyncCopyPathsOK, error) + + SyncMoveDir(params *SyncMoveDirParams) (*SyncMoveDirOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +CoreBwlimit sets the bandwidth limit + +This sets the bandwidth limit to that passed in +*/ +func (a *Client) CoreBwlimit(params *CoreBwlimitParams) (*CoreBwlimitOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCoreBwlimitParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CoreBwlimit", + Method: "POST", + PathPattern: "/rclone/core/bwlimit", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CoreBwlimitReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CoreBwlimitOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CoreBwlimitDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CoreStatsDelete deletes specific stats group + +Delete stats +*/ +func (a *Client) CoreStatsDelete(params *CoreStatsDeleteParams) (*CoreStatsDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCoreStatsDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CoreStatsDelete", + Method: "POST", + PathPattern: "/rclone/core/stats-delete", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: 
&CoreStatsDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CoreStatsDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CoreStatsDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CoreStatsReset resets all or specific stats group + +Resets stats +*/ +func (a *Client) CoreStatsReset(params *CoreStatsResetParams) (*CoreStatsResetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCoreStatsResetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CoreStatsReset", + Method: "POST", + PathPattern: "/rclone/core/stats-reset", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CoreStatsResetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CoreStatsResetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CoreStatsResetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FreeOSMemory returns memory to o s + +Run debug.FreeOSMemory on the agent +*/ +func (a *Client) FreeOSMemory(params *FreeOSMemoryParams) (*FreeOSMemoryOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFreeOSMemoryParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "FreeOSMemory", + Method: "POST", + PathPattern: "/free_os_memory", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FreeOSMemoryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FreeOSMemoryOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FreeOSMemoryDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +JobInfo transfers stats about the job + +Returns current, completed transfers and job stats +*/ +func (a *Client) JobInfo(params *JobInfoParams) (*JobInfoOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewJobInfoParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "JobInfo", + Method: "POST", + PathPattern: "/rclone/job/info", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &JobInfoReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*JobInfoOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*JobInfoDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", 
unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +JobProgress returns aggregated job stats + +Returns aggregated job stats +*/ +func (a *Client) JobProgress(params *JobProgressParams) (*JobProgressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewJobProgressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "JobProgress", + Method: "POST", + PathPattern: "/rclone/job/progress", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &JobProgressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*JobProgressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*JobProgressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +JobStop stops async job + +Stops job with provided ID +*/ +func (a *Client) JobStop(params *JobStopParams) (*JobStopOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewJobStopParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "JobStop", + Method: "POST", + PathPattern: "/rclone/job/stop", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &JobStopReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*JobStopOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*JobStopDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +NodeInfo gets information about scylla node + +Get information about Scylla node +*/ +func (a *Client) NodeInfo(params *NodeInfoParams) (*NodeInfoOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewNodeInfoParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "NodeInfo", + Method: "GET", + PathPattern: "/node_info", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &NodeInfoReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*NodeInfoOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*NodeInfoDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsAbout abouts remote + +Get usage information from the remote +*/ +func (a *Client) OperationsAbout(params *OperationsAboutParams) (*OperationsAboutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsAboutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsAbout", + Method: "POST", + PathPattern: "/rclone/operations/about", + ProducesMediaTypes: 
[]string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &OperationsAboutReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsAboutOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*OperationsAboutDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsCheckPermissions checks fs + +Check if the fs is fully accessible +*/ +func (a *Client) OperationsCheckPermissions(params *OperationsCheckPermissionsParams) (*OperationsCheckPermissionsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsCheckPermissionsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsCheckPermissions", + Method: "POST", + PathPattern: "/rclone/operations/check-permissions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &OperationsCheckPermissionsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsCheckPermissionsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*OperationsCheckPermissionsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsCopyfile copies a file + +Copy a file from source remote to destination remote +*/ +func (a *Client) OperationsCopyfile(params *OperationsCopyfileParams) (*OperationsCopyfileOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsCopyfileParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsCopyfile", + Method: "POST", + PathPattern: "/rclone/operations/copyfile", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &OperationsCopyfileReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsCopyfileOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*OperationsCopyfileDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsDeletefile deletes file + +Remove the single file pointed to +*/ +func (a *Client) OperationsDeletefile(params *OperationsDeletefileParams) (*OperationsDeletefileOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsDeletefileParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsDeletefile", + Method: "POST", + PathPattern: "/rclone/operations/deletefile", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: 
&OperationsDeletefileReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsDeletefileOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*OperationsDeletefileDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsFileInfo objects info + +Get basic file information +*/ +func (a *Client) OperationsFileInfo(params *OperationsFileInfoParams) (*OperationsFileInfoOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsFileInfoParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsFileInfo", + Method: "POST", + PathPattern: "/rclone/operations/fileinfo", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &OperationsFileInfoReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsFileInfoOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*OperationsFileInfoDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsList lists remote + +List the given remote and path +*/ +func (a *Client) OperationsList(params *OperationsListParams) (*OperationsListOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsListParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsList", + Method: "POST", + PathPattern: "/rclone/operations/list", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &OperationsListReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsListOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*OperationsListDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsMovefile moves a file + +Move a file from source remote to destination remote +*/ +func (a *Client) OperationsMovefile(params *OperationsMovefileParams) (*OperationsMovefileOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsMovefileParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsMovefile", + Method: "POST", + PathPattern: "/rclone/operations/movefile", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &OperationsMovefileReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsMovefileOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess 
:= result.(*OperationsMovefileDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +OperationsPurge purges container + +Remove a directory or container and all of its contents +*/ +func (a *Client) OperationsPurge(params *OperationsPurgeParams) (*OperationsPurgeOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewOperationsPurgeParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "OperationsPurge", + Method: "POST", + PathPattern: "/rclone/operations/purge", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &OperationsPurgeReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*OperationsPurgeOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*OperationsPurgeDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +Reload reloads agent config + +Reload agent config +*/ +func (a *Client) Reload(params *ReloadParams) (*ReloadOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewReloadParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "Reload", + Method: "POST", + PathPattern: "/terminate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ReloadReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ReloadOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ReloadDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SyncCopyDir copies dir contents to directory + +Copy contents from path on source fs to path on destination fs +*/ +func (a *Client) SyncCopyDir(params *SyncCopyDirParams) (*SyncCopyDirOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSyncCopyDirParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "SyncCopyDir", + Method: "POST", + PathPattern: "/rclone/sync/copydir", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SyncCopyDirReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*SyncCopyDirOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*SyncCopyDirDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SyncCopyPaths copies paths from fsrc remote src paths to fdst remote dst paths + +Copy provided list of paths from directory on source fs to directory on destination fs +*/ +func (a *Client) 
SyncCopyPaths(params *SyncCopyPathsParams) (*SyncCopyPathsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSyncCopyPathsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "SyncCopyPaths", + Method: "POST", + PathPattern: "/rclone/sync/copypaths", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SyncCopyPathsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*SyncCopyPathsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*SyncCopyPathsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SyncMoveDir moves dir contents to directory + +Move contents from path on source fs to path on destination fs +*/ +func (a *Client) SyncMoveDir(params *SyncMoveDirParams) (*SyncMoveDirOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSyncMoveDirParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "SyncMoveDir", + Method: "POST", + PathPattern: "/rclone/sync/movedir", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SyncMoveDirReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*SyncMoveDirOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*SyncMoveDirDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_copyfile_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_copyfile_parameters.go new file mode 100644 index 00000000000..0f931785d9b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_copyfile_parameters.go @@ -0,0 +1,164 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsCopyfileParams creates a new OperationsCopyfileParams object +// with the default values initialized. 
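
Body parameters ride along via SetBodyParam, so a call with a payload is just params construction plus one method on the interface. A sketch for OperationsAbout; models.RemotePath's fields are defined in the models package rather than in this diff, so the value is taken as an argument here:

package example

import (
	"github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models"
)

// remoteUsage fetches file-system details for a remote path.
func remoteUsage(svc operations.ClientService, path *models.RemotePath) (*models.FileSystemDetails, error) {
	res, err := svc.OperationsAbout(operations.NewOperationsAboutParams().WithRemotePath(path))
	if err != nil {
		return nil, err
	}
	return res.GetPayload(), nil
}
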
+func NewOperationsCopyfileParams() *OperationsCopyfileParams { + var () + return &OperationsCopyfileParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsCopyfileParamsWithTimeout creates a new OperationsCopyfileParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsCopyfileParamsWithTimeout(timeout time.Duration) *OperationsCopyfileParams { + var () + return &OperationsCopyfileParams{ + + timeout: timeout, + } +} + +// NewOperationsCopyfileParamsWithContext creates a new OperationsCopyfileParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsCopyfileParamsWithContext(ctx context.Context) *OperationsCopyfileParams { + var () + return &OperationsCopyfileParams{ + + Context: ctx, + } +} + +// NewOperationsCopyfileParamsWithHTTPClient creates a new OperationsCopyfileParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsCopyfileParamsWithHTTPClient(client *http.Client) *OperationsCopyfileParams { + var () + return &OperationsCopyfileParams{ + HTTPClient: client, + } +} + +/* +OperationsCopyfileParams contains all the parameters to send to the API endpoint +for the operations copyfile operation typically these are written to a http.Request +*/ +type OperationsCopyfileParams struct { + + /*Options + Options + + */ + Options *models.MoveOrCopyFileOptions + /*Group + Place this operation under this stat group + + */ + Group string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations copyfile params +func (o *OperationsCopyfileParams) WithTimeout(timeout time.Duration) *OperationsCopyfileParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations copyfile params +func (o *OperationsCopyfileParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations copyfile params +func (o *OperationsCopyfileParams) WithContext(ctx context.Context) *OperationsCopyfileParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations copyfile params +func (o *OperationsCopyfileParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations copyfile params +func (o *OperationsCopyfileParams) WithHTTPClient(client *http.Client) *OperationsCopyfileParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations copyfile params +func (o *OperationsCopyfileParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOptions adds the options to the operations copyfile params +func (o *OperationsCopyfileParams) WithOptions(options *models.MoveOrCopyFileOptions) *OperationsCopyfileParams { + o.SetOptions(options) + return o +} + +// SetOptions adds the options to the operations copyfile params +func (o *OperationsCopyfileParams) SetOptions(options *models.MoveOrCopyFileOptions) { + o.Options = options +} + +// WithGroup adds the group to the operations copyfile params +func (o *OperationsCopyfileParams) WithGroup(group string) *OperationsCopyfileParams { + o.SetGroup(group) + return o +} + +// SetGroup adds the group to the operations copyfile params +func (o *OperationsCopyfileParams) SetGroup(group string) { + o.Group = group +} + +// WriteToRequest writes these params to a 
swagger request +func (o *OperationsCopyfileParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Options != nil { + if err := r.SetBodyParam(o.Options); err != nil { + return err + } + } + + // query param _group + qrGroup := o.Group + qGroup := qrGroup + if qGroup != "" { + if err := r.SetQueryParam("_group", qGroup); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_copyfile_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_copyfile_responses.go new file mode 100644 index 00000000000..188278a84d3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_copyfile_responses.go @@ -0,0 +1,123 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsCopyfileReader is a Reader for the OperationsCopyfile structure. +type OperationsCopyfileReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsCopyfileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsCopyfileOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsCopyfileDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsCopyfileOK creates a OperationsCopyfileOK with default headers values +func NewOperationsCopyfileOK() *OperationsCopyfileOK { + return &OperationsCopyfileOK{} +} + +/* +OperationsCopyfileOK handles this case with default header values. + +Empty object +*/ +type OperationsCopyfileOK struct { + JobID int64 +} + +func (o *OperationsCopyfileOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsCopyfileDefault creates a OperationsCopyfileDefault with default headers values +func NewOperationsCopyfileDefault(code int) *OperationsCopyfileDefault { + return &OperationsCopyfileDefault{ + _statusCode: code, + } +} + +/* +OperationsCopyfileDefault handles this case with default header values. 
+ +Server error +*/ +type OperationsCopyfileDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations copyfile default response +func (o *OperationsCopyfileDefault) Code() int { + return o._statusCode +} + +func (o *OperationsCopyfileDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsCopyfileDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsCopyfileDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_deletefile_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_deletefile_parameters.go new file mode 100644 index 00000000000..587e30092df --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_deletefile_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsDeletefileParams creates a new OperationsDeletefileParams object +// with the default values initialized. 
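
The copyfile operation layers a query parameter on top of the body: Group is serialized as _group, and WriteToRequest omits it entirely when empty. A sketch with an illustrative group name:

package example

import (
	"github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models"
)

// copyFile starts a copy under a stats group. OperationsCopyfileOK carries no
// body; its JobID is parsed from the x-rclone-jobid response header.
func copyFile(svc operations.ClientService, opts *models.MoveOrCopyFileOptions) (int64, error) {
	params := operations.NewOperationsCopyfileParams().
		WithOptions(opts).
		WithGroup("example-group") // illustrative; empty would omit _group
	res, err := svc.OperationsCopyfile(params)
	if err != nil {
		return 0, err
	}
	return res.JobID, nil
}
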
+func NewOperationsDeletefileParams() *OperationsDeletefileParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsDeletefileParams{ + Async: asyncDefault, + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsDeletefileParamsWithTimeout creates a new OperationsDeletefileParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsDeletefileParamsWithTimeout(timeout time.Duration) *OperationsDeletefileParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsDeletefileParams{ + Async: asyncDefault, + + timeout: timeout, + } +} + +// NewOperationsDeletefileParamsWithContext creates a new OperationsDeletefileParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsDeletefileParamsWithContext(ctx context.Context) *OperationsDeletefileParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsDeletefileParams{ + Async: asyncDefault, + + Context: ctx, + } +} + +// NewOperationsDeletefileParamsWithHTTPClient creates a new OperationsDeletefileParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsDeletefileParamsWithHTTPClient(client *http.Client) *OperationsDeletefileParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsDeletefileParams{ + Async: asyncDefault, + HTTPClient: client, + } +} + +/* +OperationsDeletefileParams contains all the parameters to send to the API endpoint +for the operations deletefile operation typically these are written to a http.Request +*/ +type OperationsDeletefileParams struct { + + /*RemotePath + Remote path + + */ + RemotePath *models.RemotePath + /*Async + Async request + + */ + Async bool + /*Group + Place this operation under this stat group + + */ + Group string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations deletefile params +func (o *OperationsDeletefileParams) WithTimeout(timeout time.Duration) *OperationsDeletefileParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations deletefile params +func (o *OperationsDeletefileParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations deletefile params +func (o *OperationsDeletefileParams) WithContext(ctx context.Context) *OperationsDeletefileParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations deletefile params +func (o *OperationsDeletefileParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations deletefile params +func (o *OperationsDeletefileParams) WithHTTPClient(client *http.Client) *OperationsDeletefileParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations deletefile params +func (o *OperationsDeletefileParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRemotePath adds the remotePath to the operations deletefile params +func (o *OperationsDeletefileParams) WithRemotePath(remotePath *models.RemotePath) *OperationsDeletefileParams { + o.SetRemotePath(remotePath) + return o +} + +// SetRemotePath adds the remotePath to the operations deletefile params +func (o *OperationsDeletefileParams) SetRemotePath(remotePath *models.RemotePath) { + o.RemotePath = remotePath +} + +// WithAsync adds the 
async to the operations deletefile params +func (o *OperationsDeletefileParams) WithAsync(async bool) *OperationsDeletefileParams { + o.SetAsync(async) + return o +} + +// SetAsync adds the async to the operations deletefile params +func (o *OperationsDeletefileParams) SetAsync(async bool) { + o.Async = async +} + +// WithGroup adds the group to the operations deletefile params +func (o *OperationsDeletefileParams) WithGroup(group string) *OperationsDeletefileParams { + o.SetGroup(group) + return o +} + +// SetGroup adds the group to the operations deletefile params +func (o *OperationsDeletefileParams) SetGroup(group string) { + o.Group = group +} + +// WriteToRequest writes these params to a swagger request +func (o *OperationsDeletefileParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RemotePath != nil { + if err := r.SetBodyParam(o.RemotePath); err != nil { + return err + } + } + + // query param _async + qrAsync := o.Async + qAsync := swag.FormatBool(qrAsync) + if qAsync != "" { + if err := r.SetQueryParam("_async", qAsync); err != nil { + return err + } + } + + // query param _group + qrGroup := o.Group + qGroup := qrGroup + if qGroup != "" { + if err := r.SetQueryParam("_group", qGroup); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_deletefile_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_deletefile_responses.go new file mode 100644 index 00000000000..707fd78c65d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_deletefile_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsDeletefileReader is a Reader for the OperationsDeletefile structure. +type OperationsDeletefileReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsDeletefileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsDeletefileOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsDeletefileDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsDeletefileOK creates a OperationsDeletefileOK with default headers values +func NewOperationsDeletefileOK() *OperationsDeletefileOK { + return &OperationsDeletefileOK{} +} + +/* +OperationsDeletefileOK handles this case with default header values. 
+ +Job ID +*/ +type OperationsDeletefileOK struct { + Payload *models.Jobid + JobID int64 +} + +func (o *OperationsDeletefileOK) GetPayload() *models.Jobid { + return o.Payload +} + +func (o *OperationsDeletefileOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Jobid) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsDeletefileDefault creates a OperationsDeletefileDefault with default headers values +func NewOperationsDeletefileDefault(code int) *OperationsDeletefileDefault { + return &OperationsDeletefileDefault{ + _statusCode: code, + } +} + +/* +OperationsDeletefileDefault handles this case with default header values. + +Server error +*/ +type OperationsDeletefileDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations deletefile default response +func (o *OperationsDeletefileDefault) Code() int { + return o._statusCode +} + +func (o *OperationsDeletefileDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsDeletefileDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsDeletefileDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_file_info_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_file_info_parameters.go new file mode 100644 index 00000000000..d77627e800f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_file_info_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsFileInfoParams creates a new OperationsFileInfoParams object +// with the default values initialized. 
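// Illustrative usage sketch for the deletefile operation above, not part of
// the generated patch. The concrete generated client lives outside this hunk,
// so the sketch declares the one method it assumes that client provides;
// "example" is a hypothetical stat group. Assumed imports: time plus the
// generated operations and models packages.
type deletefileClient interface {
	OperationsDeletefile(*operations.OperationsDeletefileParams) (*operations.OperationsDeletefileOK, error)
}

func deleteRemoteFile(c deletefileClient, path *models.RemotePath) (int64, error) {
	// Async defaults to true, so the agent replies immediately with a job ID
	// instead of blocking until the delete finishes.
	params := operations.NewOperationsDeletefileParams().
		WithTimeout(30 * time.Second).
		WithRemotePath(path).
		WithGroup("example")
	ok, err := c.OperationsDeletefile(params)
	if err != nil {
		return 0, err // non-2xx responses surface as *OperationsDeletefileDefault
	}
	// JobID was parsed from the x-rclone-jobid header in readResponse.
	return ok.JobID, nil
}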
+func NewOperationsFileInfoParams() *OperationsFileInfoParams { + var () + return &OperationsFileInfoParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsFileInfoParamsWithTimeout creates a new OperationsFileInfoParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsFileInfoParamsWithTimeout(timeout time.Duration) *OperationsFileInfoParams { + var () + return &OperationsFileInfoParams{ + + timeout: timeout, + } +} + +// NewOperationsFileInfoParamsWithContext creates a new OperationsFileInfoParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsFileInfoParamsWithContext(ctx context.Context) *OperationsFileInfoParams { + var () + return &OperationsFileInfoParams{ + + Context: ctx, + } +} + +// NewOperationsFileInfoParamsWithHTTPClient creates a new OperationsFileInfoParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsFileInfoParamsWithHTTPClient(client *http.Client) *OperationsFileInfoParams { + var () + return &OperationsFileInfoParams{ + HTTPClient: client, + } +} + +/* +OperationsFileInfoParams contains all the parameters to send to the API endpoint +for the operations file info operation typically these are written to a http.Request +*/ +type OperationsFileInfoParams struct { + + /*RemotePath + Remote path + + */ + RemotePath *models.RemotePath + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations file info params +func (o *OperationsFileInfoParams) WithTimeout(timeout time.Duration) *OperationsFileInfoParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations file info params +func (o *OperationsFileInfoParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations file info params +func (o *OperationsFileInfoParams) WithContext(ctx context.Context) *OperationsFileInfoParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations file info params +func (o *OperationsFileInfoParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations file info params +func (o *OperationsFileInfoParams) WithHTTPClient(client *http.Client) *OperationsFileInfoParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations file info params +func (o *OperationsFileInfoParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRemotePath adds the remotePath to the operations file info params +func (o *OperationsFileInfoParams) WithRemotePath(remotePath *models.RemotePath) *OperationsFileInfoParams { + o.SetRemotePath(remotePath) + return o +} + +// SetRemotePath adds the remotePath to the operations file info params +func (o *OperationsFileInfoParams) SetRemotePath(remotePath *models.RemotePath) { + o.RemotePath = remotePath +} + +// WriteToRequest writes these params to a swagger request +func (o *OperationsFileInfoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RemotePath != nil { + if err := r.SetBodyParam(o.RemotePath); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_file_info_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_file_info_responses.go new file mode 100644 index 00000000000..ae52cf8a70a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_file_info_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsFileInfoReader is a Reader for the OperationsFileInfo structure. +type OperationsFileInfoReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsFileInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsFileInfoOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsFileInfoDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsFileInfoOK creates a OperationsFileInfoOK with default headers values +func NewOperationsFileInfoOK() *OperationsFileInfoOK { + return &OperationsFileInfoOK{} +} + +/* +OperationsFileInfoOK handles this case with default header values. + +File information +*/ +type OperationsFileInfoOK struct { + Payload *models.FileInfo + JobID int64 +} + +func (o *OperationsFileInfoOK) GetPayload() *models.FileInfo { + return o.Payload +} + +func (o *OperationsFileInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.FileInfo) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsFileInfoDefault creates a OperationsFileInfoDefault with default headers values +func NewOperationsFileInfoDefault(code int) *OperationsFileInfoDefault { + return &OperationsFileInfoDefault{ + _statusCode: code, + } +} + +/* +OperationsFileInfoDefault handles this case with default header values. 
+ +Server error +*/ +type OperationsFileInfoDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations file info default response +func (o *OperationsFileInfoDefault) Code() int { + return o._statusCode +} + +func (o *OperationsFileInfoDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsFileInfoDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsFileInfoDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_list_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_list_parameters.go new file mode 100644 index 00000000000..cd77d393770 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_list_parameters.go @@ -0,0 +1,139 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsListParams creates a new OperationsListParams object +// with the default values initialized. 
+func NewOperationsListParams() *OperationsListParams { + var () + return &OperationsListParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsListParamsWithTimeout creates a new OperationsListParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsListParamsWithTimeout(timeout time.Duration) *OperationsListParams { + var () + return &OperationsListParams{ + + timeout: timeout, + } +} + +// NewOperationsListParamsWithContext creates a new OperationsListParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsListParamsWithContext(ctx context.Context) *OperationsListParams { + var () + return &OperationsListParams{ + + Context: ctx, + } +} + +// NewOperationsListParamsWithHTTPClient creates a new OperationsListParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsListParamsWithHTTPClient(client *http.Client) *OperationsListParams { + var () + return &OperationsListParams{ + HTTPClient: client, + } +} + +/* +OperationsListParams contains all the parameters to send to the API endpoint +for the operations list operation typically these are written to a http.Request +*/ +type OperationsListParams struct { + + /*ListOpts + listOpts + + */ + ListOpts *models.ListOptions + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations list params +func (o *OperationsListParams) WithTimeout(timeout time.Duration) *OperationsListParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations list params +func (o *OperationsListParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations list params +func (o *OperationsListParams) WithContext(ctx context.Context) *OperationsListParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations list params +func (o *OperationsListParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations list params +func (o *OperationsListParams) WithHTTPClient(client *http.Client) *OperationsListParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations list params +func (o *OperationsListParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithListOpts adds the listOpts to the operations list params +func (o *OperationsListParams) WithListOpts(listOpts *models.ListOptions) *OperationsListParams { + o.SetListOpts(listOpts) + return o +} + +// SetListOpts adds the listOpts to the operations list params +func (o *OperationsListParams) SetListOpts(listOpts *models.ListOptions) { + o.ListOpts = listOpts +} + +// WriteToRequest writes these params to a swagger request +func (o *OperationsListParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.ListOpts != nil { + if err := r.SetBodyParam(o.ListOpts); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_list_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_list_responses.go new file mode 100644 index 00000000000..5ed3ca3a55e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_list_responses.go @@ -0,0 +1,204 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsListReader is a Reader for the OperationsList structure. +type OperationsListReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsListReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsListOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsListDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsListOK creates a OperationsListOK with default headers values +func NewOperationsListOK() *OperationsListOK { + return &OperationsListOK{} +} + +/* +OperationsListOK handles this case with default header values. + +List of items +*/ +type OperationsListOK struct { + Payload *OperationsListOKBody + JobID int64 +} + +func (o *OperationsListOK) GetPayload() *OperationsListOKBody { + return o.Payload +} + +func (o *OperationsListOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(OperationsListOKBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsListDefault creates a OperationsListDefault with default headers values +func NewOperationsListDefault(code int) *OperationsListDefault { + return &OperationsListDefault{ + _statusCode: code, + } +} + +/* +OperationsListDefault handles this case with default header values. 
+ +Server error +*/ +type OperationsListDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations list default response +func (o *OperationsListDefault) Code() int { + return o._statusCode +} + +func (o *OperationsListDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsListDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsListDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} + +/* +OperationsListOKBody operations list o k body +swagger:model OperationsListOKBody +*/ +type OperationsListOKBody struct { + + // list + List []*models.ListItem `json:"list"` +} + +// Validate validates this operations list o k body +func (o *OperationsListOKBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateList(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (o *OperationsListOKBody) validateList(formats strfmt.Registry) error { + + if swag.IsZero(o.List) { // not required + return nil + } + + for i := 0; i < len(o.List); i++ { + if swag.IsZero(o.List[i]) { // not required + continue + } + + if o.List[i] != nil { + if err := o.List[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("operationsListOK" + "." + "list" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (o *OperationsListOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *OperationsListOKBody) UnmarshalBinary(b []byte) error { + var res OperationsListOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_movefile_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_movefile_parameters.go new file mode 100644 index 00000000000..8c412feb285 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_movefile_parameters.go @@ -0,0 +1,164 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsMovefileParams creates a new OperationsMovefileParams object +// with the default values initialized. 
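// Illustrative sketch, not part of the generated patch: OperationsListOKBody
// is a regular swagger model, so a payload captured elsewhere (for example
// from a recorded response) can be decoded and validated directly. Assumed
// imports: fmt and github.com/go-openapi/strfmt.
func decodeListBody(raw []byte) (*operations.OperationsListOKBody, error) {
	var body operations.OperationsListOKBody
	if err := body.UnmarshalBinary(raw); err != nil {
		return nil, err
	}
	// Validate walks body.List and names the offending index, for example
	// "operationsListOK.list.3", via errors.CompositeValidationError.
	if err := body.Validate(strfmt.Default); err != nil {
		return nil, fmt.Errorf("invalid list body: %w", err)
	}
	return &body, nil
}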
+func NewOperationsMovefileParams() *OperationsMovefileParams { + var () + return &OperationsMovefileParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsMovefileParamsWithTimeout creates a new OperationsMovefileParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsMovefileParamsWithTimeout(timeout time.Duration) *OperationsMovefileParams { + var () + return &OperationsMovefileParams{ + + timeout: timeout, + } +} + +// NewOperationsMovefileParamsWithContext creates a new OperationsMovefileParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsMovefileParamsWithContext(ctx context.Context) *OperationsMovefileParams { + var () + return &OperationsMovefileParams{ + + Context: ctx, + } +} + +// NewOperationsMovefileParamsWithHTTPClient creates a new OperationsMovefileParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsMovefileParamsWithHTTPClient(client *http.Client) *OperationsMovefileParams { + var () + return &OperationsMovefileParams{ + HTTPClient: client, + } +} + +/* +OperationsMovefileParams contains all the parameters to send to the API endpoint +for the operations movefile operation typically these are written to a http.Request +*/ +type OperationsMovefileParams struct { + + /*Options + Options + + */ + Options *models.MoveOrCopyFileOptions + /*Group + Place this operation under this stat group + + */ + Group string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations movefile params +func (o *OperationsMovefileParams) WithTimeout(timeout time.Duration) *OperationsMovefileParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations movefile params +func (o *OperationsMovefileParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations movefile params +func (o *OperationsMovefileParams) WithContext(ctx context.Context) *OperationsMovefileParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations movefile params +func (o *OperationsMovefileParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations movefile params +func (o *OperationsMovefileParams) WithHTTPClient(client *http.Client) *OperationsMovefileParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations movefile params +func (o *OperationsMovefileParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOptions adds the options to the operations movefile params +func (o *OperationsMovefileParams) WithOptions(options *models.MoveOrCopyFileOptions) *OperationsMovefileParams { + o.SetOptions(options) + return o +} + +// SetOptions adds the options to the operations movefile params +func (o *OperationsMovefileParams) SetOptions(options *models.MoveOrCopyFileOptions) { + o.Options = options +} + +// WithGroup adds the group to the operations movefile params +func (o *OperationsMovefileParams) WithGroup(group string) *OperationsMovefileParams { + o.SetGroup(group) + return o +} + +// SetGroup adds the group to the operations movefile params +func (o *OperationsMovefileParams) SetGroup(group string) { + o.Group = group +} + +// WriteToRequest writes these params to a 
swagger request +func (o *OperationsMovefileParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Options != nil { + if err := r.SetBodyParam(o.Options); err != nil { + return err + } + } + + // query param _group + qrGroup := o.Group + qGroup := qrGroup + if qGroup != "" { + if err := r.SetQueryParam("_group", qGroup); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_movefile_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_movefile_responses.go new file mode 100644 index 00000000000..1ad635d7d36 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_movefile_responses.go @@ -0,0 +1,123 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsMovefileReader is a Reader for the OperationsMovefile structure. +type OperationsMovefileReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsMovefileReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsMovefileOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsMovefileDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsMovefileOK creates a OperationsMovefileOK with default headers values +func NewOperationsMovefileOK() *OperationsMovefileOK { + return &OperationsMovefileOK{} +} + +/* +OperationsMovefileOK handles this case with default header values. + +Empty object +*/ +type OperationsMovefileOK struct { + JobID int64 +} + +func (o *OperationsMovefileOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsMovefileDefault creates a OperationsMovefileDefault with default headers values +func NewOperationsMovefileDefault(code int) *OperationsMovefileDefault { + return &OperationsMovefileDefault{ + _statusCode: code, + } +} + +/* +OperationsMovefileDefault handles this case with default header values. 
+ +Server error +*/ +type OperationsMovefileDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations movefile default response +func (o *OperationsMovefileDefault) Code() int { + return o._statusCode +} + +func (o *OperationsMovefileDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsMovefileDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsMovefileDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_purge_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_purge_parameters.go new file mode 100644 index 00000000000..7de11ad494f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_purge_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewOperationsPurgeParams creates a new OperationsPurgeParams object +// with the default values initialized. 
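// Illustrative sketch for the movefile operation above, not part of the
// generated patch. Unlike deletefile and purge, these params carry no _async
// flag, and a 200 response decodes no payload, so the x-rclone-jobid header
// captured in OperationsMovefileOK.JobID is the only result. The interface is
// this sketch's assumption about the generated client wrapper (outside this
// hunk).
type movefileClient interface {
	OperationsMovefile(*operations.OperationsMovefileParams) (*operations.OperationsMovefileOK, error)
}

func moveFile(c movefileClient, opts *models.MoveOrCopyFileOptions) (int64, error) {
	params := operations.NewOperationsMovefileParams().
		WithOptions(opts).
		WithGroup("example") // hypothetical stat group
	ok, err := c.OperationsMovefile(params)
	if err != nil {
		return 0, err
	}
	return ok.JobID, nil
}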
+func NewOperationsPurgeParams() *OperationsPurgeParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsPurgeParams{ + Async: asyncDefault, + + timeout: cr.DefaultTimeout, + } +} + +// NewOperationsPurgeParamsWithTimeout creates a new OperationsPurgeParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewOperationsPurgeParamsWithTimeout(timeout time.Duration) *OperationsPurgeParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsPurgeParams{ + Async: asyncDefault, + + timeout: timeout, + } +} + +// NewOperationsPurgeParamsWithContext creates a new OperationsPurgeParams object +// with the default values initialized, and the ability to set a context for a request +func NewOperationsPurgeParamsWithContext(ctx context.Context) *OperationsPurgeParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsPurgeParams{ + Async: asyncDefault, + + Context: ctx, + } +} + +// NewOperationsPurgeParamsWithHTTPClient creates a new OperationsPurgeParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewOperationsPurgeParamsWithHTTPClient(client *http.Client) *OperationsPurgeParams { + var ( + asyncDefault = bool(true) + ) + return &OperationsPurgeParams{ + Async: asyncDefault, + HTTPClient: client, + } +} + +/* +OperationsPurgeParams contains all the parameters to send to the API endpoint +for the operations purge operation typically these are written to a http.Request +*/ +type OperationsPurgeParams struct { + + /*RemotePath + Remote path + + */ + RemotePath *models.RemotePath + /*Async + Async request + + */ + Async bool + /*Group + Place this operation under this stat group + + */ + Group string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the operations purge params +func (o *OperationsPurgeParams) WithTimeout(timeout time.Duration) *OperationsPurgeParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the operations purge params +func (o *OperationsPurgeParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the operations purge params +func (o *OperationsPurgeParams) WithContext(ctx context.Context) *OperationsPurgeParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the operations purge params +func (o *OperationsPurgeParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the operations purge params +func (o *OperationsPurgeParams) WithHTTPClient(client *http.Client) *OperationsPurgeParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the operations purge params +func (o *OperationsPurgeParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRemotePath adds the remotePath to the operations purge params +func (o *OperationsPurgeParams) WithRemotePath(remotePath *models.RemotePath) *OperationsPurgeParams { + o.SetRemotePath(remotePath) + return o +} + +// SetRemotePath adds the remotePath to the operations purge params +func (o *OperationsPurgeParams) SetRemotePath(remotePath *models.RemotePath) { + o.RemotePath = remotePath +} + +// WithAsync adds the async to the operations purge params +func (o *OperationsPurgeParams) WithAsync(async bool) *OperationsPurgeParams { + o.SetAsync(async) + return o +} + +// SetAsync adds the async to the operations purge 
params +func (o *OperationsPurgeParams) SetAsync(async bool) { + o.Async = async +} + +// WithGroup adds the group to the operations purge params +func (o *OperationsPurgeParams) WithGroup(group string) *OperationsPurgeParams { + o.SetGroup(group) + return o +} + +// SetGroup adds the group to the operations purge params +func (o *OperationsPurgeParams) SetGroup(group string) { + o.Group = group +} + +// WriteToRequest writes these params to a swagger request +func (o *OperationsPurgeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.RemotePath != nil { + if err := r.SetBodyParam(o.RemotePath); err != nil { + return err + } + } + + // query param _async + qrAsync := o.Async + qAsync := swag.FormatBool(qrAsync) + if qAsync != "" { + if err := r.SetQueryParam("_async", qAsync); err != nil { + return err + } + } + + // query param _group + qrGroup := o.Group + qGroup := qrGroup + if qGroup != "" { + if err := r.SetQueryParam("_group", qGroup); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_purge_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_purge_responses.go new file mode 100644 index 00000000000..3ff317064d7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/operations_purge_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// OperationsPurgeReader is a Reader for the OperationsPurge structure. +type OperationsPurgeReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *OperationsPurgeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewOperationsPurgeOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewOperationsPurgeDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewOperationsPurgeOK creates a OperationsPurgeOK with default headers values +func NewOperationsPurgeOK() *OperationsPurgeOK { + return &OperationsPurgeOK{} +} + +/* +OperationsPurgeOK handles this case with default header values. 
+ +Job ID +*/ +type OperationsPurgeOK struct { + Payload *models.Jobid + JobID int64 +} + +func (o *OperationsPurgeOK) GetPayload() *models.Jobid { + return o.Payload +} + +func (o *OperationsPurgeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Jobid) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewOperationsPurgeDefault creates a OperationsPurgeDefault with default headers values +func NewOperationsPurgeDefault(code int) *OperationsPurgeDefault { + return &OperationsPurgeDefault{ + _statusCode: code, + } +} + +/* +OperationsPurgeDefault handles this case with default header values. + +Server error +*/ +type OperationsPurgeDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the operations purge default response +func (o *OperationsPurgeDefault) Code() int { + return o._statusCode +} + +func (o *OperationsPurgeDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *OperationsPurgeDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *OperationsPurgeDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/reload_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/reload_parameters.go new file mode 100644 index 00000000000..90f806b23d3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/reload_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewReloadParams creates a new ReloadParams object +// with the default values initialized. 
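// Illustrative sketch of a blocking purge, not part of the generated patch.
// Note in WriteToRequest above that swag.FormatBool never yields an empty
// string, so the _async query parameter is always sent: WithAsync(false)
// transmits _async=false rather than omitting the flag. The interface stands
// in for the generated client wrapper (outside this hunk); assumed import:
// time.
type purgeClient interface {
	OperationsPurge(*operations.OperationsPurgeParams) (*operations.OperationsPurgeOK, error)
}

func purgeBlocking(c purgeClient, path *models.RemotePath) error {
	params := operations.NewOperationsPurgeParams().
		WithTimeout(5 * time.Minute). // blocking call, so allow more than cr.DefaultTimeout
		WithRemotePath(path).
		WithAsync(false)
	_, err := c.OperationsPurge(params)
	return err
}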
+func NewReloadParams() *ReloadParams { + + return &ReloadParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewReloadParamsWithTimeout creates a new ReloadParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewReloadParamsWithTimeout(timeout time.Duration) *ReloadParams { + + return &ReloadParams{ + + timeout: timeout, + } +} + +// NewReloadParamsWithContext creates a new ReloadParams object +// with the default values initialized, and the ability to set a context for a request +func NewReloadParamsWithContext(ctx context.Context) *ReloadParams { + + return &ReloadParams{ + + Context: ctx, + } +} + +// NewReloadParamsWithHTTPClient creates a new ReloadParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewReloadParamsWithHTTPClient(client *http.Client) *ReloadParams { + + return &ReloadParams{ + HTTPClient: client, + } +} + +/* +ReloadParams contains all the parameters to send to the API endpoint +for the reload operation typically these are written to a http.Request +*/ +type ReloadParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the reload params +func (o *ReloadParams) WithTimeout(timeout time.Duration) *ReloadParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the reload params +func (o *ReloadParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the reload params +func (o *ReloadParams) WithContext(ctx context.Context) *ReloadParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the reload params +func (o *ReloadParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the reload params +func (o *ReloadParams) WithHTTPClient(client *http.Client) *ReloadParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the reload params +func (o *ReloadParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ReloadParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/reload_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/reload_responses.go new file mode 100644 index 00000000000..41e3e2646a7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/reload_responses.go @@ -0,0 +1,133 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// ReloadReader is a Reader for the Reload structure. +type ReloadReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ReloadReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewReloadOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewReloadDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewReloadOK creates a ReloadOK with default headers values +func NewReloadOK() *ReloadOK { + return &ReloadOK{} +} + +/* +ReloadOK handles this case with default header values. + +Empty object +*/ +type ReloadOK struct { + Payload interface{} + JobID int64 +} + +func (o *ReloadOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ReloadOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewReloadDefault creates a ReloadDefault with default headers values +func NewReloadDefault(code int) *ReloadDefault { + return &ReloadDefault{ + _statusCode: code, + } +} + +/* +ReloadDefault handles this case with default header values. + +Server error +*/ +type ReloadDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the reload default response +func (o *ReloadDefault) Code() int { + return o._statusCode +} + +func (o *ReloadDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *ReloadDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *ReloadDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_dir_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_dir_parameters.go new file mode 100644 index 00000000000..638f12721ab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_dir_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
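// Illustrative sketch for the reload operation above, not part of the
// generated patch: reload takes no body or query parameters, so context and
// timeout are the only knobs, and a 200 decodes into an untyped Payload. The
// interface stands in for the generated client wrapper (outside this hunk);
// assumed import: context.
type reloadClient interface {
	Reload(*operations.ReloadParams) (*operations.ReloadOK, error)
}

func reloadAgent(ctx context.Context, c reloadClient) error {
	params := operations.NewReloadParamsWithContext(ctx)
	_, err := c.Reload(params)
	return err // non-2xx responses surface as *ReloadDefault
}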
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewSyncCopyDirParams creates a new SyncCopyDirParams object +// with the default values initialized. +func NewSyncCopyDirParams() *SyncCopyDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyDirParams{ + Async: asyncDefault, + + timeout: cr.DefaultTimeout, + } +} + +// NewSyncCopyDirParamsWithTimeout creates a new SyncCopyDirParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSyncCopyDirParamsWithTimeout(timeout time.Duration) *SyncCopyDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyDirParams{ + Async: asyncDefault, + + timeout: timeout, + } +} + +// NewSyncCopyDirParamsWithContext creates a new SyncCopyDirParams object +// with the default values initialized, and the ability to set a context for a request +func NewSyncCopyDirParamsWithContext(ctx context.Context) *SyncCopyDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyDirParams{ + Async: asyncDefault, + + Context: ctx, + } +} + +// NewSyncCopyDirParamsWithHTTPClient creates a new SyncCopyDirParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSyncCopyDirParamsWithHTTPClient(client *http.Client) *SyncCopyDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyDirParams{ + Async: asyncDefault, + HTTPClient: client, + } +} + +/* +SyncCopyDirParams contains all the parameters to send to the API endpoint +for the sync copy dir operation typically these are written to a http.Request +*/ +type SyncCopyDirParams struct { + + /*Options + Options + + */ + Options *models.MoveOrCopyFileOptions + /*Async + Async request + + */ + Async bool + /*Group + Place this operation under this stat group + + */ + Group string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the sync copy dir params +func (o *SyncCopyDirParams) WithTimeout(timeout time.Duration) *SyncCopyDirParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the sync copy dir params +func (o *SyncCopyDirParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the sync copy dir params +func (o *SyncCopyDirParams) WithContext(ctx context.Context) *SyncCopyDirParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the sync copy dir params +func (o *SyncCopyDirParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the sync copy dir params +func (o *SyncCopyDirParams) WithHTTPClient(client *http.Client) *SyncCopyDirParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the sync copy dir params +func (o *SyncCopyDirParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOptions adds the options to the sync copy dir params +func (o *SyncCopyDirParams) WithOptions(options *models.MoveOrCopyFileOptions) *SyncCopyDirParams { + o.SetOptions(options) + return o +} + +// SetOptions adds the options to the sync copy dir params +func (o 
*SyncCopyDirParams) SetOptions(options *models.MoveOrCopyFileOptions) { + o.Options = options +} + +// WithAsync adds the async to the sync copy dir params +func (o *SyncCopyDirParams) WithAsync(async bool) *SyncCopyDirParams { + o.SetAsync(async) + return o +} + +// SetAsync adds the async to the sync copy dir params +func (o *SyncCopyDirParams) SetAsync(async bool) { + o.Async = async +} + +// WithGroup adds the group to the sync copy dir params +func (o *SyncCopyDirParams) WithGroup(group string) *SyncCopyDirParams { + o.SetGroup(group) + return o +} + +// SetGroup adds the group to the sync copy dir params +func (o *SyncCopyDirParams) SetGroup(group string) { + o.Group = group +} + +// WriteToRequest writes these params to a swagger request +func (o *SyncCopyDirParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Options != nil { + if err := r.SetBodyParam(o.Options); err != nil { + return err + } + } + + // query param _async + qrAsync := o.Async + qAsync := swag.FormatBool(qrAsync) + if qAsync != "" { + if err := r.SetQueryParam("_async", qAsync); err != nil { + return err + } + } + + // query param _group + qrGroup := o.Group + qGroup := qrGroup + if qGroup != "" { + if err := r.SetQueryParam("_group", qGroup); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_dir_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_dir_responses.go new file mode 100644 index 00000000000..c262b000496 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_dir_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// SyncCopyDirReader is a Reader for the SyncCopyDir structure. +type SyncCopyDirReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SyncCopyDirReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSyncCopyDirOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSyncCopyDirDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSyncCopyDirOK creates a SyncCopyDirOK with default headers values +func NewSyncCopyDirOK() *SyncCopyDirOK { + return &SyncCopyDirOK{} +} + +/* +SyncCopyDirOK handles this case with default header values. 
+ +Job ID +*/ +type SyncCopyDirOK struct { + Payload *models.Jobid + JobID int64 +} + +func (o *SyncCopyDirOK) GetPayload() *models.Jobid { + return o.Payload +} + +func (o *SyncCopyDirOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Jobid) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewSyncCopyDirDefault creates a SyncCopyDirDefault with default headers values +func NewSyncCopyDirDefault(code int) *SyncCopyDirDefault { + return &SyncCopyDirDefault{ + _statusCode: code, + } +} + +/* +SyncCopyDirDefault handles this case with default header values. + +Server error +*/ +type SyncCopyDirDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the sync copy dir default response +func (o *SyncCopyDirDefault) Code() int { + return o._statusCode +} + +func (o *SyncCopyDirDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SyncCopyDirDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *SyncCopyDirDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_paths_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_paths_parameters.go new file mode 100644 index 00000000000..20ec989c2cf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_paths_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewSyncCopyPathsParams creates a new SyncCopyPathsParams object +// with the default values initialized. 
+func NewSyncCopyPathsParams() *SyncCopyPathsParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyPathsParams{ + Async: asyncDefault, + + timeout: cr.DefaultTimeout, + } +} + +// NewSyncCopyPathsParamsWithTimeout creates a new SyncCopyPathsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSyncCopyPathsParamsWithTimeout(timeout time.Duration) *SyncCopyPathsParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyPathsParams{ + Async: asyncDefault, + + timeout: timeout, + } +} + +// NewSyncCopyPathsParamsWithContext creates a new SyncCopyPathsParams object +// with the default values initialized, and the ability to set a context for a request +func NewSyncCopyPathsParamsWithContext(ctx context.Context) *SyncCopyPathsParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyPathsParams{ + Async: asyncDefault, + + Context: ctx, + } +} + +// NewSyncCopyPathsParamsWithHTTPClient creates a new SyncCopyPathsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSyncCopyPathsParamsWithHTTPClient(client *http.Client) *SyncCopyPathsParams { + var ( + asyncDefault = bool(true) + ) + return &SyncCopyPathsParams{ + Async: asyncDefault, + HTTPClient: client, + } +} + +/* +SyncCopyPathsParams contains all the parameters to send to the API endpoint +for the sync copy paths operation typically these are written to a http.Request +*/ +type SyncCopyPathsParams struct { + + /*Options + Options + + */ + Options *models.CopyPathsOptions + /*Async + Async request + + */ + Async bool + /*Group + Place this operation under this stat group + + */ + Group string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the sync copy paths params +func (o *SyncCopyPathsParams) WithTimeout(timeout time.Duration) *SyncCopyPathsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the sync copy paths params +func (o *SyncCopyPathsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the sync copy paths params +func (o *SyncCopyPathsParams) WithContext(ctx context.Context) *SyncCopyPathsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the sync copy paths params +func (o *SyncCopyPathsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the sync copy paths params +func (o *SyncCopyPathsParams) WithHTTPClient(client *http.Client) *SyncCopyPathsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the sync copy paths params +func (o *SyncCopyPathsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOptions adds the options to the sync copy paths params +func (o *SyncCopyPathsParams) WithOptions(options *models.CopyPathsOptions) *SyncCopyPathsParams { + o.SetOptions(options) + return o +} + +// SetOptions adds the options to the sync copy paths params +func (o *SyncCopyPathsParams) SetOptions(options *models.CopyPathsOptions) { + o.Options = options +} + +// WithAsync adds the async to the sync copy paths params +func (o *SyncCopyPathsParams) WithAsync(async bool) *SyncCopyPathsParams { + o.SetAsync(async) + return o +} + +// SetAsync adds the async to the sync copy paths params +func (o *SyncCopyPathsParams) SetAsync(async bool) { + o.Async = async +} + +// WithGroup adds the 
group to the sync copy paths params +func (o *SyncCopyPathsParams) WithGroup(group string) *SyncCopyPathsParams { + o.SetGroup(group) + return o +} + +// SetGroup adds the group to the sync copy paths params +func (o *SyncCopyPathsParams) SetGroup(group string) { + o.Group = group +} + +// WriteToRequest writes these params to a swagger request +func (o *SyncCopyPathsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Options != nil { + if err := r.SetBodyParam(o.Options); err != nil { + return err + } + } + + // query param _async + qrAsync := o.Async + qAsync := swag.FormatBool(qrAsync) + if qAsync != "" { + if err := r.SetQueryParam("_async", qAsync); err != nil { + return err + } + } + + // query param _group + qrGroup := o.Group + qGroup := qrGroup + if qGroup != "" { + if err := r.SetQueryParam("_group", qGroup); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_paths_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_paths_responses.go new file mode 100644 index 00000000000..529fd7b1cab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_copy_paths_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// SyncCopyPathsReader is a Reader for the SyncCopyPaths structure. +type SyncCopyPathsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SyncCopyPathsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSyncCopyPathsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSyncCopyPathsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSyncCopyPathsOK creates a SyncCopyPathsOK with default headers values +func NewSyncCopyPathsOK() *SyncCopyPathsOK { + return &SyncCopyPathsOK{} +} + +/* +SyncCopyPathsOK handles this case with default header values. 
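+
+(Editor's note: in addition to decoding the Jobid payload below, the
+readResponse methods in this file also parse the custom "x-rclone-jobid"
+response header into the JobID field.)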
+ +Job ID +*/ +type SyncCopyPathsOK struct { + Payload *models.Jobid + JobID int64 +} + +func (o *SyncCopyPathsOK) GetPayload() *models.Jobid { + return o.Payload +} + +func (o *SyncCopyPathsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Jobid) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewSyncCopyPathsDefault creates a SyncCopyPathsDefault with default headers values +func NewSyncCopyPathsDefault(code int) *SyncCopyPathsDefault { + return &SyncCopyPathsDefault{ + _statusCode: code, + } +} + +/* +SyncCopyPathsDefault handles this case with default header values. + +Server error +*/ +type SyncCopyPathsDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the sync copy paths default response +func (o *SyncCopyPathsDefault) Code() int { + return o._statusCode +} + +func (o *SyncCopyPathsDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SyncCopyPathsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *SyncCopyPathsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_move_dir_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_move_dir_parameters.go new file mode 100644 index 00000000000..72219846840 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_move_dir_parameters.go @@ -0,0 +1,202 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// NewSyncMoveDirParams creates a new SyncMoveDirParams object +// with the default values initialized. 
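+//
+// Editor's note: an illustrative sketch, not generated code. Because the
+// generated default is Async=true, a caller that wants a synchronous move
+// must opt out explicitly; ctx and the remotes are placeholders:
+//
+//	params := NewSyncMoveDirParamsWithContext(ctx).
+//		WithAsync(false).
+//		WithOptions(&models.MoveOrCopyFileOptions{
+//			SrcFs: "data:",
+//			DstFs: "s3:bucket",
+//		})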
+func NewSyncMoveDirParams() *SyncMoveDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncMoveDirParams{ + Async: asyncDefault, + + timeout: cr.DefaultTimeout, + } +} + +// NewSyncMoveDirParamsWithTimeout creates a new SyncMoveDirParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSyncMoveDirParamsWithTimeout(timeout time.Duration) *SyncMoveDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncMoveDirParams{ + Async: asyncDefault, + + timeout: timeout, + } +} + +// NewSyncMoveDirParamsWithContext creates a new SyncMoveDirParams object +// with the default values initialized, and the ability to set a context for a request +func NewSyncMoveDirParamsWithContext(ctx context.Context) *SyncMoveDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncMoveDirParams{ + Async: asyncDefault, + + Context: ctx, + } +} + +// NewSyncMoveDirParamsWithHTTPClient creates a new SyncMoveDirParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSyncMoveDirParamsWithHTTPClient(client *http.Client) *SyncMoveDirParams { + var ( + asyncDefault = bool(true) + ) + return &SyncMoveDirParams{ + Async: asyncDefault, + HTTPClient: client, + } +} + +/* +SyncMoveDirParams contains all the parameters to send to the API endpoint +for the sync move dir operation typically these are written to a http.Request +*/ +type SyncMoveDirParams struct { + + /*Options + Options + + */ + Options *models.MoveOrCopyFileOptions + /*Async + Async request + + */ + Async bool + /*Group + Place this operation under this stat group + + */ + Group string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the sync move dir params +func (o *SyncMoveDirParams) WithTimeout(timeout time.Duration) *SyncMoveDirParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the sync move dir params +func (o *SyncMoveDirParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the sync move dir params +func (o *SyncMoveDirParams) WithContext(ctx context.Context) *SyncMoveDirParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the sync move dir params +func (o *SyncMoveDirParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the sync move dir params +func (o *SyncMoveDirParams) WithHTTPClient(client *http.Client) *SyncMoveDirParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the sync move dir params +func (o *SyncMoveDirParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithOptions adds the options to the sync move dir params +func (o *SyncMoveDirParams) WithOptions(options *models.MoveOrCopyFileOptions) *SyncMoveDirParams { + o.SetOptions(options) + return o +} + +// SetOptions adds the options to the sync move dir params +func (o *SyncMoveDirParams) SetOptions(options *models.MoveOrCopyFileOptions) { + o.Options = options +} + +// WithAsync adds the async to the sync move dir params +func (o *SyncMoveDirParams) WithAsync(async bool) *SyncMoveDirParams { + o.SetAsync(async) + return o +} + +// SetAsync adds the async to the sync move dir params +func (o *SyncMoveDirParams) SetAsync(async bool) { + o.Async = async +} + +// WithGroup adds the group to the sync move dir params +func (o *SyncMoveDirParams) WithGroup(group 
string) *SyncMoveDirParams { + o.SetGroup(group) + return o +} + +// SetGroup adds the group to the sync move dir params +func (o *SyncMoveDirParams) SetGroup(group string) { + o.Group = group +} + +// WriteToRequest writes these params to a swagger request +func (o *SyncMoveDirParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Options != nil { + if err := r.SetBodyParam(o.Options); err != nil { + return err + } + } + + // query param _async + qrAsync := o.Async + qAsync := swag.FormatBool(qrAsync) + if qAsync != "" { + if err := r.SetQueryParam("_async", qAsync); err != nil { + return err + } + } + + // query param _group + qrGroup := o.Group + qGroup := qrGroup + if qGroup != "" { + if err := r.SetQueryParam("_group", qGroup); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_move_dir_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_move_dir_responses.go new file mode 100644 index 00000000000..78c89ceb615 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations/sync_move_dir_responses.go @@ -0,0 +1,135 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strconv" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models" +) + +// SyncMoveDirReader is a Reader for the SyncMoveDir structure. +type SyncMoveDirReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SyncMoveDirReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSyncMoveDirOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSyncMoveDirDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSyncMoveDirOK creates a SyncMoveDirOK with default headers values +func NewSyncMoveDirOK() *SyncMoveDirOK { + return &SyncMoveDirOK{} +} + +/* +SyncMoveDirOK handles this case with default header values. 
+ +Job ID +*/ +type SyncMoveDirOK struct { + Payload *models.Jobid + JobID int64 +} + +func (o *SyncMoveDirOK) GetPayload() *models.Jobid { + return o.Payload +} + +func (o *SyncMoveDirOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.Jobid) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +// NewSyncMoveDirDefault creates a SyncMoveDirDefault with default headers values +func NewSyncMoveDirDefault(code int) *SyncMoveDirDefault { + return &SyncMoveDirDefault{ + _statusCode: code, + } +} + +/* +SyncMoveDirDefault handles this case with default header values. + +Server error +*/ +type SyncMoveDirDefault struct { + _statusCode int + + Payload *models.ErrorResponse + JobID int64 +} + +// Code gets the status code for the sync move dir default response +func (o *SyncMoveDirDefault) Code() int { + return o._statusCode +} + +func (o *SyncMoveDirDefault) GetPayload() *models.ErrorResponse { + return o.Payload +} + +func (o *SyncMoveDirDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorResponse) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + if jobIDHeader := response.GetHeader("x-rclone-jobid"); jobIDHeader != "" { + jobID, err := strconv.ParseInt(jobIDHeader, 10, 64) + if err != nil { + return err + } + + o.JobID = jobID + } + return nil +} + +func (o *SyncMoveDirDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/bandwidth.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/bandwidth.go new file mode 100644 index 00000000000..f4c9f0f348b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/bandwidth.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Bandwidth bandwidth rate +// +// # Rate at which to rate limit bandwidth +// +// swagger:model Bandwidth +type Bandwidth struct { + + // String representation of the bandwidth rate limit (eg. 100k, 1M, ...).
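+	// Editor's note: rclone-style suffixed values, e.g. {"rate": "1M"};
+	// rclone also accepts "off" to disable the limit.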
+ Rate string `json:"rate,omitempty"` +} + +// Validate validates this bandwidth +func (m *Bandwidth) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Bandwidth) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Bandwidth) UnmarshalBinary(b []byte) error { + var res Bandwidth + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/content.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/content.go new file mode 100644 index 00000000000..dbf886bb6f8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/content.go @@ -0,0 +1,44 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Content content +// +// swagger:model Content +type Content struct { + + // File content + // Format: byte + Content strfmt.Base64 `json:"Content,omitempty"` +} + +// Validate validates this content +func (m *Content) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Content) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Content) UnmarshalBinary(b []byte) error { + var res Content + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/copy_paths_options.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/copy_paths_options.go new file mode 100644 index 00000000000..514f6cb2696 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/copy_paths_options.go @@ -0,0 +1,55 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// CopyPathsOptions copy paths options +// +// swagger:model CopyPathsOptions +type CopyPathsOptions struct { + + // Destination file system e.g. s3: or gcs: + DstFs string `json:"dstFs,omitempty"` + + // A directory within that remote eg. files/ for the destination + DstRemote string `json:"dstRemote,omitempty"` + + // Paths relative to srcRemote/dstRemote eg. file.txt for both source and destination + Paths []string `json:"paths"` + + // Source file system e.g. s3: or gcs: + SrcFs string `json:"srcFs,omitempty"` + + // A directory within that remote eg. 
files/ for the source + SrcRemote string `json:"srcRemote,omitempty"` +} + +// Validate validates this copy paths options +func (m *CopyPathsOptions) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *CopyPathsOptions) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CopyPathsOptions) UnmarshalBinary(b []byte) error { + var res CopyPathsOptions + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/error_response.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/error_response.go new file mode 100644 index 00000000000..4721a24e421 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/error_response.go @@ -0,0 +1,54 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ErrorResponse error +// +// # Details about error response +// +// swagger:model ErrorResponse +type ErrorResponse struct { + + // Map of request parameters + Input interface{} `json:"input,omitempty"` + + // Error description + Message string `json:"message,omitempty"` + + // Requested path + Path string `json:"path,omitempty"` + + // HTTP status code + Status int64 `json:"status,omitempty"` +} + +// Validate validates this error response +func (m *ErrorResponse) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ErrorResponse) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ErrorResponse) UnmarshalBinary(b []byte) error { + var res ErrorResponse + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/file_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/file_info.go new file mode 100644 index 00000000000..c21015f6da9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/file_info.go @@ -0,0 +1,71 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// FileInfo file info +// +// swagger:model FileInfo +type FileInfo struct { + + // Modification time + // Format: date-time + ModTime strfmt.DateTime `json:"modTime,omitempty"` + + // Size in bytes + Size int64 `json:"size,omitempty"` +} + +// Validate validates this file info +func (m *FileInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateModTime(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
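+			// Editor's note: go-swagger aggregates all field errors into one
+			// composite error instead of failing on the first.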
+ } + return nil +} + +func (m *FileInfo) validateModTime(formats strfmt.Registry) error { + + if swag.IsZero(m.ModTime) { // not required + return nil + } + + if err := validate.FormatOf("modTime", "body", "date-time", m.ModTime.String(), formats); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *FileInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *FileInfo) UnmarshalBinary(b []byte) error { + var res FileInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/file_system_details.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/file_system_details.go new file mode 100644 index 00000000000..64efd5c5643 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/file_system_details.go @@ -0,0 +1,49 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// FileSystemDetails file system details +// +// swagger:model FileSystemDetails +type FileSystemDetails struct { + + // Free space in bytes + Free int64 `json:"free,omitempty"` + + // Total space in bytes + Total int64 `json:"total,omitempty"` + + // Used space in bytes + Used int64 `json:"used,omitempty"` +} + +// Validate validates this file system details +func (m *FileSystemDetails) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *FileSystemDetails) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *FileSystemDetails) UnmarshalBinary(b []byte) error { + var res FileSystemDetails + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/group_list.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/group_list.go new file mode 100644 index 00000000000..e7a63b9072e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/group_list.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// GroupList group list +// +// swagger:model GroupList +type GroupList struct { + + // groups + Groups []string `json:"groups"` +} + +// Validate validates this group list +func (m *GroupList) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *GroupList) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *GroupList) UnmarshalBinary(b []byte) error { + var res GroupList + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job.go new file mode 100644 index 00000000000..3a995559b0a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job.go @@ -0,0 +1,109 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Job job +// +// # Status information about the job +// +// swagger:model Job +type Job struct { + + // Time in seconds that the job ran for + Duration float64 `json:"duration,omitempty"` + + // Time the job finished (eg 2018-10-26T18:50:20.528746884+01:00) + // Format: date-time + EndTime strfmt.DateTime `json:"endTime,omitempty"` + + // Error from the job or empty string for no error + Error string `json:"error,omitempty"` + + // Job has finished execution + Finished bool `json:"finished,omitempty"` + + // ID of the job + ID int64 `json:"id,omitempty"` + + // Output of the job as would have been returned if called synchronously + Output interface{} `json:"output,omitempty"` + + // Time the job started (eg 2018-10-26T18:50:20.528746884+01:00) + // Format: date-time + StartTime strfmt.DateTime `json:"startTime,omitempty"` + + // True for success false otherwise + Success bool `json:"success,omitempty"` +} + +// Validate validates this job +func (m *Job) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEndTime(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartTime(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *Job) validateEndTime(formats strfmt.Registry) error { + + if swag.IsZero(m.EndTime) { // not required + return nil + } + + if err := validate.FormatOf("endTime", "body", "date-time", m.EndTime.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Job) validateStartTime(formats strfmt.Registry) error { + + if swag.IsZero(m.StartTime) { // not required + return nil + } + + if err := validate.FormatOf("startTime", "body", "date-time", m.StartTime.String(), formats); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Job) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Job) UnmarshalBinary(b []byte) error { + var res Job + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_info.go new file mode 100644 index 00000000000..8f22d0f6ed4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_info.go @@ -0,0 +1,130 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// JobInfo job info +// +// swagger:model JobInfo +type JobInfo struct { + + // job + Job *Job `json:"job,omitempty"` + + // Core status + Stats *Stats `json:"stats,omitempty"` + + // Completed transfers + Transferred []*Transfer `json:"transferred"` +} + +// Validate validates this job info +func (m *JobInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateJob(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStats(formats); err != nil { + res = append(res, err) + } + + if err := m.validateTransferred(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JobInfo) validateJob(formats strfmt.Registry) error { + + if swag.IsZero(m.Job) { // not required + return nil + } + + if m.Job != nil { + if err := m.Job.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("job") + } + return err + } + } + + return nil +} + +func (m *JobInfo) validateStats(formats strfmt.Registry) error { + + if swag.IsZero(m.Stats) { // not required + return nil + } + + if m.Stats != nil { + if err := m.Stats.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("stats") + } + return err + } + } + + return nil +} + +func (m *JobInfo) validateTransferred(formats strfmt.Registry) error { + + if swag.IsZero(m.Transferred) { // not required + return nil + } + + for i := 0; i < len(m.Transferred); i++ { + if swag.IsZero(m.Transferred[i]) { // not required + continue + } + + if m.Transferred[i] != nil { + if err := m.Transferred[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("transferred" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JobInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JobInfo) UnmarshalBinary(b []byte) error { + var res JobInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_info_params.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_info_params.go new file mode 100644 index 00000000000..435c85d185f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_info_params.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// JobInfoParams job info params +// +// swagger:model JobInfoParams +type JobInfoParams struct { + + // ID of the job + Jobid int64 `json:"jobid,omitempty"` + + // Duration in seconds + Wait int64 `json:"wait,omitempty"` +} + +// Validate validates this job info params +func (m *JobInfoParams) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *JobInfoParams) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JobInfoParams) UnmarshalBinary(b []byte) error { + var res JobInfoParams + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_progress.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_progress.go new file mode 100644 index 00000000000..a66dcaa6200 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/job_progress.go @@ -0,0 +1,160 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// JobProgress job progress +// +// swagger:model JobProgress +type JobProgress struct { + + // time at which job completed + // Format: date-time + CompletedAt strfmt.DateTime `json:"completed_at,omitempty"` + + // string description of the error (empty if successful) + Error string `json:"error,omitempty"` + + // number of bytes that failed transfer + Failed int64 `json:"failed,omitempty"` + + // number of bytes that were skipped + Skipped int64 `json:"skipped,omitempty"` + + // time at which job started + // Format: date-time + StartedAt strfmt.DateTime `json:"started_at,omitempty"` + + // status of the job + // Enum: [success error running not_found] + Status string `json:"status,omitempty"` + + // number of bytes that are successfully uploaded + Uploaded int64 `json:"uploaded,omitempty"` +} + +// Validate validates this job progress +func (m *JobProgress) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCompletedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStatus(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *JobProgress) validateCompletedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.CompletedAt) { // not required + return nil + } + + if err := validate.FormatOf("completed_at", "body", "date-time", m.CompletedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *JobProgress) validateStartedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StartedAt) { // not required + return nil + } + + if err := validate.FormatOf("started_at", "body", "date-time", m.StartedAt.String(), formats); err != nil { + return err + } + + return nil +} + +var jobProgressTypeStatusPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["success","error","running","not_found"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + jobProgressTypeStatusPropEnum = append(jobProgressTypeStatusPropEnum, v) + } +} + +const ( + + // JobProgressStatusSuccess captures enum value "success" + JobProgressStatusSuccess string = "success" + + // JobProgressStatusError captures enum value "error" + JobProgressStatusError string = "error" + + // JobProgressStatusRunning captures enum value "running" + JobProgressStatusRunning string = "running" + + // JobProgressStatusNotFound captures enum value "not_found" + JobProgressStatusNotFound string = "not_found" +) + +// prop value enum +func (m *JobProgress) validateStatusEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, jobProgressTypeStatusPropEnum, true); err != nil { + return err + } + return nil +} + +func (m *JobProgress) validateStatus(formats strfmt.Registry) error { + + if swag.IsZero(m.Status) { // not required + return nil + } + + // value enum + if err := m.validateStatusEnum("status", "body", m.Status); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *JobProgress) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return 
swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *JobProgress) UnmarshalBinary(b []byte) error { + var res JobProgress + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/jobid.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/jobid.go new file mode 100644 index 00000000000..e878cdd096e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/jobid.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Jobid jobid +// +// swagger:model Jobid +type Jobid struct { + + // ID of the job + Jobid int64 `json:"jobid,omitempty"` +} + +// Validate validates this jobid +func (m *Jobid) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Jobid) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Jobid) UnmarshalBinary(b []byte) error { + var res Jobid + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/list_item.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/list_item.go new file mode 100644 index 00000000000..7f981eff4b6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/list_item.go @@ -0,0 +1,95 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ListItem list item +// +// swagger:model ListItem +type ListItem struct { + + // Encrypted name + Encrypted string `json:"Encrypted,omitempty"` + + // Hash of the item + Hashes interface{} `json:"Hashes,omitempty"` + + // ID of the item + ID string `json:"ID,omitempty"` + + // true if item is directory + IsDir bool `json:"IsDir,omitempty"` + + // mime type of the item + MimeType string `json:"MimeType,omitempty"` + + // Modification time + // Format: date-time + ModTime strfmt.DateTime `json:"ModTime,omitempty"` + + // Name of the item + Name string `json:"Name,omitempty"` + + // Original ID of the item + OrigID string `json:"OrigID,omitempty"` + + // Path of the item + Path string `json:"Path,omitempty"` + + // Size in bytes + Size int64 `json:"Size,omitempty"` +} + +// Validate validates this list item +func (m *ListItem) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateModTime(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ListItem) validateModTime(formats strfmt.Registry) error { + + if swag.IsZero(m.ModTime) { // not required + return nil + } + + if err := validate.FormatOf("ModTime", "body", "date-time", m.ModTime.String(), formats); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ListItem) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ListItem) UnmarshalBinary(b []byte) error { + var res ListItem + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/list_options.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/list_options.go new file mode 100644 index 00000000000..cc2be53ce2d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/list_options.go @@ -0,0 +1,165 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// ListOptions list options +// +// swagger:model ListOptions +type ListOptions struct { + + // A remote name string eg. drive: + // Required: true + Fs *string `json:"fs"` + + // Show only the newest versions of files (no snapshot tag suffix attached) + NewestOnly bool `json:"newestOnly,omitempty"` + + // opt + Opt *ListOptionsOpt `json:"opt,omitempty"` + + // A path within that remote eg. dir + // Required: true + Remote *string `json:"remote"` + + // Show older version of files (snapshot tag suffix attached) + VersionedOnly bool `json:"versionedOnly,omitempty"` +} + +// Validate validates this list options +func (m *ListOptions) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateFs(formats); err != nil { + res = append(res, err) + } + + if err := m.validateOpt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateRemote(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ListOptions) validateFs(formats strfmt.Registry) error { + + if err := validate.Required("fs", "body", m.Fs); err != nil { + return err + } + + return nil +} + +func (m *ListOptions) validateOpt(formats strfmt.Registry) error { + + if swag.IsZero(m.Opt) { // not required + return nil + } + + if m.Opt != nil { + if err := m.Opt.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("opt") + } + return err + } + } + + return nil +} + +func (m *ListOptions) validateRemote(formats strfmt.Registry) error { + + if err := validate.Required("remote", "body", m.Remote); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ListOptions) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ListOptions) UnmarshalBinary(b []byte) error { + var res ListOptions + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// ListOptionsOpt A path within that remote eg. 
dir +// +// swagger:model ListOptionsOpt +type ListOptionsOpt struct { + + // Show only directories in the listing + DirsOnly bool `json:"dirsOnly,omitempty"` + + // Show only files in the listing + FilesOnly bool `json:"filesOnly,omitempty"` + + // Don't read the mime type time (can speed things up) + NoMimeType bool `json:"noMimeType,omitempty"` + + // Don't read the modification time (can speed things up) + NoModTime bool `json:"noModTime,omitempty"` + + // Recurse into the listing + Recurse bool `json:"recurse,omitempty"` + + // Show the encrypted names + ShowEncrypted bool `json:"showEncrypted,omitempty"` + + // Include hashes in the output (may take longer) + ShowHash bool `json:"showHash,omitempty"` + + // Show the ID of the underlying Object + ShowOrigIDs bool `json:"showOrigIDs,omitempty"` +} + +// Validate validates this list options opt +func (m *ListOptionsOpt) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ListOptionsOpt) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ListOptionsOpt) UnmarshalBinary(b []byte) error { + var res ListOptionsOpt + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/move_or_copy_file_options.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/move_or_copy_file_options.go new file mode 100644 index 00000000000..4a5870b7e9a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/move_or_copy_file_options.go @@ -0,0 +1,55 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MoveOrCopyFileOptions move or copy file options +// +// swagger:model MoveOrCopyFileOptions +type MoveOrCopyFileOptions struct { + + // Destination file system e.g. s3: or gcs: + DstFs string `json:"dstFs,omitempty"` + + // A path within that remote eg. file.txt for the destination + DstRemote string `json:"dstRemote,omitempty"` + + // Source file system e.g. s3: or gcs: + SrcFs string `json:"srcFs,omitempty"` + + // A path within that remote eg. 
file.txt for the source + SrcRemote string `json:"srcRemote,omitempty"` + + // A suffix which will be added to otherwise overwritten or deleted files + Suffix string `json:"suffix,omitempty"` +} + +// Validate validates this move or copy file options +func (m *MoveOrCopyFileOptions) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MoveOrCopyFileOptions) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MoveOrCopyFileOptions) UnmarshalBinary(b []byte) error { + var res MoveOrCopyFileOptions + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/node_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/node_info.go new file mode 100644 index 00000000000..7e5967511d9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/node_info.go @@ -0,0 +1,117 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NodeInfo node info +// +// Information about Scylla node. +// +// swagger:model NodeInfo +type NodeInfo struct { + + // Scylla Manager Agent version. + AgentVersion string `json:"agent_version,omitempty"` + + // Address for Alternator API requests. + AlternatorAddress string `json:"alternator_address,omitempty"` + + // Whether Alternator requires authentication. + AlternatorEnforceAuthorization bool `json:"alternator_enforce_authorization,omitempty"` + + // Port for Alternator HTTPS API server. + AlternatorHTTPSPort string `json:"alternator_https_port,omitempty"` + + // Port for Alternator API server. + AlternatorPort string `json:"alternator_port,omitempty"` + + // Address for REST API requests. + APIAddress string `json:"api_address,omitempty"` + + // Port for REST API server. + APIPort string `json:"api_port,omitempty"` + + // Address that is broadcasted to tell other Scylla nodes to connect to. Related to listen_address. + BroadcastAddress string `json:"broadcast_address,omitempty"` + + // Address that is broadcasted to tell the clients to connect to. + BroadcastRPCAddress string `json:"broadcast_rpc_address,omitempty"` + + // Whether client encryption is enabled. + ClientEncryptionEnabled bool `json:"client_encryption_enabled,omitempty"` + + // Whether client authorization is required. + ClientEncryptionRequireAuth bool `json:"client_encryption_require_auth,omitempty"` + + // Whether Scylla uses RAFT for cluster management and DDL. + ConsistentClusterManagement bool `json:"consistent_cluster_management,omitempty"` + + // Logical CPU count. + CPUCount int64 `json:"cpu_count,omitempty"` + + // Whether CQL requires password authentication. + CqlPasswordProtected bool `json:"cql_password_protected,omitempty"` + + // Address Scylla listens for connections from other nodes. + ListenAddress string `json:"listen_address,omitempty"` + + // Total available memory. + MemoryTotal int64 `json:"memory_total,omitempty"` + + // Port for the CQL native transport to listen for clients on. 
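+	// (Editor's note: conventionally "9042" on a stock install; the port is
+	// modeled as a string, presumably because it is passed through from
+	// configuration verbatim.)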
+ NativeTransportPort string `json:"native_transport_port,omitempty"` + + // Port for the encrypted CQL native transport to listen for clients on. + NativeTransportPortSsl string `json:"native_transport_port_ssl,omitempty"` + + // Address for Prometheus queries. + PrometheusAddress string `json:"prometheus_address,omitempty"` + + // Port for Prometheus server. + PrometheusPort string `json:"prometheus_port,omitempty"` + + // Address on which Scylla is going to expect Thrift and CQL client connections. + RPCAddress string `json:"rpc_address,omitempty"` + + // Port for Thrift to listen for clients on. + RPCPort string `json:"rpc_port,omitempty"` + + // Scylla version. + ScyllaVersion string `json:"scylla_version,omitempty"` + + // Whether Scylla supports uuid-like sstable naming. + SstableUUIDFormat bool `json:"sstable_uuid_format,omitempty"` + + // Uptime in seconds. + Uptime int64 `json:"uptime,omitempty"` +} + +// Validate validates this node info +func (m *NodeInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *NodeInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *NodeInfo) UnmarshalBinary(b []byte) error { + var res NodeInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/remote.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/remote.go new file mode 100644 index 00000000000..a168cb89c47 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/remote.go @@ -0,0 +1,49 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Remote remote +// +// swagger:model Remote +type Remote struct { + + // Name of the remote + Name string `json:"name,omitempty"` + + // Additional parameters for the remote + Parameters interface{} `json:"parameters,omitempty"` + + // Type of the new remote + Type string `json:"type,omitempty"` +} + +// Validate validates this remote +func (m *Remote) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Remote) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Remote) UnmarshalBinary(b []byte) error { + var res Remote + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/remote_path.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/remote_path.go new file mode 100644 index 00000000000..5895374882f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/remote_path.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RemotePath remote path +// +// swagger:model RemotePath +type RemotePath struct { + + // A remote name string eg. drive: + Fs string `json:"fs,omitempty"` + + // A path within that remote eg. dir + Remote string `json:"remote,omitempty"` +} + +// Validate validates this remote path +func (m *RemotePath) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RemotePath) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RemotePath) UnmarshalBinary(b []byte) error { + var res RemotePath + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/stats.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/stats.go new file mode 100644 index 00000000000..5c90cd5e93b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/stats.go @@ -0,0 +1,163 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Stats stats +// +// swagger:model Stats +type Stats struct { + + // total transferred bytes since the start of the process + Bytes int64 `json:"bytes,omitempty"` + + // an array of names of currently active file checks + Checking []string `json:"checking"` + + // number of checked files + Checks int64 `json:"checks,omitempty"` + + // number of deleted files + Deletes int64 `json:"deletes,omitempty"` + + // time in seconds since the start of the process + ElapsedTime float64 `json:"elapsedTime,omitempty"` + + // number of errors + Errors int64 `json:"errors,omitempty"` + + // whether there has been at least one FatalError + FatalError bool `json:"fatalError,omitempty"` + + // last occurred error + LastError string `json:"lastError,omitempty"` + + // whether there has been at least one non-NoRetryError + RetryError bool `json:"retryError,omitempty"` + + // average speed in bytes/sec since start of the process + Speed float64 `json:"speed,omitempty"` + + // an array of currently active file transfers + Transferring []*StatsTransferringItems0 `json:"transferring"` + + // number of transferred files + Transfers int64 `json:"transfers,omitempty"` +} + +// Validate validates this stats +func (m *Stats) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateTransferring(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Stats) validateTransferring(formats strfmt.Registry) error { + + if swag.IsZero(m.Transferring) { // not required + return nil + } + + for i := 0; i < len(m.Transferring); i++ { + if swag.IsZero(m.Transferring[i]) { // not required + continue + } + + if m.Transferring[i] != nil { + if err := m.Transferring[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("transferring" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Stats) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Stats) UnmarshalBinary(b []byte) error { + var res Stats + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} + +// StatsTransferringItems0 stats transferring items0 +// +// swagger:model StatsTransferringItems0 +type StatsTransferringItems0 struct { + + // total transferred bytes for this file + Bytes int64 `json:"bytes,omitempty"` + + // estimated time in seconds until file transfer completion + Eta float64 `json:"eta,omitempty"` + + // name of the file + Name string `json:"name,omitempty"` + + // progress of the file transfer in percent + Percentage float64 `json:"percentage,omitempty"` + + // size of the file in bytes + Size int64 `json:"size,omitempty"` + + // speed in bytes/sec + Speed float64 `json:"speed,omitempty"` + + // speed in bytes/sec as an exponentially weighted moving average + SpeedAvg float64 `json:"speedAvg,omitempty"` +} + +// Validate validates this stats transferring items0 +func (m *StatsTransferringItems0) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StatsTransferringItems0) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StatsTransferringItems0) UnmarshalBinary(b []byte) error { + var res StatsTransferringItems0 + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/stats_params.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/stats_params.go new file mode 100644 index 00000000000..e47cdc7438c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/stats_params.go @@ -0,0 +1,43 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StatsParams stats params +// +// swagger:model StatsParams +type StatsParams struct { + + // group name + Group string `json:"group,omitempty"` +} + +// Validate validates this stats params +func (m *StatsParams) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StatsParams) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StatsParams) UnmarshalBinary(b []byte) error { + var res StatsParams + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/transfer.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/transfer.go new file mode 100644 index 00000000000..f5377969057 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models/transfer.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// Transfer transfer +// +// swagger:model Transfer +type Transfer struct { + + // total transferred bytes for this file + Bytes int64 `json:"bytes,omitempty"` + + // if the transfer is only checked (skipped, deleted) + Checked bool `json:"checked,omitempty"` + + // time at which transfer completed + // Format: date-time + CompletedAt strfmt.DateTime `json:"completed_at,omitempty"` + + // string description of the error (empty if successful) + Error string `json:"error,omitempty"` + + // name of the file + Name string `json:"name,omitempty"` + + // size of the file in bytes + Size int64 `json:"size,omitempty"` + + // time at which transfer started + // Format: date-time + StartedAt strfmt.DateTime `json:"started_at,omitempty"` +} + +// Validate validates this transfer +func (m *Transfer) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateCompletedAt(formats); err != nil { + res = append(res, err) + } + + if err := m.validateStartedAt(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Transfer) validateCompletedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.CompletedAt) { // not required + return nil + } + + if err := validate.FormatOf("completed_at", "body", "date-time", m.CompletedAt.String(), formats); err != nil { + return err + } + + return nil +} + +func (m *Transfer) validateStartedAt(formats strfmt.Registry) error { + + if swag.IsZero(m.StartedAt) { // not required + return nil + } + + if err := validate.FormatOf("started_at", "body", "date-time", m.StartedAt.String(), formats); err != nil { + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Transfer) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Transfer) UnmarshalBinary(b []byte) error { + var res Transfer + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_capacity_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_capacity_post_parameters.go new file mode 100644 index 00000000000..ca58eaa612f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_capacity_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceCounterCacheCapacityPostParams creates a new CacheServiceCounterCacheCapacityPostParams object +// with the default values initialized. 
+func NewCacheServiceCounterCacheCapacityPostParams() *CacheServiceCounterCacheCapacityPostParams { + var () + return &CacheServiceCounterCacheCapacityPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceCounterCacheCapacityPostParamsWithTimeout creates a new CacheServiceCounterCacheCapacityPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceCounterCacheCapacityPostParamsWithTimeout(timeout time.Duration) *CacheServiceCounterCacheCapacityPostParams { + var () + return &CacheServiceCounterCacheCapacityPostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceCounterCacheCapacityPostParamsWithContext creates a new CacheServiceCounterCacheCapacityPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceCounterCacheCapacityPostParamsWithContext(ctx context.Context) *CacheServiceCounterCacheCapacityPostParams { + var () + return &CacheServiceCounterCacheCapacityPostParams{ + + Context: ctx, + } +} + +// NewCacheServiceCounterCacheCapacityPostParamsWithHTTPClient creates a new CacheServiceCounterCacheCapacityPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceCounterCacheCapacityPostParamsWithHTTPClient(client *http.Client) *CacheServiceCounterCacheCapacityPostParams { + var () + return &CacheServiceCounterCacheCapacityPostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceCounterCacheCapacityPostParams contains all the parameters to send to the API endpoint +for the cache service counter cache capacity post operation typically these are written to a http.Request +*/ +type CacheServiceCounterCacheCapacityPostParams struct { + + /*Capacity + counter cache capacity in mb + + */ + Capacity string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) WithTimeout(timeout time.Duration) *CacheServiceCounterCacheCapacityPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) WithContext(ctx context.Context) *CacheServiceCounterCacheCapacityPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) WithHTTPClient(client *http.Client) *CacheServiceCounterCacheCapacityPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCapacity adds the capacity to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) WithCapacity(capacity string) 
*CacheServiceCounterCacheCapacityPostParams { + o.SetCapacity(capacity) + return o +} + +// SetCapacity adds the capacity to the cache service counter cache capacity post params +func (o *CacheServiceCounterCacheCapacityPostParams) SetCapacity(capacity string) { + o.Capacity = capacity +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceCounterCacheCapacityPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param capacity + qrCapacity := o.Capacity + qCapacity := qrCapacity + if qCapacity != "" { + if err := r.SetQueryParam("capacity", qCapacity); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_capacity_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_capacity_post_responses.go new file mode 100644 index 00000000000..e91ebc2de9b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_capacity_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceCounterCacheCapacityPostReader is a Reader for the CacheServiceCounterCacheCapacityPost structure. +type CacheServiceCounterCacheCapacityPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceCounterCacheCapacityPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceCounterCacheCapacityPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceCounterCacheCapacityPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceCounterCacheCapacityPostOK creates a CacheServiceCounterCacheCapacityPostOK with default headers values +func NewCacheServiceCounterCacheCapacityPostOK() *CacheServiceCounterCacheCapacityPostOK { + return &CacheServiceCounterCacheCapacityPostOK{} +} + +/* +CacheServiceCounterCacheCapacityPostOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceCounterCacheCapacityPostOK struct { +} + +func (o *CacheServiceCounterCacheCapacityPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceCounterCacheCapacityPostDefault creates a CacheServiceCounterCacheCapacityPostDefault with default headers values +func NewCacheServiceCounterCacheCapacityPostDefault(code int) *CacheServiceCounterCacheCapacityPostDefault { + return &CacheServiceCounterCacheCapacityPostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceCounterCacheCapacityPostDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceCounterCacheCapacityPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service counter cache capacity post default response +func (o *CacheServiceCounterCacheCapacityPostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceCounterCacheCapacityPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceCounterCacheCapacityPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceCounterCacheCapacityPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_get_parameters.go new file mode 100644 index 00000000000..fca4c32f19b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceCounterCacheKeysToSaveGetParams creates a new CacheServiceCounterCacheKeysToSaveGetParams object +// with the default values initialized. 
+func NewCacheServiceCounterCacheKeysToSaveGetParams() *CacheServiceCounterCacheKeysToSaveGetParams { + + return &CacheServiceCounterCacheKeysToSaveGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceCounterCacheKeysToSaveGetParamsWithTimeout creates a new CacheServiceCounterCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceCounterCacheKeysToSaveGetParamsWithTimeout(timeout time.Duration) *CacheServiceCounterCacheKeysToSaveGetParams { + + return &CacheServiceCounterCacheKeysToSaveGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceCounterCacheKeysToSaveGetParamsWithContext creates a new CacheServiceCounterCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceCounterCacheKeysToSaveGetParamsWithContext(ctx context.Context) *CacheServiceCounterCacheKeysToSaveGetParams { + + return &CacheServiceCounterCacheKeysToSaveGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceCounterCacheKeysToSaveGetParamsWithHTTPClient creates a new CacheServiceCounterCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceCounterCacheKeysToSaveGetParamsWithHTTPClient(client *http.Client) *CacheServiceCounterCacheKeysToSaveGetParams { + + return &CacheServiceCounterCacheKeysToSaveGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceCounterCacheKeysToSaveGetParams contains all the parameters to send to the API endpoint +for the cache service counter cache keys to save get operation typically these are written to a http.Request +*/ +type CacheServiceCounterCacheKeysToSaveGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service counter cache keys to save get params +func (o *CacheServiceCounterCacheKeysToSaveGetParams) WithTimeout(timeout time.Duration) *CacheServiceCounterCacheKeysToSaveGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service counter cache keys to save get params +func (o *CacheServiceCounterCacheKeysToSaveGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service counter cache keys to save get params +func (o *CacheServiceCounterCacheKeysToSaveGetParams) WithContext(ctx context.Context) *CacheServiceCounterCacheKeysToSaveGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service counter cache keys to save get params +func (o *CacheServiceCounterCacheKeysToSaveGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service counter cache keys to save get params +func (o *CacheServiceCounterCacheKeysToSaveGetParams) WithHTTPClient(client *http.Client) *CacheServiceCounterCacheKeysToSaveGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service counter cache keys to save get params +func (o *CacheServiceCounterCacheKeysToSaveGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceCounterCacheKeysToSaveGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + 
return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_get_responses.go new file mode 100644 index 00000000000..fa36173b91c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceCounterCacheKeysToSaveGetReader is a Reader for the CacheServiceCounterCacheKeysToSaveGet structure. +type CacheServiceCounterCacheKeysToSaveGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceCounterCacheKeysToSaveGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceCounterCacheKeysToSaveGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceCounterCacheKeysToSaveGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceCounterCacheKeysToSaveGetOK creates a CacheServiceCounterCacheKeysToSaveGetOK with default headers values +func NewCacheServiceCounterCacheKeysToSaveGetOK() *CacheServiceCounterCacheKeysToSaveGetOK { + return &CacheServiceCounterCacheKeysToSaveGetOK{} +} + +/* +CacheServiceCounterCacheKeysToSaveGetOK handles this case with default header values. + +Success +*/ +type CacheServiceCounterCacheKeysToSaveGetOK struct { + Payload int32 +} + +func (o *CacheServiceCounterCacheKeysToSaveGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceCounterCacheKeysToSaveGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceCounterCacheKeysToSaveGetDefault creates a CacheServiceCounterCacheKeysToSaveGetDefault with default headers values +func NewCacheServiceCounterCacheKeysToSaveGetDefault(code int) *CacheServiceCounterCacheKeysToSaveGetDefault { + return &CacheServiceCounterCacheKeysToSaveGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceCounterCacheKeysToSaveGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceCounterCacheKeysToSaveGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service counter cache keys to save get default response +func (o *CacheServiceCounterCacheKeysToSaveGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceCounterCacheKeysToSaveGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceCounterCacheKeysToSaveGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceCounterCacheKeysToSaveGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_post_parameters.go new file mode 100644 index 00000000000..50c2cf28af5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCacheServiceCounterCacheKeysToSavePostParams creates a new CacheServiceCounterCacheKeysToSavePostParams object +// with the default values initialized. 
+func NewCacheServiceCounterCacheKeysToSavePostParams() *CacheServiceCounterCacheKeysToSavePostParams { + var () + return &CacheServiceCounterCacheKeysToSavePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceCounterCacheKeysToSavePostParamsWithTimeout creates a new CacheServiceCounterCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceCounterCacheKeysToSavePostParamsWithTimeout(timeout time.Duration) *CacheServiceCounterCacheKeysToSavePostParams { + var () + return &CacheServiceCounterCacheKeysToSavePostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceCounterCacheKeysToSavePostParamsWithContext creates a new CacheServiceCounterCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceCounterCacheKeysToSavePostParamsWithContext(ctx context.Context) *CacheServiceCounterCacheKeysToSavePostParams { + var () + return &CacheServiceCounterCacheKeysToSavePostParams{ + + Context: ctx, + } +} + +// NewCacheServiceCounterCacheKeysToSavePostParamsWithHTTPClient creates a new CacheServiceCounterCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceCounterCacheKeysToSavePostParamsWithHTTPClient(client *http.Client) *CacheServiceCounterCacheKeysToSavePostParams { + var () + return &CacheServiceCounterCacheKeysToSavePostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceCounterCacheKeysToSavePostParams contains all the parameters to send to the API endpoint +for the cache service counter cache keys to save post operation typically these are written to a http.Request +*/ +type CacheServiceCounterCacheKeysToSavePostParams struct { + + /*Cckts + counter cache keys to save + + */ + Cckts int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service counter cache keys to save post params +func (o *CacheServiceCounterCacheKeysToSavePostParams) WithTimeout(timeout time.Duration) *CacheServiceCounterCacheKeysToSavePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service counter cache keys to save post params +func (o *CacheServiceCounterCacheKeysToSavePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service counter cache keys to save post params +func (o *CacheServiceCounterCacheKeysToSavePostParams) WithContext(ctx context.Context) *CacheServiceCounterCacheKeysToSavePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service counter cache keys to save post params +func (o *CacheServiceCounterCacheKeysToSavePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service counter cache keys to save post params +func (o *CacheServiceCounterCacheKeysToSavePostParams) WithHTTPClient(client *http.Client) *CacheServiceCounterCacheKeysToSavePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service counter cache keys to save post params +func (o *CacheServiceCounterCacheKeysToSavePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCckts adds the cckts to the cache service counter cache keys to save post params +func (o 
*CacheServiceCounterCacheKeysToSavePostParams) WithCckts(cckts int32) *CacheServiceCounterCacheKeysToSavePostParams { + o.SetCckts(cckts) + return o +} + +// SetCckts adds the cckts to the cache service counter cache keys to save post params +func (o *CacheServiceCounterCacheKeysToSavePostParams) SetCckts(cckts int32) { + o.Cckts = cckts +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceCounterCacheKeysToSavePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param cckts + qrCckts := o.Cckts + qCckts := swag.FormatInt32(qrCckts) + if qCckts != "" { + if err := r.SetQueryParam("cckts", qCckts); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_post_responses.go new file mode 100644 index 00000000000..ac59b474c96 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_keys_to_save_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceCounterCacheKeysToSavePostReader is a Reader for the CacheServiceCounterCacheKeysToSavePost structure. +type CacheServiceCounterCacheKeysToSavePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceCounterCacheKeysToSavePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceCounterCacheKeysToSavePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceCounterCacheKeysToSavePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceCounterCacheKeysToSavePostOK creates a CacheServiceCounterCacheKeysToSavePostOK with default headers values +func NewCacheServiceCounterCacheKeysToSavePostOK() *CacheServiceCounterCacheKeysToSavePostOK { + return &CacheServiceCounterCacheKeysToSavePostOK{} +} + +/* +CacheServiceCounterCacheKeysToSavePostOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceCounterCacheKeysToSavePostOK struct { +} + +func (o *CacheServiceCounterCacheKeysToSavePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceCounterCacheKeysToSavePostDefault creates a CacheServiceCounterCacheKeysToSavePostDefault with default headers values +func NewCacheServiceCounterCacheKeysToSavePostDefault(code int) *CacheServiceCounterCacheKeysToSavePostDefault { + return &CacheServiceCounterCacheKeysToSavePostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceCounterCacheKeysToSavePostDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceCounterCacheKeysToSavePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service counter cache keys to save post default response +func (o *CacheServiceCounterCacheKeysToSavePostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceCounterCacheKeysToSavePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceCounterCacheKeysToSavePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceCounterCacheKeysToSavePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_get_parameters.go new file mode 100644 index 00000000000..0a1a26534d1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceCounterCacheSavePeriodGetParams creates a new CacheServiceCounterCacheSavePeriodGetParams object +// with the default values initialized. 
+func NewCacheServiceCounterCacheSavePeriodGetParams() *CacheServiceCounterCacheSavePeriodGetParams { + + return &CacheServiceCounterCacheSavePeriodGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceCounterCacheSavePeriodGetParamsWithTimeout creates a new CacheServiceCounterCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceCounterCacheSavePeriodGetParamsWithTimeout(timeout time.Duration) *CacheServiceCounterCacheSavePeriodGetParams { + + return &CacheServiceCounterCacheSavePeriodGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceCounterCacheSavePeriodGetParamsWithContext creates a new CacheServiceCounterCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceCounterCacheSavePeriodGetParamsWithContext(ctx context.Context) *CacheServiceCounterCacheSavePeriodGetParams { + + return &CacheServiceCounterCacheSavePeriodGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceCounterCacheSavePeriodGetParamsWithHTTPClient creates a new CacheServiceCounterCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceCounterCacheSavePeriodGetParamsWithHTTPClient(client *http.Client) *CacheServiceCounterCacheSavePeriodGetParams { + + return &CacheServiceCounterCacheSavePeriodGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceCounterCacheSavePeriodGetParams contains all the parameters to send to the API endpoint +for the cache service counter cache save period get operation typically these are written to a http.Request +*/ +type CacheServiceCounterCacheSavePeriodGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service counter cache save period get params +func (o *CacheServiceCounterCacheSavePeriodGetParams) WithTimeout(timeout time.Duration) *CacheServiceCounterCacheSavePeriodGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service counter cache save period get params +func (o *CacheServiceCounterCacheSavePeriodGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service counter cache save period get params +func (o *CacheServiceCounterCacheSavePeriodGetParams) WithContext(ctx context.Context) *CacheServiceCounterCacheSavePeriodGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service counter cache save period get params +func (o *CacheServiceCounterCacheSavePeriodGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service counter cache save period get params +func (o *CacheServiceCounterCacheSavePeriodGetParams) WithHTTPClient(client *http.Client) *CacheServiceCounterCacheSavePeriodGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service counter cache save period get params +func (o *CacheServiceCounterCacheSavePeriodGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceCounterCacheSavePeriodGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return 
err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_get_responses.go new file mode 100644 index 00000000000..5eed411fb9a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceCounterCacheSavePeriodGetReader is a Reader for the CacheServiceCounterCacheSavePeriodGet structure. +type CacheServiceCounterCacheSavePeriodGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceCounterCacheSavePeriodGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceCounterCacheSavePeriodGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceCounterCacheSavePeriodGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceCounterCacheSavePeriodGetOK creates a CacheServiceCounterCacheSavePeriodGetOK with default headers values +func NewCacheServiceCounterCacheSavePeriodGetOK() *CacheServiceCounterCacheSavePeriodGetOK { + return &CacheServiceCounterCacheSavePeriodGetOK{} +} + +/* +CacheServiceCounterCacheSavePeriodGetOK handles this case with default header values. + +Success +*/ +type CacheServiceCounterCacheSavePeriodGetOK struct { + Payload int32 +} + +func (o *CacheServiceCounterCacheSavePeriodGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceCounterCacheSavePeriodGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceCounterCacheSavePeriodGetDefault creates a CacheServiceCounterCacheSavePeriodGetDefault with default headers values +func NewCacheServiceCounterCacheSavePeriodGetDefault(code int) *CacheServiceCounterCacheSavePeriodGetDefault { + return &CacheServiceCounterCacheSavePeriodGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceCounterCacheSavePeriodGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceCounterCacheSavePeriodGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service counter cache save period get default response +func (o *CacheServiceCounterCacheSavePeriodGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceCounterCacheSavePeriodGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceCounterCacheSavePeriodGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceCounterCacheSavePeriodGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_post_parameters.go new file mode 100644 index 00000000000..2ddcb322197 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCacheServiceCounterCacheSavePeriodPostParams creates a new CacheServiceCounterCacheSavePeriodPostParams object +// with the default values initialized. 
+func NewCacheServiceCounterCacheSavePeriodPostParams() *CacheServiceCounterCacheSavePeriodPostParams { + var () + return &CacheServiceCounterCacheSavePeriodPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceCounterCacheSavePeriodPostParamsWithTimeout creates a new CacheServiceCounterCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceCounterCacheSavePeriodPostParamsWithTimeout(timeout time.Duration) *CacheServiceCounterCacheSavePeriodPostParams { + var () + return &CacheServiceCounterCacheSavePeriodPostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceCounterCacheSavePeriodPostParamsWithContext creates a new CacheServiceCounterCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceCounterCacheSavePeriodPostParamsWithContext(ctx context.Context) *CacheServiceCounterCacheSavePeriodPostParams { + var () + return &CacheServiceCounterCacheSavePeriodPostParams{ + + Context: ctx, + } +} + +// NewCacheServiceCounterCacheSavePeriodPostParamsWithHTTPClient creates a new CacheServiceCounterCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceCounterCacheSavePeriodPostParamsWithHTTPClient(client *http.Client) *CacheServiceCounterCacheSavePeriodPostParams { + var () + return &CacheServiceCounterCacheSavePeriodPostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceCounterCacheSavePeriodPostParams contains all the parameters to send to the API endpoint +for the cache service counter cache save period post operation typically these are written to a http.Request +*/ +type CacheServiceCounterCacheSavePeriodPostParams struct { + + /*Ccspis + counter cache save period in seconds + + */ + Ccspis int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service counter cache save period post params +func (o *CacheServiceCounterCacheSavePeriodPostParams) WithTimeout(timeout time.Duration) *CacheServiceCounterCacheSavePeriodPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service counter cache save period post params +func (o *CacheServiceCounterCacheSavePeriodPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service counter cache save period post params +func (o *CacheServiceCounterCacheSavePeriodPostParams) WithContext(ctx context.Context) *CacheServiceCounterCacheSavePeriodPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service counter cache save period post params +func (o *CacheServiceCounterCacheSavePeriodPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service counter cache save period post params +func (o *CacheServiceCounterCacheSavePeriodPostParams) WithHTTPClient(client *http.Client) *CacheServiceCounterCacheSavePeriodPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service counter cache save period post params +func (o *CacheServiceCounterCacheSavePeriodPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCcspis adds the ccspis to the cache service counter cache save period post params +func (o 
*CacheServiceCounterCacheSavePeriodPostParams) WithCcspis(ccspis int32) *CacheServiceCounterCacheSavePeriodPostParams { + o.SetCcspis(ccspis) + return o +} + +// SetCcspis adds the ccspis to the cache service counter cache save period post params +func (o *CacheServiceCounterCacheSavePeriodPostParams) SetCcspis(ccspis int32) { + o.Ccspis = ccspis +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceCounterCacheSavePeriodPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param ccspis + qrCcspis := o.Ccspis + qCcspis := swag.FormatInt32(qrCcspis) + if qCcspis != "" { + if err := r.SetQueryParam("ccspis", qCcspis); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_post_responses.go new file mode 100644 index 00000000000..642d0169d5f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_counter_cache_save_period_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceCounterCacheSavePeriodPostReader is a Reader for the CacheServiceCounterCacheSavePeriodPost structure. +type CacheServiceCounterCacheSavePeriodPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceCounterCacheSavePeriodPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceCounterCacheSavePeriodPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceCounterCacheSavePeriodPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceCounterCacheSavePeriodPostOK creates a CacheServiceCounterCacheSavePeriodPostOK with default headers values +func NewCacheServiceCounterCacheSavePeriodPostOK() *CacheServiceCounterCacheSavePeriodPostOK { + return &CacheServiceCounterCacheSavePeriodPostOK{} +} + +/* +CacheServiceCounterCacheSavePeriodPostOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceCounterCacheSavePeriodPostOK struct { +} + +func (o *CacheServiceCounterCacheSavePeriodPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceCounterCacheSavePeriodPostDefault creates a CacheServiceCounterCacheSavePeriodPostDefault with default headers values +func NewCacheServiceCounterCacheSavePeriodPostDefault(code int) *CacheServiceCounterCacheSavePeriodPostDefault { + return &CacheServiceCounterCacheSavePeriodPostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceCounterCacheSavePeriodPostDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceCounterCacheSavePeriodPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service counter cache save period post default response +func (o *CacheServiceCounterCacheSavePeriodPostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceCounterCacheSavePeriodPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceCounterCacheSavePeriodPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceCounterCacheSavePeriodPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_counter_cache_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_counter_cache_post_parameters.go new file mode 100644 index 00000000000..82975a0beda --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_counter_cache_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceInvalidateCounterCachePostParams creates a new CacheServiceInvalidateCounterCachePostParams object +// with the default values initialized. 
+func NewCacheServiceInvalidateCounterCachePostParams() *CacheServiceInvalidateCounterCachePostParams { + + return &CacheServiceInvalidateCounterCachePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceInvalidateCounterCachePostParamsWithTimeout creates a new CacheServiceInvalidateCounterCachePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceInvalidateCounterCachePostParamsWithTimeout(timeout time.Duration) *CacheServiceInvalidateCounterCachePostParams { + + return &CacheServiceInvalidateCounterCachePostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceInvalidateCounterCachePostParamsWithContext creates a new CacheServiceInvalidateCounterCachePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceInvalidateCounterCachePostParamsWithContext(ctx context.Context) *CacheServiceInvalidateCounterCachePostParams { + + return &CacheServiceInvalidateCounterCachePostParams{ + + Context: ctx, + } +} + +// NewCacheServiceInvalidateCounterCachePostParamsWithHTTPClient creates a new CacheServiceInvalidateCounterCachePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceInvalidateCounterCachePostParamsWithHTTPClient(client *http.Client) *CacheServiceInvalidateCounterCachePostParams { + + return &CacheServiceInvalidateCounterCachePostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceInvalidateCounterCachePostParams contains all the parameters to send to the API endpoint +for the cache service invalidate counter cache post operation typically these are written to a http.Request +*/ +type CacheServiceInvalidateCounterCachePostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service invalidate counter cache post params +func (o *CacheServiceInvalidateCounterCachePostParams) WithTimeout(timeout time.Duration) *CacheServiceInvalidateCounterCachePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service invalidate counter cache post params +func (o *CacheServiceInvalidateCounterCachePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service invalidate counter cache post params +func (o *CacheServiceInvalidateCounterCachePostParams) WithContext(ctx context.Context) *CacheServiceInvalidateCounterCachePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service invalidate counter cache post params +func (o *CacheServiceInvalidateCounterCachePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service invalidate counter cache post params +func (o *CacheServiceInvalidateCounterCachePostParams) WithHTTPClient(client *http.Client) *CacheServiceInvalidateCounterCachePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service invalidate counter cache post params +func (o *CacheServiceInvalidateCounterCachePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceInvalidateCounterCachePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_counter_cache_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_counter_cache_post_responses.go new file mode 100644 index 00000000000..60412d25f99 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_counter_cache_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceInvalidateCounterCachePostReader is a Reader for the CacheServiceInvalidateCounterCachePost structure. +type CacheServiceInvalidateCounterCachePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceInvalidateCounterCachePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceInvalidateCounterCachePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceInvalidateCounterCachePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceInvalidateCounterCachePostOK creates a CacheServiceInvalidateCounterCachePostOK with default headers values +func NewCacheServiceInvalidateCounterCachePostOK() *CacheServiceInvalidateCounterCachePostOK { + return &CacheServiceInvalidateCounterCachePostOK{} +} + +/* +CacheServiceInvalidateCounterCachePostOK handles this case with default header values. + +Success +*/ +type CacheServiceInvalidateCounterCachePostOK struct { +} + +func (o *CacheServiceInvalidateCounterCachePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceInvalidateCounterCachePostDefault creates a CacheServiceInvalidateCounterCachePostDefault with default headers values +func NewCacheServiceInvalidateCounterCachePostDefault(code int) *CacheServiceInvalidateCounterCachePostDefault { + return &CacheServiceInvalidateCounterCachePostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceInvalidateCounterCachePostDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceInvalidateCounterCachePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service invalidate counter cache post default response +func (o *CacheServiceInvalidateCounterCachePostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceInvalidateCounterCachePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceInvalidateCounterCachePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceInvalidateCounterCachePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_key_cache_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_key_cache_post_parameters.go new file mode 100644 index 00000000000..24c300a52cf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_key_cache_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceInvalidateKeyCachePostParams creates a new CacheServiceInvalidateKeyCachePostParams object +// with the default values initialized. 
+func NewCacheServiceInvalidateKeyCachePostParams() *CacheServiceInvalidateKeyCachePostParams { + + return &CacheServiceInvalidateKeyCachePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceInvalidateKeyCachePostParamsWithTimeout creates a new CacheServiceInvalidateKeyCachePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceInvalidateKeyCachePostParamsWithTimeout(timeout time.Duration) *CacheServiceInvalidateKeyCachePostParams { + + return &CacheServiceInvalidateKeyCachePostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceInvalidateKeyCachePostParamsWithContext creates a new CacheServiceInvalidateKeyCachePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceInvalidateKeyCachePostParamsWithContext(ctx context.Context) *CacheServiceInvalidateKeyCachePostParams { + + return &CacheServiceInvalidateKeyCachePostParams{ + + Context: ctx, + } +} + +// NewCacheServiceInvalidateKeyCachePostParamsWithHTTPClient creates a new CacheServiceInvalidateKeyCachePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceInvalidateKeyCachePostParamsWithHTTPClient(client *http.Client) *CacheServiceInvalidateKeyCachePostParams { + + return &CacheServiceInvalidateKeyCachePostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceInvalidateKeyCachePostParams contains all the parameters to send to the API endpoint +for the cache service invalidate key cache post operation typically these are written to a http.Request +*/ +type CacheServiceInvalidateKeyCachePostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service invalidate key cache post params +func (o *CacheServiceInvalidateKeyCachePostParams) WithTimeout(timeout time.Duration) *CacheServiceInvalidateKeyCachePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service invalidate key cache post params +func (o *CacheServiceInvalidateKeyCachePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service invalidate key cache post params +func (o *CacheServiceInvalidateKeyCachePostParams) WithContext(ctx context.Context) *CacheServiceInvalidateKeyCachePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service invalidate key cache post params +func (o *CacheServiceInvalidateKeyCachePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service invalidate key cache post params +func (o *CacheServiceInvalidateKeyCachePostParams) WithHTTPClient(client *http.Client) *CacheServiceInvalidateKeyCachePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service invalidate key cache post params +func (o *CacheServiceInvalidateKeyCachePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceInvalidateKeyCachePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_key_cache_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_key_cache_post_responses.go new file mode 100644 index 00000000000..8da9cda092c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_invalidate_key_cache_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceInvalidateKeyCachePostReader is a Reader for the CacheServiceInvalidateKeyCachePost structure. +type CacheServiceInvalidateKeyCachePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceInvalidateKeyCachePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceInvalidateKeyCachePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceInvalidateKeyCachePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceInvalidateKeyCachePostOK creates a CacheServiceInvalidateKeyCachePostOK with default headers values +func NewCacheServiceInvalidateKeyCachePostOK() *CacheServiceInvalidateKeyCachePostOK { + return &CacheServiceInvalidateKeyCachePostOK{} +} + +/* +CacheServiceInvalidateKeyCachePostOK handles this case with default header values. + +Success +*/ +type CacheServiceInvalidateKeyCachePostOK struct { +} + +func (o *CacheServiceInvalidateKeyCachePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceInvalidateKeyCachePostDefault creates a CacheServiceInvalidateKeyCachePostDefault with default headers values +func NewCacheServiceInvalidateKeyCachePostDefault(code int) *CacheServiceInvalidateKeyCachePostDefault { + return &CacheServiceInvalidateKeyCachePostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceInvalidateKeyCachePostDefault handles this case with default header values. 
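The parameter constructors above pair with fluent `With*` setters that return the receiver, so configuration chains off the zero-argument constructor (which seeds `cr.DefaultTimeout`). A usage sketch for the invalidate-key-cache operation, using only the import path and setters visible in this hunk:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Each With* setter returns the params pointer, so the calls chain;
	// anything not set keeps its constructor default.
	params := operations.NewCacheServiceInvalidateKeyCachePostParams().
		WithTimeout(30 * time.Second).
		WithContext(context.Background()).
		WithHTTPClient(&http.Client{})

	fmt.Printf("%T configured\n", params)
}
```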
+ +internal server error +*/ +type CacheServiceInvalidateKeyCachePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service invalidate key cache post default response +func (o *CacheServiceInvalidateKeyCachePostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceInvalidateKeyCachePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceInvalidateKeyCachePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceInvalidateKeyCachePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_capacity_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_capacity_post_parameters.go new file mode 100644 index 00000000000..3c6b602649a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_capacity_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceKeyCacheCapacityPostParams creates a new CacheServiceKeyCacheCapacityPostParams object +// with the default values initialized. 
+func NewCacheServiceKeyCacheCapacityPostParams() *CacheServiceKeyCacheCapacityPostParams { + var () + return &CacheServiceKeyCacheCapacityPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceKeyCacheCapacityPostParamsWithTimeout creates a new CacheServiceKeyCacheCapacityPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceKeyCacheCapacityPostParamsWithTimeout(timeout time.Duration) *CacheServiceKeyCacheCapacityPostParams { + var () + return &CacheServiceKeyCacheCapacityPostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceKeyCacheCapacityPostParamsWithContext creates a new CacheServiceKeyCacheCapacityPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceKeyCacheCapacityPostParamsWithContext(ctx context.Context) *CacheServiceKeyCacheCapacityPostParams { + var () + return &CacheServiceKeyCacheCapacityPostParams{ + + Context: ctx, + } +} + +// NewCacheServiceKeyCacheCapacityPostParamsWithHTTPClient creates a new CacheServiceKeyCacheCapacityPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceKeyCacheCapacityPostParamsWithHTTPClient(client *http.Client) *CacheServiceKeyCacheCapacityPostParams { + var () + return &CacheServiceKeyCacheCapacityPostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceKeyCacheCapacityPostParams contains all the parameters to send to the API endpoint +for the cache service key cache capacity post operation typically these are written to a http.Request +*/ +type CacheServiceKeyCacheCapacityPostParams struct { + + /*Capacity + key cache capacity in mb + + */ + Capacity string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service key cache capacity post params +func (o *CacheServiceKeyCacheCapacityPostParams) WithTimeout(timeout time.Duration) *CacheServiceKeyCacheCapacityPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service key cache capacity post params +func (o *CacheServiceKeyCacheCapacityPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service key cache capacity post params +func (o *CacheServiceKeyCacheCapacityPostParams) WithContext(ctx context.Context) *CacheServiceKeyCacheCapacityPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service key cache capacity post params +func (o *CacheServiceKeyCacheCapacityPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service key cache capacity post params +func (o *CacheServiceKeyCacheCapacityPostParams) WithHTTPClient(client *http.Client) *CacheServiceKeyCacheCapacityPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service key cache capacity post params +func (o *CacheServiceKeyCacheCapacityPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCapacity adds the capacity to the cache service key cache capacity post params +func (o *CacheServiceKeyCacheCapacityPostParams) WithCapacity(capacity string) *CacheServiceKeyCacheCapacityPostParams { + o.SetCapacity(capacity) + return o +} + +// SetCapacity adds the capacity to the cache service key cache capacity post 
params +func (o *CacheServiceKeyCacheCapacityPostParams) SetCapacity(capacity string) { + o.Capacity = capacity +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceKeyCacheCapacityPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param capacity + qrCapacity := o.Capacity + qCapacity := qrCapacity + if qCapacity != "" { + if err := r.SetQueryParam("capacity", qCapacity); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_capacity_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_capacity_post_responses.go new file mode 100644 index 00000000000..fddf204c985 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_capacity_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceKeyCacheCapacityPostReader is a Reader for the CacheServiceKeyCacheCapacityPost structure. +type CacheServiceKeyCacheCapacityPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceKeyCacheCapacityPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceKeyCacheCapacityPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceKeyCacheCapacityPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceKeyCacheCapacityPostOK creates a CacheServiceKeyCacheCapacityPostOK with default headers values +func NewCacheServiceKeyCacheCapacityPostOK() *CacheServiceKeyCacheCapacityPostOK { + return &CacheServiceKeyCacheCapacityPostOK{} +} + +/* +CacheServiceKeyCacheCapacityPostOK handles this case with default header values. + +Success +*/ +type CacheServiceKeyCacheCapacityPostOK struct { +} + +func (o *CacheServiceKeyCacheCapacityPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceKeyCacheCapacityPostDefault creates a CacheServiceKeyCacheCapacityPostDefault with default headers values +func NewCacheServiceKeyCacheCapacityPostDefault(code int) *CacheServiceKeyCacheCapacityPostDefault { + return &CacheServiceKeyCacheCapacityPostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceKeyCacheCapacityPostDefault handles this case with default header values. 
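Worth noting in the `WriteToRequest` above: the `capacity` query parameter is only written when the string is non-empty, so an unset value is silently dropped rather than sent as `capacity=`. The same guard expressed in plain `net/url` terms (a sketch of the rule, not the vendored code):

```go
package main

import (
	"fmt"
	"net/url"
)

// setCapacity mirrors the generated WriteToRequest guard: the query
// parameter is written only when the value is non-empty.
func setCapacity(q url.Values, capacity string) {
	if capacity != "" {
		q.Set("capacity", capacity)
	}
}

func main() {
	q := url.Values{}
	setCapacity(q, "512") // key cache capacity in MB
	fmt.Println(q.Encode()) // capacity=512

	empty := url.Values{}
	setCapacity(empty, "")
	fmt.Println(len(empty)) // 0: the empty value is never sent
}
```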
+ +internal server error +*/ +type CacheServiceKeyCacheCapacityPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service key cache capacity post default response +func (o *CacheServiceKeyCacheCapacityPostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceKeyCacheCapacityPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceKeyCacheCapacityPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceKeyCacheCapacityPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_get_parameters.go new file mode 100644 index 00000000000..abb4316d237 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceKeyCacheKeysToSaveGetParams creates a new CacheServiceKeyCacheKeysToSaveGetParams object +// with the default values initialized. 
+func NewCacheServiceKeyCacheKeysToSaveGetParams() *CacheServiceKeyCacheKeysToSaveGetParams { + + return &CacheServiceKeyCacheKeysToSaveGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceKeyCacheKeysToSaveGetParamsWithTimeout creates a new CacheServiceKeyCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceKeyCacheKeysToSaveGetParamsWithTimeout(timeout time.Duration) *CacheServiceKeyCacheKeysToSaveGetParams { + + return &CacheServiceKeyCacheKeysToSaveGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceKeyCacheKeysToSaveGetParamsWithContext creates a new CacheServiceKeyCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceKeyCacheKeysToSaveGetParamsWithContext(ctx context.Context) *CacheServiceKeyCacheKeysToSaveGetParams { + + return &CacheServiceKeyCacheKeysToSaveGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceKeyCacheKeysToSaveGetParamsWithHTTPClient creates a new CacheServiceKeyCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceKeyCacheKeysToSaveGetParamsWithHTTPClient(client *http.Client) *CacheServiceKeyCacheKeysToSaveGetParams { + + return &CacheServiceKeyCacheKeysToSaveGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceKeyCacheKeysToSaveGetParams contains all the parameters to send to the API endpoint +for the cache service key cache keys to save get operation typically these are written to a http.Request +*/ +type CacheServiceKeyCacheKeysToSaveGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service key cache keys to save get params +func (o *CacheServiceKeyCacheKeysToSaveGetParams) WithTimeout(timeout time.Duration) *CacheServiceKeyCacheKeysToSaveGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service key cache keys to save get params +func (o *CacheServiceKeyCacheKeysToSaveGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service key cache keys to save get params +func (o *CacheServiceKeyCacheKeysToSaveGetParams) WithContext(ctx context.Context) *CacheServiceKeyCacheKeysToSaveGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service key cache keys to save get params +func (o *CacheServiceKeyCacheKeysToSaveGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service key cache keys to save get params +func (o *CacheServiceKeyCacheKeysToSaveGetParams) WithHTTPClient(client *http.Client) *CacheServiceKeyCacheKeysToSaveGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service key cache keys to save get params +func (o *CacheServiceKeyCacheKeysToSaveGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceKeyCacheKeysToSaveGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_get_responses.go new file mode 100644 index 00000000000..f201bc813d0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceKeyCacheKeysToSaveGetReader is a Reader for the CacheServiceKeyCacheKeysToSaveGet structure. +type CacheServiceKeyCacheKeysToSaveGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceKeyCacheKeysToSaveGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceKeyCacheKeysToSaveGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceKeyCacheKeysToSaveGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceKeyCacheKeysToSaveGetOK creates a CacheServiceKeyCacheKeysToSaveGetOK with default headers values +func NewCacheServiceKeyCacheKeysToSaveGetOK() *CacheServiceKeyCacheKeysToSaveGetOK { + return &CacheServiceKeyCacheKeysToSaveGetOK{} +} + +/* +CacheServiceKeyCacheKeysToSaveGetOK handles this case with default header values. + +Success +*/ +type CacheServiceKeyCacheKeysToSaveGetOK struct { + Payload int32 +} + +func (o *CacheServiceKeyCacheKeysToSaveGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceKeyCacheKeysToSaveGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceKeyCacheKeysToSaveGetDefault creates a CacheServiceKeyCacheKeysToSaveGetDefault with default headers values +func NewCacheServiceKeyCacheKeysToSaveGetDefault(code int) *CacheServiceKeyCacheKeysToSaveGetDefault { + return &CacheServiceKeyCacheKeysToSaveGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceKeyCacheKeysToSaveGetDefault handles this case with default header values. 
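Unlike the POST responses, this GET's OK result carries a typed `Payload int32` filled by `consumer.Consume`. Assuming the companion operations client that go-swagger generates alongside these files (an `operations.New` constructor and per-operation methods, not shown in this hunk), reading the value would look roughly like this; the host and port are placeholders for a node's REST API address:

```go
package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// 10000 is Scylla's default REST API port; adjust for your deployment.
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	client := operations.New(transport, strfmt.Default)

	ok, err := client.CacheServiceKeyCacheKeysToSaveGet(
		operations.NewCacheServiceKeyCacheKeysToSaveGetParams())
	if err != nil {
		log.Fatal(err) // non-2xx statuses land here as *...GetDefault
	}
	fmt.Println("key cache keys to save:", ok.GetPayload())
}
```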
+ +internal server error +*/ +type CacheServiceKeyCacheKeysToSaveGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service key cache keys to save get default response +func (o *CacheServiceKeyCacheKeysToSaveGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceKeyCacheKeysToSaveGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceKeyCacheKeysToSaveGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceKeyCacheKeysToSaveGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_post_parameters.go new file mode 100644 index 00000000000..9201d6d1bc0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCacheServiceKeyCacheKeysToSavePostParams creates a new CacheServiceKeyCacheKeysToSavePostParams object +// with the default values initialized. 
+func NewCacheServiceKeyCacheKeysToSavePostParams() *CacheServiceKeyCacheKeysToSavePostParams { + var () + return &CacheServiceKeyCacheKeysToSavePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceKeyCacheKeysToSavePostParamsWithTimeout creates a new CacheServiceKeyCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceKeyCacheKeysToSavePostParamsWithTimeout(timeout time.Duration) *CacheServiceKeyCacheKeysToSavePostParams { + var () + return &CacheServiceKeyCacheKeysToSavePostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceKeyCacheKeysToSavePostParamsWithContext creates a new CacheServiceKeyCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceKeyCacheKeysToSavePostParamsWithContext(ctx context.Context) *CacheServiceKeyCacheKeysToSavePostParams { + var () + return &CacheServiceKeyCacheKeysToSavePostParams{ + + Context: ctx, + } +} + +// NewCacheServiceKeyCacheKeysToSavePostParamsWithHTTPClient creates a new CacheServiceKeyCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceKeyCacheKeysToSavePostParamsWithHTTPClient(client *http.Client) *CacheServiceKeyCacheKeysToSavePostParams { + var () + return &CacheServiceKeyCacheKeysToSavePostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceKeyCacheKeysToSavePostParams contains all the parameters to send to the API endpoint +for the cache service key cache keys to save post operation typically these are written to a http.Request +*/ +type CacheServiceKeyCacheKeysToSavePostParams struct { + + /*Kckts + key cache keys to save + + */ + Kckts int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) WithTimeout(timeout time.Duration) *CacheServiceKeyCacheKeysToSavePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) WithContext(ctx context.Context) *CacheServiceKeyCacheKeysToSavePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) WithHTTPClient(client *http.Client) *CacheServiceKeyCacheKeysToSavePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithKckts adds the kckts to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) WithKckts(kckts int32) *CacheServiceKeyCacheKeysToSavePostParams { + o.SetKckts(kckts) + return o +} + +// SetKckts adds 
the kckts to the cache service key cache keys to save post params +func (o *CacheServiceKeyCacheKeysToSavePostParams) SetKckts(kckts int32) { + o.Kckts = kckts +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceKeyCacheKeysToSavePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param kckts + qrKckts := o.Kckts + qKckts := swag.FormatInt32(qrKckts) + if qKckts != "" { + if err := r.SetQueryParam("kckts", qKckts); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_post_responses.go new file mode 100644 index 00000000000..0ffd4a4984e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_keys_to_save_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceKeyCacheKeysToSavePostReader is a Reader for the CacheServiceKeyCacheKeysToSavePost structure. +type CacheServiceKeyCacheKeysToSavePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceKeyCacheKeysToSavePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceKeyCacheKeysToSavePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceKeyCacheKeysToSavePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceKeyCacheKeysToSavePostOK creates a CacheServiceKeyCacheKeysToSavePostOK with default headers values +func NewCacheServiceKeyCacheKeysToSavePostOK() *CacheServiceKeyCacheKeysToSavePostOK { + return &CacheServiceKeyCacheKeysToSavePostOK{} +} + +/* +CacheServiceKeyCacheKeysToSavePostOK handles this case with default header values. + +Success +*/ +type CacheServiceKeyCacheKeysToSavePostOK struct { +} + +func (o *CacheServiceKeyCacheKeysToSavePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceKeyCacheKeysToSavePostDefault creates a CacheServiceKeyCacheKeysToSavePostDefault with default headers values +func NewCacheServiceKeyCacheKeysToSavePostDefault(code int) *CacheServiceKeyCacheKeysToSavePostDefault { + return &CacheServiceKeyCacheKeysToSavePostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceKeyCacheKeysToSavePostDefault handles this case with default header values. 
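One subtlety in the int32 variant above: `kckts` is serialized with `swag.FormatInt32` before the non-empty guard runs, and zero formats to `"0"`, not the empty string, so an explicit `kckts=0` is sent rather than dropped. A tiny check of that behavior (`go-openapi/swag` is already an import of the generated file):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// swag.FormatInt32 is what the generated WriteToRequest uses; zero
	// formats to "0", which passes the `!= ""` guard, so numeric params
	// are always emitted, unlike optional string params.
	fmt.Println(swag.FormatInt32(0))    // "0"
	fmt.Println(swag.FormatInt32(1024)) // "1024"
}
```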
+ +internal server error +*/ +type CacheServiceKeyCacheKeysToSavePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service key cache keys to save post default response +func (o *CacheServiceKeyCacheKeysToSavePostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceKeyCacheKeysToSavePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceKeyCacheKeysToSavePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceKeyCacheKeysToSavePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_get_parameters.go new file mode 100644 index 00000000000..991955171ee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceKeyCacheSavePeriodGetParams creates a new CacheServiceKeyCacheSavePeriodGetParams object +// with the default values initialized. 
+func NewCacheServiceKeyCacheSavePeriodGetParams() *CacheServiceKeyCacheSavePeriodGetParams { + + return &CacheServiceKeyCacheSavePeriodGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceKeyCacheSavePeriodGetParamsWithTimeout creates a new CacheServiceKeyCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceKeyCacheSavePeriodGetParamsWithTimeout(timeout time.Duration) *CacheServiceKeyCacheSavePeriodGetParams { + + return &CacheServiceKeyCacheSavePeriodGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceKeyCacheSavePeriodGetParamsWithContext creates a new CacheServiceKeyCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceKeyCacheSavePeriodGetParamsWithContext(ctx context.Context) *CacheServiceKeyCacheSavePeriodGetParams { + + return &CacheServiceKeyCacheSavePeriodGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceKeyCacheSavePeriodGetParamsWithHTTPClient creates a new CacheServiceKeyCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceKeyCacheSavePeriodGetParamsWithHTTPClient(client *http.Client) *CacheServiceKeyCacheSavePeriodGetParams { + + return &CacheServiceKeyCacheSavePeriodGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceKeyCacheSavePeriodGetParams contains all the parameters to send to the API endpoint +for the cache service key cache save period get operation typically these are written to a http.Request +*/ +type CacheServiceKeyCacheSavePeriodGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service key cache save period get params +func (o *CacheServiceKeyCacheSavePeriodGetParams) WithTimeout(timeout time.Duration) *CacheServiceKeyCacheSavePeriodGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service key cache save period get params +func (o *CacheServiceKeyCacheSavePeriodGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service key cache save period get params +func (o *CacheServiceKeyCacheSavePeriodGetParams) WithContext(ctx context.Context) *CacheServiceKeyCacheSavePeriodGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service key cache save period get params +func (o *CacheServiceKeyCacheSavePeriodGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service key cache save period get params +func (o *CacheServiceKeyCacheSavePeriodGetParams) WithHTTPClient(client *http.Client) *CacheServiceKeyCacheSavePeriodGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service key cache save period get params +func (o *CacheServiceKeyCacheSavePeriodGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceKeyCacheSavePeriodGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_get_responses.go new file mode 100644 index 00000000000..01f53723fd5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceKeyCacheSavePeriodGetReader is a Reader for the CacheServiceKeyCacheSavePeriodGet structure. +type CacheServiceKeyCacheSavePeriodGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceKeyCacheSavePeriodGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceKeyCacheSavePeriodGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceKeyCacheSavePeriodGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceKeyCacheSavePeriodGetOK creates a CacheServiceKeyCacheSavePeriodGetOK with default headers values +func NewCacheServiceKeyCacheSavePeriodGetOK() *CacheServiceKeyCacheSavePeriodGetOK { + return &CacheServiceKeyCacheSavePeriodGetOK{} +} + +/* +CacheServiceKeyCacheSavePeriodGetOK handles this case with default header values. + +Success +*/ +type CacheServiceKeyCacheSavePeriodGetOK struct { + Payload int32 +} + +func (o *CacheServiceKeyCacheSavePeriodGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceKeyCacheSavePeriodGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceKeyCacheSavePeriodGetDefault creates a CacheServiceKeyCacheSavePeriodGetDefault with default headers values +func NewCacheServiceKeyCacheSavePeriodGetDefault(code int) *CacheServiceKeyCacheSavePeriodGetDefault { + return &CacheServiceKeyCacheSavePeriodGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceKeyCacheSavePeriodGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceKeyCacheSavePeriodGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service key cache save period get default response +func (o *CacheServiceKeyCacheSavePeriodGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceKeyCacheSavePeriodGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceKeyCacheSavePeriodGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceKeyCacheSavePeriodGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_post_parameters.go new file mode 100644 index 00000000000..043f6489e91 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCacheServiceKeyCacheSavePeriodPostParams creates a new CacheServiceKeyCacheSavePeriodPostParams object +// with the default values initialized. 
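Every `Default` result in these files formats itself identically: the agent prefix, the status code, and the server message with trailing periods stripped by `strings.TrimRight`, which keeps the message composing cleanly when wrapped into larger error chains. The formatting in isolation:

```go
package main

import (
	"fmt"
	"strings"
)

// formatAgentError reproduces the body of the generated Default.Error():
// trailing dots are trimmed so wrapped errors don't end up with "..".
func formatAgentError(code int, msg string) string {
	return fmt.Sprintf("agent [HTTP %d] %s", code, strings.TrimRight(msg, "."))
}

func main() {
	fmt.Println(formatAgentError(500, "key cache save period update failed."))
	// agent [HTTP 500] key cache save period update failed
}
```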
+func NewCacheServiceKeyCacheSavePeriodPostParams() *CacheServiceKeyCacheSavePeriodPostParams { + var () + return &CacheServiceKeyCacheSavePeriodPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceKeyCacheSavePeriodPostParamsWithTimeout creates a new CacheServiceKeyCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceKeyCacheSavePeriodPostParamsWithTimeout(timeout time.Duration) *CacheServiceKeyCacheSavePeriodPostParams { + var () + return &CacheServiceKeyCacheSavePeriodPostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceKeyCacheSavePeriodPostParamsWithContext creates a new CacheServiceKeyCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceKeyCacheSavePeriodPostParamsWithContext(ctx context.Context) *CacheServiceKeyCacheSavePeriodPostParams { + var () + return &CacheServiceKeyCacheSavePeriodPostParams{ + + Context: ctx, + } +} + +// NewCacheServiceKeyCacheSavePeriodPostParamsWithHTTPClient creates a new CacheServiceKeyCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceKeyCacheSavePeriodPostParamsWithHTTPClient(client *http.Client) *CacheServiceKeyCacheSavePeriodPostParams { + var () + return &CacheServiceKeyCacheSavePeriodPostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceKeyCacheSavePeriodPostParams contains all the parameters to send to the API endpoint +for the cache service key cache save period post operation typically these are written to a http.Request +*/ +type CacheServiceKeyCacheSavePeriodPostParams struct { + + /*Period + key cache save period in seconds + + */ + Period int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) WithTimeout(timeout time.Duration) *CacheServiceKeyCacheSavePeriodPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) WithContext(ctx context.Context) *CacheServiceKeyCacheSavePeriodPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) WithHTTPClient(client *http.Client) *CacheServiceKeyCacheSavePeriodPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPeriod adds the period to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) WithPeriod(period int32) *CacheServiceKeyCacheSavePeriodPostParams { + o.SetPeriod(period) + return o +} + +// 
SetPeriod adds the period to the cache service key cache save period post params +func (o *CacheServiceKeyCacheSavePeriodPostParams) SetPeriod(period int32) { + o.Period = period +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceKeyCacheSavePeriodPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param period + qrPeriod := o.Period + qPeriod := swag.FormatInt32(qrPeriod) + if qPeriod != "" { + if err := r.SetQueryParam("period", qPeriod); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_post_responses.go new file mode 100644 index 00000000000..d820901c00f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_key_cache_save_period_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceKeyCacheSavePeriodPostReader is a Reader for the CacheServiceKeyCacheSavePeriodPost structure. +type CacheServiceKeyCacheSavePeriodPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceKeyCacheSavePeriodPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceKeyCacheSavePeriodPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceKeyCacheSavePeriodPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceKeyCacheSavePeriodPostOK creates a CacheServiceKeyCacheSavePeriodPostOK with default headers values +func NewCacheServiceKeyCacheSavePeriodPostOK() *CacheServiceKeyCacheSavePeriodPostOK { + return &CacheServiceKeyCacheSavePeriodPostOK{} +} + +/* +CacheServiceKeyCacheSavePeriodPostOK handles this case with default header values. + +Success +*/ +type CacheServiceKeyCacheSavePeriodPostOK struct { +} + +func (o *CacheServiceKeyCacheSavePeriodPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceKeyCacheSavePeriodPostDefault creates a CacheServiceKeyCacheSavePeriodPostDefault with default headers values +func NewCacheServiceKeyCacheSavePeriodPostDefault(code int) *CacheServiceKeyCacheSavePeriodPostDefault { + return &CacheServiceKeyCacheSavePeriodPostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceKeyCacheSavePeriodPostDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceKeyCacheSavePeriodPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service key cache save period post default response +func (o *CacheServiceKeyCacheSavePeriodPostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceKeyCacheSavePeriodPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceKeyCacheSavePeriodPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceKeyCacheSavePeriodPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_capacity_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_capacity_get_parameters.go new file mode 100644 index 00000000000..d12b97492dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_capacity_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterCapacityGetParams creates a new CacheServiceMetricsCounterCapacityGetParams object +// with the default values initialized. 
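Each params struct carries both a timeout and a context: `WriteToRequest` applies the timeout to the request itself, while the context, when set, drives cancellation through the go-openapi transport. A sketch of choosing between the two dedicated constructors shown above:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Deadline on the whole call via context cancellation...
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	withCtx := operations.NewCacheServiceMetricsCounterCapacityGetParamsWithContext(ctx)

	// ...or a per-request timeout only; the context stays nil and the
	// plain constructor would seed cr.DefaultTimeout instead.
	withTimeout := operations.NewCacheServiceMetricsCounterCapacityGetParamsWithTimeout(10 * time.Second)

	fmt.Printf("%T %T\n", withCtx, withTimeout)
}
```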
+func NewCacheServiceMetricsCounterCapacityGetParams() *CacheServiceMetricsCounterCapacityGetParams { + + return &CacheServiceMetricsCounterCapacityGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterCapacityGetParamsWithTimeout creates a new CacheServiceMetricsCounterCapacityGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterCapacityGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterCapacityGetParams { + + return &CacheServiceMetricsCounterCapacityGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterCapacityGetParamsWithContext creates a new CacheServiceMetricsCounterCapacityGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterCapacityGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterCapacityGetParams { + + return &CacheServiceMetricsCounterCapacityGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterCapacityGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterCapacityGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterCapacityGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterCapacityGetParams { + + return &CacheServiceMetricsCounterCapacityGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterCapacityGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter capacity get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterCapacityGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter capacity get params +func (o *CacheServiceMetricsCounterCapacityGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterCapacityGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter capacity get params +func (o *CacheServiceMetricsCounterCapacityGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter capacity get params +func (o *CacheServiceMetricsCounterCapacityGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterCapacityGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter capacity get params +func (o *CacheServiceMetricsCounterCapacityGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter capacity get params +func (o *CacheServiceMetricsCounterCapacityGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterCapacityGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics counter capacity get params +func (o *CacheServiceMetricsCounterCapacityGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterCapacityGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + 
} + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_capacity_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_capacity_get_responses.go new file mode 100644 index 00000000000..917854594fd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_capacity_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterCapacityGetReader is a Reader for the CacheServiceMetricsCounterCapacityGet structure. +type CacheServiceMetricsCounterCapacityGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterCapacityGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterCapacityGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterCapacityGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterCapacityGetOK creates a CacheServiceMetricsCounterCapacityGetOK with default headers values +func NewCacheServiceMetricsCounterCapacityGetOK() *CacheServiceMetricsCounterCapacityGetOK { + return &CacheServiceMetricsCounterCapacityGetOK{} +} + +/* +CacheServiceMetricsCounterCapacityGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsCounterCapacityGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsCounterCapacityGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsCounterCapacityGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterCapacityGetDefault creates a CacheServiceMetricsCounterCapacityGetDefault with default headers values +func NewCacheServiceMetricsCounterCapacityGetDefault(code int) *CacheServiceMetricsCounterCapacityGetDefault { + return &CacheServiceMetricsCounterCapacityGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterCapacityGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsCounterCapacityGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter capacity get default response +func (o *CacheServiceMetricsCounterCapacityGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterCapacityGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterCapacityGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterCapacityGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_entries_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_entries_get_parameters.go new file mode 100644 index 00000000000..f2dd906ef90 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_entries_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterEntriesGetParams creates a new CacheServiceMetricsCounterEntriesGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsCounterEntriesGetParams() *CacheServiceMetricsCounterEntriesGetParams { + + return &CacheServiceMetricsCounterEntriesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterEntriesGetParamsWithTimeout creates a new CacheServiceMetricsCounterEntriesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterEntriesGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterEntriesGetParams { + + return &CacheServiceMetricsCounterEntriesGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterEntriesGetParamsWithContext creates a new CacheServiceMetricsCounterEntriesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterEntriesGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterEntriesGetParams { + + return &CacheServiceMetricsCounterEntriesGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterEntriesGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterEntriesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterEntriesGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterEntriesGetParams { + + return &CacheServiceMetricsCounterEntriesGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterEntriesGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter entries get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterEntriesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter entries get params +func (o *CacheServiceMetricsCounterEntriesGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterEntriesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter entries get params +func (o *CacheServiceMetricsCounterEntriesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter entries get params +func (o *CacheServiceMetricsCounterEntriesGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterEntriesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter entries get params +func (o *CacheServiceMetricsCounterEntriesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter entries get params +func (o *CacheServiceMetricsCounterEntriesGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterEntriesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics counter entries get params +func (o *CacheServiceMetricsCounterEntriesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterEntriesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 
0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_entries_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_entries_get_responses.go new file mode 100644 index 00000000000..07f1a238104 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_entries_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterEntriesGetReader is a Reader for the CacheServiceMetricsCounterEntriesGet structure. +type CacheServiceMetricsCounterEntriesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterEntriesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterEntriesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterEntriesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterEntriesGetOK creates a CacheServiceMetricsCounterEntriesGetOK with default headers values +func NewCacheServiceMetricsCounterEntriesGetOK() *CacheServiceMetricsCounterEntriesGetOK { + return &CacheServiceMetricsCounterEntriesGetOK{} +} + +/* +CacheServiceMetricsCounterEntriesGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsCounterEntriesGetOK struct { + Payload int32 +} + +func (o *CacheServiceMetricsCounterEntriesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceMetricsCounterEntriesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterEntriesGetDefault creates a CacheServiceMetricsCounterEntriesGetDefault with default headers values +func NewCacheServiceMetricsCounterEntriesGetDefault(code int) *CacheServiceMetricsCounterEntriesGetDefault { + return &CacheServiceMetricsCounterEntriesGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterEntriesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsCounterEntriesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter entries get default response +func (o *CacheServiceMetricsCounterEntriesGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterEntriesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterEntriesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterEntriesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hit_rate_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hit_rate_get_parameters.go new file mode 100644 index 00000000000..82818904387 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hit_rate_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterHitRateGetParams creates a new CacheServiceMetricsCounterHitRateGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsCounterHitRateGetParams() *CacheServiceMetricsCounterHitRateGetParams { + + return &CacheServiceMetricsCounterHitRateGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterHitRateGetParamsWithTimeout creates a new CacheServiceMetricsCounterHitRateGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterHitRateGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterHitRateGetParams { + + return &CacheServiceMetricsCounterHitRateGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterHitRateGetParamsWithContext creates a new CacheServiceMetricsCounterHitRateGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterHitRateGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterHitRateGetParams { + + return &CacheServiceMetricsCounterHitRateGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterHitRateGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterHitRateGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterHitRateGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterHitRateGetParams { + + return &CacheServiceMetricsCounterHitRateGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterHitRateGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter hit rate get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterHitRateGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter hit rate get params +func (o *CacheServiceMetricsCounterHitRateGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterHitRateGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter hit rate get params +func (o *CacheServiceMetricsCounterHitRateGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter hit rate get params +func (o *CacheServiceMetricsCounterHitRateGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterHitRateGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter hit rate get params +func (o *CacheServiceMetricsCounterHitRateGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter hit rate get params +func (o *CacheServiceMetricsCounterHitRateGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterHitRateGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics counter hit rate get params +func (o *CacheServiceMetricsCounterHitRateGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterHitRateGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if 
len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hit_rate_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hit_rate_get_responses.go new file mode 100644 index 00000000000..03c416e7a60 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hit_rate_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterHitRateGetReader is a Reader for the CacheServiceMetricsCounterHitRateGet structure. +type CacheServiceMetricsCounterHitRateGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterHitRateGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterHitRateGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterHitRateGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterHitRateGetOK creates a CacheServiceMetricsCounterHitRateGetOK with default headers values +func NewCacheServiceMetricsCounterHitRateGetOK() *CacheServiceMetricsCounterHitRateGetOK { + return &CacheServiceMetricsCounterHitRateGetOK{} +} + +/* +CacheServiceMetricsCounterHitRateGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsCounterHitRateGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsCounterHitRateGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsCounterHitRateGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterHitRateGetDefault creates a CacheServiceMetricsCounterHitRateGetDefault with default headers values +func NewCacheServiceMetricsCounterHitRateGetDefault(code int) *CacheServiceMetricsCounterHitRateGetDefault { + return &CacheServiceMetricsCounterHitRateGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterHitRateGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsCounterHitRateGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter hit rate get default response +func (o *CacheServiceMetricsCounterHitRateGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterHitRateGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterHitRateGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterHitRateGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_get_parameters.go new file mode 100644 index 00000000000..371ac603b44 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterHitsGetParams creates a new CacheServiceMetricsCounterHitsGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsCounterHitsGetParams() *CacheServiceMetricsCounterHitsGetParams { + + return &CacheServiceMetricsCounterHitsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterHitsGetParamsWithTimeout creates a new CacheServiceMetricsCounterHitsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterHitsGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterHitsGetParams { + + return &CacheServiceMetricsCounterHitsGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterHitsGetParamsWithContext creates a new CacheServiceMetricsCounterHitsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterHitsGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterHitsGetParams { + + return &CacheServiceMetricsCounterHitsGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterHitsGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterHitsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterHitsGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterHitsGetParams { + + return &CacheServiceMetricsCounterHitsGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterHitsGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter hits get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterHitsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter hits get params +func (o *CacheServiceMetricsCounterHitsGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterHitsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter hits get params +func (o *CacheServiceMetricsCounterHitsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter hits get params +func (o *CacheServiceMetricsCounterHitsGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterHitsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter hits get params +func (o *CacheServiceMetricsCounterHitsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter hits get params +func (o *CacheServiceMetricsCounterHitsGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterHitsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics counter hits get params +func (o *CacheServiceMetricsCounterHitsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterHitsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_get_responses.go new file mode 100644 index 00000000000..f9732971107 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterHitsGetReader is a Reader for the CacheServiceMetricsCounterHitsGet structure. +type CacheServiceMetricsCounterHitsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterHitsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterHitsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterHitsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterHitsGetOK creates a CacheServiceMetricsCounterHitsGetOK with default headers values +func NewCacheServiceMetricsCounterHitsGetOK() *CacheServiceMetricsCounterHitsGetOK { + return &CacheServiceMetricsCounterHitsGetOK{} +} + +/* +CacheServiceMetricsCounterHitsGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsCounterHitsGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsCounterHitsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsCounterHitsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterHitsGetDefault creates a CacheServiceMetricsCounterHitsGetDefault with default headers values +func NewCacheServiceMetricsCounterHitsGetDefault(code int) *CacheServiceMetricsCounterHitsGetDefault { + return &CacheServiceMetricsCounterHitsGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterHitsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsCounterHitsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter hits get default response +func (o *CacheServiceMetricsCounterHitsGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterHitsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterHitsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterHitsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_moving_avrage_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_moving_avrage_get_parameters.go new file mode 100644 index 00000000000..6b576f26c0f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_moving_avrage_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterHitsMovingAvrageGetParams creates a new CacheServiceMetricsCounterHitsMovingAvrageGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsCounterHitsMovingAvrageGetParams() *CacheServiceMetricsCounterHitsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterHitsMovingAvrageGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterHitsMovingAvrageGetParamsWithTimeout creates a new CacheServiceMetricsCounterHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterHitsMovingAvrageGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterHitsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterHitsMovingAvrageGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterHitsMovingAvrageGetParamsWithContext creates a new CacheServiceMetricsCounterHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterHitsMovingAvrageGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterHitsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterHitsMovingAvrageGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterHitsMovingAvrageGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterHitsMovingAvrageGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterHitsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterHitsMovingAvrageGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterHitsMovingAvrageGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter hits moving avrage get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterHitsMovingAvrageGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter hits moving avrage get params +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterHitsMovingAvrageGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter hits moving avrage get params +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter hits moving avrage get params +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterHitsMovingAvrageGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter hits moving avrage get params +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter hits moving avrage get params +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterHitsMovingAvrageGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics counter hits moving avrage get params +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_moving_avrage_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_moving_avrage_get_responses.go new file mode 100644 index 00000000000..685fd5c8b16 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_hits_moving_avrage_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterHitsMovingAvrageGetReader is a Reader for the CacheServiceMetricsCounterHitsMovingAvrageGet structure. +type CacheServiceMetricsCounterHitsMovingAvrageGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterHitsMovingAvrageGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterHitsMovingAvrageGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterHitsMovingAvrageGetOK creates a CacheServiceMetricsCounterHitsMovingAvrageGetOK with default headers values +func NewCacheServiceMetricsCounterHitsMovingAvrageGetOK() *CacheServiceMetricsCounterHitsMovingAvrageGetOK { + return &CacheServiceMetricsCounterHitsMovingAvrageGetOK{} +} + +/* +CacheServiceMetricsCounterHitsMovingAvrageGetOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceMetricsCounterHitsMovingAvrageGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterHitsMovingAvrageGetDefault creates a CacheServiceMetricsCounterHitsMovingAvrageGetDefault with default headers values +func NewCacheServiceMetricsCounterHitsMovingAvrageGetDefault(code int) *CacheServiceMetricsCounterHitsMovingAvrageGetDefault { + return &CacheServiceMetricsCounterHitsMovingAvrageGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterHitsMovingAvrageGetDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceMetricsCounterHitsMovingAvrageGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter hits moving avrage get default response +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterHitsMovingAvrageGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_get_parameters.go new file mode 100644 index 00000000000..e07fdb52ce9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterRequestsGetParams creates a new CacheServiceMetricsCounterRequestsGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsCounterRequestsGetParams() *CacheServiceMetricsCounterRequestsGetParams { + + return &CacheServiceMetricsCounterRequestsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterRequestsGetParamsWithTimeout creates a new CacheServiceMetricsCounterRequestsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterRequestsGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterRequestsGetParams { + + return &CacheServiceMetricsCounterRequestsGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterRequestsGetParamsWithContext creates a new CacheServiceMetricsCounterRequestsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterRequestsGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterRequestsGetParams { + + return &CacheServiceMetricsCounterRequestsGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterRequestsGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterRequestsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterRequestsGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterRequestsGetParams { + + return &CacheServiceMetricsCounterRequestsGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterRequestsGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter requests get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterRequestsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter requests get params +func (o *CacheServiceMetricsCounterRequestsGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterRequestsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter requests get params +func (o *CacheServiceMetricsCounterRequestsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter requests get params +func (o *CacheServiceMetricsCounterRequestsGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterRequestsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter requests get params +func (o *CacheServiceMetricsCounterRequestsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter requests get params +func (o *CacheServiceMetricsCounterRequestsGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterRequestsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics counter requests get params +func (o *CacheServiceMetricsCounterRequestsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterRequestsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + 
} + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_get_responses.go new file mode 100644 index 00000000000..d0c0bfd62df --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterRequestsGetReader is a Reader for the CacheServiceMetricsCounterRequestsGet structure. +type CacheServiceMetricsCounterRequestsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterRequestsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterRequestsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterRequestsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterRequestsGetOK creates a CacheServiceMetricsCounterRequestsGetOK with default headers values +func NewCacheServiceMetricsCounterRequestsGetOK() *CacheServiceMetricsCounterRequestsGetOK { + return &CacheServiceMetricsCounterRequestsGetOK{} +} + +/* +CacheServiceMetricsCounterRequestsGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsCounterRequestsGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsCounterRequestsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsCounterRequestsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterRequestsGetDefault creates a CacheServiceMetricsCounterRequestsGetDefault with default headers values +func NewCacheServiceMetricsCounterRequestsGetDefault(code int) *CacheServiceMetricsCounterRequestsGetDefault { + return &CacheServiceMetricsCounterRequestsGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterRequestsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsCounterRequestsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter requests get default response +func (o *CacheServiceMetricsCounterRequestsGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterRequestsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterRequestsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterRequestsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_moving_avrage_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_moving_avrage_get_parameters.go new file mode 100644 index 00000000000..80344a64636 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_moving_avrage_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterRequestsMovingAvrageGetParams creates a new CacheServiceMetricsCounterRequestsMovingAvrageGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsCounterRequestsMovingAvrageGetParams() *CacheServiceMetricsCounterRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterRequestsMovingAvrageGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterRequestsMovingAvrageGetParamsWithTimeout creates a new CacheServiceMetricsCounterRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterRequestsMovingAvrageGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterRequestsMovingAvrageGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterRequestsMovingAvrageGetParamsWithContext creates a new CacheServiceMetricsCounterRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterRequestsMovingAvrageGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterRequestsMovingAvrageGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterRequestsMovingAvrageGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterRequestsMovingAvrageGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsCounterRequestsMovingAvrageGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterRequestsMovingAvrageGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter requests moving avrage get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterRequestsMovingAvrageGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter requests moving avrage get params +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterRequestsMovingAvrageGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter requests moving avrage get params +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter requests moving avrage get params +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterRequestsMovingAvrageGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter requests moving avrage get params +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter requests moving avrage get params +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterRequestsMovingAvrageGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the 
cache service metrics counter requests moving avrage get params +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_moving_avrage_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_moving_avrage_get_responses.go new file mode 100644 index 00000000000..533853f0b33 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_requests_moving_avrage_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterRequestsMovingAvrageGetReader is a Reader for the CacheServiceMetricsCounterRequestsMovingAvrageGet structure. +type CacheServiceMetricsCounterRequestsMovingAvrageGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterRequestsMovingAvrageGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterRequestsMovingAvrageGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterRequestsMovingAvrageGetOK creates a CacheServiceMetricsCounterRequestsMovingAvrageGetOK with default headers values +func NewCacheServiceMetricsCounterRequestsMovingAvrageGetOK() *CacheServiceMetricsCounterRequestsMovingAvrageGetOK { + return &CacheServiceMetricsCounterRequestsMovingAvrageGetOK{} +} + +/* +CacheServiceMetricsCounterRequestsMovingAvrageGetOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceMetricsCounterRequestsMovingAvrageGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterRequestsMovingAvrageGetDefault creates a CacheServiceMetricsCounterRequestsMovingAvrageGetDefault with default headers values +func NewCacheServiceMetricsCounterRequestsMovingAvrageGetDefault(code int) *CacheServiceMetricsCounterRequestsMovingAvrageGetDefault { + return &CacheServiceMetricsCounterRequestsMovingAvrageGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterRequestsMovingAvrageGetDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceMetricsCounterRequestsMovingAvrageGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter requests moving avrage get default response +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterRequestsMovingAvrageGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_size_get_parameters.go new file mode 100644 index 00000000000..5a7b82c9c91 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsCounterSizeGetParams creates a new CacheServiceMetricsCounterSizeGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsCounterSizeGetParams() *CacheServiceMetricsCounterSizeGetParams { + + return &CacheServiceMetricsCounterSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsCounterSizeGetParamsWithTimeout creates a new CacheServiceMetricsCounterSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsCounterSizeGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsCounterSizeGetParams { + + return &CacheServiceMetricsCounterSizeGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsCounterSizeGetParamsWithContext creates a new CacheServiceMetricsCounterSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsCounterSizeGetParamsWithContext(ctx context.Context) *CacheServiceMetricsCounterSizeGetParams { + + return &CacheServiceMetricsCounterSizeGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsCounterSizeGetParamsWithHTTPClient creates a new CacheServiceMetricsCounterSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsCounterSizeGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsCounterSizeGetParams { + + return &CacheServiceMetricsCounterSizeGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsCounterSizeGetParams contains all the parameters to send to the API endpoint +for the cache service metrics counter size get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsCounterSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics counter size get params +func (o *CacheServiceMetricsCounterSizeGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsCounterSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics counter size get params +func (o *CacheServiceMetricsCounterSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics counter size get params +func (o *CacheServiceMetricsCounterSizeGetParams) WithContext(ctx context.Context) *CacheServiceMetricsCounterSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics counter size get params +func (o *CacheServiceMetricsCounterSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics counter size get params +func (o *CacheServiceMetricsCounterSizeGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsCounterSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics counter size get params +func (o *CacheServiceMetricsCounterSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsCounterSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_size_get_responses.go new file mode 100644 index 00000000000..c9502b722fd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_counter_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsCounterSizeGetReader is a Reader for the CacheServiceMetricsCounterSizeGet structure. +type CacheServiceMetricsCounterSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsCounterSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsCounterSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsCounterSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsCounterSizeGetOK creates a CacheServiceMetricsCounterSizeGetOK with default headers values +func NewCacheServiceMetricsCounterSizeGetOK() *CacheServiceMetricsCounterSizeGetOK { + return &CacheServiceMetricsCounterSizeGetOK{} +} + +/* +CacheServiceMetricsCounterSizeGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsCounterSizeGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsCounterSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsCounterSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsCounterSizeGetDefault creates a CacheServiceMetricsCounterSizeGetDefault with default headers values +func NewCacheServiceMetricsCounterSizeGetDefault(code int) *CacheServiceMetricsCounterSizeGetDefault { + return &CacheServiceMetricsCounterSizeGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsCounterSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsCounterSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics counter size get default response +func (o *CacheServiceMetricsCounterSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsCounterSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsCounterSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsCounterSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_capacity_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_capacity_get_parameters.go new file mode 100644 index 00000000000..0e4cbc2bb9d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_capacity_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeyCapacityGetParams creates a new CacheServiceMetricsKeyCapacityGetParams object +// with the default values initialized. 
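+//
+// Illustrative sketch, not generated: the With* setters return the receiver, so
+// construction chains; here a caller-supplied `ctx` (hypothetical) scopes the
+// request and a shorter timeout overrides the default:
+//
+//	params := NewCacheServiceMetricsKeyCapacityGetParamsWithContext(ctx).
+//		WithTimeout(5 * time.Second)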
+func NewCacheServiceMetricsKeyCapacityGetParams() *CacheServiceMetricsKeyCapacityGetParams { + + return &CacheServiceMetricsKeyCapacityGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeyCapacityGetParamsWithTimeout creates a new CacheServiceMetricsKeyCapacityGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeyCapacityGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeyCapacityGetParams { + + return &CacheServiceMetricsKeyCapacityGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeyCapacityGetParamsWithContext creates a new CacheServiceMetricsKeyCapacityGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeyCapacityGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeyCapacityGetParams { + + return &CacheServiceMetricsKeyCapacityGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeyCapacityGetParamsWithHTTPClient creates a new CacheServiceMetricsKeyCapacityGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeyCapacityGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeyCapacityGetParams { + + return &CacheServiceMetricsKeyCapacityGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeyCapacityGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key capacity get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeyCapacityGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key capacity get params +func (o *CacheServiceMetricsKeyCapacityGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeyCapacityGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key capacity get params +func (o *CacheServiceMetricsKeyCapacityGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key capacity get params +func (o *CacheServiceMetricsKeyCapacityGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeyCapacityGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key capacity get params +func (o *CacheServiceMetricsKeyCapacityGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key capacity get params +func (o *CacheServiceMetricsKeyCapacityGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeyCapacityGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key capacity get params +func (o *CacheServiceMetricsKeyCapacityGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsKeyCapacityGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_capacity_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_capacity_get_responses.go new file mode 100644 index 00000000000..136472967d6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_capacity_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeyCapacityGetReader is a Reader for the CacheServiceMetricsKeyCapacityGet structure. +type CacheServiceMetricsKeyCapacityGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeyCapacityGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeyCapacityGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeyCapacityGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeyCapacityGetOK creates a CacheServiceMetricsKeyCapacityGetOK with default headers values +func NewCacheServiceMetricsKeyCapacityGetOK() *CacheServiceMetricsKeyCapacityGetOK { + return &CacheServiceMetricsKeyCapacityGetOK{} +} + +/* +CacheServiceMetricsKeyCapacityGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsKeyCapacityGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsKeyCapacityGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsKeyCapacityGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeyCapacityGetDefault creates a CacheServiceMetricsKeyCapacityGetDefault with default headers values +func NewCacheServiceMetricsKeyCapacityGetDefault(code int) *CacheServiceMetricsKeyCapacityGetDefault { + return &CacheServiceMetricsKeyCapacityGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeyCapacityGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsKeyCapacityGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key capacity get default response +func (o *CacheServiceMetricsKeyCapacityGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeyCapacityGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeyCapacityGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeyCapacityGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_entries_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_entries_get_parameters.go new file mode 100644 index 00000000000..5176340f0d5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_entries_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeyEntriesGetParams creates a new CacheServiceMetricsKeyEntriesGetParams object +// with the default values initialized. 
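+//
+// Illustrative sketch, not generated: a custom *http.Client can be supplied per
+// request, e.g. one configured with the agent's TLS settings (`tlsClient` is
+// hypothetical):
+//
+//	params := NewCacheServiceMetricsKeyEntriesGetParamsWithHTTPClient(tlsClient)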
+func NewCacheServiceMetricsKeyEntriesGetParams() *CacheServiceMetricsKeyEntriesGetParams { + + return &CacheServiceMetricsKeyEntriesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeyEntriesGetParamsWithTimeout creates a new CacheServiceMetricsKeyEntriesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeyEntriesGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeyEntriesGetParams { + + return &CacheServiceMetricsKeyEntriesGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeyEntriesGetParamsWithContext creates a new CacheServiceMetricsKeyEntriesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeyEntriesGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeyEntriesGetParams { + + return &CacheServiceMetricsKeyEntriesGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeyEntriesGetParamsWithHTTPClient creates a new CacheServiceMetricsKeyEntriesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeyEntriesGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeyEntriesGetParams { + + return &CacheServiceMetricsKeyEntriesGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeyEntriesGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key entries get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeyEntriesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key entries get params +func (o *CacheServiceMetricsKeyEntriesGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeyEntriesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key entries get params +func (o *CacheServiceMetricsKeyEntriesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key entries get params +func (o *CacheServiceMetricsKeyEntriesGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeyEntriesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key entries get params +func (o *CacheServiceMetricsKeyEntriesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key entries get params +func (o *CacheServiceMetricsKeyEntriesGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeyEntriesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key entries get params +func (o *CacheServiceMetricsKeyEntriesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsKeyEntriesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_entries_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_entries_get_responses.go new file mode 100644 index 00000000000..b230ef86dd5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_entries_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeyEntriesGetReader is a Reader for the CacheServiceMetricsKeyEntriesGet structure. +type CacheServiceMetricsKeyEntriesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeyEntriesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeyEntriesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeyEntriesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeyEntriesGetOK creates a CacheServiceMetricsKeyEntriesGetOK with default headers values +func NewCacheServiceMetricsKeyEntriesGetOK() *CacheServiceMetricsKeyEntriesGetOK { + return &CacheServiceMetricsKeyEntriesGetOK{} +} + +/* +CacheServiceMetricsKeyEntriesGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsKeyEntriesGetOK struct { + Payload int32 +} + +func (o *CacheServiceMetricsKeyEntriesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceMetricsKeyEntriesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeyEntriesGetDefault creates a CacheServiceMetricsKeyEntriesGetDefault with default headers values +func NewCacheServiceMetricsKeyEntriesGetDefault(code int) *CacheServiceMetricsKeyEntriesGetDefault { + return &CacheServiceMetricsKeyEntriesGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeyEntriesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsKeyEntriesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key entries get default response +func (o *CacheServiceMetricsKeyEntriesGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeyEntriesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeyEntriesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeyEntriesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hit_rate_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hit_rate_get_parameters.go new file mode 100644 index 00000000000..7237d7c2e2d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hit_rate_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeyHitRateGetParams creates a new CacheServiceMetricsKeyHitRateGetParams object +// with the default values initialized. 
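+//
+// Illustrative sketch, not generated: on a non-2xx status the paired reader returns
+// *CacheServiceMetricsKeyHitRateGetDefault, which implements error, so callers can
+// recover the status code and models.ErrorModel payload by type assertion:
+//
+//	if d, ok := err.(*CacheServiceMetricsKeyHitRateGetDefault); ok {
+//		log.Printf("agent returned HTTP %d: %s", d.Code(), d.GetPayload().Message)
+//	}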
+func NewCacheServiceMetricsKeyHitRateGetParams() *CacheServiceMetricsKeyHitRateGetParams { + + return &CacheServiceMetricsKeyHitRateGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeyHitRateGetParamsWithTimeout creates a new CacheServiceMetricsKeyHitRateGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeyHitRateGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeyHitRateGetParams { + + return &CacheServiceMetricsKeyHitRateGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeyHitRateGetParamsWithContext creates a new CacheServiceMetricsKeyHitRateGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeyHitRateGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeyHitRateGetParams { + + return &CacheServiceMetricsKeyHitRateGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeyHitRateGetParamsWithHTTPClient creates a new CacheServiceMetricsKeyHitRateGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeyHitRateGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeyHitRateGetParams { + + return &CacheServiceMetricsKeyHitRateGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeyHitRateGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key hit rate get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeyHitRateGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key hit rate get params +func (o *CacheServiceMetricsKeyHitRateGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeyHitRateGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key hit rate get params +func (o *CacheServiceMetricsKeyHitRateGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key hit rate get params +func (o *CacheServiceMetricsKeyHitRateGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeyHitRateGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key hit rate get params +func (o *CacheServiceMetricsKeyHitRateGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key hit rate get params +func (o *CacheServiceMetricsKeyHitRateGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeyHitRateGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key hit rate get params +func (o *CacheServiceMetricsKeyHitRateGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsKeyHitRateGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hit_rate_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hit_rate_get_responses.go new file mode 100644 index 00000000000..e18b0d8d567 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hit_rate_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeyHitRateGetReader is a Reader for the CacheServiceMetricsKeyHitRateGet structure. +type CacheServiceMetricsKeyHitRateGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeyHitRateGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeyHitRateGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeyHitRateGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeyHitRateGetOK creates a CacheServiceMetricsKeyHitRateGetOK with default headers values +func NewCacheServiceMetricsKeyHitRateGetOK() *CacheServiceMetricsKeyHitRateGetOK { + return &CacheServiceMetricsKeyHitRateGetOK{} +} + +/* +CacheServiceMetricsKeyHitRateGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsKeyHitRateGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsKeyHitRateGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsKeyHitRateGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeyHitRateGetDefault creates a CacheServiceMetricsKeyHitRateGetDefault with default headers values +func NewCacheServiceMetricsKeyHitRateGetDefault(code int) *CacheServiceMetricsKeyHitRateGetDefault { + return &CacheServiceMetricsKeyHitRateGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeyHitRateGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsKeyHitRateGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key hit rate get default response +func (o *CacheServiceMetricsKeyHitRateGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeyHitRateGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeyHitRateGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeyHitRateGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_get_parameters.go new file mode 100644 index 00000000000..e6ac7ba34f4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeyHitsGetParams creates a new CacheServiceMetricsKeyHitsGetParams object +// with the default values initialized. 
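+//
+// Note: this operation takes no operation-specific parameters, so WriteToRequest
+// below only applies the timeout; the res slice stays empty and the
+// errors.CompositeValidationError branch never fires.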
+func NewCacheServiceMetricsKeyHitsGetParams() *CacheServiceMetricsKeyHitsGetParams { + + return &CacheServiceMetricsKeyHitsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeyHitsGetParamsWithTimeout creates a new CacheServiceMetricsKeyHitsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeyHitsGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeyHitsGetParams { + + return &CacheServiceMetricsKeyHitsGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeyHitsGetParamsWithContext creates a new CacheServiceMetricsKeyHitsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeyHitsGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeyHitsGetParams { + + return &CacheServiceMetricsKeyHitsGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeyHitsGetParamsWithHTTPClient creates a new CacheServiceMetricsKeyHitsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeyHitsGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeyHitsGetParams { + + return &CacheServiceMetricsKeyHitsGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeyHitsGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key hits get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeyHitsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key hits get params +func (o *CacheServiceMetricsKeyHitsGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeyHitsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key hits get params +func (o *CacheServiceMetricsKeyHitsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key hits get params +func (o *CacheServiceMetricsKeyHitsGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeyHitsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key hits get params +func (o *CacheServiceMetricsKeyHitsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key hits get params +func (o *CacheServiceMetricsKeyHitsGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeyHitsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key hits get params +func (o *CacheServiceMetricsKeyHitsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsKeyHitsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_get_responses.go new file mode 100644 index 00000000000..1bdecfc50f8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeyHitsGetReader is a Reader for the CacheServiceMetricsKeyHitsGet structure. +type CacheServiceMetricsKeyHitsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeyHitsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeyHitsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeyHitsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeyHitsGetOK creates a CacheServiceMetricsKeyHitsGetOK with default headers values +func NewCacheServiceMetricsKeyHitsGetOK() *CacheServiceMetricsKeyHitsGetOK { + return &CacheServiceMetricsKeyHitsGetOK{} +} + +/* +CacheServiceMetricsKeyHitsGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsKeyHitsGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsKeyHitsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsKeyHitsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeyHitsGetDefault creates a CacheServiceMetricsKeyHitsGetDefault with default headers values +func NewCacheServiceMetricsKeyHitsGetDefault(code int) *CacheServiceMetricsKeyHitsGetDefault { + return &CacheServiceMetricsKeyHitsGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeyHitsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsKeyHitsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key hits get default response +func (o *CacheServiceMetricsKeyHitsGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeyHitsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeyHitsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeyHitsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_moving_avrage_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_moving_avrage_get_parameters.go new file mode 100644 index 00000000000..f0964e6cde8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_moving_avrage_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeyHitsMovingAvrageGetParams creates a new CacheServiceMetricsKeyHitsMovingAvrageGetParams object +// with the default values initialized. 
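+//
+// Illustrative sketch, not generated: unlike the untyped hits endpoint, the OK
+// response for this operation carries a typed payload (assuming the same
+// hypothetical `client` as above):
+//
+//	ok, err := client.CacheServiceMetricsKeyHitsMovingAvrageGet(
+//		NewCacheServiceMetricsKeyHitsMovingAvrageGetParams())
+//	if err == nil {
+//		avg := ok.GetPayload() // *models.RateMovingAverage
+//		_ = avg
+//	}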
+func NewCacheServiceMetricsKeyHitsMovingAvrageGetParams() *CacheServiceMetricsKeyHitsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyHitsMovingAvrageGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeyHitsMovingAvrageGetParamsWithTimeout creates a new CacheServiceMetricsKeyHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeyHitsMovingAvrageGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeyHitsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyHitsMovingAvrageGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeyHitsMovingAvrageGetParamsWithContext creates a new CacheServiceMetricsKeyHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeyHitsMovingAvrageGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeyHitsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyHitsMovingAvrageGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeyHitsMovingAvrageGetParamsWithHTTPClient creates a new CacheServiceMetricsKeyHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeyHitsMovingAvrageGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeyHitsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyHitsMovingAvrageGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeyHitsMovingAvrageGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key hits moving avrage get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeyHitsMovingAvrageGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key hits moving avrage get params +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeyHitsMovingAvrageGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key hits moving avrage get params +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key hits moving avrage get params +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeyHitsMovingAvrageGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key hits moving avrage get params +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key hits moving avrage get params +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeyHitsMovingAvrageGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key hits moving avrage get params +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*CacheServiceMetricsKeyHitsMovingAvrageGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_moving_avrage_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_moving_avrage_get_responses.go new file mode 100644 index 00000000000..b4dfb391eb1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_hits_moving_avrage_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeyHitsMovingAvrageGetReader is a Reader for the CacheServiceMetricsKeyHitsMovingAvrageGet structure. +type CacheServiceMetricsKeyHitsMovingAvrageGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeyHitsMovingAvrageGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeyHitsMovingAvrageGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeyHitsMovingAvrageGetOK creates a CacheServiceMetricsKeyHitsMovingAvrageGetOK with default headers values +func NewCacheServiceMetricsKeyHitsMovingAvrageGetOK() *CacheServiceMetricsKeyHitsMovingAvrageGetOK { + return &CacheServiceMetricsKeyHitsMovingAvrageGetOK{} +} + +/* +CacheServiceMetricsKeyHitsMovingAvrageGetOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceMetricsKeyHitsMovingAvrageGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeyHitsMovingAvrageGetDefault creates a CacheServiceMetricsKeyHitsMovingAvrageGetDefault with default headers values +func NewCacheServiceMetricsKeyHitsMovingAvrageGetDefault(code int) *CacheServiceMetricsKeyHitsMovingAvrageGetDefault { + return &CacheServiceMetricsKeyHitsMovingAvrageGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeyHitsMovingAvrageGetDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceMetricsKeyHitsMovingAvrageGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key hits moving avrage get default response +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeyHitsMovingAvrageGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_get_parameters.go new file mode 100644 index 00000000000..d710e706fac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeyRequestsGetParams creates a new CacheServiceMetricsKeyRequestsGetParams object +// with the default values initialized. 
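+//
+// Note: the With* setters mutate and return the same params value rather than a
+// copy, so a params object is not safe to share across goroutines; construct one
+// per request.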
+func NewCacheServiceMetricsKeyRequestsGetParams() *CacheServiceMetricsKeyRequestsGetParams { + + return &CacheServiceMetricsKeyRequestsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeyRequestsGetParamsWithTimeout creates a new CacheServiceMetricsKeyRequestsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeyRequestsGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeyRequestsGetParams { + + return &CacheServiceMetricsKeyRequestsGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeyRequestsGetParamsWithContext creates a new CacheServiceMetricsKeyRequestsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeyRequestsGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeyRequestsGetParams { + + return &CacheServiceMetricsKeyRequestsGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeyRequestsGetParamsWithHTTPClient creates a new CacheServiceMetricsKeyRequestsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeyRequestsGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeyRequestsGetParams { + + return &CacheServiceMetricsKeyRequestsGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeyRequestsGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key requests get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeyRequestsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key requests get params +func (o *CacheServiceMetricsKeyRequestsGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeyRequestsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key requests get params +func (o *CacheServiceMetricsKeyRequestsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key requests get params +func (o *CacheServiceMetricsKeyRequestsGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeyRequestsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key requests get params +func (o *CacheServiceMetricsKeyRequestsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key requests get params +func (o *CacheServiceMetricsKeyRequestsGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeyRequestsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key requests get params +func (o *CacheServiceMetricsKeyRequestsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsKeyRequestsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_get_responses.go new file mode 100644 index 00000000000..fdfa45f7047 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeyRequestsGetReader is a Reader for the CacheServiceMetricsKeyRequestsGet structure. +type CacheServiceMetricsKeyRequestsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeyRequestsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeyRequestsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeyRequestsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeyRequestsGetOK creates a CacheServiceMetricsKeyRequestsGetOK with default headers values +func NewCacheServiceMetricsKeyRequestsGetOK() *CacheServiceMetricsKeyRequestsGetOK { + return &CacheServiceMetricsKeyRequestsGetOK{} +} + +/* +CacheServiceMetricsKeyRequestsGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsKeyRequestsGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsKeyRequestsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsKeyRequestsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeyRequestsGetDefault creates a CacheServiceMetricsKeyRequestsGetDefault with default headers values +func NewCacheServiceMetricsKeyRequestsGetDefault(code int) *CacheServiceMetricsKeyRequestsGetDefault { + return &CacheServiceMetricsKeyRequestsGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeyRequestsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsKeyRequestsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key requests get default response +func (o *CacheServiceMetricsKeyRequestsGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeyRequestsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeyRequestsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeyRequestsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_moving_avrage_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_moving_avrage_get_parameters.go new file mode 100644 index 00000000000..848b4b16eae --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_moving_avrage_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeyRequestsMovingAvrageGetParams creates a new CacheServiceMetricsKeyRequestsMovingAvrageGetParams object +// with the default values initialized. 
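+//
+// Note: the paired reader (see the corresponding _responses.go file) treats any
+// 2xx "default" response as success and returns every other non-200 status as
+// *CacheServiceMetricsKeyRequestsMovingAvrageGetDefault, which implements error.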
+func NewCacheServiceMetricsKeyRequestsMovingAvrageGetParams() *CacheServiceMetricsKeyRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyRequestsMovingAvrageGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeyRequestsMovingAvrageGetParamsWithTimeout creates a new CacheServiceMetricsKeyRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeyRequestsMovingAvrageGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeyRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyRequestsMovingAvrageGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeyRequestsMovingAvrageGetParamsWithContext creates a new CacheServiceMetricsKeyRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeyRequestsMovingAvrageGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeyRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyRequestsMovingAvrageGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeyRequestsMovingAvrageGetParamsWithHTTPClient creates a new CacheServiceMetricsKeyRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeyRequestsMovingAvrageGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeyRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsKeyRequestsMovingAvrageGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeyRequestsMovingAvrageGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key requests moving avrage get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeyRequestsMovingAvrageGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key requests moving avrage get params +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeyRequestsMovingAvrageGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key requests moving avrage get params +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key requests moving avrage get params +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeyRequestsMovingAvrageGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key requests moving avrage get params +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key requests moving avrage get params +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeyRequestsMovingAvrageGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key requests moving avrage get params +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_moving_avrage_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_moving_avrage_get_responses.go new file mode 100644 index 00000000000..ef11b91d27a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_requests_moving_avrage_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeyRequestsMovingAvrageGetReader is a Reader for the CacheServiceMetricsKeyRequestsMovingAvrageGet structure. +type CacheServiceMetricsKeyRequestsMovingAvrageGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeyRequestsMovingAvrageGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeyRequestsMovingAvrageGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeyRequestsMovingAvrageGetOK creates a CacheServiceMetricsKeyRequestsMovingAvrageGetOK with default headers values +func NewCacheServiceMetricsKeyRequestsMovingAvrageGetOK() *CacheServiceMetricsKeyRequestsMovingAvrageGetOK { + return &CacheServiceMetricsKeyRequestsMovingAvrageGetOK{} +} + +/* +CacheServiceMetricsKeyRequestsMovingAvrageGetOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceMetricsKeyRequestsMovingAvrageGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeyRequestsMovingAvrageGetDefault creates a CacheServiceMetricsKeyRequestsMovingAvrageGetDefault with default headers values +func NewCacheServiceMetricsKeyRequestsMovingAvrageGetDefault(code int) *CacheServiceMetricsKeyRequestsMovingAvrageGetDefault { + return &CacheServiceMetricsKeyRequestsMovingAvrageGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeyRequestsMovingAvrageGetDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceMetricsKeyRequestsMovingAvrageGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key requests moving avrage get default response +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeyRequestsMovingAvrageGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_size_get_parameters.go new file mode 100644 index 00000000000..22f91a18c9b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsKeySizeGetParams creates a new CacheServiceMetricsKeySizeGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsKeySizeGetParams() *CacheServiceMetricsKeySizeGetParams { + + return &CacheServiceMetricsKeySizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsKeySizeGetParamsWithTimeout creates a new CacheServiceMetricsKeySizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsKeySizeGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsKeySizeGetParams { + + return &CacheServiceMetricsKeySizeGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsKeySizeGetParamsWithContext creates a new CacheServiceMetricsKeySizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsKeySizeGetParamsWithContext(ctx context.Context) *CacheServiceMetricsKeySizeGetParams { + + return &CacheServiceMetricsKeySizeGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsKeySizeGetParamsWithHTTPClient creates a new CacheServiceMetricsKeySizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsKeySizeGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsKeySizeGetParams { + + return &CacheServiceMetricsKeySizeGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsKeySizeGetParams contains all the parameters to send to the API endpoint +for the cache service metrics key size get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsKeySizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics key size get params +func (o *CacheServiceMetricsKeySizeGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsKeySizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics key size get params +func (o *CacheServiceMetricsKeySizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics key size get params +func (o *CacheServiceMetricsKeySizeGetParams) WithContext(ctx context.Context) *CacheServiceMetricsKeySizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics key size get params +func (o *CacheServiceMetricsKeySizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics key size get params +func (o *CacheServiceMetricsKeySizeGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsKeySizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics key size get params +func (o *CacheServiceMetricsKeySizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsKeySizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_size_get_responses.go new file mode 100644 index 00000000000..40bf61717d3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_key_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsKeySizeGetReader is a Reader for the CacheServiceMetricsKeySizeGet structure. +type CacheServiceMetricsKeySizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsKeySizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsKeySizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsKeySizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsKeySizeGetOK creates a CacheServiceMetricsKeySizeGetOK with default headers values +func NewCacheServiceMetricsKeySizeGetOK() *CacheServiceMetricsKeySizeGetOK { + return &CacheServiceMetricsKeySizeGetOK{} +} + +/* +CacheServiceMetricsKeySizeGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsKeySizeGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsKeySizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsKeySizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsKeySizeGetDefault creates a CacheServiceMetricsKeySizeGetDefault with default headers values +func NewCacheServiceMetricsKeySizeGetDefault(code int) *CacheServiceMetricsKeySizeGetDefault { + return &CacheServiceMetricsKeySizeGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsKeySizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsKeySizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics key size get default response +func (o *CacheServiceMetricsKeySizeGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsKeySizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsKeySizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsKeySizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_capacity_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_capacity_get_parameters.go new file mode 100644 index 00000000000..11f402c9e48 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_capacity_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowCapacityGetParams creates a new CacheServiceMetricsRowCapacityGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsRowCapacityGetParams() *CacheServiceMetricsRowCapacityGetParams { + + return &CacheServiceMetricsRowCapacityGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowCapacityGetParamsWithTimeout creates a new CacheServiceMetricsRowCapacityGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowCapacityGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowCapacityGetParams { + + return &CacheServiceMetricsRowCapacityGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowCapacityGetParamsWithContext creates a new CacheServiceMetricsRowCapacityGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowCapacityGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowCapacityGetParams { + + return &CacheServiceMetricsRowCapacityGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowCapacityGetParamsWithHTTPClient creates a new CacheServiceMetricsRowCapacityGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowCapacityGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowCapacityGetParams { + + return &CacheServiceMetricsRowCapacityGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowCapacityGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row capacity get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowCapacityGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row capacity get params +func (o *CacheServiceMetricsRowCapacityGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowCapacityGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row capacity get params +func (o *CacheServiceMetricsRowCapacityGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row capacity get params +func (o *CacheServiceMetricsRowCapacityGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowCapacityGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row capacity get params +func (o *CacheServiceMetricsRowCapacityGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row capacity get params +func (o *CacheServiceMetricsRowCapacityGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowCapacityGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row capacity get params +func (o *CacheServiceMetricsRowCapacityGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsRowCapacityGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_capacity_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_capacity_get_responses.go new file mode 100644 index 00000000000..a1a2d24af8d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_capacity_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowCapacityGetReader is a Reader for the CacheServiceMetricsRowCapacityGet structure. +type CacheServiceMetricsRowCapacityGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowCapacityGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowCapacityGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowCapacityGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowCapacityGetOK creates a CacheServiceMetricsRowCapacityGetOK with default headers values +func NewCacheServiceMetricsRowCapacityGetOK() *CacheServiceMetricsRowCapacityGetOK { + return &CacheServiceMetricsRowCapacityGetOK{} +} + +/* +CacheServiceMetricsRowCapacityGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsRowCapacityGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsRowCapacityGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsRowCapacityGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowCapacityGetDefault creates a CacheServiceMetricsRowCapacityGetDefault with default headers values +func NewCacheServiceMetricsRowCapacityGetDefault(code int) *CacheServiceMetricsRowCapacityGetDefault { + return &CacheServiceMetricsRowCapacityGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowCapacityGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsRowCapacityGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row capacity get default response +func (o *CacheServiceMetricsRowCapacityGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowCapacityGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowCapacityGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowCapacityGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_entries_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_entries_get_parameters.go new file mode 100644 index 00000000000..f4b49b5b33a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_entries_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowEntriesGetParams creates a new CacheServiceMetricsRowEntriesGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsRowEntriesGetParams() *CacheServiceMetricsRowEntriesGetParams { + + return &CacheServiceMetricsRowEntriesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowEntriesGetParamsWithTimeout creates a new CacheServiceMetricsRowEntriesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowEntriesGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowEntriesGetParams { + + return &CacheServiceMetricsRowEntriesGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowEntriesGetParamsWithContext creates a new CacheServiceMetricsRowEntriesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowEntriesGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowEntriesGetParams { + + return &CacheServiceMetricsRowEntriesGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowEntriesGetParamsWithHTTPClient creates a new CacheServiceMetricsRowEntriesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowEntriesGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowEntriesGetParams { + + return &CacheServiceMetricsRowEntriesGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowEntriesGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row entries get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowEntriesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row entries get params +func (o *CacheServiceMetricsRowEntriesGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowEntriesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row entries get params +func (o *CacheServiceMetricsRowEntriesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row entries get params +func (o *CacheServiceMetricsRowEntriesGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowEntriesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row entries get params +func (o *CacheServiceMetricsRowEntriesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row entries get params +func (o *CacheServiceMetricsRowEntriesGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowEntriesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row entries get params +func (o *CacheServiceMetricsRowEntriesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsRowEntriesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_entries_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_entries_get_responses.go new file mode 100644 index 00000000000..c21a89c5d89 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_entries_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowEntriesGetReader is a Reader for the CacheServiceMetricsRowEntriesGet structure. +type CacheServiceMetricsRowEntriesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowEntriesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowEntriesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowEntriesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowEntriesGetOK creates a CacheServiceMetricsRowEntriesGetOK with default headers values +func NewCacheServiceMetricsRowEntriesGetOK() *CacheServiceMetricsRowEntriesGetOK { + return &CacheServiceMetricsRowEntriesGetOK{} +} + +/* +CacheServiceMetricsRowEntriesGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsRowEntriesGetOK struct { + Payload int32 +} + +func (o *CacheServiceMetricsRowEntriesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceMetricsRowEntriesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowEntriesGetDefault creates a CacheServiceMetricsRowEntriesGetDefault with default headers values +func NewCacheServiceMetricsRowEntriesGetDefault(code int) *CacheServiceMetricsRowEntriesGetDefault { + return &CacheServiceMetricsRowEntriesGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowEntriesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsRowEntriesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row entries get default response +func (o *CacheServiceMetricsRowEntriesGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowEntriesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowEntriesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowEntriesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hit_rate_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hit_rate_get_parameters.go new file mode 100644 index 00000000000..f6ec3b5c187 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hit_rate_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowHitRateGetParams creates a new CacheServiceMetricsRowHitRateGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsRowHitRateGetParams() *CacheServiceMetricsRowHitRateGetParams { + + return &CacheServiceMetricsRowHitRateGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowHitRateGetParamsWithTimeout creates a new CacheServiceMetricsRowHitRateGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowHitRateGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowHitRateGetParams { + + return &CacheServiceMetricsRowHitRateGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowHitRateGetParamsWithContext creates a new CacheServiceMetricsRowHitRateGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowHitRateGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowHitRateGetParams { + + return &CacheServiceMetricsRowHitRateGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowHitRateGetParamsWithHTTPClient creates a new CacheServiceMetricsRowHitRateGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowHitRateGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowHitRateGetParams { + + return &CacheServiceMetricsRowHitRateGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowHitRateGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row hit rate get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowHitRateGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row hit rate get params +func (o *CacheServiceMetricsRowHitRateGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowHitRateGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row hit rate get params +func (o *CacheServiceMetricsRowHitRateGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row hit rate get params +func (o *CacheServiceMetricsRowHitRateGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowHitRateGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row hit rate get params +func (o *CacheServiceMetricsRowHitRateGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row hit rate get params +func (o *CacheServiceMetricsRowHitRateGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowHitRateGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row hit rate get params +func (o *CacheServiceMetricsRowHitRateGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsRowHitRateGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hit_rate_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hit_rate_get_responses.go new file mode 100644 index 00000000000..ff364d4a089 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hit_rate_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowHitRateGetReader is a Reader for the CacheServiceMetricsRowHitRateGet structure. +type CacheServiceMetricsRowHitRateGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowHitRateGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowHitRateGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowHitRateGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowHitRateGetOK creates a CacheServiceMetricsRowHitRateGetOK with default headers values +func NewCacheServiceMetricsRowHitRateGetOK() *CacheServiceMetricsRowHitRateGetOK { + return &CacheServiceMetricsRowHitRateGetOK{} +} + +/* +CacheServiceMetricsRowHitRateGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsRowHitRateGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsRowHitRateGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsRowHitRateGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowHitRateGetDefault creates a CacheServiceMetricsRowHitRateGetDefault with default headers values +func NewCacheServiceMetricsRowHitRateGetDefault(code int) *CacheServiceMetricsRowHitRateGetDefault { + return &CacheServiceMetricsRowHitRateGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowHitRateGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsRowHitRateGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row hit rate get default response +func (o *CacheServiceMetricsRowHitRateGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowHitRateGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowHitRateGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowHitRateGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_get_parameters.go new file mode 100644 index 00000000000..7b2edbe9a4a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowHitsGetParams creates a new CacheServiceMetricsRowHitsGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsRowHitsGetParams() *CacheServiceMetricsRowHitsGetParams { + + return &CacheServiceMetricsRowHitsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowHitsGetParamsWithTimeout creates a new CacheServiceMetricsRowHitsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowHitsGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowHitsGetParams { + + return &CacheServiceMetricsRowHitsGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowHitsGetParamsWithContext creates a new CacheServiceMetricsRowHitsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowHitsGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowHitsGetParams { + + return &CacheServiceMetricsRowHitsGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowHitsGetParamsWithHTTPClient creates a new CacheServiceMetricsRowHitsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowHitsGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowHitsGetParams { + + return &CacheServiceMetricsRowHitsGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowHitsGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row hits get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowHitsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row hits get params +func (o *CacheServiceMetricsRowHitsGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowHitsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row hits get params +func (o *CacheServiceMetricsRowHitsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row hits get params +func (o *CacheServiceMetricsRowHitsGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowHitsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row hits get params +func (o *CacheServiceMetricsRowHitsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row hits get params +func (o *CacheServiceMetricsRowHitsGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowHitsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row hits get params +func (o *CacheServiceMetricsRowHitsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsRowHitsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_get_responses.go new file mode 100644 index 00000000000..bb624564ed8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowHitsGetReader is a Reader for the CacheServiceMetricsRowHitsGet structure. +type CacheServiceMetricsRowHitsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowHitsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowHitsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowHitsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowHitsGetOK creates a CacheServiceMetricsRowHitsGetOK with default headers values +func NewCacheServiceMetricsRowHitsGetOK() *CacheServiceMetricsRowHitsGetOK { + return &CacheServiceMetricsRowHitsGetOK{} +} + +/* +CacheServiceMetricsRowHitsGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsRowHitsGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsRowHitsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsRowHitsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowHitsGetDefault creates a CacheServiceMetricsRowHitsGetDefault with default headers values +func NewCacheServiceMetricsRowHitsGetDefault(code int) *CacheServiceMetricsRowHitsGetDefault { + return &CacheServiceMetricsRowHitsGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowHitsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsRowHitsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row hits get default response +func (o *CacheServiceMetricsRowHitsGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowHitsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowHitsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowHitsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_moving_avrage_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_moving_avrage_get_parameters.go new file mode 100644 index 00000000000..b32df73bd5b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_moving_avrage_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowHitsMovingAvrageGetParams creates a new CacheServiceMetricsRowHitsMovingAvrageGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsRowHitsMovingAvrageGetParams() *CacheServiceMetricsRowHitsMovingAvrageGetParams { + + return &CacheServiceMetricsRowHitsMovingAvrageGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowHitsMovingAvrageGetParamsWithTimeout creates a new CacheServiceMetricsRowHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowHitsMovingAvrageGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowHitsMovingAvrageGetParams { + + return &CacheServiceMetricsRowHitsMovingAvrageGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowHitsMovingAvrageGetParamsWithContext creates a new CacheServiceMetricsRowHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowHitsMovingAvrageGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowHitsMovingAvrageGetParams { + + return &CacheServiceMetricsRowHitsMovingAvrageGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowHitsMovingAvrageGetParamsWithHTTPClient creates a new CacheServiceMetricsRowHitsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowHitsMovingAvrageGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowHitsMovingAvrageGetParams { + + return &CacheServiceMetricsRowHitsMovingAvrageGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowHitsMovingAvrageGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row hits moving avrage get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowHitsMovingAvrageGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row hits moving avrage get params +func (o *CacheServiceMetricsRowHitsMovingAvrageGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowHitsMovingAvrageGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row hits moving avrage get params +func (o *CacheServiceMetricsRowHitsMovingAvrageGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row hits moving avrage get params +func (o *CacheServiceMetricsRowHitsMovingAvrageGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowHitsMovingAvrageGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row hits moving avrage get params +func (o *CacheServiceMetricsRowHitsMovingAvrageGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row hits moving avrage get params +func (o *CacheServiceMetricsRowHitsMovingAvrageGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowHitsMovingAvrageGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row hits moving avrage get params +func (o *CacheServiceMetricsRowHitsMovingAvrageGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*CacheServiceMetricsRowHitsMovingAvrageGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_moving_avrage_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_moving_avrage_get_responses.go new file mode 100644 index 00000000000..fca46a800cc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_hits_moving_avrage_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowHitsMovingAvrageGetReader is a Reader for the CacheServiceMetricsRowHitsMovingAvrageGet structure. +type CacheServiceMetricsRowHitsMovingAvrageGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowHitsMovingAvrageGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowHitsMovingAvrageGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowHitsMovingAvrageGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowHitsMovingAvrageGetOK creates a CacheServiceMetricsRowHitsMovingAvrageGetOK with default headers values +func NewCacheServiceMetricsRowHitsMovingAvrageGetOK() *CacheServiceMetricsRowHitsMovingAvrageGetOK { + return &CacheServiceMetricsRowHitsMovingAvrageGetOK{} +} + +/* +CacheServiceMetricsRowHitsMovingAvrageGetOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceMetricsRowHitsMovingAvrageGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *CacheServiceMetricsRowHitsMovingAvrageGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *CacheServiceMetricsRowHitsMovingAvrageGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowHitsMovingAvrageGetDefault creates a CacheServiceMetricsRowHitsMovingAvrageGetDefault with default headers values +func NewCacheServiceMetricsRowHitsMovingAvrageGetDefault(code int) *CacheServiceMetricsRowHitsMovingAvrageGetDefault { + return &CacheServiceMetricsRowHitsMovingAvrageGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowHitsMovingAvrageGetDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceMetricsRowHitsMovingAvrageGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row hits moving avrage get default response +func (o *CacheServiceMetricsRowHitsMovingAvrageGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowHitsMovingAvrageGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowHitsMovingAvrageGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowHitsMovingAvrageGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_get_parameters.go new file mode 100644 index 00000000000..57cf49e9788 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowRequestsGetParams creates a new CacheServiceMetricsRowRequestsGetParams object +// with the default values initialized. 
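Aside (editorial, not part of the vendored files): every generated params type in this hunk follows the same shape — four constructors that differ only in which field they pre-seed, plus chainable With*/Set* pairs. A minimal usage sketch under that assumption, using the package path from this diff:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Each constructor pre-seeds exactly one field (timeout, context, or
	// HTTP client); the fluent setters can override any of them later,
	// so the constructor variants are interchangeable.
	params := operations.NewCacheServiceMetricsRowRequestsGetParams().
		WithContext(ctx).
		WithTimeout(30 * time.Second)

	fmt.Printf("%+v\n", params)
}
```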
+func NewCacheServiceMetricsRowRequestsGetParams() *CacheServiceMetricsRowRequestsGetParams { + + return &CacheServiceMetricsRowRequestsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowRequestsGetParamsWithTimeout creates a new CacheServiceMetricsRowRequestsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowRequestsGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowRequestsGetParams { + + return &CacheServiceMetricsRowRequestsGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowRequestsGetParamsWithContext creates a new CacheServiceMetricsRowRequestsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowRequestsGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowRequestsGetParams { + + return &CacheServiceMetricsRowRequestsGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowRequestsGetParamsWithHTTPClient creates a new CacheServiceMetricsRowRequestsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowRequestsGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowRequestsGetParams { + + return &CacheServiceMetricsRowRequestsGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowRequestsGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row requests get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowRequestsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row requests get params +func (o *CacheServiceMetricsRowRequestsGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowRequestsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row requests get params +func (o *CacheServiceMetricsRowRequestsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row requests get params +func (o *CacheServiceMetricsRowRequestsGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowRequestsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row requests get params +func (o *CacheServiceMetricsRowRequestsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row requests get params +func (o *CacheServiceMetricsRowRequestsGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowRequestsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row requests get params +func (o *CacheServiceMetricsRowRequestsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsRowRequestsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_get_responses.go new file mode 100644 index 00000000000..fe083b4c186 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowRequestsGetReader is a Reader for the CacheServiceMetricsRowRequestsGet structure. +type CacheServiceMetricsRowRequestsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowRequestsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowRequestsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowRequestsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowRequestsGetOK creates a CacheServiceMetricsRowRequestsGetOK with default headers values +func NewCacheServiceMetricsRowRequestsGetOK() *CacheServiceMetricsRowRequestsGetOK { + return &CacheServiceMetricsRowRequestsGetOK{} +} + +/* +CacheServiceMetricsRowRequestsGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsRowRequestsGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsRowRequestsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsRowRequestsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowRequestsGetDefault creates a CacheServiceMetricsRowRequestsGetDefault with default headers values +func NewCacheServiceMetricsRowRequestsGetDefault(code int) *CacheServiceMetricsRowRequestsGetDefault { + return &CacheServiceMetricsRowRequestsGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowRequestsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsRowRequestsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row requests get default response +func (o *CacheServiceMetricsRowRequestsGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowRequestsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowRequestsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowRequestsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_moving_avrage_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_moving_avrage_get_parameters.go new file mode 100644 index 00000000000..22bec6db3d5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_moving_avrage_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowRequestsMovingAvrageGetParams creates a new CacheServiceMetricsRowRequestsMovingAvrageGetParams object +// with the default values initialized. 
+func NewCacheServiceMetricsRowRequestsMovingAvrageGetParams() *CacheServiceMetricsRowRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsRowRequestsMovingAvrageGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowRequestsMovingAvrageGetParamsWithTimeout creates a new CacheServiceMetricsRowRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowRequestsMovingAvrageGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsRowRequestsMovingAvrageGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowRequestsMovingAvrageGetParamsWithContext creates a new CacheServiceMetricsRowRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowRequestsMovingAvrageGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsRowRequestsMovingAvrageGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowRequestsMovingAvrageGetParamsWithHTTPClient creates a new CacheServiceMetricsRowRequestsMovingAvrageGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowRequestsMovingAvrageGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowRequestsMovingAvrageGetParams { + + return &CacheServiceMetricsRowRequestsMovingAvrageGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowRequestsMovingAvrageGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row requests moving avrage get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowRequestsMovingAvrageGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row requests moving avrage get params +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowRequestsMovingAvrageGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row requests moving avrage get params +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row requests moving avrage get params +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowRequestsMovingAvrageGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row requests moving avrage get params +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row requests moving avrage get params +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowRequestsMovingAvrageGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row requests moving avrage get params +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_moving_avrage_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_moving_avrage_get_responses.go new file mode 100644 index 00000000000..50972f19c6e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_requests_moving_avrage_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowRequestsMovingAvrageGetReader is a Reader for the CacheServiceMetricsRowRequestsMovingAvrageGet structure. +type CacheServiceMetricsRowRequestsMovingAvrageGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowRequestsMovingAvrageGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowRequestsMovingAvrageGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowRequestsMovingAvrageGetOK creates a CacheServiceMetricsRowRequestsMovingAvrageGetOK with default headers values +func NewCacheServiceMetricsRowRequestsMovingAvrageGetOK() *CacheServiceMetricsRowRequestsMovingAvrageGetOK { + return &CacheServiceMetricsRowRequestsMovingAvrageGetOK{} +} + +/* +CacheServiceMetricsRowRequestsMovingAvrageGetOK handles this case with default header values. 
+ +Success +*/ +type CacheServiceMetricsRowRequestsMovingAvrageGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowRequestsMovingAvrageGetDefault creates a CacheServiceMetricsRowRequestsMovingAvrageGetDefault with default headers values +func NewCacheServiceMetricsRowRequestsMovingAvrageGetDefault(code int) *CacheServiceMetricsRowRequestsMovingAvrageGetDefault { + return &CacheServiceMetricsRowRequestsMovingAvrageGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowRequestsMovingAvrageGetDefault handles this case with default header values. + +internal server error +*/ +type CacheServiceMetricsRowRequestsMovingAvrageGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row requests moving avrage get default response +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowRequestsMovingAvrageGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_size_get_parameters.go new file mode 100644 index 00000000000..44c486be0ae --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceMetricsRowSizeGetParams creates a new CacheServiceMetricsRowSizeGetParams object +// with the default values initialized. 
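Worth noting: the moving-average readers deserialize into a typed *models.RateMovingAverage payload, whereas the plain metrics getters in this hunk fall back to an untyped interface{}. A sketch of how a transport consumes such a reader — resp and consumer would normally be supplied by the go-openapi runtime, which is assumed rather than shown:

```go
package sketch

import (
	"fmt"

	"github.com/go-openapi/runtime"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

// readMovingAverage demonstrates the reader contract: ReadResponse returns
// the OK value on HTTP 200, the default value on other 2xx codes, and the
// default value as an error (it implements error) otherwise.
func readMovingAverage(r *operations.CacheServiceMetricsRowRequestsMovingAvrageGetReader,
	resp runtime.ClientResponse, consumer runtime.Consumer) (*models.RateMovingAverage, error) {

	raw, err := r.ReadResponse(resp, consumer)
	if err != nil {
		return nil, err // non-2xx: err carries the models.ErrorModel message
	}
	if ok, isOK := raw.(*operations.CacheServiceMetricsRowRequestsMovingAvrageGetOK); isOK {
		return ok.GetPayload(), nil
	}
	return nil, fmt.Errorf("unexpected 2xx response type %T", raw)
}
```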
+func NewCacheServiceMetricsRowSizeGetParams() *CacheServiceMetricsRowSizeGetParams { + + return &CacheServiceMetricsRowSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceMetricsRowSizeGetParamsWithTimeout creates a new CacheServiceMetricsRowSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceMetricsRowSizeGetParamsWithTimeout(timeout time.Duration) *CacheServiceMetricsRowSizeGetParams { + + return &CacheServiceMetricsRowSizeGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceMetricsRowSizeGetParamsWithContext creates a new CacheServiceMetricsRowSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceMetricsRowSizeGetParamsWithContext(ctx context.Context) *CacheServiceMetricsRowSizeGetParams { + + return &CacheServiceMetricsRowSizeGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceMetricsRowSizeGetParamsWithHTTPClient creates a new CacheServiceMetricsRowSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceMetricsRowSizeGetParamsWithHTTPClient(client *http.Client) *CacheServiceMetricsRowSizeGetParams { + + return &CacheServiceMetricsRowSizeGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceMetricsRowSizeGetParams contains all the parameters to send to the API endpoint +for the cache service metrics row size get operation typically these are written to a http.Request +*/ +type CacheServiceMetricsRowSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service metrics row size get params +func (o *CacheServiceMetricsRowSizeGetParams) WithTimeout(timeout time.Duration) *CacheServiceMetricsRowSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service metrics row size get params +func (o *CacheServiceMetricsRowSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service metrics row size get params +func (o *CacheServiceMetricsRowSizeGetParams) WithContext(ctx context.Context) *CacheServiceMetricsRowSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service metrics row size get params +func (o *CacheServiceMetricsRowSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service metrics row size get params +func (o *CacheServiceMetricsRowSizeGetParams) WithHTTPClient(client *http.Client) *CacheServiceMetricsRowSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service metrics row size get params +func (o *CacheServiceMetricsRowSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceMetricsRowSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_size_get_responses.go new file mode 100644 index 00000000000..8996e077025 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_metrics_row_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceMetricsRowSizeGetReader is a Reader for the CacheServiceMetricsRowSizeGet structure. +type CacheServiceMetricsRowSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceMetricsRowSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceMetricsRowSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceMetricsRowSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceMetricsRowSizeGetOK creates a CacheServiceMetricsRowSizeGetOK with default headers values +func NewCacheServiceMetricsRowSizeGetOK() *CacheServiceMetricsRowSizeGetOK { + return &CacheServiceMetricsRowSizeGetOK{} +} + +/* +CacheServiceMetricsRowSizeGetOK handles this case with default header values. + +Success +*/ +type CacheServiceMetricsRowSizeGetOK struct { + Payload interface{} +} + +func (o *CacheServiceMetricsRowSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CacheServiceMetricsRowSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceMetricsRowSizeGetDefault creates a CacheServiceMetricsRowSizeGetDefault with default headers values +func NewCacheServiceMetricsRowSizeGetDefault(code int) *CacheServiceMetricsRowSizeGetDefault { + return &CacheServiceMetricsRowSizeGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceMetricsRowSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceMetricsRowSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service metrics row size get default response +func (o *CacheServiceMetricsRowSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceMetricsRowSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceMetricsRowSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceMetricsRowSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_capacity_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_capacity_post_parameters.go new file mode 100644 index 00000000000..39646fcf240 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_capacity_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceRowCacheCapacityPostParams creates a new CacheServiceRowCacheCapacityPostParams object +// with the default values initialized. 
+func NewCacheServiceRowCacheCapacityPostParams() *CacheServiceRowCacheCapacityPostParams { + var () + return &CacheServiceRowCacheCapacityPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceRowCacheCapacityPostParamsWithTimeout creates a new CacheServiceRowCacheCapacityPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceRowCacheCapacityPostParamsWithTimeout(timeout time.Duration) *CacheServiceRowCacheCapacityPostParams { + var () + return &CacheServiceRowCacheCapacityPostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceRowCacheCapacityPostParamsWithContext creates a new CacheServiceRowCacheCapacityPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceRowCacheCapacityPostParamsWithContext(ctx context.Context) *CacheServiceRowCacheCapacityPostParams { + var () + return &CacheServiceRowCacheCapacityPostParams{ + + Context: ctx, + } +} + +// NewCacheServiceRowCacheCapacityPostParamsWithHTTPClient creates a new CacheServiceRowCacheCapacityPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceRowCacheCapacityPostParamsWithHTTPClient(client *http.Client) *CacheServiceRowCacheCapacityPostParams { + var () + return &CacheServiceRowCacheCapacityPostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceRowCacheCapacityPostParams contains all the parameters to send to the API endpoint +for the cache service row cache capacity post operation typically these are written to a http.Request +*/ +type CacheServiceRowCacheCapacityPostParams struct { + + /*Capacity + row cache capacity in mb + + */ + Capacity string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service row cache capacity post params +func (o *CacheServiceRowCacheCapacityPostParams) WithTimeout(timeout time.Duration) *CacheServiceRowCacheCapacityPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service row cache capacity post params +func (o *CacheServiceRowCacheCapacityPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service row cache capacity post params +func (o *CacheServiceRowCacheCapacityPostParams) WithContext(ctx context.Context) *CacheServiceRowCacheCapacityPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service row cache capacity post params +func (o *CacheServiceRowCacheCapacityPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service row cache capacity post params +func (o *CacheServiceRowCacheCapacityPostParams) WithHTTPClient(client *http.Client) *CacheServiceRowCacheCapacityPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service row cache capacity post params +func (o *CacheServiceRowCacheCapacityPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCapacity adds the capacity to the cache service row cache capacity post params +func (o *CacheServiceRowCacheCapacityPostParams) WithCapacity(capacity string) *CacheServiceRowCacheCapacityPostParams { + o.SetCapacity(capacity) + return o +} + +// SetCapacity adds the capacity to the cache service row cache capacity post 
params +func (o *CacheServiceRowCacheCapacityPostParams) SetCapacity(capacity string) { + o.Capacity = capacity +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceRowCacheCapacityPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param capacity + qrCapacity := o.Capacity + qCapacity := qrCapacity + if qCapacity != "" { + if err := r.SetQueryParam("capacity", qCapacity); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_capacity_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_capacity_post_responses.go new file mode 100644 index 00000000000..f83c1470b9f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_capacity_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceRowCacheCapacityPostReader is a Reader for the CacheServiceRowCacheCapacityPost structure. +type CacheServiceRowCacheCapacityPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceRowCacheCapacityPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceRowCacheCapacityPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceRowCacheCapacityPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceRowCacheCapacityPostOK creates a CacheServiceRowCacheCapacityPostOK with default headers values +func NewCacheServiceRowCacheCapacityPostOK() *CacheServiceRowCacheCapacityPostOK { + return &CacheServiceRowCacheCapacityPostOK{} +} + +/* +CacheServiceRowCacheCapacityPostOK handles this case with default header values. + +Success +*/ +type CacheServiceRowCacheCapacityPostOK struct { +} + +func (o *CacheServiceRowCacheCapacityPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceRowCacheCapacityPostDefault creates a CacheServiceRowCacheCapacityPostDefault with default headers values +func NewCacheServiceRowCacheCapacityPostDefault(code int) *CacheServiceRowCacheCapacityPostDefault { + return &CacheServiceRowCacheCapacityPostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceRowCacheCapacityPostDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceRowCacheCapacityPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service row cache capacity post default response +func (o *CacheServiceRowCacheCapacityPostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceRowCacheCapacityPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceRowCacheCapacityPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceRowCacheCapacityPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_get_parameters.go new file mode 100644 index 00000000000..ec8068ac07e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceRowCacheKeysToSaveGetParams creates a new CacheServiceRowCacheKeysToSaveGetParams object +// with the default values initialized. 
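The capacity parameter above is the first one in this hunk that actually serializes a value: WriteToRequest copies Capacity into the `capacity` query string and omits it when the string is empty. A small sketch of the builder side (the value "512" is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// WriteToRequest will render this as ?capacity=512; an empty string
	// would skip the query parameter entirely.
	params := operations.NewCacheServiceRowCacheCapacityPostParams().
		WithCapacity("512") // row cache capacity in MB, per the spec comment

	fmt.Println(params.Capacity)
}
```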
+func NewCacheServiceRowCacheKeysToSaveGetParams() *CacheServiceRowCacheKeysToSaveGetParams { + + return &CacheServiceRowCacheKeysToSaveGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceRowCacheKeysToSaveGetParamsWithTimeout creates a new CacheServiceRowCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceRowCacheKeysToSaveGetParamsWithTimeout(timeout time.Duration) *CacheServiceRowCacheKeysToSaveGetParams { + + return &CacheServiceRowCacheKeysToSaveGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceRowCacheKeysToSaveGetParamsWithContext creates a new CacheServiceRowCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceRowCacheKeysToSaveGetParamsWithContext(ctx context.Context) *CacheServiceRowCacheKeysToSaveGetParams { + + return &CacheServiceRowCacheKeysToSaveGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceRowCacheKeysToSaveGetParamsWithHTTPClient creates a new CacheServiceRowCacheKeysToSaveGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceRowCacheKeysToSaveGetParamsWithHTTPClient(client *http.Client) *CacheServiceRowCacheKeysToSaveGetParams { + + return &CacheServiceRowCacheKeysToSaveGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceRowCacheKeysToSaveGetParams contains all the parameters to send to the API endpoint +for the cache service row cache keys to save get operation typically these are written to a http.Request +*/ +type CacheServiceRowCacheKeysToSaveGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service row cache keys to save get params +func (o *CacheServiceRowCacheKeysToSaveGetParams) WithTimeout(timeout time.Duration) *CacheServiceRowCacheKeysToSaveGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service row cache keys to save get params +func (o *CacheServiceRowCacheKeysToSaveGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service row cache keys to save get params +func (o *CacheServiceRowCacheKeysToSaveGetParams) WithContext(ctx context.Context) *CacheServiceRowCacheKeysToSaveGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service row cache keys to save get params +func (o *CacheServiceRowCacheKeysToSaveGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service row cache keys to save get params +func (o *CacheServiceRowCacheKeysToSaveGetParams) WithHTTPClient(client *http.Client) *CacheServiceRowCacheKeysToSaveGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service row cache keys to save get params +func (o *CacheServiceRowCacheKeysToSaveGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceRowCacheKeysToSaveGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_get_responses.go new file mode 100644 index 00000000000..ee55946eb38 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceRowCacheKeysToSaveGetReader is a Reader for the CacheServiceRowCacheKeysToSaveGet structure. +type CacheServiceRowCacheKeysToSaveGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceRowCacheKeysToSaveGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceRowCacheKeysToSaveGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceRowCacheKeysToSaveGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceRowCacheKeysToSaveGetOK creates a CacheServiceRowCacheKeysToSaveGetOK with default headers values +func NewCacheServiceRowCacheKeysToSaveGetOK() *CacheServiceRowCacheKeysToSaveGetOK { + return &CacheServiceRowCacheKeysToSaveGetOK{} +} + +/* +CacheServiceRowCacheKeysToSaveGetOK handles this case with default header values. + +Success +*/ +type CacheServiceRowCacheKeysToSaveGetOK struct { + Payload int32 +} + +func (o *CacheServiceRowCacheKeysToSaveGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceRowCacheKeysToSaveGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceRowCacheKeysToSaveGetDefault creates a CacheServiceRowCacheKeysToSaveGetDefault with default headers values +func NewCacheServiceRowCacheKeysToSaveGetDefault(code int) *CacheServiceRowCacheKeysToSaveGetDefault { + return &CacheServiceRowCacheKeysToSaveGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceRowCacheKeysToSaveGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceRowCacheKeysToSaveGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service row cache keys to save get default response +func (o *CacheServiceRowCacheKeysToSaveGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceRowCacheKeysToSaveGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceRowCacheKeysToSaveGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceRowCacheKeysToSaveGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_post_parameters.go new file mode 100644 index 00000000000..f552f235803 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCacheServiceRowCacheKeysToSavePostParams creates a new CacheServiceRowCacheKeysToSavePostParams object +// with the default values initialized. 
+func NewCacheServiceRowCacheKeysToSavePostParams() *CacheServiceRowCacheKeysToSavePostParams { + var () + return &CacheServiceRowCacheKeysToSavePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceRowCacheKeysToSavePostParamsWithTimeout creates a new CacheServiceRowCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceRowCacheKeysToSavePostParamsWithTimeout(timeout time.Duration) *CacheServiceRowCacheKeysToSavePostParams { + var () + return &CacheServiceRowCacheKeysToSavePostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceRowCacheKeysToSavePostParamsWithContext creates a new CacheServiceRowCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceRowCacheKeysToSavePostParamsWithContext(ctx context.Context) *CacheServiceRowCacheKeysToSavePostParams { + var () + return &CacheServiceRowCacheKeysToSavePostParams{ + + Context: ctx, + } +} + +// NewCacheServiceRowCacheKeysToSavePostParamsWithHTTPClient creates a new CacheServiceRowCacheKeysToSavePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceRowCacheKeysToSavePostParamsWithHTTPClient(client *http.Client) *CacheServiceRowCacheKeysToSavePostParams { + var () + return &CacheServiceRowCacheKeysToSavePostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceRowCacheKeysToSavePostParams contains all the parameters to send to the API endpoint +for the cache service row cache keys to save post operation typically these are written to a http.Request +*/ +type CacheServiceRowCacheKeysToSavePostParams struct { + + /*Rckts + row cache keys to save + + */ + Rckts int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) WithTimeout(timeout time.Duration) *CacheServiceRowCacheKeysToSavePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) WithContext(ctx context.Context) *CacheServiceRowCacheKeysToSavePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) WithHTTPClient(client *http.Client) *CacheServiceRowCacheKeysToSavePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithRckts adds the rckts to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) WithRckts(rckts int32) *CacheServiceRowCacheKeysToSavePostParams { + o.SetRckts(rckts) + return o +} + +// SetRckts adds 
the rckts to the cache service row cache keys to save post params +func (o *CacheServiceRowCacheKeysToSavePostParams) SetRckts(rckts int32) { + o.Rckts = rckts +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceRowCacheKeysToSavePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param rckts + qrRckts := o.Rckts + qRckts := swag.FormatInt32(qrRckts) + if qRckts != "" { + if err := r.SetQueryParam("rckts", qRckts); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_post_responses.go new file mode 100644 index 00000000000..2ca360176f1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_keys_to_save_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceRowCacheKeysToSavePostReader is a Reader for the CacheServiceRowCacheKeysToSavePost structure. +type CacheServiceRowCacheKeysToSavePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceRowCacheKeysToSavePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceRowCacheKeysToSavePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceRowCacheKeysToSavePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceRowCacheKeysToSavePostOK creates a CacheServiceRowCacheKeysToSavePostOK with default headers values +func NewCacheServiceRowCacheKeysToSavePostOK() *CacheServiceRowCacheKeysToSavePostOK { + return &CacheServiceRowCacheKeysToSavePostOK{} +} + +/* +CacheServiceRowCacheKeysToSavePostOK handles this case with default header values. + +Success +*/ +type CacheServiceRowCacheKeysToSavePostOK struct { +} + +func (o *CacheServiceRowCacheKeysToSavePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceRowCacheKeysToSavePostDefault creates a CacheServiceRowCacheKeysToSavePostDefault with default headers values +func NewCacheServiceRowCacheKeysToSavePostDefault(code int) *CacheServiceRowCacheKeysToSavePostDefault { + return &CacheServiceRowCacheKeysToSavePostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceRowCacheKeysToSavePostDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceRowCacheKeysToSavePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service row cache keys to save post default response +func (o *CacheServiceRowCacheKeysToSavePostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceRowCacheKeysToSavePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceRowCacheKeysToSavePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceRowCacheKeysToSavePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_get_parameters.go new file mode 100644 index 00000000000..326f6fbb415 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceRowCacheSavePeriodGetParams creates a new CacheServiceRowCacheSavePeriodGetParams object +// with the default values initialized. 
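Unlike the string-typed capacity parameter, rckts is an int32 and goes through the swag formatting helpers before reaching the query string. A quick illustration of that helper; note that FormatInt32 never yields an empty string, so the generated `if qRckts != ""` guard is effectively always true:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/swag"
)

func main() {
	// swag.FormatInt32 is what the generated WriteToRequest uses; even a
	// zero value formats as "0", so the parameter is always emitted.
	fmt.Println(swag.FormatInt32(0))    // 0
	fmt.Println(swag.FormatInt32(1000)) // 1000
}
```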
+func NewCacheServiceRowCacheSavePeriodGetParams() *CacheServiceRowCacheSavePeriodGetParams { + + return &CacheServiceRowCacheSavePeriodGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceRowCacheSavePeriodGetParamsWithTimeout creates a new CacheServiceRowCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceRowCacheSavePeriodGetParamsWithTimeout(timeout time.Duration) *CacheServiceRowCacheSavePeriodGetParams { + + return &CacheServiceRowCacheSavePeriodGetParams{ + + timeout: timeout, + } +} + +// NewCacheServiceRowCacheSavePeriodGetParamsWithContext creates a new CacheServiceRowCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceRowCacheSavePeriodGetParamsWithContext(ctx context.Context) *CacheServiceRowCacheSavePeriodGetParams { + + return &CacheServiceRowCacheSavePeriodGetParams{ + + Context: ctx, + } +} + +// NewCacheServiceRowCacheSavePeriodGetParamsWithHTTPClient creates a new CacheServiceRowCacheSavePeriodGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceRowCacheSavePeriodGetParamsWithHTTPClient(client *http.Client) *CacheServiceRowCacheSavePeriodGetParams { + + return &CacheServiceRowCacheSavePeriodGetParams{ + HTTPClient: client, + } +} + +/* +CacheServiceRowCacheSavePeriodGetParams contains all the parameters to send to the API endpoint +for the cache service row cache save period get operation typically these are written to a http.Request +*/ +type CacheServiceRowCacheSavePeriodGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service row cache save period get params +func (o *CacheServiceRowCacheSavePeriodGetParams) WithTimeout(timeout time.Duration) *CacheServiceRowCacheSavePeriodGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service row cache save period get params +func (o *CacheServiceRowCacheSavePeriodGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service row cache save period get params +func (o *CacheServiceRowCacheSavePeriodGetParams) WithContext(ctx context.Context) *CacheServiceRowCacheSavePeriodGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service row cache save period get params +func (o *CacheServiceRowCacheSavePeriodGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service row cache save period get params +func (o *CacheServiceRowCacheSavePeriodGetParams) WithHTTPClient(client *http.Client) *CacheServiceRowCacheSavePeriodGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service row cache save period get params +func (o *CacheServiceRowCacheSavePeriodGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceRowCacheSavePeriodGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_get_responses.go new file mode 100644 index 00000000000..6fbc7e49e5a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceRowCacheSavePeriodGetReader is a Reader for the CacheServiceRowCacheSavePeriodGet structure. +type CacheServiceRowCacheSavePeriodGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceRowCacheSavePeriodGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceRowCacheSavePeriodGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceRowCacheSavePeriodGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceRowCacheSavePeriodGetOK creates a CacheServiceRowCacheSavePeriodGetOK with default headers values +func NewCacheServiceRowCacheSavePeriodGetOK() *CacheServiceRowCacheSavePeriodGetOK { + return &CacheServiceRowCacheSavePeriodGetOK{} +} + +/* +CacheServiceRowCacheSavePeriodGetOK handles this case with default header values. + +Success +*/ +type CacheServiceRowCacheSavePeriodGetOK struct { + Payload int32 +} + +func (o *CacheServiceRowCacheSavePeriodGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CacheServiceRowCacheSavePeriodGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCacheServiceRowCacheSavePeriodGetDefault creates a CacheServiceRowCacheSavePeriodGetDefault with default headers values +func NewCacheServiceRowCacheSavePeriodGetDefault(code int) *CacheServiceRowCacheSavePeriodGetDefault { + return &CacheServiceRowCacheSavePeriodGetDefault{ + _statusCode: code, + } +} + +/* +CacheServiceRowCacheSavePeriodGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceRowCacheSavePeriodGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service row cache save period get default response +func (o *CacheServiceRowCacheSavePeriodGetDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceRowCacheSavePeriodGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceRowCacheSavePeriodGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceRowCacheSavePeriodGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_post_parameters.go new file mode 100644 index 00000000000..ccc2d3a3321 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCacheServiceRowCacheSavePeriodPostParams creates a new CacheServiceRowCacheSavePeriodPostParams object +// with the default values initialized. 
+func NewCacheServiceRowCacheSavePeriodPostParams() *CacheServiceRowCacheSavePeriodPostParams { + var () + return &CacheServiceRowCacheSavePeriodPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceRowCacheSavePeriodPostParamsWithTimeout creates a new CacheServiceRowCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceRowCacheSavePeriodPostParamsWithTimeout(timeout time.Duration) *CacheServiceRowCacheSavePeriodPostParams { + var () + return &CacheServiceRowCacheSavePeriodPostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceRowCacheSavePeriodPostParamsWithContext creates a new CacheServiceRowCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceRowCacheSavePeriodPostParamsWithContext(ctx context.Context) *CacheServiceRowCacheSavePeriodPostParams { + var () + return &CacheServiceRowCacheSavePeriodPostParams{ + + Context: ctx, + } +} + +// NewCacheServiceRowCacheSavePeriodPostParamsWithHTTPClient creates a new CacheServiceRowCacheSavePeriodPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceRowCacheSavePeriodPostParamsWithHTTPClient(client *http.Client) *CacheServiceRowCacheSavePeriodPostParams { + var () + return &CacheServiceRowCacheSavePeriodPostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceRowCacheSavePeriodPostParams contains all the parameters to send to the API endpoint +for the cache service row cache save period post operation typically these are written to a http.Request +*/ +type CacheServiceRowCacheSavePeriodPostParams struct { + + /*Period + row cache save period in seconds + + */ + Period int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) WithTimeout(timeout time.Duration) *CacheServiceRowCacheSavePeriodPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) WithContext(ctx context.Context) *CacheServiceRowCacheSavePeriodPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) WithHTTPClient(client *http.Client) *CacheServiceRowCacheSavePeriodPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPeriod adds the period to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) WithPeriod(period int32) *CacheServiceRowCacheSavePeriodPostParams { + o.SetPeriod(period) + return o +} + +// 
SetPeriod adds the period to the cache service row cache save period post params +func (o *CacheServiceRowCacheSavePeriodPostParams) SetPeriod(period int32) { + o.Period = period +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceRowCacheSavePeriodPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param period + qrPeriod := o.Period + qPeriod := swag.FormatInt32(qrPeriod) + if qPeriod != "" { + if err := r.SetQueryParam("period", qPeriod); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_post_responses.go new file mode 100644 index 00000000000..3c99dace601 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_row_cache_save_period_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceRowCacheSavePeriodPostReader is a Reader for the CacheServiceRowCacheSavePeriodPost structure. +type CacheServiceRowCacheSavePeriodPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceRowCacheSavePeriodPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceRowCacheSavePeriodPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceRowCacheSavePeriodPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceRowCacheSavePeriodPostOK creates a CacheServiceRowCacheSavePeriodPostOK with default headers values +func NewCacheServiceRowCacheSavePeriodPostOK() *CacheServiceRowCacheSavePeriodPostOK { + return &CacheServiceRowCacheSavePeriodPostOK{} +} + +/* +CacheServiceRowCacheSavePeriodPostOK handles this case with default header values. + +Success +*/ +type CacheServiceRowCacheSavePeriodPostOK struct { +} + +func (o *CacheServiceRowCacheSavePeriodPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceRowCacheSavePeriodPostDefault creates a CacheServiceRowCacheSavePeriodPostDefault with default headers values +func NewCacheServiceRowCacheSavePeriodPostDefault(code int) *CacheServiceRowCacheSavePeriodPostDefault { + return &CacheServiceRowCacheSavePeriodPostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceRowCacheSavePeriodPostDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceRowCacheSavePeriodPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service row cache save period post default response +func (o *CacheServiceRowCacheSavePeriodPostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceRowCacheSavePeriodPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceRowCacheSavePeriodPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceRowCacheSavePeriodPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_save_caches_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_save_caches_post_parameters.go new file mode 100644 index 00000000000..a10cfcac079 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_save_caches_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCacheServiceSaveCachesPostParams creates a new CacheServiceSaveCachesPostParams object +// with the default values initialized. 
+func NewCacheServiceSaveCachesPostParams() *CacheServiceSaveCachesPostParams { + + return &CacheServiceSaveCachesPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCacheServiceSaveCachesPostParamsWithTimeout creates a new CacheServiceSaveCachesPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCacheServiceSaveCachesPostParamsWithTimeout(timeout time.Duration) *CacheServiceSaveCachesPostParams { + + return &CacheServiceSaveCachesPostParams{ + + timeout: timeout, + } +} + +// NewCacheServiceSaveCachesPostParamsWithContext creates a new CacheServiceSaveCachesPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCacheServiceSaveCachesPostParamsWithContext(ctx context.Context) *CacheServiceSaveCachesPostParams { + + return &CacheServiceSaveCachesPostParams{ + + Context: ctx, + } +} + +// NewCacheServiceSaveCachesPostParamsWithHTTPClient creates a new CacheServiceSaveCachesPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCacheServiceSaveCachesPostParamsWithHTTPClient(client *http.Client) *CacheServiceSaveCachesPostParams { + + return &CacheServiceSaveCachesPostParams{ + HTTPClient: client, + } +} + +/* +CacheServiceSaveCachesPostParams contains all the parameters to send to the API endpoint +for the cache service save caches post operation typically these are written to a http.Request +*/ +type CacheServiceSaveCachesPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the cache service save caches post params +func (o *CacheServiceSaveCachesPostParams) WithTimeout(timeout time.Duration) *CacheServiceSaveCachesPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the cache service save caches post params +func (o *CacheServiceSaveCachesPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the cache service save caches post params +func (o *CacheServiceSaveCachesPostParams) WithContext(ctx context.Context) *CacheServiceSaveCachesPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the cache service save caches post params +func (o *CacheServiceSaveCachesPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the cache service save caches post params +func (o *CacheServiceSaveCachesPostParams) WithHTTPClient(client *http.Client) *CacheServiceSaveCachesPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the cache service save caches post params +func (o *CacheServiceSaveCachesPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CacheServiceSaveCachesPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_save_caches_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_save_caches_post_responses.go new file mode 100644 index 00000000000..9223429e253 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/cache_service_save_caches_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CacheServiceSaveCachesPostReader is a Reader for the CacheServiceSaveCachesPost structure. +type CacheServiceSaveCachesPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CacheServiceSaveCachesPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCacheServiceSaveCachesPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCacheServiceSaveCachesPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCacheServiceSaveCachesPostOK creates a CacheServiceSaveCachesPostOK with default headers values +func NewCacheServiceSaveCachesPostOK() *CacheServiceSaveCachesPostOK { + return &CacheServiceSaveCachesPostOK{} +} + +/* +CacheServiceSaveCachesPostOK handles this case with default header values. + +Success +*/ +type CacheServiceSaveCachesPostOK struct { +} + +func (o *CacheServiceSaveCachesPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCacheServiceSaveCachesPostDefault creates a CacheServiceSaveCachesPostDefault with default headers values +func NewCacheServiceSaveCachesPostDefault(code int) *CacheServiceSaveCachesPostDefault { + return &CacheServiceSaveCachesPostDefault{ + _statusCode: code, + } +} + +/* +CacheServiceSaveCachesPostDefault handles this case with default header values. 
+ +internal server error +*/ +type CacheServiceSaveCachesPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the cache service save caches post default response +func (o *CacheServiceSaveCachesPostDefault) Code() int { + return o._statusCode +} + +func (o *CacheServiceSaveCachesPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CacheServiceSaveCachesPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CacheServiceSaveCachesPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_get_parameters.go new file mode 100644 index 00000000000..65f03b65c44 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_get_parameters.go @@ -0,0 +1,225 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCollectdByPluginidGetParams creates a new CollectdByPluginidGetParams object +// with the default values initialized. 
+func NewCollectdByPluginidGetParams() *CollectdByPluginidGetParams { + var () + return &CollectdByPluginidGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCollectdByPluginidGetParamsWithTimeout creates a new CollectdByPluginidGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCollectdByPluginidGetParamsWithTimeout(timeout time.Duration) *CollectdByPluginidGetParams { + var () + return &CollectdByPluginidGetParams{ + + timeout: timeout, + } +} + +// NewCollectdByPluginidGetParamsWithContext creates a new CollectdByPluginidGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCollectdByPluginidGetParamsWithContext(ctx context.Context) *CollectdByPluginidGetParams { + var () + return &CollectdByPluginidGetParams{ + + Context: ctx, + } +} + +// NewCollectdByPluginidGetParamsWithHTTPClient creates a new CollectdByPluginidGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCollectdByPluginidGetParamsWithHTTPClient(client *http.Client) *CollectdByPluginidGetParams { + var () + return &CollectdByPluginidGetParams{ + HTTPClient: client, + } +} + +/* +CollectdByPluginidGetParams contains all the parameters to send to the API endpoint +for the collectd by pluginid get operation typically these are written to a http.Request +*/ +type CollectdByPluginidGetParams struct { + + /*Instance + The plugin instance + + */ + Instance *string + /*Pluginid + The plugin ID + + */ + Pluginid string + /*Type + The plugin type + + */ + Type string + /*TypeInstance + The plugin type instance + + */ + TypeInstance *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) WithTimeout(timeout time.Duration) *CollectdByPluginidGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) WithContext(ctx context.Context) *CollectdByPluginidGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) WithHTTPClient(client *http.Client) *CollectdByPluginidGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithInstance adds the instance to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) WithInstance(instance *string) *CollectdByPluginidGetParams { + o.SetInstance(instance) + return o +} + +// SetInstance adds the instance to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) SetInstance(instance *string) { + o.Instance = instance +} + +// WithPluginid adds the pluginid to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) WithPluginid(pluginid string) *CollectdByPluginidGetParams { + 
o.SetPluginid(pluginid) + return o +} + +// SetPluginid adds the pluginid to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) SetPluginid(pluginid string) { + o.Pluginid = pluginid +} + +// WithType adds the typeVar to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) WithType(typeVar string) *CollectdByPluginidGetParams { + o.SetType(typeVar) + return o +} + +// SetType adds the type to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) SetType(typeVar string) { + o.Type = typeVar +} + +// WithTypeInstance adds the typeInstance to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) WithTypeInstance(typeInstance *string) *CollectdByPluginidGetParams { + o.SetTypeInstance(typeInstance) + return o +} + +// SetTypeInstance adds the typeInstance to the collectd by pluginid get params +func (o *CollectdByPluginidGetParams) SetTypeInstance(typeInstance *string) { + o.TypeInstance = typeInstance +} + +// WriteToRequest writes these params to a swagger request +func (o *CollectdByPluginidGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Instance != nil { + + // query param instance + var qrInstance string + if o.Instance != nil { + qrInstance = *o.Instance + } + qInstance := qrInstance + if qInstance != "" { + if err := r.SetQueryParam("instance", qInstance); err != nil { + return err + } + } + + } + + // path param pluginid + if err := r.SetPathParam("pluginid", o.Pluginid); err != nil { + return err + } + + // query param type + qrType := o.Type + qType := qrType + if qType != "" { + if err := r.SetQueryParam("type", qType); err != nil { + return err + } + } + + if o.TypeInstance != nil { + + // query param type_instance + var qrTypeInstance string + if o.TypeInstance != nil { + qrTypeInstance = *o.TypeInstance + } + qTypeInstance := qrTypeInstance + if qTypeInstance != "" { + if err := r.SetQueryParam("type_instance", qTypeInstance); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_get_responses.go new file mode 100644 index 00000000000..76cff6d103a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CollectdByPluginidGetReader is a Reader for the CollectdByPluginidGet structure. +type CollectdByPluginidGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *CollectdByPluginidGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCollectdByPluginidGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCollectdByPluginidGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCollectdByPluginidGetOK creates a CollectdByPluginidGetOK with default headers values +func NewCollectdByPluginidGetOK() *CollectdByPluginidGetOK { + return &CollectdByPluginidGetOK{} +} + +/* +CollectdByPluginidGetOK handles this case with default header values. + +Success +*/ +type CollectdByPluginidGetOK struct { + Payload []*models.CollectdValue +} + +func (o *CollectdByPluginidGetOK) GetPayload() []*models.CollectdValue { + return o.Payload +} + +func (o *CollectdByPluginidGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCollectdByPluginidGetDefault creates a CollectdByPluginidGetDefault with default headers values +func NewCollectdByPluginidGetDefault(code int) *CollectdByPluginidGetDefault { + return &CollectdByPluginidGetDefault{ + _statusCode: code, + } +} + +/* +CollectdByPluginidGetDefault handles this case with default header values. + +internal server error +*/ +type CollectdByPluginidGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the collectd by pluginid get default response +func (o *CollectdByPluginidGetDefault) Code() int { + return o._statusCode +} + +func (o *CollectdByPluginidGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CollectdByPluginidGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CollectdByPluginidGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_post_parameters.go new file mode 100644 index 00000000000..b5f2f233c86 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_post_parameters.go @@ -0,0 +1,265 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCollectdByPluginidPostParams creates a new CollectdByPluginidPostParams object +// with the default values initialized. +func NewCollectdByPluginidPostParams() *CollectdByPluginidPostParams { + var () + return &CollectdByPluginidPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCollectdByPluginidPostParamsWithTimeout creates a new CollectdByPluginidPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCollectdByPluginidPostParamsWithTimeout(timeout time.Duration) *CollectdByPluginidPostParams { + var () + return &CollectdByPluginidPostParams{ + + timeout: timeout, + } +} + +// NewCollectdByPluginidPostParamsWithContext creates a new CollectdByPluginidPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCollectdByPluginidPostParamsWithContext(ctx context.Context) *CollectdByPluginidPostParams { + var () + return &CollectdByPluginidPostParams{ + + Context: ctx, + } +} + +// NewCollectdByPluginidPostParamsWithHTTPClient creates a new CollectdByPluginidPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCollectdByPluginidPostParamsWithHTTPClient(client *http.Client) *CollectdByPluginidPostParams { + var () + return &CollectdByPluginidPostParams{ + HTTPClient: client, + } +} + +/* +CollectdByPluginidPostParams contains all the parameters to send to the API endpoint +for the collectd by pluginid post operation typically these are written to a http.Request +*/ +type CollectdByPluginidPostParams struct { + + /*Enable + set to true to enable all, anything else or omit to disable + + */ + Enable *bool + /*Instance + The plugin instance typically #CPU indicating per CPU metric. Regex are supported. Omit for all + + */ + Instance *string + /*Pluginid + The plugin ID describes the component the metric belongs to. Examples are cache, thrift, etc'. Regex are supported. + + */ + Pluginid string + /*Type + The plugin type, the type of the information. Examples are total_operations, bytes, etc'. Regex are supported. Omit for all + + */ + Type *string + /*TypeInstance + The plugin type instance, the specific metric. Examples are total_writes, total_size, zones, etc'.
Regex are supported, Omit for all + + */ + TypeInstance *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithTimeout(timeout time.Duration) *CollectdByPluginidPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithContext(ctx context.Context) *CollectdByPluginidPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithHTTPClient(client *http.Client) *CollectdByPluginidPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEnable adds the enable to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithEnable(enable *bool) *CollectdByPluginidPostParams { + o.SetEnable(enable) + return o +} + +// SetEnable adds the enable to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetEnable(enable *bool) { + o.Enable = enable +} + +// WithInstance adds the instance to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithInstance(instance *string) *CollectdByPluginidPostParams { + o.SetInstance(instance) + return o +} + +// SetInstance adds the instance to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetInstance(instance *string) { + o.Instance = instance +} + +// WithPluginid adds the pluginid to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithPluginid(pluginid string) *CollectdByPluginidPostParams { + o.SetPluginid(pluginid) + return o +} + +// SetPluginid adds the pluginid to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetPluginid(pluginid string) { + o.Pluginid = pluginid +} + +// WithType adds the typeVar to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithType(typeVar *string) *CollectdByPluginidPostParams { + o.SetType(typeVar) + return o +} + +// SetType adds the type to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetType(typeVar *string) { + o.Type = typeVar +} + +// WithTypeInstance adds the typeInstance to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) WithTypeInstance(typeInstance *string) *CollectdByPluginidPostParams { + o.SetTypeInstance(typeInstance) + return o +} + +// SetTypeInstance adds the typeInstance to the collectd by pluginid post params +func (o *CollectdByPluginidPostParams) SetTypeInstance(typeInstance *string) { + o.TypeInstance = typeInstance +} + +// WriteToRequest writes these params to a swagger request +func (o *CollectdByPluginidPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error 
+ + if o.Enable != nil { + + // query param enable + var qrEnable bool + if o.Enable != nil { + qrEnable = *o.Enable + } + qEnable := swag.FormatBool(qrEnable) + if qEnable != "" { + if err := r.SetQueryParam("enable", qEnable); err != nil { + return err + } + } + + } + + if o.Instance != nil { + + // query param instance + var qrInstance string + if o.Instance != nil { + qrInstance = *o.Instance + } + qInstance := qrInstance + if qInstance != "" { + if err := r.SetQueryParam("instance", qInstance); err != nil { + return err + } + } + + } + + // path param pluginid + if err := r.SetPathParam("pluginid", o.Pluginid); err != nil { + return err + } + + if o.Type != nil { + + // query param type + var qrType string + if o.Type != nil { + qrType = *o.Type + } + qType := qrType + if qType != "" { + if err := r.SetQueryParam("type", qType); err != nil { + return err + } + } + + } + + if o.TypeInstance != nil { + + // query param type_instance + var qrTypeInstance string + if o.TypeInstance != nil { + qrTypeInstance = *o.TypeInstance + } + qTypeInstance := qrTypeInstance + if qTypeInstance != "" { + if err := r.SetQueryParam("type_instance", qTypeInstance); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_post_responses.go new file mode 100644 index 00000000000..7a55b5fbae7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_by_pluginid_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CollectdByPluginidPostReader is a Reader for the CollectdByPluginidPost structure. +type CollectdByPluginidPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CollectdByPluginidPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCollectdByPluginidPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCollectdByPluginidPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCollectdByPluginidPostOK creates a CollectdByPluginidPostOK with default headers values +func NewCollectdByPluginidPostOK() *CollectdByPluginidPostOK { + return &CollectdByPluginidPostOK{} +} + +/* +CollectdByPluginidPostOK handles this case with default header values. 
+ +Success +*/ +type CollectdByPluginidPostOK struct { +} + +func (o *CollectdByPluginidPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCollectdByPluginidPostDefault creates a CollectdByPluginidPostDefault with default headers values +func NewCollectdByPluginidPostDefault(code int) *CollectdByPluginidPostDefault { + return &CollectdByPluginidPostDefault{ + _statusCode: code, + } +} + +/* +CollectdByPluginidPostDefault handles this case with default header values. + +internal server error +*/ +type CollectdByPluginidPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the collectd by pluginid post default response +func (o *CollectdByPluginidPostDefault) Code() int { + return o._statusCode +} + +func (o *CollectdByPluginidPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CollectdByPluginidPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CollectdByPluginidPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_get_parameters.go new file mode 100644 index 00000000000..cfc9a2e50bf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCollectdGetParams creates a new CollectdGetParams object +// with the default values initialized. 
+func NewCollectdGetParams() *CollectdGetParams { + + return &CollectdGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCollectdGetParamsWithTimeout creates a new CollectdGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCollectdGetParamsWithTimeout(timeout time.Duration) *CollectdGetParams { + + return &CollectdGetParams{ + + timeout: timeout, + } +} + +// NewCollectdGetParamsWithContext creates a new CollectdGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCollectdGetParamsWithContext(ctx context.Context) *CollectdGetParams { + + return &CollectdGetParams{ + + Context: ctx, + } +} + +// NewCollectdGetParamsWithHTTPClient creates a new CollectdGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCollectdGetParamsWithHTTPClient(client *http.Client) *CollectdGetParams { + + return &CollectdGetParams{ + HTTPClient: client, + } +} + +/* +CollectdGetParams contains all the parameters to send to the API endpoint +for the collectd get operation typically these are written to a http.Request +*/ +type CollectdGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the collectd get params +func (o *CollectdGetParams) WithTimeout(timeout time.Duration) *CollectdGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the collectd get params +func (o *CollectdGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the collectd get params +func (o *CollectdGetParams) WithContext(ctx context.Context) *CollectdGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the collectd get params +func (o *CollectdGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the collectd get params +func (o *CollectdGetParams) WithHTTPClient(client *http.Client) *CollectdGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the collectd get params +func (o *CollectdGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CollectdGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_get_responses.go new file mode 100644 index 00000000000..59e748b0c75 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CollectdGetReader is a Reader for the CollectdGet structure. +type CollectdGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CollectdGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCollectdGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCollectdGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCollectdGetOK creates a CollectdGetOK with default headers values +func NewCollectdGetOK() *CollectdGetOK { + return &CollectdGetOK{} +} + +/* +CollectdGetOK handles this case with default header values. + +Success +*/ +type CollectdGetOK struct { + Payload []*models.CollectdMetricStatus +} + +func (o *CollectdGetOK) GetPayload() []*models.CollectdMetricStatus { + return o.Payload +} + +func (o *CollectdGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCollectdGetDefault creates a CollectdGetDefault with default headers values +func NewCollectdGetDefault(code int) *CollectdGetDefault { + return &CollectdGetDefault{ + _statusCode: code, + } +} + +/* +CollectdGetDefault handles this case with default header values. + +internal server error +*/ +type CollectdGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the collectd get default response +func (o *CollectdGetDefault) Code() int { + return o._statusCode +} + +func (o *CollectdGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CollectdGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CollectdGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_post_parameters.go new file mode 100644 index 00000000000..e951d749ec0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_post_parameters.go @@ -0,0 +1,148 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewCollectdPostParams creates a new CollectdPostParams object +// with the default values initialized. +func NewCollectdPostParams() *CollectdPostParams { + var () + return &CollectdPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCollectdPostParamsWithTimeout creates a new CollectdPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCollectdPostParamsWithTimeout(timeout time.Duration) *CollectdPostParams { + var () + return &CollectdPostParams{ + + timeout: timeout, + } +} + +// NewCollectdPostParamsWithContext creates a new CollectdPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCollectdPostParamsWithContext(ctx context.Context) *CollectdPostParams { + var () + return &CollectdPostParams{ + + Context: ctx, + } +} + +// NewCollectdPostParamsWithHTTPClient creates a new CollectdPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCollectdPostParamsWithHTTPClient(client *http.Client) *CollectdPostParams { + var () + return &CollectdPostParams{ + HTTPClient: client, + } +} + +/* +CollectdPostParams contains all the parameters to send to the API endpoint +for the collectd post operation typically these are written to a http.Request +*/ +type CollectdPostParams struct { + + /*Enable + set to true to enable all, anything else or omit to disable + + */ + Enable *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the collectd post params +func (o *CollectdPostParams) WithTimeout(timeout time.Duration) *CollectdPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the collectd post params +func (o *CollectdPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the collectd post params +func (o *CollectdPostParams) WithContext(ctx context.Context) *CollectdPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the collectd post params +func (o *CollectdPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the collectd post params +func (o *CollectdPostParams) WithHTTPClient(client *http.Client) *CollectdPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the collectd post params +func (o *CollectdPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEnable adds the enable to the collectd post params +func (o *CollectdPostParams) WithEnable(enable *bool) *CollectdPostParams { + o.SetEnable(enable) + return o +} + +// SetEnable adds the enable to the collectd post params +func (o *CollectdPostParams) SetEnable(enable *bool) { + o.Enable = enable +} + +// WriteToRequest writes these params to a swagger request +func (o *CollectdPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Enable != nil { + + // query param enable + var qrEnable bool + 
if o.Enable != nil { + qrEnable = *o.Enable + } + qEnable := swag.FormatBool(qrEnable) + if qEnable != "" { + if err := r.SetQueryParam("enable", qEnable); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_post_responses.go new file mode 100644 index 00000000000..e108b03d5e9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/collectd_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CollectdPostReader is a Reader for the CollectdPost structure. +type CollectdPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CollectdPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCollectdPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCollectdPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCollectdPostOK creates a CollectdPostOK with default headers values +func NewCollectdPostOK() *CollectdPostOK { + return &CollectdPostOK{} +} + +/* +CollectdPostOK handles this case with default header values. + +Success +*/ +type CollectdPostOK struct { +} + +func (o *CollectdPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCollectdPostDefault creates a CollectdPostDefault with default headers values +func NewCollectdPostDefault(code int) *CollectdPostDefault { + return &CollectdPostDefault{ + _statusCode: code, + } +} + +/* +CollectdPostDefault handles this case with default header values. 
+ +internal server error +*/ +type CollectdPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the collectd post default response +func (o *CollectdPostDefault) Code() int { + return o._statusCode +} + +func (o *CollectdPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CollectdPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CollectdPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_delete_parameters.go new file mode 100644 index 00000000000..da72d85252e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_delete_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyAutocompactionByNameDeleteParams creates a new ColumnFamilyAutocompactionByNameDeleteParams object +// with the default values initialized. 
+func NewColumnFamilyAutocompactionByNameDeleteParams() *ColumnFamilyAutocompactionByNameDeleteParams { + var () + return &ColumnFamilyAutocompactionByNameDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyAutocompactionByNameDeleteParamsWithTimeout creates a new ColumnFamilyAutocompactionByNameDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyAutocompactionByNameDeleteParamsWithTimeout(timeout time.Duration) *ColumnFamilyAutocompactionByNameDeleteParams { + var () + return &ColumnFamilyAutocompactionByNameDeleteParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyAutocompactionByNameDeleteParamsWithContext creates a new ColumnFamilyAutocompactionByNameDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyAutocompactionByNameDeleteParamsWithContext(ctx context.Context) *ColumnFamilyAutocompactionByNameDeleteParams { + var () + return &ColumnFamilyAutocompactionByNameDeleteParams{ + + Context: ctx, + } +} + +// NewColumnFamilyAutocompactionByNameDeleteParamsWithHTTPClient creates a new ColumnFamilyAutocompactionByNameDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyAutocompactionByNameDeleteParamsWithHTTPClient(client *http.Client) *ColumnFamilyAutocompactionByNameDeleteParams { + var () + return &ColumnFamilyAutocompactionByNameDeleteParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyAutocompactionByNameDeleteParams contains all the parameters to send to the API endpoint +for the column family autocompaction by name delete operation typically these are written to a http.Request +*/ +type ColumnFamilyAutocompactionByNameDeleteParams struct { + + /*Name + The table name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family autocompaction by name delete params +func (o *ColumnFamilyAutocompactionByNameDeleteParams) WithTimeout(timeout time.Duration) *ColumnFamilyAutocompactionByNameDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family autocompaction by name delete params +func (o *ColumnFamilyAutocompactionByNameDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family autocompaction by name delete params +func (o *ColumnFamilyAutocompactionByNameDeleteParams) WithContext(ctx context.Context) *ColumnFamilyAutocompactionByNameDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family autocompaction by name delete params +func (o *ColumnFamilyAutocompactionByNameDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family autocompaction by name delete params +func (o *ColumnFamilyAutocompactionByNameDeleteParams) WithHTTPClient(client *http.Client) *ColumnFamilyAutocompactionByNameDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family autocompaction by name delete params +func (o *ColumnFamilyAutocompactionByNameDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family autocompaction by name delete params +func (o 
*ColumnFamilyAutocompactionByNameDeleteParams) WithName(name string) *ColumnFamilyAutocompactionByNameDeleteParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family autocompaction by name delete params +func (o *ColumnFamilyAutocompactionByNameDeleteParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyAutocompactionByNameDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_delete_responses.go new file mode 100644 index 00000000000..80d0f074229 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_delete_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyAutocompactionByNameDeleteReader is a Reader for the ColumnFamilyAutocompactionByNameDelete structure. +type ColumnFamilyAutocompactionByNameDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyAutocompactionByNameDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyAutocompactionByNameDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyAutocompactionByNameDeleteDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyAutocompactionByNameDeleteOK creates a ColumnFamilyAutocompactionByNameDeleteOK with default headers values +func NewColumnFamilyAutocompactionByNameDeleteOK() *ColumnFamilyAutocompactionByNameDeleteOK { + return &ColumnFamilyAutocompactionByNameDeleteOK{} +} + +/* +ColumnFamilyAutocompactionByNameDeleteOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyAutocompactionByNameDeleteOK struct { +} + +func (o *ColumnFamilyAutocompactionByNameDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyAutocompactionByNameDeleteDefault creates a ColumnFamilyAutocompactionByNameDeleteDefault with default headers values +func NewColumnFamilyAutocompactionByNameDeleteDefault(code int) *ColumnFamilyAutocompactionByNameDeleteDefault { + return &ColumnFamilyAutocompactionByNameDeleteDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyAutocompactionByNameDeleteDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyAutocompactionByNameDeleteDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family autocompaction by name delete default response +func (o *ColumnFamilyAutocompactionByNameDeleteDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyAutocompactionByNameDeleteDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyAutocompactionByNameDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyAutocompactionByNameDeleteDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_get_parameters.go new file mode 100644 index 00000000000..0dd06fb084d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyAutocompactionByNameGetParams creates a new ColumnFamilyAutocompactionByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyAutocompactionByNameGetParams() *ColumnFamilyAutocompactionByNameGetParams { + var () + return &ColumnFamilyAutocompactionByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyAutocompactionByNameGetParamsWithTimeout creates a new ColumnFamilyAutocompactionByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyAutocompactionByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyAutocompactionByNameGetParams { + var () + return &ColumnFamilyAutocompactionByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyAutocompactionByNameGetParamsWithContext creates a new ColumnFamilyAutocompactionByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyAutocompactionByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyAutocompactionByNameGetParams { + var () + return &ColumnFamilyAutocompactionByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyAutocompactionByNameGetParamsWithHTTPClient creates a new ColumnFamilyAutocompactionByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyAutocompactionByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyAutocompactionByNameGetParams { + var () + return &ColumnFamilyAutocompactionByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyAutocompactionByNameGetParams contains all the parameters to send to the API endpoint +for the column family autocompaction by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyAutocompactionByNameGetParams struct { + + /*Name + The table name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyAutocompactionByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyAutocompactionByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyAutocompactionByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) WithName(name string) *ColumnFamilyAutocompactionByNameGetParams { + o.SetName(name) 
+ return o +} + +// SetName adds the name to the column family autocompaction by name get params +func (o *ColumnFamilyAutocompactionByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyAutocompactionByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_get_responses.go new file mode 100644 index 00000000000..2c68daccba9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyAutocompactionByNameGetReader is a Reader for the ColumnFamilyAutocompactionByNameGet structure. +type ColumnFamilyAutocompactionByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyAutocompactionByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyAutocompactionByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyAutocompactionByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyAutocompactionByNameGetOK creates a ColumnFamilyAutocompactionByNameGetOK with default headers values +func NewColumnFamilyAutocompactionByNameGetOK() *ColumnFamilyAutocompactionByNameGetOK { + return &ColumnFamilyAutocompactionByNameGetOK{} +} + +/* +ColumnFamilyAutocompactionByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyAutocompactionByNameGetOK struct { + Payload bool +} + +func (o *ColumnFamilyAutocompactionByNameGetOK) GetPayload() bool { + return o.Payload +} + +func (o *ColumnFamilyAutocompactionByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyAutocompactionByNameGetDefault creates a ColumnFamilyAutocompactionByNameGetDefault with default headers values +func NewColumnFamilyAutocompactionByNameGetDefault(code int) *ColumnFamilyAutocompactionByNameGetDefault { + return &ColumnFamilyAutocompactionByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyAutocompactionByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyAutocompactionByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family autocompaction by name get default response +func (o *ColumnFamilyAutocompactionByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyAutocompactionByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyAutocompactionByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyAutocompactionByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_post_parameters.go new file mode 100644 index 00000000000..df9523b2008 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_post_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyAutocompactionByNamePostParams creates a new ColumnFamilyAutocompactionByNamePostParams object +// with the default values initialized. 
+func NewColumnFamilyAutocompactionByNamePostParams() *ColumnFamilyAutocompactionByNamePostParams { + var () + return &ColumnFamilyAutocompactionByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyAutocompactionByNamePostParamsWithTimeout creates a new ColumnFamilyAutocompactionByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyAutocompactionByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyAutocompactionByNamePostParams { + var () + return &ColumnFamilyAutocompactionByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyAutocompactionByNamePostParamsWithContext creates a new ColumnFamilyAutocompactionByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyAutocompactionByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyAutocompactionByNamePostParams { + var () + return &ColumnFamilyAutocompactionByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyAutocompactionByNamePostParamsWithHTTPClient creates a new ColumnFamilyAutocompactionByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyAutocompactionByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyAutocompactionByNamePostParams { + var () + return &ColumnFamilyAutocompactionByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyAutocompactionByNamePostParams contains all the parameters to send to the API endpoint +for the column family autocompaction by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyAutocompactionByNamePostParams struct { + + /*Name + The table name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyAutocompactionByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyAutocompactionByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyAutocompactionByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) WithName(name string) 
*ColumnFamilyAutocompactionByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family autocompaction by name post params +func (o *ColumnFamilyAutocompactionByNamePostParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyAutocompactionByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_post_responses.go new file mode 100644 index 00000000000..5312e03442a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_autocompaction_by_name_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyAutocompactionByNamePostReader is a Reader for the ColumnFamilyAutocompactionByNamePost structure. +type ColumnFamilyAutocompactionByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyAutocompactionByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyAutocompactionByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyAutocompactionByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyAutocompactionByNamePostOK creates a ColumnFamilyAutocompactionByNamePostOK with default headers values +func NewColumnFamilyAutocompactionByNamePostOK() *ColumnFamilyAutocompactionByNamePostOK { + return &ColumnFamilyAutocompactionByNamePostOK{} +} + +/* +ColumnFamilyAutocompactionByNamePostOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyAutocompactionByNamePostOK struct { +} + +func (o *ColumnFamilyAutocompactionByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyAutocompactionByNamePostDefault creates a ColumnFamilyAutocompactionByNamePostDefault with default headers values +func NewColumnFamilyAutocompactionByNamePostDefault(code int) *ColumnFamilyAutocompactionByNamePostDefault { + return &ColumnFamilyAutocompactionByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyAutocompactionByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyAutocompactionByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family autocompaction by name post default response +func (o *ColumnFamilyAutocompactionByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyAutocompactionByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyAutocompactionByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyAutocompactionByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_built_indexes_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_built_indexes_by_name_get_parameters.go new file mode 100644 index 00000000000..1db3ffc46da --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_built_indexes_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyBuiltIndexesByNameGetParams creates a new ColumnFamilyBuiltIndexesByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyBuiltIndexesByNameGetParams() *ColumnFamilyBuiltIndexesByNameGetParams { + var () + return &ColumnFamilyBuiltIndexesByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyBuiltIndexesByNameGetParamsWithTimeout creates a new ColumnFamilyBuiltIndexesByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyBuiltIndexesByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyBuiltIndexesByNameGetParams { + var () + return &ColumnFamilyBuiltIndexesByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyBuiltIndexesByNameGetParamsWithContext creates a new ColumnFamilyBuiltIndexesByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyBuiltIndexesByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyBuiltIndexesByNameGetParams { + var () + return &ColumnFamilyBuiltIndexesByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyBuiltIndexesByNameGetParamsWithHTTPClient creates a new ColumnFamilyBuiltIndexesByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyBuiltIndexesByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyBuiltIndexesByNameGetParams { + var () + return &ColumnFamilyBuiltIndexesByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyBuiltIndexesByNameGetParams contains all the parameters to send to the API endpoint +for the column family built indexes by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyBuiltIndexesByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyBuiltIndexesByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyBuiltIndexesByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyBuiltIndexesByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) WithName(name string) *ColumnFamilyBuiltIndexesByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family 
built indexes by name get params +func (o *ColumnFamilyBuiltIndexesByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyBuiltIndexesByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_built_indexes_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_built_indexes_by_name_get_responses.go new file mode 100644 index 00000000000..73ef1f2a76a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_built_indexes_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyBuiltIndexesByNameGetReader is a Reader for the ColumnFamilyBuiltIndexesByNameGet structure. +type ColumnFamilyBuiltIndexesByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyBuiltIndexesByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyBuiltIndexesByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyBuiltIndexesByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyBuiltIndexesByNameGetOK creates a ColumnFamilyBuiltIndexesByNameGetOK with default headers values +func NewColumnFamilyBuiltIndexesByNameGetOK() *ColumnFamilyBuiltIndexesByNameGetOK { + return &ColumnFamilyBuiltIndexesByNameGetOK{} +} + +/* +ColumnFamilyBuiltIndexesByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyBuiltIndexesByNameGetOK struct { + Payload []string +} + +func (o *ColumnFamilyBuiltIndexesByNameGetOK) GetPayload() []string { + return o.Payload +} + +func (o *ColumnFamilyBuiltIndexesByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyBuiltIndexesByNameGetDefault creates a ColumnFamilyBuiltIndexesByNameGetDefault with default headers values +func NewColumnFamilyBuiltIndexesByNameGetDefault(code int) *ColumnFamilyBuiltIndexesByNameGetDefault { + return &ColumnFamilyBuiltIndexesByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyBuiltIndexesByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyBuiltIndexesByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family built indexes by name get default response +func (o *ColumnFamilyBuiltIndexesByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyBuiltIndexesByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyBuiltIndexesByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyBuiltIndexesByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_by_name_post_parameters.go new file mode 100644 index 00000000000..cac0499e6f8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_by_name_post_parameters.go @@ -0,0 +1,187 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewColumnFamilyCompactionByNamePostParams creates a new ColumnFamilyCompactionByNamePostParams object +// with the default values initialized. 
+func NewColumnFamilyCompactionByNamePostParams() *ColumnFamilyCompactionByNamePostParams { + var () + return &ColumnFamilyCompactionByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyCompactionByNamePostParamsWithTimeout creates a new ColumnFamilyCompactionByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyCompactionByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyCompactionByNamePostParams { + var () + return &ColumnFamilyCompactionByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyCompactionByNamePostParamsWithContext creates a new ColumnFamilyCompactionByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyCompactionByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyCompactionByNamePostParams { + var () + return &ColumnFamilyCompactionByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyCompactionByNamePostParamsWithHTTPClient creates a new ColumnFamilyCompactionByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyCompactionByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyCompactionByNamePostParams { + var () + return &ColumnFamilyCompactionByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyCompactionByNamePostParams contains all the parameters to send to the API endpoint +for the column family compaction by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyCompactionByNamePostParams struct { + + /*Maximum + The maximum number of sstables in queue before compaction kicks off + + */ + Maximum int32 + /*Minimum + The minimum number of sstables in queue before compaction kicks off + + */ + Minimum int32 + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyCompactionByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyCompactionByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyCompactionByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithMaximum adds the maximum to the column family compaction by name post params +func (o 
*ColumnFamilyCompactionByNamePostParams) WithMaximum(maximum int32) *ColumnFamilyCompactionByNamePostParams { + o.SetMaximum(maximum) + return o +} + +// SetMaximum adds the maximum to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) SetMaximum(maximum int32) { + o.Maximum = maximum +} + +// WithMinimum adds the minimum to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) WithMinimum(minimum int32) *ColumnFamilyCompactionByNamePostParams { + o.SetMinimum(minimum) + return o +} + +// SetMinimum adds the minimum to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) SetMinimum(minimum int32) { + o.Minimum = minimum +} + +// WithName adds the name to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) WithName(name string) *ColumnFamilyCompactionByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family compaction by name post params +func (o *ColumnFamilyCompactionByNamePostParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyCompactionByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param maximum + qrMaximum := o.Maximum + qMaximum := swag.FormatInt32(qrMaximum) + if qMaximum != "" { + if err := r.SetQueryParam("maximum", qMaximum); err != nil { + return err + } + } + + // query param minimum + qrMinimum := o.Minimum + qMinimum := swag.FormatInt32(qrMinimum) + if qMinimum != "" { + if err := r.SetQueryParam("minimum", qMinimum); err != nil { + return err + } + } + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_by_name_post_responses.go new file mode 100644 index 00000000000..204c08f663b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_by_name_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyCompactionByNamePostReader is a Reader for the ColumnFamilyCompactionByNamePost structure. +type ColumnFamilyCompactionByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyCompactionByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyCompactionByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyCompactionByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyCompactionByNamePostOK creates a ColumnFamilyCompactionByNamePostOK with default headers values +func NewColumnFamilyCompactionByNamePostOK() *ColumnFamilyCompactionByNamePostOK { + return &ColumnFamilyCompactionByNamePostOK{} +} + +/* +ColumnFamilyCompactionByNamePostOK handles this case with default header values. + +Success +*/ +type ColumnFamilyCompactionByNamePostOK struct { + Payload string +} + +func (o *ColumnFamilyCompactionByNamePostOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyCompactionByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyCompactionByNamePostDefault creates a ColumnFamilyCompactionByNamePostDefault with default headers values +func NewColumnFamilyCompactionByNamePostDefault(code int) *ColumnFamilyCompactionByNamePostDefault { + return &ColumnFamilyCompactionByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyCompactionByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyCompactionByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family compaction by name post default response +func (o *ColumnFamilyCompactionByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyCompactionByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyCompactionByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyCompactionByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_get_parameters.go new file mode 100644 index 00000000000..4eee48277e1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyCompactionStrategyByNameGetParams creates a new ColumnFamilyCompactionStrategyByNameGetParams object +// with the default values initialized. +func NewColumnFamilyCompactionStrategyByNameGetParams() *ColumnFamilyCompactionStrategyByNameGetParams { + var () + return &ColumnFamilyCompactionStrategyByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyCompactionStrategyByNameGetParamsWithTimeout creates a new ColumnFamilyCompactionStrategyByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyCompactionStrategyByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyCompactionStrategyByNameGetParams { + var () + return &ColumnFamilyCompactionStrategyByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyCompactionStrategyByNameGetParamsWithContext creates a new ColumnFamilyCompactionStrategyByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyCompactionStrategyByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyCompactionStrategyByNameGetParams { + var () + return &ColumnFamilyCompactionStrategyByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyCompactionStrategyByNameGetParamsWithHTTPClient creates a new ColumnFamilyCompactionStrategyByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyCompactionStrategyByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyCompactionStrategyByNameGetParams { + var () + return &ColumnFamilyCompactionStrategyByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyCompactionStrategyByNameGetParams contains all the parameters to send to the API endpoint +for the column family compaction strategy by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyCompactionStrategyByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family compaction strategy by name get params +func (o *ColumnFamilyCompactionStrategyByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyCompactionStrategyByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family compaction strategy by name get params +func (o *ColumnFamilyCompactionStrategyByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family compaction strategy by name get params +func (o *ColumnFamilyCompactionStrategyByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyCompactionStrategyByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family compaction strategy by name get params +func (o *ColumnFamilyCompactionStrategyByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family compaction strategy by name get params +func (o 
*ColumnFamilyCompactionStrategyByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyCompactionStrategyByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family compaction strategy by name get params +func (o *ColumnFamilyCompactionStrategyByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family compaction strategy by name get params +func (o *ColumnFamilyCompactionStrategyByNameGetParams) WithName(name string) *ColumnFamilyCompactionStrategyByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family compaction strategy by name get params +func (o *ColumnFamilyCompactionStrategyByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyCompactionStrategyByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_get_responses.go new file mode 100644 index 00000000000..9dfd6712e1d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyCompactionStrategyByNameGetReader is a Reader for the ColumnFamilyCompactionStrategyByNameGet structure. +type ColumnFamilyCompactionStrategyByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyCompactionStrategyByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyCompactionStrategyByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyCompactionStrategyByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyCompactionStrategyByNameGetOK creates a ColumnFamilyCompactionStrategyByNameGetOK with default headers values +func NewColumnFamilyCompactionStrategyByNameGetOK() *ColumnFamilyCompactionStrategyByNameGetOK { + return &ColumnFamilyCompactionStrategyByNameGetOK{} +} + +/* +ColumnFamilyCompactionStrategyByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyCompactionStrategyByNameGetOK struct { + Payload string +} + +func (o *ColumnFamilyCompactionStrategyByNameGetOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyCompactionStrategyByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyCompactionStrategyByNameGetDefault creates a ColumnFamilyCompactionStrategyByNameGetDefault with default headers values +func NewColumnFamilyCompactionStrategyByNameGetDefault(code int) *ColumnFamilyCompactionStrategyByNameGetDefault { + return &ColumnFamilyCompactionStrategyByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyCompactionStrategyByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyCompactionStrategyByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family compaction strategy by name get default response +func (o *ColumnFamilyCompactionStrategyByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyCompactionStrategyByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyCompactionStrategyByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyCompactionStrategyByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_post_parameters.go new file mode 100644 index 00000000000..eb2fff15371 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_post_parameters.go @@ -0,0 +1,161 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyCompactionStrategyByNamePostParams creates a new ColumnFamilyCompactionStrategyByNamePostParams object +// with the default values initialized. 
+func NewColumnFamilyCompactionStrategyByNamePostParams() *ColumnFamilyCompactionStrategyByNamePostParams { + var () + return &ColumnFamilyCompactionStrategyByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyCompactionStrategyByNamePostParamsWithTimeout creates a new ColumnFamilyCompactionStrategyByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyCompactionStrategyByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyCompactionStrategyByNamePostParams { + var () + return &ColumnFamilyCompactionStrategyByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyCompactionStrategyByNamePostParamsWithContext creates a new ColumnFamilyCompactionStrategyByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyCompactionStrategyByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyCompactionStrategyByNamePostParams { + var () + return &ColumnFamilyCompactionStrategyByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyCompactionStrategyByNamePostParamsWithHTTPClient creates a new ColumnFamilyCompactionStrategyByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyCompactionStrategyByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyCompactionStrategyByNamePostParams { + var () + return &ColumnFamilyCompactionStrategyByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyCompactionStrategyByNamePostParams contains all the parameters to send to the API endpoint +for the column family compaction strategy by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyCompactionStrategyByNamePostParams struct { + + /*ClassName + The class name + + */ + ClassName string + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyCompactionStrategyByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyCompactionStrategyByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyCompactionStrategyByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) SetHTTPClient(client *http.Client) 
{ + o.HTTPClient = client +} + +// WithClassName adds the className to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) WithClassName(className string) *ColumnFamilyCompactionStrategyByNamePostParams { + o.SetClassName(className) + return o +} + +// SetClassName adds the className to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) SetClassName(className string) { + o.ClassName = className +} + +// WithName adds the name to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) WithName(name string) *ColumnFamilyCompactionStrategyByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family compaction strategy by name post params +func (o *ColumnFamilyCompactionStrategyByNamePostParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyCompactionStrategyByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param class_name + qrClassName := o.ClassName + qClassName := qrClassName + if qClassName != "" { + if err := r.SetQueryParam("class_name", qClassName); err != nil { + return err + } + } + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_post_responses.go new file mode 100644 index 00000000000..e4bb802d325 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compaction_strategy_by_name_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyCompactionStrategyByNamePostReader is a Reader for the ColumnFamilyCompactionStrategyByNamePost structure. +type ColumnFamilyCompactionStrategyByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
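The POST variant above adds a class_name query parameter alongside the name path parameter. A sketch of setting a strategy, again with doPost as an assumed stand-in for the generated client call; note the generated OK type for this operation carries no payload, so success is simply the absence of an error.

package main

import (
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// setCompactionStrategy wires both generated parameters: Name travels as
// the {name} path parameter, ClassName as the class_name query parameter.
func setCompactionStrategy(
	doPost func(*operations.ColumnFamilyCompactionStrategyByNamePostParams) (*operations.ColumnFamilyCompactionStrategyByNamePostOK, error),
	keyspaceTable, className string,
) error {
	params := operations.NewColumnFamilyCompactionStrategyByNamePostParams().
		WithName(keyspaceTable).
		WithClassName(className)

	_, err := doPost(params)
	return err
}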
+func (o *ColumnFamilyCompactionStrategyByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyCompactionStrategyByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyCompactionStrategyByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyCompactionStrategyByNamePostOK creates a ColumnFamilyCompactionStrategyByNamePostOK with default headers values +func NewColumnFamilyCompactionStrategyByNamePostOK() *ColumnFamilyCompactionStrategyByNamePostOK { + return &ColumnFamilyCompactionStrategyByNamePostOK{} +} + +/* +ColumnFamilyCompactionStrategyByNamePostOK handles this case with default header values. + +Success +*/ +type ColumnFamilyCompactionStrategyByNamePostOK struct { +} + +func (o *ColumnFamilyCompactionStrategyByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyCompactionStrategyByNamePostDefault creates a ColumnFamilyCompactionStrategyByNamePostDefault with default headers values +func NewColumnFamilyCompactionStrategyByNamePostDefault(code int) *ColumnFamilyCompactionStrategyByNamePostDefault { + return &ColumnFamilyCompactionStrategyByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyCompactionStrategyByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyCompactionStrategyByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family compaction strategy by name post default response +func (o *ColumnFamilyCompactionStrategyByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyCompactionStrategyByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyCompactionStrategyByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyCompactionStrategyByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_get_parameters.go new file mode 100644 index 00000000000..0d6a322d0e8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
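Every reader in this package returns the *Default result as an error for non-2xx statuses, and the Default types expose Code() and a models.ErrorModel payload (whose Message field the generated Error() method already formats). A small sketch of unpacking one of these errors with errors.As; the function name is illustrative, the types are the generated ones.

package main

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// describeFailure unpacks the generated *Default response returned as an
// error: Code() yields the HTTP status, GetPayload() the decoded
// models.ErrorModel.
func describeFailure(err error) string {
	var dflt *operations.ColumnFamilyCompactionStrategyByNamePostDefault
	if errors.As(err, &dflt) {
		return fmt.Sprintf("server error %d: %s", dflt.Code(), dflt.GetPayload().Message)
	}
	return err.Error()
}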
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyCompressionParametersByNameGetParams creates a new ColumnFamilyCompressionParametersByNameGetParams object +// with the default values initialized. +func NewColumnFamilyCompressionParametersByNameGetParams() *ColumnFamilyCompressionParametersByNameGetParams { + var () + return &ColumnFamilyCompressionParametersByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyCompressionParametersByNameGetParamsWithTimeout creates a new ColumnFamilyCompressionParametersByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyCompressionParametersByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyCompressionParametersByNameGetParams { + var () + return &ColumnFamilyCompressionParametersByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyCompressionParametersByNameGetParamsWithContext creates a new ColumnFamilyCompressionParametersByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyCompressionParametersByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyCompressionParametersByNameGetParams { + var () + return &ColumnFamilyCompressionParametersByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyCompressionParametersByNameGetParamsWithHTTPClient creates a new ColumnFamilyCompressionParametersByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyCompressionParametersByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyCompressionParametersByNameGetParams { + var () + return &ColumnFamilyCompressionParametersByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyCompressionParametersByNameGetParams contains all the parameters to send to the API endpoint +for the column family compression parameters by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyCompressionParametersByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyCompressionParametersByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyCompressionParametersByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient 
adds the HTTPClient to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyCompressionParametersByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) WithName(name string) *ColumnFamilyCompressionParametersByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family compression parameters by name get params +func (o *ColumnFamilyCompressionParametersByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyCompressionParametersByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_get_responses.go new file mode 100644 index 00000000000..494bae06a4a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyCompressionParametersByNameGetReader is a Reader for the ColumnFamilyCompressionParametersByNameGet structure. +type ColumnFamilyCompressionParametersByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
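The ...WithContext constructor above attaches a context.Context, while the timeout is a separate field that WriteToRequest applies via r.SetTimeout. A sketch combining both for the compression-parameters GET; only generated constructors and setters are used.

package main

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// buildCompressionGetParams attaches both cancellation (via ctx) and a
// per-request timeout; the generated WriteToRequest sets the timeout on
// the outgoing request.
func buildCompressionGetParams(ctx context.Context, keyspaceTable string) *operations.ColumnFamilyCompressionParametersByNameGetParams {
	return operations.NewColumnFamilyCompressionParametersByNameGetParamsWithContext(ctx).
		WithTimeout(15 * time.Second).
		WithName(keyspaceTable)
}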
+func (o *ColumnFamilyCompressionParametersByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyCompressionParametersByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyCompressionParametersByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyCompressionParametersByNameGetOK creates a ColumnFamilyCompressionParametersByNameGetOK with default headers values +func NewColumnFamilyCompressionParametersByNameGetOK() *ColumnFamilyCompressionParametersByNameGetOK { + return &ColumnFamilyCompressionParametersByNameGetOK{} +} + +/* +ColumnFamilyCompressionParametersByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyCompressionParametersByNameGetOK struct { + Payload []*models.Mapper +} + +func (o *ColumnFamilyCompressionParametersByNameGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *ColumnFamilyCompressionParametersByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyCompressionParametersByNameGetDefault creates a ColumnFamilyCompressionParametersByNameGetDefault with default headers values +func NewColumnFamilyCompressionParametersByNameGetDefault(code int) *ColumnFamilyCompressionParametersByNameGetDefault { + return &ColumnFamilyCompressionParametersByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyCompressionParametersByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyCompressionParametersByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family compression parameters by name get default response +func (o *ColumnFamilyCompressionParametersByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyCompressionParametersByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyCompressionParametersByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyCompressionParametersByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_post_parameters.go new file mode 100644 index 00000000000..d3a17a31b2f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_post_parameters.go @@ -0,0 +1,161 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyCompressionParametersByNamePostParams creates a new ColumnFamilyCompressionParametersByNamePostParams object +// with the default values initialized. 
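The 200 payload for the compression-parameters GET is a slice of *models.Mapper. The Mapper model is defined elsewhere in the generated package and its fields are not shown in this diff, so the sketch below only inspects the slice itself.

package main

import (
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// countCompressionOptions returns how many Mapper entries the server sent;
// Mapper fields are intentionally not touched here since they are defined
// outside this diff.
func countCompressionOptions(ok *operations.ColumnFamilyCompressionParametersByNameGetOK) int {
	return len(ok.GetPayload())
}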
+func NewColumnFamilyCompressionParametersByNamePostParams() *ColumnFamilyCompressionParametersByNamePostParams { + var () + return &ColumnFamilyCompressionParametersByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyCompressionParametersByNamePostParamsWithTimeout creates a new ColumnFamilyCompressionParametersByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyCompressionParametersByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyCompressionParametersByNamePostParams { + var () + return &ColumnFamilyCompressionParametersByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyCompressionParametersByNamePostParamsWithContext creates a new ColumnFamilyCompressionParametersByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyCompressionParametersByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyCompressionParametersByNamePostParams { + var () + return &ColumnFamilyCompressionParametersByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyCompressionParametersByNamePostParamsWithHTTPClient creates a new ColumnFamilyCompressionParametersByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyCompressionParametersByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyCompressionParametersByNamePostParams { + var () + return &ColumnFamilyCompressionParametersByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyCompressionParametersByNamePostParams contains all the parameters to send to the API endpoint +for the column family compression parameters by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyCompressionParametersByNamePostParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + /*Opts + The options to set + + */ + Opts string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyCompressionParametersByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyCompressionParametersByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyCompressionParametersByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family compression parameters by name post 
params +func (o *ColumnFamilyCompressionParametersByNamePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) WithName(name string) *ColumnFamilyCompressionParametersByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) SetName(name string) { + o.Name = name +} + +// WithOpts adds the opts to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) WithOpts(opts string) *ColumnFamilyCompressionParametersByNamePostParams { + o.SetOpts(opts) + return o +} + +// SetOpts adds the opts to the column family compression parameters by name post params +func (o *ColumnFamilyCompressionParametersByNamePostParams) SetOpts(opts string) { + o.Opts = opts +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyCompressionParametersByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + // query param opts + qrOpts := o.Opts + qOpts := qrOpts + if qOpts != "" { + if err := r.SetQueryParam("opts", qOpts); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_post_responses.go new file mode 100644 index 00000000000..8cd43e8bc20 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_compression_parameters_by_name_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyCompressionParametersByNamePostReader is a Reader for the ColumnFamilyCompressionParametersByNamePost structure. +type ColumnFamilyCompressionParametersByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
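A sketch of the compression POST, which pairs the name path parameter with an opts query parameter. The expected encoding of opts is not specified in this diff ("The options to set"), so it is treated as an opaque string here; doPost is again an assumed stand-in for the generated client method.

package main

import (
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// setCompression forwards opts verbatim; WriteToRequest only sends the
// opts query parameter when the string is non-empty.
func setCompression(
	doPost func(*operations.ColumnFamilyCompressionParametersByNamePostParams) (*operations.ColumnFamilyCompressionParametersByNamePostOK, error),
	keyspaceTable, opts string,
) error {
	params := operations.NewColumnFamilyCompressionParametersByNamePostParams().
		WithName(keyspaceTable).
		WithOpts(opts)
	_, err := doPost(params)
	return err
}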
+func (o *ColumnFamilyCompressionParametersByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyCompressionParametersByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyCompressionParametersByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyCompressionParametersByNamePostOK creates a ColumnFamilyCompressionParametersByNamePostOK with default headers values +func NewColumnFamilyCompressionParametersByNamePostOK() *ColumnFamilyCompressionParametersByNamePostOK { + return &ColumnFamilyCompressionParametersByNamePostOK{} +} + +/* +ColumnFamilyCompressionParametersByNamePostOK handles this case with default header values. + +Success +*/ +type ColumnFamilyCompressionParametersByNamePostOK struct { +} + +func (o *ColumnFamilyCompressionParametersByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyCompressionParametersByNamePostDefault creates a ColumnFamilyCompressionParametersByNamePostDefault with default headers values +func NewColumnFamilyCompressionParametersByNamePostDefault(code int) *ColumnFamilyCompressionParametersByNamePostDefault { + return &ColumnFamilyCompressionParametersByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyCompressionParametersByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyCompressionParametersByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family compression parameters by name post default response +func (o *ColumnFamilyCompressionParametersByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyCompressionParametersByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyCompressionParametersByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyCompressionParametersByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_crc_check_chance_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_crc_check_chance_by_name_post_parameters.go new file mode 100644 index 00000000000..51a2dfc15f4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_crc_check_chance_by_name_post_parameters.go @@ -0,0 +1,161 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
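Because every *Default type in this package shares the same shape (error, Code() int, GetPayload() *models.ErrorModel), callers can match them generically. The interface below is defined purely for illustration; it is not part of the generated code.

package main

import (
	"errors"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

// apiDefault captures the shape shared by the generated *Default response
// types in this package (illustrative, not generated).
type apiDefault interface {
	error
	Code() int
	GetPayload() *models.ErrorModel
}

// isServerError reports whether err wraps any generated default response
// carrying a 5xx status.
func isServerError(err error) bool {
	var d apiDefault
	return errors.As(err, &d) && d.Code() >= 500
}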
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyCrcCheckChanceByNamePostParams creates a new ColumnFamilyCrcCheckChanceByNamePostParams object +// with the default values initialized. +func NewColumnFamilyCrcCheckChanceByNamePostParams() *ColumnFamilyCrcCheckChanceByNamePostParams { + var () + return &ColumnFamilyCrcCheckChanceByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyCrcCheckChanceByNamePostParamsWithTimeout creates a new ColumnFamilyCrcCheckChanceByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyCrcCheckChanceByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyCrcCheckChanceByNamePostParams { + var () + return &ColumnFamilyCrcCheckChanceByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyCrcCheckChanceByNamePostParamsWithContext creates a new ColumnFamilyCrcCheckChanceByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyCrcCheckChanceByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyCrcCheckChanceByNamePostParams { + var () + return &ColumnFamilyCrcCheckChanceByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyCrcCheckChanceByNamePostParamsWithHTTPClient creates a new ColumnFamilyCrcCheckChanceByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyCrcCheckChanceByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyCrcCheckChanceByNamePostParams { + var () + return &ColumnFamilyCrcCheckChanceByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyCrcCheckChanceByNamePostParams contains all the parameters to send to the API endpoint +for the column family crc check chance by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyCrcCheckChanceByNamePostParams struct { + + /*CheckChance + CRC check chance + + */ + CheckChance string + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyCrcCheckChanceByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyCrcCheckChanceByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) 
WithHTTPClient(client *http.Client) *ColumnFamilyCrcCheckChanceByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCheckChance adds the checkChance to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) WithCheckChance(checkChance string) *ColumnFamilyCrcCheckChanceByNamePostParams { + o.SetCheckChance(checkChance) + return o +} + +// SetCheckChance adds the checkChance to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) SetCheckChance(checkChance string) { + o.CheckChance = checkChance +} + +// WithName adds the name to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) WithName(name string) *ColumnFamilyCrcCheckChanceByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family crc check chance by name post params +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyCrcCheckChanceByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param check_chance + qrCheckChance := o.CheckChance + qCheckChance := qrCheckChance + if qCheckChance != "" { + if err := r.SetQueryParam("check_chance", qCheckChance); err != nil { + return err + } + } + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_crc_check_chance_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_crc_check_chance_by_name_post_responses.go new file mode 100644 index 00000000000..0ecc6acd420 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_crc_check_chance_by_name_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyCrcCheckChanceByNamePostReader is a Reader for the ColumnFamilyCrcCheckChanceByNamePost structure. +type ColumnFamilyCrcCheckChanceByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
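A sketch for the CRC-check-chance POST above. Note that CheckChance is a string in the generated API even though it represents a probability, and WriteToRequest forwards it as the check_chance query parameter only when non-empty; doPost is an assumed stand-in for the generated client call.

package main

import (
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// setCrcCheckChance sends e.g. chance = "0.5"; validation of the value is
// left to the server, since the generated params impose no format.
func setCrcCheckChance(
	doPost func(*operations.ColumnFamilyCrcCheckChanceByNamePostParams) (*operations.ColumnFamilyCrcCheckChanceByNamePostOK, error),
	keyspaceTable, chance string,
) error {
	params := operations.NewColumnFamilyCrcCheckChanceByNamePostParams().
		WithName(keyspaceTable).
		WithCheckChance(chance)
	_, err := doPost(params)
	return err
}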
+func (o *ColumnFamilyCrcCheckChanceByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyCrcCheckChanceByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyCrcCheckChanceByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyCrcCheckChanceByNamePostOK creates a ColumnFamilyCrcCheckChanceByNamePostOK with default headers values +func NewColumnFamilyCrcCheckChanceByNamePostOK() *ColumnFamilyCrcCheckChanceByNamePostOK { + return &ColumnFamilyCrcCheckChanceByNamePostOK{} +} + +/* +ColumnFamilyCrcCheckChanceByNamePostOK handles this case with default header values. + +Success +*/ +type ColumnFamilyCrcCheckChanceByNamePostOK struct { +} + +func (o *ColumnFamilyCrcCheckChanceByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyCrcCheckChanceByNamePostDefault creates a ColumnFamilyCrcCheckChanceByNamePostDefault with default headers values +func NewColumnFamilyCrcCheckChanceByNamePostDefault(code int) *ColumnFamilyCrcCheckChanceByNamePostDefault { + return &ColumnFamilyCrcCheckChanceByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyCrcCheckChanceByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyCrcCheckChanceByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family crc check chance by name post default response +func (o *ColumnFamilyCrcCheckChanceByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyCrcCheckChanceByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyCrcCheckChanceByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyCrcCheckChanceByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_droppable_ratio_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_droppable_ratio_by_name_get_parameters.go new file mode 100644 index 00000000000..be65465774d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_droppable_ratio_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
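One subtlety of the ReadResponse switch shown throughout these readers: a 2xx status other than 200 falls into the default branch but is returned with a nil error (the response.Code()/100 == 2 check), so code that consumes ReadResponse results directly can see either the OK type or a *Default with a 2xx code. A sketch of distinguishing the cases for the CRC operation:

package main

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// classify names the concrete result type ReadResponse produced; a
// *Default here is not necessarily a failure, since 2xx defaults are
// returned without an error.
func classify(result interface{}) string {
	switch r := result.(type) {
	case *operations.ColumnFamilyCrcCheckChanceByNamePostOK:
		return "200 OK (empty body)"
	case *operations.ColumnFamilyCrcCheckChanceByNamePostDefault:
		return fmt.Sprintf("default response, HTTP %d", r.Code())
	default:
		return "unexpected result type"
	}
}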
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyDroppableRatioByNameGetParams creates a new ColumnFamilyDroppableRatioByNameGetParams object +// with the default values initialized. +func NewColumnFamilyDroppableRatioByNameGetParams() *ColumnFamilyDroppableRatioByNameGetParams { + var () + return &ColumnFamilyDroppableRatioByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyDroppableRatioByNameGetParamsWithTimeout creates a new ColumnFamilyDroppableRatioByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyDroppableRatioByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyDroppableRatioByNameGetParams { + var () + return &ColumnFamilyDroppableRatioByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyDroppableRatioByNameGetParamsWithContext creates a new ColumnFamilyDroppableRatioByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyDroppableRatioByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyDroppableRatioByNameGetParams { + var () + return &ColumnFamilyDroppableRatioByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyDroppableRatioByNameGetParamsWithHTTPClient creates a new ColumnFamilyDroppableRatioByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyDroppableRatioByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyDroppableRatioByNameGetParams { + var () + return &ColumnFamilyDroppableRatioByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyDroppableRatioByNameGetParams contains all the parameters to send to the API endpoint +for the column family droppable ratio by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyDroppableRatioByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyDroppableRatioByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyDroppableRatioByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyDroppableRatioByNameGetParams { + 
o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) WithName(name string) *ColumnFamilyDroppableRatioByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family droppable ratio by name get params +func (o *ColumnFamilyDroppableRatioByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyDroppableRatioByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_droppable_ratio_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_droppable_ratio_by_name_get_responses.go new file mode 100644 index 00000000000..dd629505891 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_droppable_ratio_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyDroppableRatioByNameGetReader is a Reader for the ColumnFamilyDroppableRatioByNameGet structure. +type ColumnFamilyDroppableRatioByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyDroppableRatioByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyDroppableRatioByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyDroppableRatioByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyDroppableRatioByNameGetOK creates a ColumnFamilyDroppableRatioByNameGetOK with default headers values +func NewColumnFamilyDroppableRatioByNameGetOK() *ColumnFamilyDroppableRatioByNameGetOK { + return &ColumnFamilyDroppableRatioByNameGetOK{} +} + +/* +ColumnFamilyDroppableRatioByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyDroppableRatioByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyDroppableRatioByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyDroppableRatioByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyDroppableRatioByNameGetDefault creates a ColumnFamilyDroppableRatioByNameGetDefault with default headers values +func NewColumnFamilyDroppableRatioByNameGetDefault(code int) *ColumnFamilyDroppableRatioByNameGetDefault { + return &ColumnFamilyDroppableRatioByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyDroppableRatioByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyDroppableRatioByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family droppable ratio by name get default response +func (o *ColumnFamilyDroppableRatioByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyDroppableRatioByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyDroppableRatioByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyDroppableRatioByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_estimate_keys_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_estimate_keys_by_name_get_parameters.go new file mode 100644 index 00000000000..6b5a446c4a3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_estimate_keys_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyEstimateKeysByNameGetParams creates a new ColumnFamilyEstimateKeysByNameGetParams object +// with the default values initialized. 
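The droppable-ratio 200 payload is declared as interface{}, so the concrete type depends on the wire format. The sketch below assumes a JSON response body, where the go-openapi consumer decodes a bare number into float64; that is an assumption about the server, not something this diff guarantees.

package main

import (
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// ratioFromPayload coerces the untyped payload; the float64 assertion
// assumes a JSON-encoded numeric body (see lead-in).
func ratioFromPayload(ok *operations.ColumnFamilyDroppableRatioByNameGetOK) (float64, bool) {
	f, isFloat := ok.GetPayload().(float64)
	return f, isFloat
}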
+func NewColumnFamilyEstimateKeysByNameGetParams() *ColumnFamilyEstimateKeysByNameGetParams { + var () + return &ColumnFamilyEstimateKeysByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyEstimateKeysByNameGetParamsWithTimeout creates a new ColumnFamilyEstimateKeysByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyEstimateKeysByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyEstimateKeysByNameGetParams { + var () + return &ColumnFamilyEstimateKeysByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyEstimateKeysByNameGetParamsWithContext creates a new ColumnFamilyEstimateKeysByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyEstimateKeysByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyEstimateKeysByNameGetParams { + var () + return &ColumnFamilyEstimateKeysByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyEstimateKeysByNameGetParamsWithHTTPClient creates a new ColumnFamilyEstimateKeysByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyEstimateKeysByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyEstimateKeysByNameGetParams { + var () + return &ColumnFamilyEstimateKeysByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyEstimateKeysByNameGetParams contains all the parameters to send to the API endpoint +for the column family estimate keys by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyEstimateKeysByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyEstimateKeysByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyEstimateKeysByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyEstimateKeysByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) WithName(name string) *ColumnFamilyEstimateKeysByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family 
estimate keys by name get params +func (o *ColumnFamilyEstimateKeysByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyEstimateKeysByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_estimate_keys_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_estimate_keys_by_name_get_responses.go new file mode 100644 index 00000000000..3afbb015167 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_estimate_keys_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyEstimateKeysByNameGetReader is a Reader for the ColumnFamilyEstimateKeysByNameGet structure. +type ColumnFamilyEstimateKeysByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyEstimateKeysByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyEstimateKeysByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyEstimateKeysByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyEstimateKeysByNameGetOK creates a ColumnFamilyEstimateKeysByNameGetOK with default headers values +func NewColumnFamilyEstimateKeysByNameGetOK() *ColumnFamilyEstimateKeysByNameGetOK { + return &ColumnFamilyEstimateKeysByNameGetOK{} +} + +/* +ColumnFamilyEstimateKeysByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyEstimateKeysByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyEstimateKeysByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyEstimateKeysByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyEstimateKeysByNameGetDefault creates a ColumnFamilyEstimateKeysByNameGetDefault with default headers values +func NewColumnFamilyEstimateKeysByNameGetDefault(code int) *ColumnFamilyEstimateKeysByNameGetDefault { + return &ColumnFamilyEstimateKeysByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyEstimateKeysByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyEstimateKeysByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family estimate keys by name get default response +func (o *ColumnFamilyEstimateKeysByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyEstimateKeysByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyEstimateKeysByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyEstimateKeysByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_get_parameters.go new file mode 100644 index 00000000000..253a2292037 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyGetParams creates a new ColumnFamilyGetParams object +// with the default values initialized. 
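A sketch of driving the estimate-keys GET with a bounded context, using the generated ...WithContext constructor; doGet is an assumed stand-in for the generated client method, and the payload stays untyped (interface{}) as in the generated response.

package main

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// estimateKeys bounds the call with a derived context; cancellation
// propagates through the params' Context field.
func estimateKeys(
	ctx context.Context,
	doGet func(*operations.ColumnFamilyEstimateKeysByNameGetParams) (*operations.ColumnFamilyEstimateKeysByNameGetOK, error),
	keyspaceTable string,
) (interface{}, error) {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	params := operations.NewColumnFamilyEstimateKeysByNameGetParamsWithContext(ctx).
		WithName(keyspaceTable)

	ok, err := doGet(params)
	if err != nil {
		return nil, err
	}
	return ok.GetPayload(), nil
}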
+func NewColumnFamilyGetParams() *ColumnFamilyGetParams { + + return &ColumnFamilyGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyGetParamsWithTimeout creates a new ColumnFamilyGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyGetParams { + + return &ColumnFamilyGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyGetParamsWithContext creates a new ColumnFamilyGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyGetParamsWithContext(ctx context.Context) *ColumnFamilyGetParams { + + return &ColumnFamilyGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyGetParamsWithHTTPClient creates a new ColumnFamilyGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyGetParams { + + return &ColumnFamilyGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyGetParams contains all the parameters to send to the API endpoint +for the column family get operation typically these are written to a http.Request +*/ +type ColumnFamilyGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family get params +func (o *ColumnFamilyGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family get params +func (o *ColumnFamilyGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family get params +func (o *ColumnFamilyGetParams) WithContext(ctx context.Context) *ColumnFamilyGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family get params +func (o *ColumnFamilyGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family get params +func (o *ColumnFamilyGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family get params +func (o *ColumnFamilyGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_get_responses.go new file mode 100644 index 00000000000..9761eccba4b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyGetReader is a Reader for the ColumnFamilyGet structure. +type ColumnFamilyGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyGetOK creates a ColumnFamilyGetOK with default headers values +func NewColumnFamilyGetOK() *ColumnFamilyGetOK { + return &ColumnFamilyGetOK{} +} + +/* +ColumnFamilyGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyGetOK struct { + Payload []*models.ColumnFamilyInfo +} + +func (o *ColumnFamilyGetOK) GetPayload() []*models.ColumnFamilyInfo { + return o.Payload +} + +func (o *ColumnFamilyGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyGetDefault creates a ColumnFamilyGetDefault with default headers values +func NewColumnFamilyGetDefault(code int) *ColumnFamilyGetDefault { + return &ColumnFamilyGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family get default response +func (o *ColumnFamilyGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_load_sstable_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_load_sstable_by_name_post_parameters.go new file mode 100644 index 00000000000..e6a107eb8d3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_load_sstable_by_name_post_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
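// The ColumnFamilyGet pair above is the simplest shape in this batch: a
// params object with no operation-specific fields and a typed slice payload.
// A minimal usage sketch, assuming a generated operations client and an
// in-scope ctx; the fluent setters are the ones defined above:
//
//    params := operations.NewColumnFamilyGetParams().
//        WithTimeout(30 * time.Second).
//        WithContext(ctx)
//    ok, err := client.Operations.ColumnFamilyGet(params)
//    if err != nil {
//        return err
//    }
//    for _, cf := range ok.GetPayload() { // []*models.ColumnFamilyInfo
//        fmt.Println(cf)
//    }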
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyLoadSstableByNamePostParams creates a new ColumnFamilyLoadSstableByNamePostParams object +// with the default values initialized. +func NewColumnFamilyLoadSstableByNamePostParams() *ColumnFamilyLoadSstableByNamePostParams { + var () + return &ColumnFamilyLoadSstableByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyLoadSstableByNamePostParamsWithTimeout creates a new ColumnFamilyLoadSstableByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyLoadSstableByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyLoadSstableByNamePostParams { + var () + return &ColumnFamilyLoadSstableByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyLoadSstableByNamePostParamsWithContext creates a new ColumnFamilyLoadSstableByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyLoadSstableByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyLoadSstableByNamePostParams { + var () + return &ColumnFamilyLoadSstableByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyLoadSstableByNamePostParamsWithHTTPClient creates a new ColumnFamilyLoadSstableByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyLoadSstableByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyLoadSstableByNamePostParams { + var () + return &ColumnFamilyLoadSstableByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyLoadSstableByNamePostParams contains all the parameters to send to the API endpoint +for the column family load sstable by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyLoadSstableByNamePostParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyLoadSstableByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyLoadSstableByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyLoadSstableByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column 
family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) WithName(name string) *ColumnFamilyLoadSstableByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family load sstable by name post params +func (o *ColumnFamilyLoadSstableByNamePostParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyLoadSstableByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_load_sstable_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_load_sstable_by_name_post_responses.go new file mode 100644 index 00000000000..bab35a1ba9b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_load_sstable_by_name_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyLoadSstableByNamePostReader is a Reader for the ColumnFamilyLoadSstableByNamePost structure. +type ColumnFamilyLoadSstableByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyLoadSstableByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyLoadSstableByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyLoadSstableByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyLoadSstableByNamePostOK creates a ColumnFamilyLoadSstableByNamePostOK with default headers values +func NewColumnFamilyLoadSstableByNamePostOK() *ColumnFamilyLoadSstableByNamePostOK { + return &ColumnFamilyLoadSstableByNamePostOK{} +} + +/* +ColumnFamilyLoadSstableByNamePostOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyLoadSstableByNamePostOK struct { + Payload string +} + +func (o *ColumnFamilyLoadSstableByNamePostOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyLoadSstableByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyLoadSstableByNamePostDefault creates a ColumnFamilyLoadSstableByNamePostDefault with default headers values +func NewColumnFamilyLoadSstableByNamePostDefault(code int) *ColumnFamilyLoadSstableByNamePostDefault { + return &ColumnFamilyLoadSstableByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyLoadSstableByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyLoadSstableByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family load sstable by name post default response +func (o *ColumnFamilyLoadSstableByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyLoadSstableByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyLoadSstableByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyLoadSstableByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_major_compaction_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_major_compaction_by_name_post_parameters.go new file mode 100644 index 00000000000..8a641ab0f52 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_major_compaction_by_name_post_parameters.go @@ -0,0 +1,169 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewColumnFamilyMajorCompactionByNamePostParams creates a new ColumnFamilyMajorCompactionByNamePostParams object +// with the default values initialized. 
+func NewColumnFamilyMajorCompactionByNamePostParams() *ColumnFamilyMajorCompactionByNamePostParams { + var () + return &ColumnFamilyMajorCompactionByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMajorCompactionByNamePostParamsWithTimeout creates a new ColumnFamilyMajorCompactionByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMajorCompactionByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyMajorCompactionByNamePostParams { + var () + return &ColumnFamilyMajorCompactionByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMajorCompactionByNamePostParamsWithContext creates a new ColumnFamilyMajorCompactionByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMajorCompactionByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyMajorCompactionByNamePostParams { + var () + return &ColumnFamilyMajorCompactionByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMajorCompactionByNamePostParamsWithHTTPClient creates a new ColumnFamilyMajorCompactionByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMajorCompactionByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyMajorCompactionByNamePostParams { + var () + return &ColumnFamilyMajorCompactionByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMajorCompactionByNamePostParams contains all the parameters to send to the API endpoint +for the column family major compaction by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyMajorCompactionByNamePostParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + /*SplitOutput + true if the output of the major compaction should be split in several sstables + + */ + SplitOutput *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyMajorCompactionByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyMajorCompactionByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyMajorCompactionByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// 
WithName adds the name to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) WithName(name string) *ColumnFamilyMajorCompactionByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) SetName(name string) { + o.Name = name +} + +// WithSplitOutput adds the splitOutput to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) WithSplitOutput(splitOutput *bool) *ColumnFamilyMajorCompactionByNamePostParams { + o.SetSplitOutput(splitOutput) + return o +} + +// SetSplitOutput adds the splitOutput to the column family major compaction by name post params +func (o *ColumnFamilyMajorCompactionByNamePostParams) SetSplitOutput(splitOutput *bool) { + o.SplitOutput = splitOutput +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMajorCompactionByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if o.SplitOutput != nil { + + // query param split_output + var qrSplitOutput bool + if o.SplitOutput != nil { + qrSplitOutput = *o.SplitOutput + } + qSplitOutput := swag.FormatBool(qrSplitOutput) + if qSplitOutput != "" { + if err := r.SetQueryParam("split_output", qSplitOutput); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_major_compaction_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_major_compaction_by_name_post_responses.go new file mode 100644 index 00000000000..67718776d9a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_major_compaction_by_name_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMajorCompactionByNamePostReader is a Reader for the ColumnFamilyMajorCompactionByNamePost structure. +type ColumnFamilyMajorCompactionByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
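// Optional query parameters such as SplitOutput above are generated as
// pointers so that "unset" and "false" remain distinguishable: WriteToRequest
// only emits split_output when the pointer is non-nil. A minimal sketch; the
// pointer helper comes from github.com/go-openapi/swag, which the parameters
// file already imports, and the name value is illustrative:
//
//    params := operations.NewColumnFamilyMajorCompactionByNamePostParams().
//        WithName("ks1:standard1").       // path param, keyspace:name format
//        WithSplitOutput(swag.Bool(true)) // omit the call to leave split_output unset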
+func (o *ColumnFamilyMajorCompactionByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMajorCompactionByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMajorCompactionByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMajorCompactionByNamePostOK creates a ColumnFamilyMajorCompactionByNamePostOK with default headers values +func NewColumnFamilyMajorCompactionByNamePostOK() *ColumnFamilyMajorCompactionByNamePostOK { + return &ColumnFamilyMajorCompactionByNamePostOK{} +} + +/* +ColumnFamilyMajorCompactionByNamePostOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMajorCompactionByNamePostOK struct { + Payload string +} + +func (o *ColumnFamilyMajorCompactionByNamePostOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyMajorCompactionByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMajorCompactionByNamePostDefault creates a ColumnFamilyMajorCompactionByNamePostDefault with default headers values +func NewColumnFamilyMajorCompactionByNamePostDefault(code int) *ColumnFamilyMajorCompactionByNamePostDefault { + return &ColumnFamilyMajorCompactionByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMajorCompactionByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMajorCompactionByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family major compaction by name post default response +func (o *ColumnFamilyMajorCompactionByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMajorCompactionByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMajorCompactionByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMajorCompactionByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_get_parameters.go new file mode 100644 index 00000000000..cd924785743 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMaximumCompactionByNameGetParams creates a new ColumnFamilyMaximumCompactionByNameGetParams object +// with the default values initialized. +func NewColumnFamilyMaximumCompactionByNameGetParams() *ColumnFamilyMaximumCompactionByNameGetParams { + var () + return &ColumnFamilyMaximumCompactionByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMaximumCompactionByNameGetParamsWithTimeout creates a new ColumnFamilyMaximumCompactionByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMaximumCompactionByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMaximumCompactionByNameGetParams { + var () + return &ColumnFamilyMaximumCompactionByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMaximumCompactionByNameGetParamsWithContext creates a new ColumnFamilyMaximumCompactionByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMaximumCompactionByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMaximumCompactionByNameGetParams { + var () + return &ColumnFamilyMaximumCompactionByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMaximumCompactionByNameGetParamsWithHTTPClient creates a new ColumnFamilyMaximumCompactionByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMaximumCompactionByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMaximumCompactionByNameGetParams { + var () + return &ColumnFamilyMaximumCompactionByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMaximumCompactionByNameGetParams contains all the parameters to send to the API endpoint +for the column family maximum compaction by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMaximumCompactionByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family maximum compaction by name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMaximumCompactionByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family maximum compaction by name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family maximum compaction by name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMaximumCompactionByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family maximum compaction by name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family maximum compaction by 
name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMaximumCompactionByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family maximum compaction by name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family maximum compaction by name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) WithName(name string) *ColumnFamilyMaximumCompactionByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family maximum compaction by name get params +func (o *ColumnFamilyMaximumCompactionByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMaximumCompactionByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_get_responses.go new file mode 100644 index 00000000000..54976143526 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMaximumCompactionByNameGetReader is a Reader for the ColumnFamilyMaximumCompactionByNameGet structure. +type ColumnFamilyMaximumCompactionByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMaximumCompactionByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMaximumCompactionByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMaximumCompactionByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMaximumCompactionByNameGetOK creates a ColumnFamilyMaximumCompactionByNameGetOK with default headers values +func NewColumnFamilyMaximumCompactionByNameGetOK() *ColumnFamilyMaximumCompactionByNameGetOK { + return &ColumnFamilyMaximumCompactionByNameGetOK{} +} + +/* +ColumnFamilyMaximumCompactionByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMaximumCompactionByNameGetOK struct { + Payload string +} + +func (o *ColumnFamilyMaximumCompactionByNameGetOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyMaximumCompactionByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMaximumCompactionByNameGetDefault creates a ColumnFamilyMaximumCompactionByNameGetDefault with default headers values +func NewColumnFamilyMaximumCompactionByNameGetDefault(code int) *ColumnFamilyMaximumCompactionByNameGetDefault { + return &ColumnFamilyMaximumCompactionByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMaximumCompactionByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMaximumCompactionByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family maximum compaction by name get default response +func (o *ColumnFamilyMaximumCompactionByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMaximumCompactionByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMaximumCompactionByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMaximumCompactionByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_post_parameters.go new file mode 100644 index 00000000000..a28fdfab483 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_post_parameters.go @@ -0,0 +1,162 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewColumnFamilyMaximumCompactionByNamePostParams creates a new ColumnFamilyMaximumCompactionByNamePostParams object +// with the default values initialized. 
+func NewColumnFamilyMaximumCompactionByNamePostParams() *ColumnFamilyMaximumCompactionByNamePostParams { + var () + return &ColumnFamilyMaximumCompactionByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMaximumCompactionByNamePostParamsWithTimeout creates a new ColumnFamilyMaximumCompactionByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMaximumCompactionByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyMaximumCompactionByNamePostParams { + var () + return &ColumnFamilyMaximumCompactionByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMaximumCompactionByNamePostParamsWithContext creates a new ColumnFamilyMaximumCompactionByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMaximumCompactionByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyMaximumCompactionByNamePostParams { + var () + return &ColumnFamilyMaximumCompactionByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMaximumCompactionByNamePostParamsWithHTTPClient creates a new ColumnFamilyMaximumCompactionByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMaximumCompactionByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyMaximumCompactionByNamePostParams { + var () + return &ColumnFamilyMaximumCompactionByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMaximumCompactionByNamePostParams contains all the parameters to send to the API endpoint +for the column family maximum compaction by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyMaximumCompactionByNamePostParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + /*Value + The maximum number of sstables in queue before compaction kicks off + + */ + Value int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyMaximumCompactionByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyMaximumCompactionByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyMaximumCompactionByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) SetHTTPClient(client 
*http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) WithName(name string) *ColumnFamilyMaximumCompactionByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) SetName(name string) { + o.Name = name +} + +// WithValue adds the value to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) WithValue(value int32) *ColumnFamilyMaximumCompactionByNamePostParams { + o.SetValue(value) + return o +} + +// SetValue adds the value to the column family maximum compaction by name post params +func (o *ColumnFamilyMaximumCompactionByNamePostParams) SetValue(value int32) { + o.Value = value +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMaximumCompactionByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + // query param value + qrValue := o.Value + qValue := swag.FormatInt32(qrValue) + if qValue != "" { + if err := r.SetQueryParam("value", qValue); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_post_responses.go new file mode 100644 index 00000000000..26d1f69529a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_maximum_compaction_by_name_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMaximumCompactionByNamePostReader is a Reader for the ColumnFamilyMaximumCompactionByNamePost structure. +type ColumnFamilyMaximumCompactionByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
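// In contrast to the optional pointer parameters, the required Value above is
// a plain int32 and is always written to the query string via
// swag.FormatInt32 (zero included, since "0" is a non-empty string). A
// minimal sketch; the threshold of 32 is an arbitrary example:
//
//    params := operations.NewColumnFamilyMaximumCompactionByNamePostParams().
//        WithName("ks1:standard1").
//        WithValue(32)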
+func (o *ColumnFamilyMaximumCompactionByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMaximumCompactionByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMaximumCompactionByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMaximumCompactionByNamePostOK creates a ColumnFamilyMaximumCompactionByNamePostOK with default headers values +func NewColumnFamilyMaximumCompactionByNamePostOK() *ColumnFamilyMaximumCompactionByNamePostOK { + return &ColumnFamilyMaximumCompactionByNamePostOK{} +} + +/* +ColumnFamilyMaximumCompactionByNamePostOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMaximumCompactionByNamePostOK struct { + Payload string +} + +func (o *ColumnFamilyMaximumCompactionByNamePostOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyMaximumCompactionByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMaximumCompactionByNamePostDefault creates a ColumnFamilyMaximumCompactionByNamePostDefault with default headers values +func NewColumnFamilyMaximumCompactionByNamePostDefault(code int) *ColumnFamilyMaximumCompactionByNamePostDefault { + return &ColumnFamilyMaximumCompactionByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMaximumCompactionByNamePostDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMaximumCompactionByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family maximum compaction by name post default response +func (o *ColumnFamilyMaximumCompactionByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMaximumCompactionByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMaximumCompactionByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMaximumCompactionByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_by_name_get_parameters.go new file mode 100644 index 00000000000..196eff5f1af --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams() *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics all memtables live data size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics all memtables live data size by name get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics all memtables live data size by name get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics all memtables live data size by name get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics all memtables live data size by name get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics all memtables live data size by name get params +func (o 
*ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics all memtables live data size by name get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics all memtables live data size by name get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics all memtables live data size by name get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_by_name_get_responses.go new file mode 100644 index 00000000000..40a21e6c87c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetReader is a Reader for the ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGet structure. +type ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK creates a ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK() *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK { + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault creates a ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault(code int) *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault { + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics all memtables live data size by name get default response +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_get_parameters.go new file mode 100644 index 00000000000..6d2da4ad0ef --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams object +// with the default values initialized. 
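// The metrics endpoints in this batch deserialize into untyped payloads
// (Payload interface{}), so callers must assert the concrete type. Depending
// on how the JSON consumer is configured, a bare number arrives as
// json.Number or float64; a defensive sketch covering both cases (the
// surrounding client wiring is assumed, not shown in this hunk):
//
//    switch v := ok.GetPayload().(type) {
//    case json.Number:
//        fmt.Println(v.String())
//    case float64:
//        fmt.Println(int64(v))
//    }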
+func NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams() *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParamsWithContext creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics all memtables live data size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics all memtables live data size get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics all memtables live data size get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics all memtables live data size get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics all memtables live data size get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics all memtables live data size get params +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics all memtables live data size get params +func (o 
*ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_get_responses.go new file mode 100644 index 00000000000..7f3beb73eb8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_live_data_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsAllMemtablesLiveDataSizeGetReader is a Reader for the ColumnFamilyMetricsAllMemtablesLiveDataSizeGet structure. +type ColumnFamilyMetricsAllMemtablesLiveDataSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK creates a ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK with default headers values +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK() *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK { + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK{} +} + +/* +ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault creates a ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault with default headers values +func NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault(code int) *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault { + return &ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics all memtables live data size get default response +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_by_name_get_parameters.go new file mode 100644 index 00000000000..b99284072ac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams object +// with the default values initialized. 
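The by-name variants that follow differ from the parameterless operations only in their required path parameter, documented as "keyspace:name". A short sketch of filling it in that form (the helper name is hypothetical):

package example

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// offHeapByNameParams sketches the only real parameter these by-name
// operations carry: the column family in "keyspace:name" form, which
// WriteToRequest later injects via r.SetPathParam("name", ...).
func offHeapByNameParams(keyspace, table string) *operations.ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams {
	return operations.NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams().
		WithName(fmt.Sprintf("%s:%s", keyspace, table))
}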
+func NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams() *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics all memtables off heap size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics all memtables off heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics all memtables off heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics all memtables off heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics all memtables off heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics all memtables off heap size by name get params +func (o 
*ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics all memtables off heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics all memtables off heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics all memtables off heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_by_name_get_responses.go new file mode 100644 index 00000000000..3148e953b59 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetReader is a Reader for the ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGet structure. +type ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
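The reader below is invoked by the go-openapi transport, not by user code directly; callers typically see either the typed OK value or the *...Default acting as an error. A sketch of unpacking that result, assuming standard go-openapi behavior (the helper name is illustrative):

package example

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// unpack shows one way caller code might handle what the reader yields.
func unpack(res interface{}, err error) (interface{}, error) {
	if err != nil {
		var def *operations.ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault
		if errors.As(err, &def) {
			// The default response doubles as an error: Code() carries the
			// HTTP status and GetPayload() the decoded ErrorModel body.
			return nil, fmt.Errorf("agent returned HTTP %d: %w", def.Code(), def)
		}
		return nil, err
	}
	ok, isOK := res.(*operations.ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK)
	if !isOK {
		return nil, fmt.Errorf("unexpected result type %T", res)
	}
	return ok.GetPayload(), nil
}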
+func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK creates a ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK() *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK { + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault creates a ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault(code int) *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault { + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics all memtables off heap size by name get default response +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_get_parameters.go new file mode 100644 index 00000000000..aad49a13af2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams object +// with the default values initialized. 
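The fourth constructor variant below accepts a caller-supplied *http.Client, which is the hook for custom transports. A hedged sketch (the function name and TLS settings are illustrative only, not a recommendation):

package example

import (
	"crypto/tls"
	"net/http"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// insecureParams sketches the WithHTTPClient constructor variant, useful
// when the agent endpoint needs a transport the default client lacks.
func insecureParams() *operations.ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams {
	hc := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // illustration only
		},
	}
	return operations.NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParamsWithHTTPClient(hc)
}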
+func NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams() *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParamsWithContext creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics all memtables off heap size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics all memtables off heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics all memtables off heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics all memtables off heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics all memtables off heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics all memtables off heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics all memtables off heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_get_responses.go new file mode 100644 index 00000000000..34085de4b95 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_off_heap_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsAllMemtablesOffHeapSizeGetReader is a Reader for the ColumnFamilyMetricsAllMemtablesOffHeapSizeGet structure. +type ColumnFamilyMetricsAllMemtablesOffHeapSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK creates a ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK with default headers values +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK() *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK { + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK{} +} + +/* +ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault creates a ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault with default headers values +func NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault(code int) *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault { + return &ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics all memtables off heap size get default response +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_by_name_get_parameters.go new file mode 100644 index 00000000000..595c266efb2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams object +// with the default values initialized. 
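The by-name constructors that follow contain an empty var () block. That appears to be leftover scaffolding from go-swagger's parameter-defaults template; it is legal Go and compiles to nothing, as this standalone snippet demonstrates:

package main

import "fmt"

func main() {
	// An empty declaration block like the one these by-name constructors
	// contain is valid Go that compiles to nothing; it is template
	// scaffolding, not a bug.
	var ()
	fmt.Println("var () is a no-op")
}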
+func NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams() *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics all memtables on heap size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) WithHTTPClient(client 
*http.Client) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics all memtables on heap size by name get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_by_name_get_responses.go new file mode 100644 index 00000000000..2fae364fd1f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetReader is a Reader for the ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGet structure. +type ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
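The ReadResponse below repeats the dispatch used throughout these files: 200 yields the typed OK, any other 2xx is decoded into the default model yet still returned as a success, and non-2xx codes return the default as an error. A standalone illustration of that three-way split:

package main

import "fmt"

// classify mirrors the dispatch in the generated ReadResponse.
func classify(status int) string {
	switch {
	case status == 200:
		return "typed OK result"
	case status/100 == 2:
		return "default payload, returned as success"
	default:
		return "default payload, returned as error"
	}
}

func main() {
	for _, s := range []int{200, 204, 404, 500} {
		fmt.Printf("%d -> %s\n", s, classify(s))
	}
}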
+func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK creates a ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK() *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK { + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault creates a ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault(code int) *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault { + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics all memtables on heap size by name get default response +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_get_parameters.go new file mode 100644 index 00000000000..91ede151c6d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams object +// with the default values initialized. 
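For context, a sketch of how a caller might drive this parameterless operation end to end. The operations.ClientService interface and the method name are assumptions based on go-swagger's usual output; the generated service itself is not part of this hunk:

package example

import (
	"context"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// fetchOnHeapSize sketches an end-to-end call, assuming the generated
// ClientService exposes one method per operation that takes the params
// struct and returns the typed OK value or an error.
func fetchOnHeapSize(ctx context.Context, svc operations.ClientService) (interface{}, error) {
	params := operations.NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParamsWithContext(ctx)
	ok, err := svc.ColumnFamilyMetricsAllMemtablesOnHeapSizeGet(params)
	if err != nil {
		return nil, fmt.Errorf("query all-memtables on-heap size: %w", err)
	}
	return ok.GetPayload(), nil
}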
+func NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams() *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParamsWithContext creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams { + + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics all memtables on heap size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics all memtables on heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics all memtables on heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics all memtables on heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics all memtables on heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics all memtables on heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics all memtables on heap size get params +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) SetHTTPClient(client *http.Client) { + 
o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_get_responses.go new file mode 100644 index 00000000000..3f66f17852c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_all_memtables_on_heap_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsAllMemtablesOnHeapSizeGetReader is a Reader for the ColumnFamilyMetricsAllMemtablesOnHeapSizeGet structure. +type ColumnFamilyMetricsAllMemtablesOnHeapSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK creates a ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK with default headers values +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK() *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK { + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK{} +} + +/* +ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault creates a ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault with default headers values +func NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault(code int) *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault { + return &ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics all memtables on heap size get default response +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_by_name_get_parameters.go new file mode 100644 index 00000000000..7d177dea272 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams() *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics bloom filter disk space used by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics bloom filter disk space used by name get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics bloom filter disk space used by name get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics bloom filter disk space used by name get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics bloom filter disk space used by name get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics bloom filter disk space used by name get params +func (o 
*ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter disk space used by name get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics bloom filter disk space used by name get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) WithName(name string) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics bloom filter disk space used by name get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_by_name_get_responses.go new file mode 100644 index 00000000000..4cdcff2dcf4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetReader is a Reader for the ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGet structure. +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
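The Default types in these files double as errors, formatting themselves with an "agent" prefix, the HTTP status, and the server message with its trailing period trimmed so wrapped errors are not double-punctuated. A standalone mirror of that formatting (the function name is illustrative):

package main

import (
	"fmt"
	"strings"
)

// formatAgentErr mirrors the generated Error() method's formatting.
func formatAgentErr(status int, msg string) string {
	return fmt.Sprintf("agent [HTTP %d] %s", status, strings.TrimRight(msg, "."))
}

func main() {
	fmt.Println(formatAgentErr(500, "column family not found."))
	// Prints: agent [HTTP 500] column family not found
}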
+func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK creates a ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK with default headers values +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK() *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK { + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK{} +} + +/* +ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault creates a ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault with default headers values +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault(code int) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault { + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics bloom filter disk space used by name get default response +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_get_parameters.go new file mode 100644 index 00000000000..144a0f0e229 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams object +// with the default values initialized. 
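Note how ReadResponse above returns the Default result as an error value for any non-2xx status: it satisfies the error interface via the Error() method at the end of the file and carries the decoded models.ErrorModel payload. A minimal sketch of unwrapping such an error; the value is constructed directly here because the operations client that would normally produce it is outside this diff:

package main

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

func describe(err error) string {
	// Non-2xx responses surface as the generated Default type, which exposes
	// the raw status code and the decoded error payload.
	var d *operations.ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault
	if errors.As(err, &d) {
		return fmt.Sprintf("scylla API error %d: %s", d.Code(), d.GetPayload().Message)
	}
	return err.Error()
}

func main() {
	// Stand-in for an error returned by a real client call.
	d := operations.NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault(500)
	d.Payload = &models.ErrorModel{Message: "bloom filter metric unavailable"}
	fmt.Println(describe(d))
}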
+func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams() *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams contains all the parameters to send to the API endpoint +for the column family metrics bloom filter disk space used get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics bloom filter disk space used get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics bloom filter disk space used get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics bloom filter disk space used get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics bloom filter disk space used get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics bloom filter disk space used get params +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter disk space used get params +func (o 
*ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_get_responses.go new file mode 100644 index 00000000000..92f9616d770 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_disk_space_used_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetReader is a Reader for the ColumnFamilyMetricsBloomFilterDiskSpaceUsedGet structure. +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK creates a ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK with default headers values +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK() *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK { + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK{} +} + +/* +ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault creates a ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault with default headers values +func NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault(code int) *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault { + return &ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics bloom filter disk space used get default response +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_by_name_get_parameters.go new file mode 100644 index 00000000000..2c2ba78ee74 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams creates a new ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams() *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics bloom filter false positives by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics bloom filter false positives by name get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics bloom filter false positives by name get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics bloom filter false positives by name get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics bloom filter false positives by name get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics bloom filter false positives by name get params 
+func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter false positives by name get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics bloom filter false positives by name get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) WithName(name string) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics bloom filter false positives by name get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_by_name_get_responses.go new file mode 100644 index 00000000000..8455a4400fc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetReader is a Reader for the ColumnFamilyMetricsBloomFilterFalsePositivesByNameGet structure. +type ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK creates a ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK with default headers values +func NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK() *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK { + return &ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK{} +} + +/* +ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault creates a ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault with default headers values +func NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault(code int) *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault { + return &ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics bloom filter false positives by name get default response +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_get_parameters.go new file mode 100644 index 00000000000..69b0f7e108f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsBloomFilterFalsePositivesGetParams creates a new ColumnFamilyMetricsBloomFilterFalsePositivesGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsBloomFilterFalsePositivesGetParams() *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsBloomFilterFalsePositivesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterFalsePositivesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsBloomFilterFalsePositivesGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsBloomFilterFalsePositivesGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterFalsePositivesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsBloomFilterFalsePositivesGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsBloomFilterFalsePositivesGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterFalsePositivesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsBloomFilterFalsePositivesGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsBloomFilterFalsePositivesGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalsePositivesGetParams contains all the parameters to send to the API endpoint +for the column family metrics bloom filter false positives get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsBloomFilterFalsePositivesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics bloom filter false positives get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics bloom filter false positives get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics bloom filter false positives get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics bloom filter false positives get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics bloom filter false positives get params +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter false positives get params +func (o 
*ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_get_responses.go new file mode 100644 index 00000000000..5b7b3fa4d7e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_positives_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsBloomFilterFalsePositivesGetReader is a Reader for the ColumnFamilyMetricsBloomFilterFalsePositivesGet structure. +type ColumnFamilyMetricsBloomFilterFalsePositivesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsBloomFilterFalsePositivesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsBloomFilterFalsePositivesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesGetOK creates a ColumnFamilyMetricsBloomFilterFalsePositivesGetOK with default headers values +func NewColumnFamilyMetricsBloomFilterFalsePositivesGetOK() *ColumnFamilyMetricsBloomFilterFalsePositivesGetOK { + return &ColumnFamilyMetricsBloomFilterFalsePositivesGetOK{} +} + +/* +ColumnFamilyMetricsBloomFilterFalsePositivesGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsBloomFilterFalsePositivesGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsBloomFilterFalsePositivesGetDefault creates a ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault with default headers values +func NewColumnFamilyMetricsBloomFilterFalsePositivesGetDefault(code int) *ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault { + return &ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics bloom filter false positives get default response +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_by_name_get_parameters.go new file mode 100644 index 00000000000..3a8e76dfb1c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams creates a new ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams object +// with the default values initialized. 
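The OK payloads in these files are declared as interface{} because the swagger spec (not part of this diff) leaves the response schema untyped. For metric endpoints like these, the body is presumably a single JSON number, which a JSON consumer decodes into float64; a sketch under that assumption:

package main

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	ok := operations.NewColumnFamilyMetricsBloomFilterFalsePositivesGetOK()
	// Stand-in for what consumer.Consume would decode from a real response
	// body; the float64 shape is an assumption about the (unshown) spec.
	ok.Payload = float64(42)

	if n, isNum := ok.GetPayload().(float64); isNum {
		fmt.Printf("bloom filter false positives: %.0f\n", n)
	}
}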
+func NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams() *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics bloom filter false ratio by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) WithHTTPClient(client *http.Client) 
*ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) WithName(name string) *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_by_name_get_responses.go new file mode 100644 index 00000000000..1adb05de9be --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsBloomFilterFalseRatioByNameGetReader is a Reader for the ColumnFamilyMetricsBloomFilterFalseRatioByNameGet structure. +type ColumnFamilyMetricsBloomFilterFalseRatioByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK creates a ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK with default headers values +func NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK() *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK { + return &ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK{} +} + +/* +ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault creates a ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault with default headers values +func NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault(code int) *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault { + return &ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics bloom filter false ratio by name get default response +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_get_parameters.go new file mode 100644 index 00000000000..a7b731cca45 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsBloomFilterFalseRatioGetParams creates a new ColumnFamilyMetricsBloomFilterFalseRatioGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsBloomFilterFalseRatioGetParams() *ColumnFamilyMetricsBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsBloomFilterFalseRatioGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterFalseRatioGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsBloomFilterFalseRatioGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterFalseRatioGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsBloomFilterFalseRatioGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterFalseRatioGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsBloomFilterFalseRatioGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalseRatioGetParams contains all the parameters to send to the API endpoint +for the column family metrics bloom filter false ratio get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsBloomFilterFalseRatioGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics bloom filter false ratio get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterFalseRatioGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics bloom filter false ratio get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics bloom filter false ratio get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterFalseRatioGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics bloom filter false ratio get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics bloom filter false ratio get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterFalseRatioGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter false ratio get params +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest 
writes these params to a swagger request +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_get_responses.go new file mode 100644 index 00000000000..3e37e426e10 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_false_ratio_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsBloomFilterFalseRatioGetReader is a Reader for the ColumnFamilyMetricsBloomFilterFalseRatioGet structure. +type ColumnFamilyMetricsBloomFilterFalseRatioGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsBloomFilterFalseRatioGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsBloomFilterFalseRatioGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioGetOK creates a ColumnFamilyMetricsBloomFilterFalseRatioGetOK with default headers values +func NewColumnFamilyMetricsBloomFilterFalseRatioGetOK() *ColumnFamilyMetricsBloomFilterFalseRatioGetOK { + return &ColumnFamilyMetricsBloomFilterFalseRatioGetOK{} +} + +/* +ColumnFamilyMetricsBloomFilterFalseRatioGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsBloomFilterFalseRatioGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsBloomFilterFalseRatioGetDefault creates a ColumnFamilyMetricsBloomFilterFalseRatioGetDefault with default headers values +func NewColumnFamilyMetricsBloomFilterFalseRatioGetDefault(code int) *ColumnFamilyMetricsBloomFilterFalseRatioGetDefault { + return &ColumnFamilyMetricsBloomFilterFalseRatioGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsBloomFilterFalseRatioGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsBloomFilterFalseRatioGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics bloom filter false ratio get default response +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsBloomFilterFalseRatioGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_by_name_get_parameters.go new file mode 100644 index 00000000000..fce05146f6f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams object +// with the default values initialized. 
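One detail worth noticing across all of these constructors: only the plain New...Params() applies cr.DefaultTimeout; the WithTimeout/WithContext/WithHTTPClient variants leave the timeout at its zero value, so callers using them should set a timeout explicitly. A minimal sketch with the parameterless false-ratio operation:

package main

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithContext sets only
	// the Context field, so the per-request timeout is set explicitly as well.
	params := operations.NewColumnFamilyMetricsBloomFilterFalseRatioGetParamsWithContext(ctx).
		WithTimeout(5 * time.Second)

	_ = params // would be handed to the generated operations client (not in this diff)
}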
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams() *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics bloom filter off heap memory used by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// 
WithHTTPClient adds the HTTPClient to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) WithName(name string) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics bloom filter off heap memory used by name get params +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_by_name_get_responses.go new file mode 100644 index 00000000000..42c63d73f06 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetReader is a Reader for the ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGet structure. +type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK creates a ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK with default headers values
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK() *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK {
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK{}
+}
+
+/*
+ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK struct {
+	Payload interface{}
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault creates a ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault with default headers values
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault(code int) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault {
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics bloom filter off heap memory used by name get default response
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_get_parameters.go
new file mode 100644
index 00000000000..3e8443ab8d8
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams() *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParamsWithTimeout creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParamsWithContext creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParamsWithHTTPClient creates a new ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams contains all the parameters to send to the API endpoint
+for the column family metrics bloom filter off heap memory used get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics bloom filter off heap memory used get params
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics bloom filter off heap memory used get params
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics bloom filter off heap memory used get params
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics bloom filter off heap memory used get params
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics bloom filter off heap memory used get params
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics bloom filter off heap memory used get params
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_get_responses.go
new file mode 100644
index 00000000000..b3d499cdc82
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_bloom_filter_off_heap_memory_used_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetReader is a Reader for the ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGet structure.
+type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK creates a ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK with default headers values
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK() *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK {
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK{}
+}
+
+/*
+ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK struct {
+	Payload interface{}
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault creates a ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault with default headers values
+func NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault(code int) *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault {
+	return &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics bloom filter off heap memory used get default response
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_by_name_get_parameters.go
new file mode 100644
index 00000000000..ff326b15e35
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_by_name_get_parameters.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCasCommitByNameGetParams creates a new ColumnFamilyMetricsCasCommitByNameGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCasCommitByNameGetParams() *ColumnFamilyMetricsCasCommitByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitByNameGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasCommitByNameGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCasCommitByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasCommitByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitByNameGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasCommitByNameGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCasCommitByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasCommitByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitByNameGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasCommitByNameGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCasCommitByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasCommitByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitByNameGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasCommitByNameGetParams contains all the parameters to send to the API endpoint
+for the column family metrics cas commit by name get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCasCommitByNameGetParams struct {
+
+	/*Name
+	  The column family name in keyspace:name format
+
+	*/
+	Name string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasCommitByNameGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasCommitByNameGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasCommitByNameGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithName adds the name to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasCommitByNameGetParams {
+	o.SetName(name)
+	return o
+}
+
+// SetName adds the name to the column family metrics cas commit by name get params
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) SetName(name string) {
+	o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCasCommitByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param name
+	if err := r.SetPathParam("name", o.Name); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_by_name_get_responses.go
new file mode 100644
index 00000000000..06eb7c359ed
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_by_name_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCasCommitByNameGetReader is a Reader for the ColumnFamilyMetricsCasCommitByNameGet structure.
+type ColumnFamilyMetricsCasCommitByNameGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCasCommitByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCasCommitByNameGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCasCommitByNameGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitByNameGetOK creates a ColumnFamilyMetricsCasCommitByNameGetOK with default headers values
+func NewColumnFamilyMetricsCasCommitByNameGetOK() *ColumnFamilyMetricsCasCommitByNameGetOK {
+	return &ColumnFamilyMetricsCasCommitByNameGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCasCommitByNameGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCasCommitByNameGetOK struct {
+	Payload int32
+}
+
+func (o *ColumnFamilyMetricsCasCommitByNameGetOK) GetPayload() int32 {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCasCommitByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCasCommitByNameGetDefault creates a ColumnFamilyMetricsCasCommitByNameGetDefault with default headers values
+func NewColumnFamilyMetricsCasCommitByNameGetDefault(code int) *ColumnFamilyMetricsCasCommitByNameGetDefault {
+	return &ColumnFamilyMetricsCasCommitByNameGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasCommitByNameGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCasCommitByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics cas commit by name get default response
+func (o *ColumnFamilyMetricsCasCommitByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCasCommitByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCasCommitByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCasCommitByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_histogram_by_name_get_parameters.go
new file mode 100644
index 00000000000..36477470d2a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_histogram_by_name_get_parameters.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams creates a new ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams() *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams contains all the parameters to send to the API endpoint
+for the column family metrics cas commit estimated histogram by name get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams struct {
+
+	/*Name
+	  The column family name in keyspace:name format
+
+	*/
+	Name string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithName adds the name to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams {
+	o.SetName(name)
+	return o
+}
+
+// SetName adds the name to the column family metrics cas commit estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) SetName(name string) {
+	o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param name
+	if err := r.SetPathParam("name", o.Name); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_histogram_by_name_get_responses.go
new file mode 100644
index 00000000000..4642ec00772
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_histogram_by_name_get_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGet structure.
+type ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK creates a ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK with default headers values
+func NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK() *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK {
+	return &ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK struct {
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault creates a ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault with default headers values
+func NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault {
+	return &ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics cas commit estimated histogram by name get default response
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_recent_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_recent_histogram_by_name_get_parameters.go
new file mode 100644
index 00000000000..254da6bb058
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_recent_histogram_by_name_get_parameters.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams creates a new ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams() *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams contains all the parameters to send to the API endpoint
+for the column family metrics cas commit estimated recent histogram by name get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams struct {
+
+	/*Name
+	  The column family name in keyspace:name format
+
+	*/
+	Name string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithName adds the name to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams {
+	o.SetName(name)
+	return o
+}
+
+// SetName adds the name to the column family metrics cas commit estimated recent histogram by name get params
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) SetName(name string) {
+	o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param name
+	if err := r.SetPathParam("name", o.Name); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_recent_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_recent_histogram_by_name_get_responses.go
new file mode 100644
index 00000000000..cba510998c1
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_commit_estimated_recent_histogram_by_name_get_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGet structure.
+type ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK creates a ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK with default headers values
+func NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK() *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK {
+	return &ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK struct {
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault creates a ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault with default headers values
+func NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault(code int) *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault {
+	return &ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics cas commit estimated recent histogram by name get default response
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_by_name_get_parameters.go
new file mode 100644
index 00000000000..5368d6bbcd8
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_by_name_get_parameters.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCasPrepareByNameGetParams creates a new ColumnFamilyMetricsCasPrepareByNameGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCasPrepareByNameGetParams() *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareByNameGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasPrepareByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasPrepareByNameGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCasPrepareByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareByNameGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasPrepareByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasPrepareByNameGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCasPrepareByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareByNameGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCasPrepareByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasPrepareByNameGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCasPrepareByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareByNameGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasPrepareByNameGetParams contains all the parameters to send to the API endpoint
+for the column family metrics cas prepare by name get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCasPrepareByNameGetParams struct {
+
+	/*Name
+	  The column family name in keyspace:name format
+
+	*/
+	Name string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithName adds the name to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasPrepareByNameGetParams {
+	o.SetName(name)
+	return o
+}
+
+// SetName adds the name to the column family metrics cas prepare by name get params
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) SetName(name string) {
+	o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCasPrepareByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param name
+	if err := r.SetPathParam("name", o.Name); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_by_name_get_responses.go
new file mode 100644
index 00000000000..e800ec2b2d7
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_by_name_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCasPrepareByNameGetReader is a Reader for the ColumnFamilyMetricsCasPrepareByNameGet structure.
+type ColumnFamilyMetricsCasPrepareByNameGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCasPrepareByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCasPrepareByNameGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCasPrepareByNameGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCasPrepareByNameGetOK creates a ColumnFamilyMetricsCasPrepareByNameGetOK with default headers values
+func NewColumnFamilyMetricsCasPrepareByNameGetOK() *ColumnFamilyMetricsCasPrepareByNameGetOK {
+	return &ColumnFamilyMetricsCasPrepareByNameGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCasPrepareByNameGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCasPrepareByNameGetOK struct {
+	Payload int32
+}
+
+func (o *ColumnFamilyMetricsCasPrepareByNameGetOK) GetPayload() int32 {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCasPrepareByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCasPrepareByNameGetDefault creates a ColumnFamilyMetricsCasPrepareByNameGetDefault with default headers values
+func NewColumnFamilyMetricsCasPrepareByNameGetDefault(code int) *ColumnFamilyMetricsCasPrepareByNameGetDefault {
+	return &ColumnFamilyMetricsCasPrepareByNameGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasPrepareByNameGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCasPrepareByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics cas prepare by name get default response
+func (o *ColumnFamilyMetricsCasPrepareByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCasPrepareByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCasPrepareByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCasPrepareByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_histogram_by_name_get_parameters.go
new file mode 100644
index 00000000000..e380a38c9db
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_histogram_by_name_get_parameters.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams creates a new ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams() *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams contains all the parameters to send to the API endpoint
+for the column family metrics cas prepare estimated histogram by name get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams struct {
+
+	/*Name
+	  The column family name in keyspace:name format
+
+	*/
+	Name string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithName adds the name to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
+	o.SetName(name)
+	return o
+}
+
+// SetName adds the name to the column family metrics cas prepare estimated histogram by name get params
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) SetName(name string) {
+	o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param name
+	if err := r.SetPathParam("name", o.Name); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_histogram_by_name_get_responses.go
new file mode 100644
index 00000000000..d2b5723777a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_histogram_by_name_get_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGet structure.
+type ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK creates a ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK() *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK { + return &ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault creates a ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault { + return &ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics cas prepare estimated histogram by name get default response +func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_recent_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_recent_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..92c21842bd9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_recent_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams creates a new ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams object +// with the default values initialized. 
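Note: the generated parameter types above all follow go-swagger's fluent builder pattern; each With* setter stores the value and returns the receiver, so calls chain, while the matching Set* methods allow conditional, step-by-step assembly. A minimal usage sketch follows. The import path is inferred from the vendored file paths in this diff, the keyspace:table name is a placeholder, and the surrounding client plumbing is assumed rather than shown.

package example

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// buildParams sketches how a caller might assemble one of the generated
// parameter objects before invoking the corresponding operation.
func buildParams(ctx context.Context) *operations.ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams {
	// Each With* setter returns the receiver, so the object can be built in
	// one chain. "mykeyspace:mytable" is a placeholder in the documented
	// keyspace:name path-parameter format.
	return operations.NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams().
		WithContext(ctx).
		WithTimeout(2 * time.Second).
		WithName("mykeyspace:mytable")
}

WriteToRequest then applies the timeout and substitutes the name into the URL path when the request is built.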
+func NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams() *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics cas prepare estimated recent histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics cas prepare estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics cas prepare estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics cas prepare estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics cas prepare estimated 
recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics cas prepare estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics cas prepare estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics cas prepare estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics cas prepare estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_recent_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_recent_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..4dadbc4a888 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_prepare_estimated_recent_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGet structure. +type ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK creates a ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK() *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK { + return &ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault creates a ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault(code int) *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault { + return &ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics cas prepare estimated recent histogram by name get default response +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_by_name_get_parameters.go new file mode 100644 index 00000000000..43089a188e2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsCasProposeByNameGetParams creates a new ColumnFamilyMetricsCasProposeByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsCasProposeByNameGetParams() *ColumnFamilyMetricsCasProposeByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsCasProposeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasProposeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsCasProposeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasProposeByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsCasProposeByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasProposeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsCasProposeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasProposeByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsCasProposeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasProposeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsCasProposeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasProposeByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsCasProposeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics cas propose by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsCasProposeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics cas propose by name get params +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasProposeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics cas propose by name get params +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics cas propose by name get params +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasProposeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics cas propose by name get params +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics cas propose by name get params +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasProposeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics cas propose by name get params +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics cas propose by name get params 
+func (o *ColumnFamilyMetricsCasProposeByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasProposeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics cas propose by name get params +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsCasProposeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_by_name_get_responses.go new file mode 100644 index 00000000000..2bda59f9533 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsCasProposeByNameGetReader is a Reader for the ColumnFamilyMetricsCasProposeByNameGet structure. +type ColumnFamilyMetricsCasProposeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsCasProposeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsCasProposeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsCasProposeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsCasProposeByNameGetOK creates a ColumnFamilyMetricsCasProposeByNameGetOK with default headers values +func NewColumnFamilyMetricsCasProposeByNameGetOK() *ColumnFamilyMetricsCasProposeByNameGetOK { + return &ColumnFamilyMetricsCasProposeByNameGetOK{} +} + +/* +ColumnFamilyMetricsCasProposeByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsCasProposeByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsCasProposeByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsCasProposeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsCasProposeByNameGetDefault creates a ColumnFamilyMetricsCasProposeByNameGetDefault with default headers values +func NewColumnFamilyMetricsCasProposeByNameGetDefault(code int) *ColumnFamilyMetricsCasProposeByNameGetDefault { + return &ColumnFamilyMetricsCasProposeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsCasProposeByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsCasProposeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics cas propose by name get default response +func (o *ColumnFamilyMetricsCasProposeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsCasProposeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsCasProposeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsCasProposeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..209ff544d5b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams creates a new ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams object +// with the default values initialized. 
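Note: every ReadResponse in these files follows the same dispatch: a 200 decodes into the typed OK result (an int32 payload for the cas propose counter above), any other 2xx code is returned as the Default value without an error, and non-2xx codes return the Default as an error. A sketch of consuming that contract follows; the generated client method is not part of this diff, so it is abstracted as a callback here, and the parameter value is a placeholder.

package example

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// fetchCasProposeCount is illustrative only: "invoke" stands in for the
// generated client method that performs the HTTP call.
func fetchCasProposeCount(
	invoke func(*operations.ColumnFamilyMetricsCasProposeByNameGetParams) (*operations.ColumnFamilyMetricsCasProposeByNameGetOK, error),
) (int32, error) {
	params := operations.NewColumnFamilyMetricsCasProposeByNameGetParams().
		WithName("mykeyspace:mytable") // placeholder keyspace:name value

	res, err := invoke(params)
	if err != nil {
		// Non-2xx responses surface as *...Default, whose Error() renders
		// as "agent [HTTP <code>] <message>".
		var apiErr *operations.ColumnFamilyMetricsCasProposeByNameGetDefault
		if errors.As(err, &apiErr) {
			fmt.Printf("server error %d: %s\n", apiErr.Code(), apiErr.GetPayload().Message)
		}
		return 0, err
	}
	return res.Payload, nil // int32 counter from the OK payload
}

Returning the Default as a plain result for unmapped 2xx codes (the response.Code()/100 == 2 branch) keeps unexpected-but-successful responses from being treated as failures.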
+func NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams() *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics cas propose estimated histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds 
the HTTPClient to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics cas propose estimated histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..6bb389c0274 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGet structure. +type ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK creates a ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK() *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK { + return &ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault creates a ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault { + return &ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics cas propose estimated histogram by name get default response +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_recent_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_recent_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..597c8633057 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_recent_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams creates a new ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams() *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics cas propose estimated recent histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics cas propose estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics cas propose estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics cas propose estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics cas propose estimated 
recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics cas propose estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics cas propose estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics cas propose estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics cas propose estimated recent histogram by name get params +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_recent_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_recent_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..bb34782a1b5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_cas_propose_estimated_recent_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGet structure. +type ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK creates a ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK() *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK { + return &ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault creates a ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault(code int) *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault { + return &ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics cas propose estimated recent histogram by name get default response +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_col_update_time_delta_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_col_update_time_delta_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..6fa3679f38f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_col_update_time_delta_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams creates a new ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams() *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics col update time delta histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the 
column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics col update time delta histogram by name get params +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_col_update_time_delta_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_col_update_time_delta_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..5a838461a98 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_col_update_time_delta_histogram_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGet structure. +type ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK creates a ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK() *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK { + return &ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault creates a ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault(code int) *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault { + return &ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault handles this case with default header values. 
+
+internal server error
+*/
+type ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics col update time delta histogram by name get default response
+func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_by_name_get_parameters.go
new file mode 100644
index 00000000000..1ba6953c2c8
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_by_name_get_parameters.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams() *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParamsWithContext creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams contains all the parameters to send to the API endpoint
+for the column family metrics compression metadata off heap memory used by name get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams struct {
+
+	/*Name
+	  The column family name in keyspace:name format
+
+	*/
+	Name string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithName adds the name to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) WithName(name string) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams {
+	o.SetName(name)
+	return o
+}
+
+// SetName adds the name to the column family metrics compression metadata off heap memory used by name get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) SetName(name string) {
+	o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param name
+	if err := r.SetPathParam("name", o.Name); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_by_name_get_responses.go
new file mode 100644
index 00000000000..79bf1a5df4f
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_by_name_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetReader is a Reader for the ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGet structure.
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK creates a ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK with default headers values
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK() *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK {
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK struct {
+	Payload interface{}
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault creates a ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault with default headers values
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault(code int) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault {
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics compression metadata off heap memory used by name get default response
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_get_parameters.go
new file mode 100644
index 00000000000..9884751cc43
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams() *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParamsWithTimeout creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParamsWithContext creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams {
+
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams contains all the parameters to send to the API endpoint
+for the column family metrics compression metadata off heap memory used get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics compression metadata off heap memory used get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics compression metadata off heap memory used get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics compression metadata off heap memory used get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics compression metadata off heap memory used get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics compression metadata off heap memory used get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics compression metadata off heap memory used get params
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_get_responses.go
new file mode 100644
index 00000000000..46919eb9777
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_metadata_off_heap_memory_used_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetReader is a Reader for the ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGet structure.
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK creates a ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK with default headers values
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK() *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK {
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK struct {
+	Payload interface{}
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault creates a ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault with default headers values
+func NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault(code int) *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault {
+	return &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics compression metadata off heap memory used get default response
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_by_name_get_parameters.go
new file mode 100644
index 00000000000..3fb119bfac4
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_by_name_get_parameters.go
@@ -0,0 +1,136 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCompressionRatioByNameGetParams creates a new ColumnFamilyMetricsCompressionRatioByNameGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCompressionRatioByNameGetParams() *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionRatioByNameGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsCompressionRatioByNameGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCompressionRatioByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionRatioByNameGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioByNameGetParamsWithContext creates a new ColumnFamilyMetricsCompressionRatioByNameGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCompressionRatioByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionRatioByNameGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCompressionRatioByNameGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCompressionRatioByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	var ()
+	return &ColumnFamilyMetricsCompressionRatioByNameGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioByNameGetParams contains all the parameters to send to the API endpoint
+for the column family metrics compression ratio by name get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCompressionRatioByNameGetParams struct {
+
+	/*Name
+	  The column family name in keyspace:name format
+
+	*/
+	Name string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithName adds the name to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) WithName(name string) *ColumnFamilyMetricsCompressionRatioByNameGetParams {
+	o.SetName(name)
+	return o
+}
+
+// SetName adds the name to the column family metrics compression ratio by name get params
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) SetName(name string) {
+	o.Name = name
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param name
+	if err := r.SetPathParam("name", o.Name); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_by_name_get_responses.go
new file mode 100644
index 00000000000..01fdb785af5
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_by_name_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCompressionRatioByNameGetReader is a Reader for the ColumnFamilyMetricsCompressionRatioByNameGet structure.
+type ColumnFamilyMetricsCompressionRatioByNameGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCompressionRatioByNameGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCompressionRatioByNameGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioByNameGetOK creates a ColumnFamilyMetricsCompressionRatioByNameGetOK with default headers values
+func NewColumnFamilyMetricsCompressionRatioByNameGetOK() *ColumnFamilyMetricsCompressionRatioByNameGetOK {
+	return &ColumnFamilyMetricsCompressionRatioByNameGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioByNameGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCompressionRatioByNameGetOK struct {
+	Payload interface{}
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCompressionRatioByNameGetDefault creates a ColumnFamilyMetricsCompressionRatioByNameGetDefault with default headers values
+func NewColumnFamilyMetricsCompressionRatioByNameGetDefault(code int) *ColumnFamilyMetricsCompressionRatioByNameGetDefault {
+	return &ColumnFamilyMetricsCompressionRatioByNameGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioByNameGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCompressionRatioByNameGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics compression ratio by name get default response
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioByNameGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_get_parameters.go
new file mode 100644
index 00000000000..71d795d4709
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCompressionRatioGetParams creates a new ColumnFamilyMetricsCompressionRatioGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCompressionRatioGetParams() *ColumnFamilyMetricsCompressionRatioGetParams {
+
+	return &ColumnFamilyMetricsCompressionRatioGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioGetParamsWithTimeout creates a new ColumnFamilyMetricsCompressionRatioGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCompressionRatioGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionRatioGetParams {
+
+	return &ColumnFamilyMetricsCompressionRatioGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioGetParamsWithContext creates a new ColumnFamilyMetricsCompressionRatioGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCompressionRatioGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCompressionRatioGetParams {
+
+	return &ColumnFamilyMetricsCompressionRatioGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCompressionRatioGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCompressionRatioGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionRatioGetParams {
+
+	return &ColumnFamilyMetricsCompressionRatioGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioGetParams contains all the parameters to send to the API endpoint
+for the column family metrics compression ratio get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCompressionRatioGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics compression ratio get params
+func (o *ColumnFamilyMetricsCompressionRatioGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCompressionRatioGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics compression ratio get params
+func (o *ColumnFamilyMetricsCompressionRatioGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics compression ratio get params
+func (o *ColumnFamilyMetricsCompressionRatioGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCompressionRatioGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics compression ratio get params
+func (o *ColumnFamilyMetricsCompressionRatioGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics compression ratio get params
+func (o *ColumnFamilyMetricsCompressionRatioGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCompressionRatioGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics compression ratio get params
+func (o *ColumnFamilyMetricsCompressionRatioGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCompressionRatioGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_get_responses.go
new file mode 100644
index 00000000000..0e5ad187fa6
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_compression_ratio_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCompressionRatioGetReader is a Reader for the ColumnFamilyMetricsCompressionRatioGet structure.
+type ColumnFamilyMetricsCompressionRatioGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCompressionRatioGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCompressionRatioGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCompressionRatioGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCompressionRatioGetOK creates a ColumnFamilyMetricsCompressionRatioGetOK with default headers values
+func NewColumnFamilyMetricsCompressionRatioGetOK() *ColumnFamilyMetricsCompressionRatioGetOK {
+	return &ColumnFamilyMetricsCompressionRatioGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCompressionRatioGetOK struct {
+	Payload interface{}
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCompressionRatioGetDefault creates a ColumnFamilyMetricsCompressionRatioGetDefault with default headers values
+func NewColumnFamilyMetricsCompressionRatioGetDefault(code int) *ColumnFamilyMetricsCompressionRatioGetDefault {
+	return &ColumnFamilyMetricsCompressionRatioGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCompressionRatioGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics compression ratio get default response
+func (o *ColumnFamilyMetricsCompressionRatioGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCompressionRatioGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_read_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_read_get_parameters.go
new file mode 100644
index 00000000000..86cd0af3351
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_read_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCoordinatorReadGetParams creates a new ColumnFamilyMetricsCoordinatorReadGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCoordinatorReadGetParams() *ColumnFamilyMetricsCoordinatorReadGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorReadGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorReadGetParamsWithTimeout creates a new ColumnFamilyMetricsCoordinatorReadGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCoordinatorReadGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCoordinatorReadGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorReadGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorReadGetParamsWithContext creates a new ColumnFamilyMetricsCoordinatorReadGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCoordinatorReadGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCoordinatorReadGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorReadGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorReadGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCoordinatorReadGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCoordinatorReadGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCoordinatorReadGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorReadGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCoordinatorReadGetParams contains all the parameters to send to the API endpoint
+for the column family metrics coordinator read get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCoordinatorReadGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics coordinator read get params
+func (o *ColumnFamilyMetricsCoordinatorReadGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCoordinatorReadGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics coordinator read get params
+func (o *ColumnFamilyMetricsCoordinatorReadGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics coordinator read get params
+func (o *ColumnFamilyMetricsCoordinatorReadGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCoordinatorReadGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics coordinator read get params
+func (o *ColumnFamilyMetricsCoordinatorReadGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics coordinator read get params
+func (o *ColumnFamilyMetricsCoordinatorReadGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCoordinatorReadGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics coordinator read get params
+func (o *ColumnFamilyMetricsCoordinatorReadGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCoordinatorReadGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_read_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_read_get_responses.go
new file mode 100644
index 00000000000..58e4938fae3
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_read_get_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCoordinatorReadGetReader is a Reader for the ColumnFamilyMetricsCoordinatorReadGet structure.
+type ColumnFamilyMetricsCoordinatorReadGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCoordinatorReadGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCoordinatorReadGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCoordinatorReadGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorReadGetOK creates a ColumnFamilyMetricsCoordinatorReadGetOK with default headers values
+func NewColumnFamilyMetricsCoordinatorReadGetOK() *ColumnFamilyMetricsCoordinatorReadGetOK {
+	return &ColumnFamilyMetricsCoordinatorReadGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCoordinatorReadGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCoordinatorReadGetOK struct {
+}
+
+func (o *ColumnFamilyMetricsCoordinatorReadGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCoordinatorReadGetDefault creates a ColumnFamilyMetricsCoordinatorReadGetDefault with default headers values
+func NewColumnFamilyMetricsCoordinatorReadGetDefault(code int) *ColumnFamilyMetricsCoordinatorReadGetDefault {
+	return &ColumnFamilyMetricsCoordinatorReadGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCoordinatorReadGetDefault handles this case with default header values.
+
+internal server error
+*/
+type ColumnFamilyMetricsCoordinatorReadGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the column family metrics coordinator read get default response
+func (o *ColumnFamilyMetricsCoordinatorReadGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ColumnFamilyMetricsCoordinatorReadGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *ColumnFamilyMetricsCoordinatorReadGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *ColumnFamilyMetricsCoordinatorReadGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_scan_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_scan_get_parameters.go
new file mode 100644
index 00000000000..0bc8e9ffa9a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_scan_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewColumnFamilyMetricsCoordinatorScanGetParams creates a new ColumnFamilyMetricsCoordinatorScanGetParams object
+// with the default values initialized.
+func NewColumnFamilyMetricsCoordinatorScanGetParams() *ColumnFamilyMetricsCoordinatorScanGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorScanGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorScanGetParamsWithTimeout creates a new ColumnFamilyMetricsCoordinatorScanGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewColumnFamilyMetricsCoordinatorScanGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsCoordinatorScanGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorScanGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorScanGetParamsWithContext creates a new ColumnFamilyMetricsCoordinatorScanGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewColumnFamilyMetricsCoordinatorScanGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsCoordinatorScanGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorScanGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorScanGetParamsWithHTTPClient creates a new ColumnFamilyMetricsCoordinatorScanGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewColumnFamilyMetricsCoordinatorScanGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsCoordinatorScanGetParams {
+
+	return &ColumnFamilyMetricsCoordinatorScanGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ColumnFamilyMetricsCoordinatorScanGetParams contains all the parameters to send to the API endpoint
+for the column family metrics coordinator scan get operation typically these are written to a http.Request
+*/
+type ColumnFamilyMetricsCoordinatorScanGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the column family metrics coordinator scan get params
+func (o *ColumnFamilyMetricsCoordinatorScanGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsCoordinatorScanGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the column family metrics coordinator scan get params
+func (o *ColumnFamilyMetricsCoordinatorScanGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the column family metrics coordinator scan get params
+func (o *ColumnFamilyMetricsCoordinatorScanGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsCoordinatorScanGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the column family metrics coordinator scan get params
+func (o *ColumnFamilyMetricsCoordinatorScanGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the column family metrics coordinator scan get params
+func (o *ColumnFamilyMetricsCoordinatorScanGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsCoordinatorScanGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the column family metrics coordinator scan get params
+func (o *ColumnFamilyMetricsCoordinatorScanGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ColumnFamilyMetricsCoordinatorScanGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_scan_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_scan_get_responses.go
new file mode 100644
index 00000000000..07364e88681
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_coordinator_scan_get_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// ColumnFamilyMetricsCoordinatorScanGetReader is a Reader for the ColumnFamilyMetricsCoordinatorScanGet structure.
+type ColumnFamilyMetricsCoordinatorScanGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ColumnFamilyMetricsCoordinatorScanGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewColumnFamilyMetricsCoordinatorScanGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewColumnFamilyMetricsCoordinatorScanGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewColumnFamilyMetricsCoordinatorScanGetOK creates a ColumnFamilyMetricsCoordinatorScanGetOK with default headers values
+func NewColumnFamilyMetricsCoordinatorScanGetOK() *ColumnFamilyMetricsCoordinatorScanGetOK {
+	return &ColumnFamilyMetricsCoordinatorScanGetOK{}
+}
+
+/*
+ColumnFamilyMetricsCoordinatorScanGetOK handles this case with default header values.
+
+Success
+*/
+type ColumnFamilyMetricsCoordinatorScanGetOK struct {
+}
+
+func (o *ColumnFamilyMetricsCoordinatorScanGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewColumnFamilyMetricsCoordinatorScanGetDefault creates a ColumnFamilyMetricsCoordinatorScanGetDefault with default headers values
+func NewColumnFamilyMetricsCoordinatorScanGetDefault(code int) *ColumnFamilyMetricsCoordinatorScanGetDefault {
+	return &ColumnFamilyMetricsCoordinatorScanGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ColumnFamilyMetricsCoordinatorScanGetDefault handles this case with default header values.
+ +internal server error +*/ +type ColumnFamilyMetricsCoordinatorScanGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics coordinator scan get default response +func (o *ColumnFamilyMetricsCoordinatorScanGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsCoordinatorScanGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsCoordinatorScanGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsCoordinatorScanGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_column_count_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_column_count_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..cfcd146ce05 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_column_count_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams creates a new ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams object +// with the default values initialized. 
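
The params/reader pair above follows go-swagger's usual calling convention: a fluent params builder feeds the generated client method, which dispatches through the Reader and hands back either the OK result or the Default error. A minimal usage sketch under stated assumptions — the operations client constructor, the method name, and the host/base path are inferred from go-swagger conventions rather than shown in this diff:

package main

import (
	"context"
	"log"
	"time"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Assumed: the node's REST API listens on 10000 and is rooted at "/".
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	ops := operations.New(transport, strfmt.Default)

	// The fluent setters mirror the generated With*/Set* pairs above.
	params := operations.NewColumnFamilyMetricsCoordinatorScanGetParams().
		WithContext(context.Background()).
		WithTimeout(30 * time.Second)

	// Assumed method name; go-swagger names it after the params type.
	if _, err := ops.ColumnFamilyMetricsCoordinatorScanGet(params); err != nil {
		log.Fatal(err) // a non-2xx status surfaces as the *...Default error
	}
	log.Println("coordinator scan metric fetched")
}
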
+func NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams() *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics estimated column count histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = 
ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics estimated column count histogram by name get params +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_column_count_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_column_count_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..05c2ece2fb9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_column_count_histogram_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGet structure. +type ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK creates a ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK() *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK { + return &ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault creates a ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault(code int) *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault { + return &ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics estimated column count histogram by name get default response +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_count_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_count_by_name_get_parameters.go new file mode 100644 index 00000000000..2c11f032ce5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_count_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsEstimatedRowCountByNameGetParams creates a new ColumnFamilyMetricsEstimatedRowCountByNameGetParams object +// with the default values initialized. 
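
The by-name variants add one required path parameter, which WriteToRequest binds via SetPathParam("name", ...). Because the 200 payload is declared as interface{}, the caller asserts whatever JSON shape the endpoint returns. A fragment continuing the sketch above (ops in scope; the keyspace/table name is a placeholder):

// Hypothetical continuation: fetch a per-table histogram metric.
params := operations.NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams().
	WithName("my_keyspace:my_table"). // "keyspace:name" format, per the param doc
	WithTimeout(10 * time.Second)

res, err := ops.ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGet(params)
if err != nil {
	log.Fatal(err)
}
// Payload is interface{}; the concrete type depends on the endpoint's JSON.
log.Printf("histogram payload: %v", res.Payload)
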
+func NewColumnFamilyMetricsEstimatedRowCountByNameGetParams() *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowCountByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsEstimatedRowCountByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsEstimatedRowCountByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsEstimatedRowCountByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowCountByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsEstimatedRowCountByNameGetParamsWithContext creates a new ColumnFamilyMetricsEstimatedRowCountByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsEstimatedRowCountByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowCountByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsEstimatedRowCountByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsEstimatedRowCountByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsEstimatedRowCountByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowCountByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsEstimatedRowCountByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics estimated row count by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsEstimatedRowCountByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics 
estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) WithName(name string) *ColumnFamilyMetricsEstimatedRowCountByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics estimated row count by name get params +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_count_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_count_by_name_get_responses.go new file mode 100644 index 00000000000..3d05806272d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_count_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsEstimatedRowCountByNameGetReader is a Reader for the ColumnFamilyMetricsEstimatedRowCountByNameGet structure. +type ColumnFamilyMetricsEstimatedRowCountByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsEstimatedRowCountByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsEstimatedRowCountByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsEstimatedRowCountByNameGetOK creates a ColumnFamilyMetricsEstimatedRowCountByNameGetOK with default headers values +func NewColumnFamilyMetricsEstimatedRowCountByNameGetOK() *ColumnFamilyMetricsEstimatedRowCountByNameGetOK { + return &ColumnFamilyMetricsEstimatedRowCountByNameGetOK{} +} + +/* +ColumnFamilyMetricsEstimatedRowCountByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsEstimatedRowCountByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsEstimatedRowCountByNameGetDefault creates a ColumnFamilyMetricsEstimatedRowCountByNameGetDefault with default headers values +func NewColumnFamilyMetricsEstimatedRowCountByNameGetDefault(code int) *ColumnFamilyMetricsEstimatedRowCountByNameGetDefault { + return &ColumnFamilyMetricsEstimatedRowCountByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsEstimatedRowCountByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsEstimatedRowCountByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics estimated row count by name get default response +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsEstimatedRowCountByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_size_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_size_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..8205076305e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_size_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams creates a new ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams object +// with the default values initialized. 
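
WithContext ties the request's lifetime to a caller-owned context rather than the package-level default timeout. A fragment under the same assumptions as above:

// Hypothetical continuation: bound the call with a cancellable context.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

params := operations.NewColumnFamilyMetricsEstimatedRowCountByNameGetParamsWithContext(ctx).
	WithName("my_keyspace:my_table") // placeholder
if _, err := ops.ColumnFamilyMetricsEstimatedRowCountByNameGet(params); err != nil {
	log.Print(err) // a fired deadline surfaces here as an error
}
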
+func NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams() *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics estimated row size histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics estimated row size histogram by name get params +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics estimated row size histogram by name get params +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics estimated row size histogram by name get params +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics estimated row size histogram by name get params +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics estimated row size histogram by name get params 
+func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics estimated row size histogram by name get params +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics estimated row size histogram by name get params +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics estimated row size histogram by name get params +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_size_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_size_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..7694c1b9c54 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_estimated_row_size_histogram_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGet structure. +type ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK creates a ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK() *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK { + return &ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault creates a ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault(code int) *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault { + return &ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics estimated row size histogram by name get default response +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_by_name_get_parameters.go new file mode 100644 index 00000000000..fb62ee33cd5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams object +// with the default values initialized. 
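
Every Default response above decodes into models.ErrorModel and doubles as the returned error, with Error() rendering "agent [HTTP %d] %s". Callers that need the structured payload can recover it from the error value; a fragment (errors is the standard library package):

// Hypothetical continuation: recover the structured agent error.
_, err := ops.ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGet(
	operations.NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams().
		WithName("my_keyspace:my_table"))
if err != nil {
	var def *operations.ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault
	if errors.As(err, &def) {
		// def carries both the raw status code and the decoded ErrorModel.
		log.Printf("agent returned %d: %s", def.Code(), def.GetPayload().Message)
	}
}
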
+func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams() *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParamsWithContext creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + var () + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics index summary off heap memory used by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) SetContext(ctx context.Context) { + 
o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) WithName(name string) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics index summary off heap memory used by name get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_by_name_get_responses.go new file mode 100644 index 00000000000..16ac36e5942 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetReader is a Reader for the ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGet structure. +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK creates a ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK with default headers values +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK() *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK { + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK{} +} + +/* +ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault creates a ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault with default headers values +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault(code int) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault { + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics index summary off heap memory used by name get default response +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_get_parameters.go new file mode 100644 index 00000000000..d0ac967d42b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams() *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams { + + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParamsWithTimeout creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams { + + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParamsWithContext creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams { + + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParamsWithHTTPClient creates a new ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams { + + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams contains all the parameters to send to the API endpoint +for the column family metrics index summary off heap memory used get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics index summary off heap memory used get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics index summary off heap memory used get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics index summary off heap memory used get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics index summary off heap memory used get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics index summary off heap memory used get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams { + 
o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics index summary off heap memory used get params +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_get_responses.go new file mode 100644 index 00000000000..536f246e9b8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_index_summary_off_heap_memory_used_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetReader is a Reader for the ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGet structure. +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK creates a ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK with default headers values +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK() *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK { + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK{} +} + +/* +ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault creates a ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault with default headers values +func NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault(code int) *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault { + return &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics index summary off heap memory used get default response +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_key_cache_hit_rate_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_key_cache_hit_rate_by_name_get_parameters.go new file mode 100644 index 00000000000..3b77a90cf2b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_key_cache_hit_rate_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsKeyCacheHitRateByNameGetParams creates a new ColumnFamilyMetricsKeyCacheHitRateByNameGetParams object +// with the default values initialized. 
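
The *WithHTTPClient constructors let a caller attach its own *http.Client (custom TLS, proxies, tracing). Note the generated setters only record the client on the params; whether the configured transport honors it depends on the go-openapi runtime in use, so treat this as illustrative (assumes "net/http" is imported):

// Hypothetical continuation: supply a caller-owned *http.Client.
httpClient := &http.Client{Timeout: 15 * time.Second}
params := operations.NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParamsWithHTTPClient(httpClient)
if _, err := ops.ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGet(params); err != nil {
	log.Print(err)
}
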
+func NewColumnFamilyMetricsKeyCacheHitRateByNameGetParams() *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + var () + return &ColumnFamilyMetricsKeyCacheHitRateByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsKeyCacheHitRateByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsKeyCacheHitRateByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsKeyCacheHitRateByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + var () + return &ColumnFamilyMetricsKeyCacheHitRateByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsKeyCacheHitRateByNameGetParamsWithContext creates a new ColumnFamilyMetricsKeyCacheHitRateByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsKeyCacheHitRateByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + var () + return &ColumnFamilyMetricsKeyCacheHitRateByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsKeyCacheHitRateByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsKeyCacheHitRateByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsKeyCacheHitRateByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + var () + return &ColumnFamilyMetricsKeyCacheHitRateByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsKeyCacheHitRateByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics key cache hit rate by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsKeyCacheHitRateByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics key cache hit rate by name get params +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics key cache hit rate by name get params +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics key cache hit rate by name get params +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics key cache hit rate by name get params +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics key cache hit rate by name get params +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics key cache hit rate by name get params +func (o 
*ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics key cache hit rate by name get params +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) WithName(name string) *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics key cache hit rate by name get params +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_key_cache_hit_rate_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_key_cache_hit_rate_by_name_get_responses.go new file mode 100644 index 00000000000..28b38764c7a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_key_cache_hit_rate_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsKeyCacheHitRateByNameGetReader is a Reader for the ColumnFamilyMetricsKeyCacheHitRateByNameGet structure. +type ColumnFamilyMetricsKeyCacheHitRateByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsKeyCacheHitRateByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsKeyCacheHitRateByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsKeyCacheHitRateByNameGetOK creates a ColumnFamilyMetricsKeyCacheHitRateByNameGetOK with default headers values +func NewColumnFamilyMetricsKeyCacheHitRateByNameGetOK() *ColumnFamilyMetricsKeyCacheHitRateByNameGetOK { + return &ColumnFamilyMetricsKeyCacheHitRateByNameGetOK{} +} + +/* +ColumnFamilyMetricsKeyCacheHitRateByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsKeyCacheHitRateByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsKeyCacheHitRateByNameGetDefault creates a ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault with default headers values +func NewColumnFamilyMetricsKeyCacheHitRateByNameGetDefault(code int) *ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault { + return &ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics key cache hit rate by name get default response +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_by_name_get_parameters.go new file mode 100644 index 00000000000..db9f30cfec6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams creates a new ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams() *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParamsWithContext creates a new ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics live disk space used by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family 
metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) WithName(name string) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics live disk space used by name get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_by_name_get_responses.go new file mode 100644 index 00000000000..25c76bd8032 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetReader is a Reader for the ColumnFamilyMetricsLiveDiskSpaceUsedByNameGet structure. +type ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK creates a ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK with default headers values +func NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK() *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK { + return &ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK{} +} + +/* +ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault creates a ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault with default headers values +func NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault(code int) *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault { + return &ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics live disk space used by name get default response +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_get_parameters.go new file mode 100644 index 00000000000..199242bb11c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsLiveDiskSpaceUsedGetParams creates a new ColumnFamilyMetricsLiveDiskSpaceUsedGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsLiveDiskSpaceUsedGetParams() *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsLiveDiskSpaceUsedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedGetParamsWithTimeout creates a new ColumnFamilyMetricsLiveDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsLiveDiskSpaceUsedGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsLiveDiskSpaceUsedGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedGetParamsWithContext creates a new ColumnFamilyMetricsLiveDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsLiveDiskSpaceUsedGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsLiveDiskSpaceUsedGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedGetParamsWithHTTPClient creates a new ColumnFamilyMetricsLiveDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsLiveDiskSpaceUsedGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsLiveDiskSpaceUsedGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsLiveDiskSpaceUsedGetParams contains all the parameters to send to the API endpoint +for the column family metrics live disk space used get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsLiveDiskSpaceUsedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics live disk space used get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics live disk space used get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics live disk space used get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics live disk space used get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics live disk space used get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics live disk space used get params +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_get_responses.go new file mode 100644 index 00000000000..e85d8d4f3f5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_disk_space_used_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsLiveDiskSpaceUsedGetReader is a Reader for the ColumnFamilyMetricsLiveDiskSpaceUsedGet structure. +type ColumnFamilyMetricsLiveDiskSpaceUsedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsLiveDiskSpaceUsedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsLiveDiskSpaceUsedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedGetOK creates a ColumnFamilyMetricsLiveDiskSpaceUsedGetOK with default headers values +func NewColumnFamilyMetricsLiveDiskSpaceUsedGetOK() *ColumnFamilyMetricsLiveDiskSpaceUsedGetOK { + return &ColumnFamilyMetricsLiveDiskSpaceUsedGetOK{} +} + +/* +ColumnFamilyMetricsLiveDiskSpaceUsedGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsLiveDiskSpaceUsedGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsLiveDiskSpaceUsedGetDefault creates a ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault with default headers values +func NewColumnFamilyMetricsLiveDiskSpaceUsedGetDefault(code int) *ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault { + return &ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics live disk space used get default response +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_scanned_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_scanned_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..443b6716c52 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_scanned_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsLiveScannedHistogramByNameGetParams creates a new ColumnFamilyMetricsLiveScannedHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsLiveScannedHistogramByNameGetParams() *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsLiveScannedHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsLiveScannedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsLiveScannedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsLiveScannedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsLiveScannedHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsLiveScannedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsLiveScannedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsLiveScannedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsLiveScannedHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsLiveScannedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsLiveScannedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsLiveScannedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsLiveScannedHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsLiveScannedHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics live scanned histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsLiveScannedHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + 
o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics live scanned histogram by name get params +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_scanned_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_scanned_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..f7a513fdb49 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_scanned_histogram_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsLiveScannedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsLiveScannedHistogramByNameGet structure. +type ColumnFamilyMetricsLiveScannedHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsLiveScannedHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsLiveScannedHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsLiveScannedHistogramByNameGetOK creates a ColumnFamilyMetricsLiveScannedHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsLiveScannedHistogramByNameGetOK() *ColumnFamilyMetricsLiveScannedHistogramByNameGetOK { + return &ColumnFamilyMetricsLiveScannedHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsLiveScannedHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsLiveScannedHistogramByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsLiveScannedHistogramByNameGetDefault creates a ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsLiveScannedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault { + return &ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics live scanned histogram by name get default response +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_by_name_get_parameters.go new file mode 100644 index 00000000000..4774b8dbcbb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsLiveSsTableCountByNameGetParams creates a new ColumnFamilyMetricsLiveSsTableCountByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsLiveSsTableCountByNameGetParams() *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + var () + return &ColumnFamilyMetricsLiveSsTableCountByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsLiveSsTableCountByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsLiveSsTableCountByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsLiveSsTableCountByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + var () + return &ColumnFamilyMetricsLiveSsTableCountByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsLiveSsTableCountByNameGetParamsWithContext creates a new ColumnFamilyMetricsLiveSsTableCountByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsLiveSsTableCountByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + var () + return &ColumnFamilyMetricsLiveSsTableCountByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsLiveSsTableCountByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsLiveSsTableCountByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsLiveSsTableCountByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + var () + return &ColumnFamilyMetricsLiveSsTableCountByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsLiveSsTableCountByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics live ss table count by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsLiveSsTableCountByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics live ss table count by name get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics live ss table count by name get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics live ss table count by name get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics live ss table count by name get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics live ss table count by name get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics live ss table count by name 
get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics live ss table count by name get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) WithName(name string) *ColumnFamilyMetricsLiveSsTableCountByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics live ss table count by name get params +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_by_name_get_responses.go new file mode 100644 index 00000000000..dc17456df26 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsLiveSsTableCountByNameGetReader is a Reader for the ColumnFamilyMetricsLiveSsTableCountByNameGet structure. +type ColumnFamilyMetricsLiveSsTableCountByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsLiveSsTableCountByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsLiveSsTableCountByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsLiveSsTableCountByNameGetOK creates a ColumnFamilyMetricsLiveSsTableCountByNameGetOK with default headers values +func NewColumnFamilyMetricsLiveSsTableCountByNameGetOK() *ColumnFamilyMetricsLiveSsTableCountByNameGetOK { + return &ColumnFamilyMetricsLiveSsTableCountByNameGetOK{} +} + +/* +ColumnFamilyMetricsLiveSsTableCountByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsLiveSsTableCountByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsLiveSsTableCountByNameGetDefault creates a ColumnFamilyMetricsLiveSsTableCountByNameGetDefault with default headers values +func NewColumnFamilyMetricsLiveSsTableCountByNameGetDefault(code int) *ColumnFamilyMetricsLiveSsTableCountByNameGetDefault { + return &ColumnFamilyMetricsLiveSsTableCountByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsLiveSsTableCountByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsLiveSsTableCountByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics live ss table count by name get default response +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsLiveSsTableCountByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_get_parameters.go new file mode 100644 index 00000000000..7639bf3610b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsLiveSsTableCountGetParams creates a new ColumnFamilyMetricsLiveSsTableCountGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsLiveSsTableCountGetParams() *ColumnFamilyMetricsLiveSsTableCountGetParams { + + return &ColumnFamilyMetricsLiveSsTableCountGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsLiveSsTableCountGetParamsWithTimeout creates a new ColumnFamilyMetricsLiveSsTableCountGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsLiveSsTableCountGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveSsTableCountGetParams { + + return &ColumnFamilyMetricsLiveSsTableCountGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsLiveSsTableCountGetParamsWithContext creates a new ColumnFamilyMetricsLiveSsTableCountGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsLiveSsTableCountGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsLiveSsTableCountGetParams { + + return &ColumnFamilyMetricsLiveSsTableCountGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsLiveSsTableCountGetParamsWithHTTPClient creates a new ColumnFamilyMetricsLiveSsTableCountGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsLiveSsTableCountGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveSsTableCountGetParams { + + return &ColumnFamilyMetricsLiveSsTableCountGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsLiveSsTableCountGetParams contains all the parameters to send to the API endpoint +for the column family metrics live ss table count get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsLiveSsTableCountGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics live ss table count get params +func (o *ColumnFamilyMetricsLiveSsTableCountGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsLiveSsTableCountGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics live ss table count get params +func (o *ColumnFamilyMetricsLiveSsTableCountGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics live ss table count get params +func (o *ColumnFamilyMetricsLiveSsTableCountGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsLiveSsTableCountGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics live ss table count get params +func (o *ColumnFamilyMetricsLiveSsTableCountGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics live ss table count get params +func (o *ColumnFamilyMetricsLiveSsTableCountGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsLiveSsTableCountGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics live ss table count get params +func (o *ColumnFamilyMetricsLiveSsTableCountGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsLiveSsTableCountGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_get_responses.go new file mode 100644 index 00000000000..213d0d305e3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_live_ss_table_count_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsLiveSsTableCountGetReader is a Reader for the ColumnFamilyMetricsLiveSsTableCountGet structure. +type ColumnFamilyMetricsLiveSsTableCountGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsLiveSsTableCountGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsLiveSsTableCountGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsLiveSsTableCountGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsLiveSsTableCountGetOK creates a ColumnFamilyMetricsLiveSsTableCountGetOK with default headers values +func NewColumnFamilyMetricsLiveSsTableCountGetOK() *ColumnFamilyMetricsLiveSsTableCountGetOK { + return &ColumnFamilyMetricsLiveSsTableCountGetOK{} +} + +/* +ColumnFamilyMetricsLiveSsTableCountGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsLiveSsTableCountGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsLiveSsTableCountGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveSsTableCountGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsLiveSsTableCountGetDefault creates a ColumnFamilyMetricsLiveSsTableCountGetDefault with default headers values +func NewColumnFamilyMetricsLiveSsTableCountGetDefault(code int) *ColumnFamilyMetricsLiveSsTableCountGetDefault { + return &ColumnFamilyMetricsLiveSsTableCountGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsLiveSsTableCountGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsLiveSsTableCountGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics live ss table count get default response +func (o *ColumnFamilyMetricsLiveSsTableCountGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsLiveSsTableCountGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsLiveSsTableCountGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsLiveSsTableCountGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_by_name_get_parameters.go new file mode 100644 index 00000000000..6f827aac6b8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMaxRowSizeByNameGetParams creates a new ColumnFamilyMetricsMaxRowSizeByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMaxRowSizeByNameGetParams() *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMaxRowSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMaxRowSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMaxRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMaxRowSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMaxRowSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMaxRowSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsMaxRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMaxRowSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMaxRowSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMaxRowSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMaxRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMaxRowSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMaxRowSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMaxRowSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics max row size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMaxRowSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics max row size by name get params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics max row size by name get params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics max row size by name get params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics max row size by name get params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics max row size by name get params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics max row size by name get params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics max row size by name get 
params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsMaxRowSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics max row size by name get params +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_by_name_get_responses.go new file mode 100644 index 00000000000..123658dc2fe --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMaxRowSizeByNameGetReader is a Reader for the ColumnFamilyMetricsMaxRowSizeByNameGet structure. +type ColumnFamilyMetricsMaxRowSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMaxRowSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMaxRowSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMaxRowSizeByNameGetOK creates a ColumnFamilyMetricsMaxRowSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsMaxRowSizeByNameGetOK() *ColumnFamilyMetricsMaxRowSizeByNameGetOK { + return &ColumnFamilyMetricsMaxRowSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsMaxRowSizeByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsMaxRowSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMaxRowSizeByNameGetDefault creates a ColumnFamilyMetricsMaxRowSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsMaxRowSizeByNameGetDefault(code int) *ColumnFamilyMetricsMaxRowSizeByNameGetDefault { + return &ColumnFamilyMetricsMaxRowSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMaxRowSizeByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsMaxRowSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics max row size by name get default response +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMaxRowSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_get_parameters.go new file mode 100644 index 00000000000..b42a11ff2a0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMaxRowSizeGetParams creates a new ColumnFamilyMetricsMaxRowSizeGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMaxRowSizeGetParams() *ColumnFamilyMetricsMaxRowSizeGetParams { + + return &ColumnFamilyMetricsMaxRowSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMaxRowSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsMaxRowSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMaxRowSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMaxRowSizeGetParams { + + return &ColumnFamilyMetricsMaxRowSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMaxRowSizeGetParamsWithContext creates a new ColumnFamilyMetricsMaxRowSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMaxRowSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMaxRowSizeGetParams { + + return &ColumnFamilyMetricsMaxRowSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMaxRowSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMaxRowSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMaxRowSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMaxRowSizeGetParams { + + return &ColumnFamilyMetricsMaxRowSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMaxRowSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics max row size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMaxRowSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics max row size get params +func (o *ColumnFamilyMetricsMaxRowSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMaxRowSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics max row size get params +func (o *ColumnFamilyMetricsMaxRowSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics max row size get params +func (o *ColumnFamilyMetricsMaxRowSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMaxRowSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics max row size get params +func (o *ColumnFamilyMetricsMaxRowSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics max row size get params +func (o *ColumnFamilyMetricsMaxRowSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMaxRowSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics max row size get params +func (o *ColumnFamilyMetricsMaxRowSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMaxRowSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_get_responses.go new file mode 100644 index 00000000000..cfcb196c8ef --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_max_row_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMaxRowSizeGetReader is a Reader for the ColumnFamilyMetricsMaxRowSizeGet structure. +type ColumnFamilyMetricsMaxRowSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMaxRowSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMaxRowSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMaxRowSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMaxRowSizeGetOK creates a ColumnFamilyMetricsMaxRowSizeGetOK with default headers values +func NewColumnFamilyMetricsMaxRowSizeGetOK() *ColumnFamilyMetricsMaxRowSizeGetOK { + return &ColumnFamilyMetricsMaxRowSizeGetOK{} +} + +/* +ColumnFamilyMetricsMaxRowSizeGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMaxRowSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMaxRowSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMaxRowSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMaxRowSizeGetDefault creates a ColumnFamilyMetricsMaxRowSizeGetDefault with default headers values +func NewColumnFamilyMetricsMaxRowSizeGetDefault(code int) *ColumnFamilyMetricsMaxRowSizeGetDefault { + return &ColumnFamilyMetricsMaxRowSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMaxRowSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMaxRowSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics max row size get default response +func (o *ColumnFamilyMetricsMaxRowSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMaxRowSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMaxRowSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMaxRowSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_by_name_get_parameters.go new file mode 100644 index 00000000000..95d032a1655 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMeanRowSizeByNameGetParams creates a new ColumnFamilyMetricsMeanRowSizeByNameGetParams object +// with the default values initialized. 
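All of these *Params types follow the same fluent pattern seen above: every WithX setter delegates to the corresponding SetX and returns the receiver, so construction chains naturally. A minimal sketch using the max-row-size-by-name parameters from earlier in this hunk; "ks1:standard1" is only an illustration of the documented keyspace:name format, not a real table.

package metricsexample

import (
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// maxRowSizeParams chains the generated setters; WithName fills the
// required path parameter, which WriteToRequest later serializes via
// SetPathParam("name", ...).
func maxRowSizeParams() *operations.ColumnFamilyMetricsMaxRowSizeByNameGetParams {
	return operations.NewColumnFamilyMetricsMaxRowSizeByNameGetParams().
		WithTimeout(15 * time.Second).
		WithName("ks1:standard1") // keyspace:name format, per the field doc
}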
+func NewColumnFamilyMetricsMeanRowSizeByNameGetParams() *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMeanRowSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMeanRowSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMeanRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMeanRowSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMeanRowSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMeanRowSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsMeanRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMeanRowSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMeanRowSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMeanRowSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMeanRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMeanRowSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMeanRowSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMeanRowSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics mean row size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMeanRowSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family 
metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsMeanRowSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics mean row size by name get params +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_by_name_get_responses.go new file mode 100644 index 00000000000..d814ff9322b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMeanRowSizeByNameGetReader is a Reader for the ColumnFamilyMetricsMeanRowSizeByNameGet structure. +type ColumnFamilyMetricsMeanRowSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMeanRowSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMeanRowSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMeanRowSizeByNameGetOK creates a ColumnFamilyMetricsMeanRowSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsMeanRowSizeByNameGetOK() *ColumnFamilyMetricsMeanRowSizeByNameGetOK { + return &ColumnFamilyMetricsMeanRowSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsMeanRowSizeByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsMeanRowSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMeanRowSizeByNameGetDefault creates a ColumnFamilyMetricsMeanRowSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsMeanRowSizeByNameGetDefault(code int) *ColumnFamilyMetricsMeanRowSizeByNameGetDefault { + return &ColumnFamilyMetricsMeanRowSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMeanRowSizeByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsMeanRowSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics mean row size by name get default response +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMeanRowSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_get_parameters.go new file mode 100644 index 00000000000..ef9f14fcd3c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMeanRowSizeGetParams creates a new ColumnFamilyMetricsMeanRowSizeGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMeanRowSizeGetParams() *ColumnFamilyMetricsMeanRowSizeGetParams { + + return &ColumnFamilyMetricsMeanRowSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMeanRowSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsMeanRowSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMeanRowSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMeanRowSizeGetParams { + + return &ColumnFamilyMetricsMeanRowSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMeanRowSizeGetParamsWithContext creates a new ColumnFamilyMetricsMeanRowSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMeanRowSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMeanRowSizeGetParams { + + return &ColumnFamilyMetricsMeanRowSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMeanRowSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMeanRowSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMeanRowSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMeanRowSizeGetParams { + + return &ColumnFamilyMetricsMeanRowSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMeanRowSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics mean row size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMeanRowSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics mean row size get params +func (o *ColumnFamilyMetricsMeanRowSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMeanRowSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics mean row size get params +func (o *ColumnFamilyMetricsMeanRowSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics mean row size get params +func (o *ColumnFamilyMetricsMeanRowSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMeanRowSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics mean row size get params +func (o *ColumnFamilyMetricsMeanRowSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics mean row size get params +func (o *ColumnFamilyMetricsMeanRowSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMeanRowSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics mean row size get params +func (o *ColumnFamilyMetricsMeanRowSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMeanRowSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_get_responses.go new file mode 100644 index 00000000000..14e0167ca0c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_mean_row_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMeanRowSizeGetReader is a Reader for the ColumnFamilyMetricsMeanRowSizeGet structure. +type ColumnFamilyMetricsMeanRowSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMeanRowSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMeanRowSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMeanRowSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMeanRowSizeGetOK creates a ColumnFamilyMetricsMeanRowSizeGetOK with default headers values +func NewColumnFamilyMetricsMeanRowSizeGetOK() *ColumnFamilyMetricsMeanRowSizeGetOK { + return &ColumnFamilyMetricsMeanRowSizeGetOK{} +} + +/* +ColumnFamilyMetricsMeanRowSizeGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMeanRowSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMeanRowSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMeanRowSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMeanRowSizeGetDefault creates a ColumnFamilyMetricsMeanRowSizeGetDefault with default headers values +func NewColumnFamilyMetricsMeanRowSizeGetDefault(code int) *ColumnFamilyMetricsMeanRowSizeGetDefault { + return &ColumnFamilyMetricsMeanRowSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMeanRowSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMeanRowSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics mean row size get default response +func (o *ColumnFamilyMetricsMeanRowSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMeanRowSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMeanRowSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMeanRowSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_by_name_get_parameters.go new file mode 100644 index 00000000000..1c13095e4d1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableColumnsCountByNameGetParams creates a new ColumnFamilyMetricsMemtableColumnsCountByNameGetParams object +// with the default values initialized. 
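One detail worth noting in the constructors above: NewXParams seeds the timeout with cr.DefaultTimeout, but the WithContext and WithHTTPClient variants leave it at the zero value, so unless the surrounding runtime supplies one, a deadline has to come from the context or an explicit WithTimeout. A short sketch against the mean-row-size parameters:

package metricsexample

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// meanRowSizeParams ties the request to the caller's context. The
// context constructor does not set cr.DefaultTimeout, so an explicit
// per-request timeout is added as well.
func meanRowSizeParams(ctx context.Context) *operations.ColumnFamilyMetricsMeanRowSizeGetParams {
	return operations.
		NewColumnFamilyMetricsMeanRowSizeGetParamsWithContext(ctx).
		WithTimeout(10 * time.Second)
}

Callers would typically pair this with context.WithTimeout and a deferred cancel, letting either the context deadline or the request timeout fire first.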
+func NewColumnFamilyMetricsMemtableColumnsCountByNameGetParams() *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableColumnsCountByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableColumnsCountByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableColumnsCountByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableColumnsCountByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountByNameGetParamsWithContext creates a new ColumnFamilyMetricsMemtableColumnsCountByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableColumnsCountByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableColumnsCountByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableColumnsCountByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableColumnsCountByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableColumnsCountByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableColumnsCountByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable columns count by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableColumnsCountByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + 
o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) WithName(name string) *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics memtable columns count by name get params +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_by_name_get_responses.go new file mode 100644 index 00000000000..f9a099dea70 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableColumnsCountByNameGetReader is a Reader for the ColumnFamilyMetricsMemtableColumnsCountByNameGet structure. +type ColumnFamilyMetricsMemtableColumnsCountByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableColumnsCountByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableColumnsCountByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountByNameGetOK creates a ColumnFamilyMetricsMemtableColumnsCountByNameGetOK with default headers values +func NewColumnFamilyMetricsMemtableColumnsCountByNameGetOK() *ColumnFamilyMetricsMemtableColumnsCountByNameGetOK { + return &ColumnFamilyMetricsMemtableColumnsCountByNameGetOK{} +} + +/* +ColumnFamilyMetricsMemtableColumnsCountByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMemtableColumnsCountByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableColumnsCountByNameGetDefault creates a ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault with default headers values +func NewColumnFamilyMetricsMemtableColumnsCountByNameGetDefault(code int) *ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault { + return &ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable columns count by name get default response +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_get_parameters.go new file mode 100644 index 00000000000..467df3de944 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableColumnsCountGetParams creates a new ColumnFamilyMetricsMemtableColumnsCountGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableColumnsCountGetParams() *ColumnFamilyMetricsMemtableColumnsCountGetParams { + + return &ColumnFamilyMetricsMemtableColumnsCountGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableColumnsCountGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableColumnsCountGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableColumnsCountGetParams { + + return &ColumnFamilyMetricsMemtableColumnsCountGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountGetParamsWithContext creates a new ColumnFamilyMetricsMemtableColumnsCountGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableColumnsCountGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableColumnsCountGetParams { + + return &ColumnFamilyMetricsMemtableColumnsCountGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableColumnsCountGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableColumnsCountGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableColumnsCountGetParams { + + return &ColumnFamilyMetricsMemtableColumnsCountGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableColumnsCountGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable columns count get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableColumnsCountGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable columns count get params +func (o *ColumnFamilyMetricsMemtableColumnsCountGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableColumnsCountGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable columns count get params +func (o *ColumnFamilyMetricsMemtableColumnsCountGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable columns count get params +func (o *ColumnFamilyMetricsMemtableColumnsCountGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableColumnsCountGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable columns count get params +func (o *ColumnFamilyMetricsMemtableColumnsCountGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable columns count get params +func (o *ColumnFamilyMetricsMemtableColumnsCountGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableColumnsCountGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable columns count get params +func (o *ColumnFamilyMetricsMemtableColumnsCountGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request 
+func (o *ColumnFamilyMetricsMemtableColumnsCountGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_get_responses.go new file mode 100644 index 00000000000..119e5fd6ade --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_columns_count_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableColumnsCountGetReader is a Reader for the ColumnFamilyMetricsMemtableColumnsCountGet structure. +type ColumnFamilyMetricsMemtableColumnsCountGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMemtableColumnsCountGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableColumnsCountGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableColumnsCountGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableColumnsCountGetOK creates a ColumnFamilyMetricsMemtableColumnsCountGetOK with default headers values +func NewColumnFamilyMetricsMemtableColumnsCountGetOK() *ColumnFamilyMetricsMemtableColumnsCountGetOK { + return &ColumnFamilyMetricsMemtableColumnsCountGetOK{} +} + +/* +ColumnFamilyMetricsMemtableColumnsCountGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsMemtableColumnsCountGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableColumnsCountGetDefault creates a ColumnFamilyMetricsMemtableColumnsCountGetDefault with default headers values +func NewColumnFamilyMetricsMemtableColumnsCountGetDefault(code int) *ColumnFamilyMetricsMemtableColumnsCountGetDefault { + return &ColumnFamilyMetricsMemtableColumnsCountGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableColumnsCountGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsMemtableColumnsCountGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable columns count get default response +func (o *ColumnFamilyMetricsMemtableColumnsCountGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableColumnsCountGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_by_name_get_parameters.go new file mode 100644 index 00000000000..c45dee74fee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams creates a new ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams object +// with the default values initialized. 
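Two behaviors of the generated readers above are easy to miss. First, ReadResponse treats only 200 as the typed OK; every other status is decoded into the Default type, but an unexpected 2xx (response.Code()/100 == 2) is still returned as a success value rather than an error. Second, the OK payload is a bare interface{} because the spec leaves the body untyped; with a JSON consumer a numeric body typically arrives as json.Number, though that is consumer-dependent and an assumption here, not something this diff pins down. A defensive sketch:

package metricsexample

import (
	"encoding/json"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// memtableColumnsCount converts the untyped payload to an int64,
// tolerating the decodings we might plausibly see.
func memtableColumnsCount(ok *operations.ColumnFamilyMetricsMemtableColumnsCountGetOK) (int64, error) {
	switch v := ok.Payload.(type) {
	case json.Number: // go-openapi JSON consumer, assuming UseNumber decoding
		return v.Int64()
	case float64: // plain encoding/json default for numbers
		return int64(v), nil
	default:
		return 0, fmt.Errorf("unexpected payload type %T", ok.Payload)
	}
}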
+func NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams() *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable live data size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { 
+ o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics memtable live data size by name get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_by_name_get_responses.go new file mode 100644 index 00000000000..cbe801915ee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableLiveDataSizeByNameGetReader is a Reader for the ColumnFamilyMetricsMemtableLiveDataSizeByNameGet structure. +type ColumnFamilyMetricsMemtableLiveDataSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK creates a ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK() *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK { + return &ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault creates a ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault(code int) *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault { + return &ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable live data size by name get default response +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_get_parameters.go new file mode 100644 index 00000000000..993c6fdaacc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableLiveDataSizeGetParams creates a new ColumnFamilyMetricsMemtableLiveDataSizeGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableLiveDataSizeGetParams() *ColumnFamilyMetricsMemtableLiveDataSizeGetParams { + + return &ColumnFamilyMetricsMemtableLiveDataSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableLiveDataSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableLiveDataSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableLiveDataSizeGetParams { + + return &ColumnFamilyMetricsMemtableLiveDataSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeGetParamsWithContext creates a new ColumnFamilyMetricsMemtableLiveDataSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableLiveDataSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableLiveDataSizeGetParams { + + return &ColumnFamilyMetricsMemtableLiveDataSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableLiveDataSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableLiveDataSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableLiveDataSizeGetParams { + + return &ColumnFamilyMetricsMemtableLiveDataSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable live data size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableLiveDataSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable live data size get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableLiveDataSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable live data size get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable live data size get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableLiveDataSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable live data size get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable live data size get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableLiveDataSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable live data size get params +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger 
request +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_get_responses.go new file mode 100644 index 00000000000..ad81638a5ad --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_live_data_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableLiveDataSizeGetReader is a Reader for the ColumnFamilyMetricsMemtableLiveDataSizeGet structure. +type ColumnFamilyMetricsMemtableLiveDataSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableLiveDataSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableLiveDataSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeGetOK creates a ColumnFamilyMetricsMemtableLiveDataSizeGetOK with default headers values +func NewColumnFamilyMetricsMemtableLiveDataSizeGetOK() *ColumnFamilyMetricsMemtableLiveDataSizeGetOK { + return &ColumnFamilyMetricsMemtableLiveDataSizeGetOK{} +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsMemtableLiveDataSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableLiveDataSizeGetDefault creates a ColumnFamilyMetricsMemtableLiveDataSizeGetDefault with default headers values +func NewColumnFamilyMetricsMemtableLiveDataSizeGetDefault(code int) *ColumnFamilyMetricsMemtableLiveDataSizeGetDefault { + return &ColumnFamilyMetricsMemtableLiveDataSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsMemtableLiveDataSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable live data size get default response +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableLiveDataSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_by_name_get_parameters.go new file mode 100644 index 00000000000..38ef3db03e2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams creates a new ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams() *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable off heap size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + o.SetHTTPClient(client) + return 
o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics memtable off heap size by name get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_by_name_get_responses.go new file mode 100644 index 00000000000..cc41f5c09f3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableOffHeapSizeByNameGetReader is a Reader for the ColumnFamilyMetricsMemtableOffHeapSizeByNameGet structure. +type ColumnFamilyMetricsMemtableOffHeapSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK creates a ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK() *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK { + return &ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault creates a ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault(code int) *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault { + return &ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable off heap size by name get default response +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_get_parameters.go new file mode 100644 index 00000000000..4f60d022eaf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableOffHeapSizeGetParams creates a new ColumnFamilyMetricsMemtableOffHeapSizeGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableOffHeapSizeGetParams() *ColumnFamilyMetricsMemtableOffHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOffHeapSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableOffHeapSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableOffHeapSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOffHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOffHeapSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeGetParamsWithContext creates a new ColumnFamilyMetricsMemtableOffHeapSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableOffHeapSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOffHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOffHeapSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableOffHeapSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableOffHeapSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOffHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOffHeapSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable off heap size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableOffHeapSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable off heap size get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOffHeapSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable off heap size get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable off heap size get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOffHeapSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable off heap size get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable off heap size get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOffHeapSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable off heap size get params +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*ColumnFamilyMetricsMemtableOffHeapSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_get_responses.go new file mode 100644 index 00000000000..9ee1532f094 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_off_heap_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableOffHeapSizeGetReader is a Reader for the ColumnFamilyMetricsMemtableOffHeapSizeGet structure. +type ColumnFamilyMetricsMemtableOffHeapSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableOffHeapSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableOffHeapSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeGetOK creates a ColumnFamilyMetricsMemtableOffHeapSizeGetOK with default headers values +func NewColumnFamilyMetricsMemtableOffHeapSizeGetOK() *ColumnFamilyMetricsMemtableOffHeapSizeGetOK { + return &ColumnFamilyMetricsMemtableOffHeapSizeGetOK{} +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMemtableOffHeapSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableOffHeapSizeGetDefault creates a ColumnFamilyMetricsMemtableOffHeapSizeGetDefault with default headers values +func NewColumnFamilyMetricsMemtableOffHeapSizeGetDefault(code int) *ColumnFamilyMetricsMemtableOffHeapSizeGetDefault { + return &ColumnFamilyMetricsMemtableOffHeapSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMemtableOffHeapSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable off heap size get default response +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableOffHeapSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_by_name_get_parameters.go new file mode 100644 index 00000000000..acd6cc9955e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams creates a new ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams() *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable on heap size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the 
HTTPClient to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics memtable on heap size by name get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_by_name_get_responses.go new file mode 100644 index 00000000000..b1247cdc78e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableOnHeapSizeByNameGetReader is a Reader for the ColumnFamilyMetricsMemtableOnHeapSizeByNameGet structure. +type ColumnFamilyMetricsMemtableOnHeapSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK creates a ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK() *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK { + return &ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault creates a ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault(code int) *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault { + return &ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable on heap size by name get default response +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_get_parameters.go new file mode 100644 index 00000000000..d3473876f23 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableOnHeapSizeGetParams creates a new ColumnFamilyMetricsMemtableOnHeapSizeGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableOnHeapSizeGetParams() *ColumnFamilyMetricsMemtableOnHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOnHeapSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableOnHeapSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableOnHeapSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOnHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOnHeapSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeGetParamsWithContext creates a new ColumnFamilyMetricsMemtableOnHeapSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableOnHeapSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOnHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOnHeapSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableOnHeapSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableOnHeapSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOnHeapSizeGetParams { + + return &ColumnFamilyMetricsMemtableOnHeapSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable on heap size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableOnHeapSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable on heap size get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableOnHeapSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable on heap size get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable on heap size get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableOnHeapSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable on heap size get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable on heap size get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableOnHeapSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable on heap size get params +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_get_responses.go new file mode 100644 index 00000000000..ab974948a26 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_on_heap_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableOnHeapSizeGetReader is a Reader for the ColumnFamilyMetricsMemtableOnHeapSizeGet structure. +type ColumnFamilyMetricsMemtableOnHeapSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableOnHeapSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableOnHeapSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeGetOK creates a ColumnFamilyMetricsMemtableOnHeapSizeGetOK with default headers values +func NewColumnFamilyMetricsMemtableOnHeapSizeGetOK() *ColumnFamilyMetricsMemtableOnHeapSizeGetOK { + return &ColumnFamilyMetricsMemtableOnHeapSizeGetOK{} +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMemtableOnHeapSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableOnHeapSizeGetDefault creates a ColumnFamilyMetricsMemtableOnHeapSizeGetDefault with default headers values +func NewColumnFamilyMetricsMemtableOnHeapSizeGetDefault(code int) *ColumnFamilyMetricsMemtableOnHeapSizeGetDefault { + return &ColumnFamilyMetricsMemtableOnHeapSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMemtableOnHeapSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable on heap size get default response +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableOnHeapSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_by_name_get_parameters.go new file mode 100644 index 00000000000..8ac0e9bde0c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableSwitchCountByNameGetParams creates a new ColumnFamilyMetricsMemtableSwitchCountByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableSwitchCountByNameGetParams() *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableSwitchCountByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableSwitchCountByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableSwitchCountByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableSwitchCountByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountByNameGetParamsWithContext creates a new ColumnFamilyMetricsMemtableSwitchCountByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableSwitchCountByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableSwitchCountByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableSwitchCountByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableSwitchCountByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + var () + return &ColumnFamilyMetricsMemtableSwitchCountByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableSwitchCountByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable switch count by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableSwitchCountByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + o.SetHTTPClient(client) + return o +} 
+ +// SetHTTPClient adds the HTTPClient to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) WithName(name string) *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics memtable switch count by name get params +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_by_name_get_responses.go new file mode 100644 index 00000000000..8ce5b801ba6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableSwitchCountByNameGetReader is a Reader for the ColumnFamilyMetricsMemtableSwitchCountByNameGet structure. +type ColumnFamilyMetricsMemtableSwitchCountByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableSwitchCountByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableSwitchCountByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountByNameGetOK creates a ColumnFamilyMetricsMemtableSwitchCountByNameGetOK with default headers values +func NewColumnFamilyMetricsMemtableSwitchCountByNameGetOK() *ColumnFamilyMetricsMemtableSwitchCountByNameGetOK { + return &ColumnFamilyMetricsMemtableSwitchCountByNameGetOK{} +} + +/* +ColumnFamilyMetricsMemtableSwitchCountByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsMemtableSwitchCountByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableSwitchCountByNameGetDefault creates a ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault with default headers values +func NewColumnFamilyMetricsMemtableSwitchCountByNameGetDefault(code int) *ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault { + return &ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable switch count by name get default response +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_get_parameters.go new file mode 100644 index 00000000000..b090be77b3c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMemtableSwitchCountGetParams creates a new ColumnFamilyMetricsMemtableSwitchCountGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMemtableSwitchCountGetParams() *ColumnFamilyMetricsMemtableSwitchCountGetParams { + + return &ColumnFamilyMetricsMemtableSwitchCountGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountGetParamsWithTimeout creates a new ColumnFamilyMetricsMemtableSwitchCountGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMemtableSwitchCountGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableSwitchCountGetParams { + + return &ColumnFamilyMetricsMemtableSwitchCountGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountGetParamsWithContext creates a new ColumnFamilyMetricsMemtableSwitchCountGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMemtableSwitchCountGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMemtableSwitchCountGetParams { + + return &ColumnFamilyMetricsMemtableSwitchCountGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMemtableSwitchCountGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMemtableSwitchCountGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableSwitchCountGetParams { + + return &ColumnFamilyMetricsMemtableSwitchCountGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMemtableSwitchCountGetParams contains all the parameters to send to the API endpoint +for the column family metrics memtable switch count get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMemtableSwitchCountGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics memtable switch count get params +func (o *ColumnFamilyMetricsMemtableSwitchCountGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMemtableSwitchCountGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics memtable switch count get params +func (o *ColumnFamilyMetricsMemtableSwitchCountGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics memtable switch count get params +func (o *ColumnFamilyMetricsMemtableSwitchCountGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMemtableSwitchCountGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics memtable switch count get params +func (o *ColumnFamilyMetricsMemtableSwitchCountGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics memtable switch count get params +func (o *ColumnFamilyMetricsMemtableSwitchCountGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMemtableSwitchCountGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics memtable switch count get params +func (o *ColumnFamilyMetricsMemtableSwitchCountGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*ColumnFamilyMetricsMemtableSwitchCountGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_get_responses.go new file mode 100644 index 00000000000..49830e41377 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_memtable_switch_count_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMemtableSwitchCountGetReader is a Reader for the ColumnFamilyMetricsMemtableSwitchCountGet structure. +type ColumnFamilyMetricsMemtableSwitchCountGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMemtableSwitchCountGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMemtableSwitchCountGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMemtableSwitchCountGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMemtableSwitchCountGetOK creates a ColumnFamilyMetricsMemtableSwitchCountGetOK with default headers values +func NewColumnFamilyMetricsMemtableSwitchCountGetOK() *ColumnFamilyMetricsMemtableSwitchCountGetOK { + return &ColumnFamilyMetricsMemtableSwitchCountGetOK{} +} + +/* +ColumnFamilyMetricsMemtableSwitchCountGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMemtableSwitchCountGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMemtableSwitchCountGetDefault creates a ColumnFamilyMetricsMemtableSwitchCountGetDefault with default headers values +func NewColumnFamilyMetricsMemtableSwitchCountGetDefault(code int) *ColumnFamilyMetricsMemtableSwitchCountGetDefault { + return &ColumnFamilyMetricsMemtableSwitchCountGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMemtableSwitchCountGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMemtableSwitchCountGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics memtable switch count get default response +func (o *ColumnFamilyMetricsMemtableSwitchCountGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMemtableSwitchCountGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_by_name_get_parameters.go new file mode 100644 index 00000000000..556f1492e97 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMinRowSizeByNameGetParams creates a new ColumnFamilyMetricsMinRowSizeByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMinRowSizeByNameGetParams() *ColumnFamilyMetricsMinRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMinRowSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMinRowSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsMinRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMinRowSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMinRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMinRowSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMinRowSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsMinRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMinRowSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMinRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMinRowSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMinRowSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMinRowSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMinRowSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMinRowSizeByNameGetParams { + var () + return &ColumnFamilyMetricsMinRowSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMinRowSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics min row size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMinRowSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics min row size by name get params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMinRowSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics min row size by name get params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics min row size by name get params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMinRowSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics min row size by name get params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics min row size by name get params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMinRowSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics min row size by name get params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics min row size by name get 
params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsMinRowSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics min row size by name get params +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMinRowSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_by_name_get_responses.go new file mode 100644 index 00000000000..d48ab89f67e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMinRowSizeByNameGetReader is a Reader for the ColumnFamilyMetricsMinRowSizeByNameGet structure. +type ColumnFamilyMetricsMinRowSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMinRowSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMinRowSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMinRowSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMinRowSizeByNameGetOK creates a ColumnFamilyMetricsMinRowSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsMinRowSizeByNameGetOK() *ColumnFamilyMetricsMinRowSizeByNameGetOK { + return &ColumnFamilyMetricsMinRowSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsMinRowSizeByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsMinRowSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMinRowSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMinRowSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMinRowSizeByNameGetDefault creates a ColumnFamilyMetricsMinRowSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsMinRowSizeByNameGetDefault(code int) *ColumnFamilyMetricsMinRowSizeByNameGetDefault { + return &ColumnFamilyMetricsMinRowSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMinRowSizeByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsMinRowSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics min row size by name get default response +func (o *ColumnFamilyMetricsMinRowSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMinRowSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMinRowSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMinRowSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_get_parameters.go new file mode 100644 index 00000000000..59f4010f05f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsMinRowSizeGetParams creates a new ColumnFamilyMetricsMinRowSizeGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsMinRowSizeGetParams() *ColumnFamilyMetricsMinRowSizeGetParams { + + return &ColumnFamilyMetricsMinRowSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsMinRowSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsMinRowSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsMinRowSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsMinRowSizeGetParams { + + return &ColumnFamilyMetricsMinRowSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsMinRowSizeGetParamsWithContext creates a new ColumnFamilyMetricsMinRowSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsMinRowSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsMinRowSizeGetParams { + + return &ColumnFamilyMetricsMinRowSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsMinRowSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsMinRowSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsMinRowSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsMinRowSizeGetParams { + + return &ColumnFamilyMetricsMinRowSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsMinRowSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics min row size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsMinRowSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics min row size get params +func (o *ColumnFamilyMetricsMinRowSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsMinRowSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics min row size get params +func (o *ColumnFamilyMetricsMinRowSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics min row size get params +func (o *ColumnFamilyMetricsMinRowSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsMinRowSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics min row size get params +func (o *ColumnFamilyMetricsMinRowSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics min row size get params +func (o *ColumnFamilyMetricsMinRowSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsMinRowSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics min row size get params +func (o *ColumnFamilyMetricsMinRowSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsMinRowSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_get_responses.go new file mode 100644 index 00000000000..79e4688642c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_min_row_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsMinRowSizeGetReader is a Reader for the ColumnFamilyMetricsMinRowSizeGet structure. +type ColumnFamilyMetricsMinRowSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsMinRowSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsMinRowSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsMinRowSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsMinRowSizeGetOK creates a ColumnFamilyMetricsMinRowSizeGetOK with default headers values +func NewColumnFamilyMetricsMinRowSizeGetOK() *ColumnFamilyMetricsMinRowSizeGetOK { + return &ColumnFamilyMetricsMinRowSizeGetOK{} +} + +/* +ColumnFamilyMetricsMinRowSizeGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsMinRowSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsMinRowSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsMinRowSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsMinRowSizeGetDefault creates a ColumnFamilyMetricsMinRowSizeGetDefault with default headers values +func NewColumnFamilyMetricsMinRowSizeGetDefault(code int) *ColumnFamilyMetricsMinRowSizeGetDefault { + return &ColumnFamilyMetricsMinRowSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsMinRowSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsMinRowSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics min row size get default response +func (o *ColumnFamilyMetricsMinRowSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsMinRowSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsMinRowSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsMinRowSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_by_name_get_parameters.go new file mode 100644 index 00000000000..5ff9f845e3f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsPendingCompactionsByNameGetParams creates a new ColumnFamilyMetricsPendingCompactionsByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsPendingCompactionsByNameGetParams() *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + var () + return &ColumnFamilyMetricsPendingCompactionsByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsPendingCompactionsByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsPendingCompactionsByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsPendingCompactionsByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + var () + return &ColumnFamilyMetricsPendingCompactionsByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsPendingCompactionsByNameGetParamsWithContext creates a new ColumnFamilyMetricsPendingCompactionsByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsPendingCompactionsByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + var () + return &ColumnFamilyMetricsPendingCompactionsByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsPendingCompactionsByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsPendingCompactionsByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsPendingCompactionsByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + var () + return &ColumnFamilyMetricsPendingCompactionsByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsPendingCompactionsByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics pending compactions by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsPendingCompactionsByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient 
to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) WithName(name string) *ColumnFamilyMetricsPendingCompactionsByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics pending compactions by name get params +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_by_name_get_responses.go new file mode 100644 index 00000000000..58646565751 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsPendingCompactionsByNameGetReader is a Reader for the ColumnFamilyMetricsPendingCompactionsByNameGet structure. +type ColumnFamilyMetricsPendingCompactionsByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsPendingCompactionsByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsPendingCompactionsByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsPendingCompactionsByNameGetOK creates a ColumnFamilyMetricsPendingCompactionsByNameGetOK with default headers values +func NewColumnFamilyMetricsPendingCompactionsByNameGetOK() *ColumnFamilyMetricsPendingCompactionsByNameGetOK { + return &ColumnFamilyMetricsPendingCompactionsByNameGetOK{} +} + +/* +ColumnFamilyMetricsPendingCompactionsByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsPendingCompactionsByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsPendingCompactionsByNameGetDefault creates a ColumnFamilyMetricsPendingCompactionsByNameGetDefault with default headers values +func NewColumnFamilyMetricsPendingCompactionsByNameGetDefault(code int) *ColumnFamilyMetricsPendingCompactionsByNameGetDefault { + return &ColumnFamilyMetricsPendingCompactionsByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsPendingCompactionsByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsPendingCompactionsByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics pending compactions by name get default response +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsPendingCompactionsByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_get_parameters.go new file mode 100644 index 00000000000..dd486883c54 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsPendingCompactionsGetParams creates a new ColumnFamilyMetricsPendingCompactionsGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsPendingCompactionsGetParams() *ColumnFamilyMetricsPendingCompactionsGetParams { + + return &ColumnFamilyMetricsPendingCompactionsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsPendingCompactionsGetParamsWithTimeout creates a new ColumnFamilyMetricsPendingCompactionsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsPendingCompactionsGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingCompactionsGetParams { + + return &ColumnFamilyMetricsPendingCompactionsGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsPendingCompactionsGetParamsWithContext creates a new ColumnFamilyMetricsPendingCompactionsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsPendingCompactionsGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsPendingCompactionsGetParams { + + return &ColumnFamilyMetricsPendingCompactionsGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsPendingCompactionsGetParamsWithHTTPClient creates a new ColumnFamilyMetricsPendingCompactionsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsPendingCompactionsGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingCompactionsGetParams { + + return &ColumnFamilyMetricsPendingCompactionsGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsPendingCompactionsGetParams contains all the parameters to send to the API endpoint +for the column family metrics pending compactions get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsPendingCompactionsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics pending compactions get params +func (o *ColumnFamilyMetricsPendingCompactionsGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingCompactionsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics pending compactions get params +func (o *ColumnFamilyMetricsPendingCompactionsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics pending compactions get params +func (o *ColumnFamilyMetricsPendingCompactionsGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsPendingCompactionsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics pending compactions get params +func (o *ColumnFamilyMetricsPendingCompactionsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics pending compactions get params +func (o *ColumnFamilyMetricsPendingCompactionsGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingCompactionsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics pending compactions get params +func (o *ColumnFamilyMetricsPendingCompactionsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsPendingCompactionsGetParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_get_responses.go new file mode 100644 index 00000000000..06ef0475b37 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_compactions_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsPendingCompactionsGetReader is a Reader for the ColumnFamilyMetricsPendingCompactionsGet structure. +type ColumnFamilyMetricsPendingCompactionsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsPendingCompactionsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsPendingCompactionsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsPendingCompactionsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsPendingCompactionsGetOK creates a ColumnFamilyMetricsPendingCompactionsGetOK with default headers values +func NewColumnFamilyMetricsPendingCompactionsGetOK() *ColumnFamilyMetricsPendingCompactionsGetOK { + return &ColumnFamilyMetricsPendingCompactionsGetOK{} +} + +/* +ColumnFamilyMetricsPendingCompactionsGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsPendingCompactionsGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsPendingCompactionsGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingCompactionsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsPendingCompactionsGetDefault creates a ColumnFamilyMetricsPendingCompactionsGetDefault with default headers values +func NewColumnFamilyMetricsPendingCompactionsGetDefault(code int) *ColumnFamilyMetricsPendingCompactionsGetDefault { + return &ColumnFamilyMetricsPendingCompactionsGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsPendingCompactionsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsPendingCompactionsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics pending compactions get default response +func (o *ColumnFamilyMetricsPendingCompactionsGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsPendingCompactionsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingCompactionsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsPendingCompactionsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_by_name_get_parameters.go new file mode 100644 index 00000000000..7f137a313d7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsPendingFlushesByNameGetParams creates a new ColumnFamilyMetricsPendingFlushesByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsPendingFlushesByNameGetParams() *ColumnFamilyMetricsPendingFlushesByNameGetParams { + var () + return &ColumnFamilyMetricsPendingFlushesByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsPendingFlushesByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsPendingFlushesByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsPendingFlushesByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingFlushesByNameGetParams { + var () + return &ColumnFamilyMetricsPendingFlushesByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsPendingFlushesByNameGetParamsWithContext creates a new ColumnFamilyMetricsPendingFlushesByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsPendingFlushesByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsPendingFlushesByNameGetParams { + var () + return &ColumnFamilyMetricsPendingFlushesByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsPendingFlushesByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsPendingFlushesByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsPendingFlushesByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingFlushesByNameGetParams { + var () + return &ColumnFamilyMetricsPendingFlushesByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsPendingFlushesByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics pending flushes by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsPendingFlushesByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingFlushesByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsPendingFlushesByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingFlushesByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) WithName(name string) *ColumnFamilyMetricsPendingFlushesByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics pending flushes by name get params +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsPendingFlushesByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_by_name_get_responses.go new file mode 100644 index 00000000000..8e43ed345c9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsPendingFlushesByNameGetReader is a Reader for the ColumnFamilyMetricsPendingFlushesByNameGet structure. +type ColumnFamilyMetricsPendingFlushesByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsPendingFlushesByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsPendingFlushesByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsPendingFlushesByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsPendingFlushesByNameGetOK creates a ColumnFamilyMetricsPendingFlushesByNameGetOK with default headers values +func NewColumnFamilyMetricsPendingFlushesByNameGetOK() *ColumnFamilyMetricsPendingFlushesByNameGetOK { + return &ColumnFamilyMetricsPendingFlushesByNameGetOK{} +} + +/* +ColumnFamilyMetricsPendingFlushesByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsPendingFlushesByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsPendingFlushesByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingFlushesByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsPendingFlushesByNameGetDefault creates a ColumnFamilyMetricsPendingFlushesByNameGetDefault with default headers values +func NewColumnFamilyMetricsPendingFlushesByNameGetDefault(code int) *ColumnFamilyMetricsPendingFlushesByNameGetDefault { + return &ColumnFamilyMetricsPendingFlushesByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsPendingFlushesByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsPendingFlushesByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics pending flushes by name get default response +func (o *ColumnFamilyMetricsPendingFlushesByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsPendingFlushesByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingFlushesByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsPendingFlushesByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_get_parameters.go new file mode 100644 index 00000000000..9b974476345 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsPendingFlushesGetParams creates a new ColumnFamilyMetricsPendingFlushesGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsPendingFlushesGetParams() *ColumnFamilyMetricsPendingFlushesGetParams { + + return &ColumnFamilyMetricsPendingFlushesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsPendingFlushesGetParamsWithTimeout creates a new ColumnFamilyMetricsPendingFlushesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsPendingFlushesGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingFlushesGetParams { + + return &ColumnFamilyMetricsPendingFlushesGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsPendingFlushesGetParamsWithContext creates a new ColumnFamilyMetricsPendingFlushesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsPendingFlushesGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsPendingFlushesGetParams { + + return &ColumnFamilyMetricsPendingFlushesGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsPendingFlushesGetParamsWithHTTPClient creates a new ColumnFamilyMetricsPendingFlushesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsPendingFlushesGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingFlushesGetParams { + + return &ColumnFamilyMetricsPendingFlushesGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsPendingFlushesGetParams contains all the parameters to send to the API endpoint +for the column family metrics pending flushes get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsPendingFlushesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics pending flushes get params +func (o *ColumnFamilyMetricsPendingFlushesGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsPendingFlushesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics pending flushes get params +func (o *ColumnFamilyMetricsPendingFlushesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics pending flushes get params +func (o *ColumnFamilyMetricsPendingFlushesGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsPendingFlushesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics pending flushes get params +func (o *ColumnFamilyMetricsPendingFlushesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics pending flushes get params +func (o *ColumnFamilyMetricsPendingFlushesGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsPendingFlushesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics pending flushes get params +func (o *ColumnFamilyMetricsPendingFlushesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsPendingFlushesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 
0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_get_responses.go new file mode 100644 index 00000000000..fc64f4e04fd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_pending_flushes_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsPendingFlushesGetReader is a Reader for the ColumnFamilyMetricsPendingFlushesGet structure. +type ColumnFamilyMetricsPendingFlushesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsPendingFlushesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsPendingFlushesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsPendingFlushesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsPendingFlushesGetOK creates a ColumnFamilyMetricsPendingFlushesGetOK with default headers values +func NewColumnFamilyMetricsPendingFlushesGetOK() *ColumnFamilyMetricsPendingFlushesGetOK { + return &ColumnFamilyMetricsPendingFlushesGetOK{} +} + +/* +ColumnFamilyMetricsPendingFlushesGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsPendingFlushesGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsPendingFlushesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingFlushesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsPendingFlushesGetDefault creates a ColumnFamilyMetricsPendingFlushesGetDefault with default headers values +func NewColumnFamilyMetricsPendingFlushesGetDefault(code int) *ColumnFamilyMetricsPendingFlushesGetDefault { + return &ColumnFamilyMetricsPendingFlushesGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsPendingFlushesGetDefault handles this case with default header values. 
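The ReadResponse switch above is the dispatch contract shared by every reader in this diff: HTTP 200 decodes into the *OK type, anything else into the *Default type carrying the status code, and non-2xx defaults are returned as the error (each *Default implements error via the Error method at the bottom of its file). A hedged sketch of the caller-side branching; the function just unpacks a result pair like the one the generated client methods return:

package example

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// pendingFlushes unpacks a go-swagger result pair. On failure the error is
// typically the *Default response itself, so errors.As recovers the status
// code and the decoded models.ErrorModel payload.
func pendingFlushes(ok *operations.ColumnFamilyMetricsPendingFlushesGetOK, err error) (int32, error) {
	if err != nil {
		var def *operations.ColumnFamilyMetricsPendingFlushesGetDefault
		if errors.As(err, &def) {
			return 0, fmt.Errorf("agent returned HTTP %d: %w", def.Code(), def)
		}
		return 0, err
	}
	return ok.GetPayload(), nil
}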
+ +internal server error +*/ +type ColumnFamilyMetricsPendingFlushesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics pending flushes get default response +func (o *ColumnFamilyMetricsPendingFlushesGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsPendingFlushesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsPendingFlushesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsPendingFlushesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_by_name_get_parameters.go new file mode 100644 index 00000000000..8fac8b5eeb7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRangeLatencyByNameGetParams creates a new ColumnFamilyMetricsRangeLatencyByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsRangeLatencyByNameGetParams() *ColumnFamilyMetricsRangeLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRangeLatencyByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRangeLatencyByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyByNameGetParamsWithContext creates a new ColumnFamilyMetricsRangeLatencyByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRangeLatencyByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRangeLatencyByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRangeLatencyByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRangeLatencyByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRangeLatencyByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics range latency by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRangeLatencyByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds 
the name to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) WithName(name string) *ColumnFamilyMetricsRangeLatencyByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics range latency by name get params +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRangeLatencyByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_by_name_get_responses.go new file mode 100644 index 00000000000..cf6845b2090 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRangeLatencyByNameGetReader is a Reader for the ColumnFamilyMetricsRangeLatencyByNameGet structure. +type ColumnFamilyMetricsRangeLatencyByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRangeLatencyByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRangeLatencyByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRangeLatencyByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRangeLatencyByNameGetOK creates a ColumnFamilyMetricsRangeLatencyByNameGetOK with default headers values +func NewColumnFamilyMetricsRangeLatencyByNameGetOK() *ColumnFamilyMetricsRangeLatencyByNameGetOK { + return &ColumnFamilyMetricsRangeLatencyByNameGetOK{} +} + +/* +ColumnFamilyMetricsRangeLatencyByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsRangeLatencyByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRangeLatencyByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRangeLatencyByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRangeLatencyByNameGetDefault creates a ColumnFamilyMetricsRangeLatencyByNameGetDefault with default headers values +func NewColumnFamilyMetricsRangeLatencyByNameGetDefault(code int) *ColumnFamilyMetricsRangeLatencyByNameGetDefault { + return &ColumnFamilyMetricsRangeLatencyByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRangeLatencyByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsRangeLatencyByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics range latency by name get default response +func (o *ColumnFamilyMetricsRangeLatencyByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRangeLatencyByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRangeLatencyByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRangeLatencyByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..d908a041aff --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams creates a new ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized. 
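For orientation, this is roughly how the params and responses files pair up end to end. The operations.New constructor and the ColumnFamilyMetricsRangeLatencyByNameGet client method are assumed from go-swagger's standard output (the operations client file sits elsewhere in this diff); only the sketch below is the editor's, not the generator's:

package example

import (
	"context"

	"github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// rangeLatency sketches a full round trip against a node's REST API.
func rangeLatency(ctx context.Context, host, name string) (int32, error) {
	transport := client.New(host, "/", []string{"http"})
	ops := operations.New(transport, strfmt.Default)

	res, err := ops.ColumnFamilyMetricsRangeLatencyByNameGet(
		operations.NewColumnFamilyMetricsRangeLatencyByNameGetParams().
			WithContext(ctx).
			WithName(name),
	)
	if err != nil {
		return 0, err
	}
	return res.GetPayload(), nil
}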
+func NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams() *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics range latency estimated histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) SetContext(ctx 
context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics range latency estimated histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..4fa680e25d8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGet structure. +type ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK creates a ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK() *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK { + return &ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault creates a ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault { + return &ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault handles this case with default header values. 
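Note that, unlike the int32-payload OK types earlier in this hunk, ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK is an empty struct and its readResponse is a no-op: success is signaled by the type alone. A caller therefore only inspects the error; a small sketch (the client method on *operations.Client is assumed from the generated operations client, not shown in this hunk):

package example

import "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"

// estimatedHistogram discards the payload-less OK value; the error carries
// all the signal for this endpoint.
func estimatedHistogram(ops *operations.Client, name string) error {
	_, err := ops.ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGet(
		operations.NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams().WithName(name))
	return err
}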
+ +internal server error +*/ +type ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics range latency estimated histogram by name get default response +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_recent_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_recent_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..f2790feec9e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_recent_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams creates a new ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams() *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics range latency estimated recent histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the 
context to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics range latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_recent_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_recent_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..5dcb37c0785 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_estimated_recent_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGet structure. +type ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK creates a ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK() *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK { + return &ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault creates a ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault(code int) *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault { + return &ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics range latency estimated recent histogram by name get default response +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_get_parameters.go new file mode 100644 index 00000000000..5bcb289314d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRangeLatencyGetParams creates a new ColumnFamilyMetricsRangeLatencyGetParams object +// with the default values initialized. 
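Every params constructor in this hunk seeds timeout with cr.DefaultTimeout from go-openapi/runtime/client, and WriteToRequest applies it to the outgoing request via SetTimeout. A context deadline, when supplied, also bounds the call, so a common pattern is to keep the two in step; a sketch under that assumption (the 10-second value is illustrative):

package example

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// withDeadline shows the two knobs the generated params expose. The caller
// must invoke the returned cancel func once the request completes.
func withDeadline(parent context.Context) (*operations.ColumnFamilyMetricsRangeLatencyGetParams, context.CancelFunc) {
	ctx, cancel := context.WithTimeout(parent, 10*time.Second)
	params := operations.NewColumnFamilyMetricsRangeLatencyGetParamsWithContext(ctx).
		WithTimeout(10 * time.Second) // mirrors the context deadline; cr.DefaultTimeout otherwise
	return params, cancel
}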
+func NewColumnFamilyMetricsRangeLatencyGetParams() *ColumnFamilyMetricsRangeLatencyGetParams { + + return &ColumnFamilyMetricsRangeLatencyGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyGetParamsWithTimeout creates a new ColumnFamilyMetricsRangeLatencyGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRangeLatencyGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyGetParams { + + return &ColumnFamilyMetricsRangeLatencyGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRangeLatencyGetParamsWithContext creates a new ColumnFamilyMetricsRangeLatencyGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRangeLatencyGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyGetParams { + + return &ColumnFamilyMetricsRangeLatencyGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRangeLatencyGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRangeLatencyGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRangeLatencyGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyGetParams { + + return &ColumnFamilyMetricsRangeLatencyGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRangeLatencyGetParams contains all the parameters to send to the API endpoint +for the column family metrics range latency get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRangeLatencyGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics range latency get params +func (o *ColumnFamilyMetricsRangeLatencyGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRangeLatencyGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics range latency get params +func (o *ColumnFamilyMetricsRangeLatencyGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics range latency get params +func (o *ColumnFamilyMetricsRangeLatencyGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRangeLatencyGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics range latency get params +func (o *ColumnFamilyMetricsRangeLatencyGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics range latency get params +func (o *ColumnFamilyMetricsRangeLatencyGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRangeLatencyGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics range latency get params +func (o *ColumnFamilyMetricsRangeLatencyGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRangeLatencyGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_get_responses.go new file mode 100644 index 00000000000..e17b57dafc8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_range_latency_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRangeLatencyGetReader is a Reader for the ColumnFamilyMetricsRangeLatencyGet structure. +type ColumnFamilyMetricsRangeLatencyGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRangeLatencyGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRangeLatencyGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRangeLatencyGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRangeLatencyGetOK creates a ColumnFamilyMetricsRangeLatencyGetOK with default headers values +func NewColumnFamilyMetricsRangeLatencyGetOK() *ColumnFamilyMetricsRangeLatencyGetOK { + return &ColumnFamilyMetricsRangeLatencyGetOK{} +} + +/* +ColumnFamilyMetricsRangeLatencyGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRangeLatencyGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRangeLatencyGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRangeLatencyGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRangeLatencyGetDefault creates a ColumnFamilyMetricsRangeLatencyGetDefault with default headers values +func NewColumnFamilyMetricsRangeLatencyGetDefault(code int) *ColumnFamilyMetricsRangeLatencyGetDefault { + return &ColumnFamilyMetricsRangeLatencyGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRangeLatencyGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsRangeLatencyGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics range latency get default response +func (o *ColumnFamilyMetricsRangeLatencyGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRangeLatencyGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRangeLatencyGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRangeLatencyGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_by_name_get_parameters.go new file mode 100644 index 00000000000..4e25c6b6232 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadByNameGetParams creates a new ColumnFamilyMetricsReadByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsReadByNameGetParams() *ColumnFamilyMetricsReadByNameGetParams { + var () + return &ColumnFamilyMetricsReadByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsReadByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadByNameGetParams { + var () + return &ColumnFamilyMetricsReadByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadByNameGetParamsWithContext creates a new ColumnFamilyMetricsReadByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadByNameGetParams { + var () + return &ColumnFamilyMetricsReadByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadByNameGetParams { + var () + return &ColumnFamilyMetricsReadByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics read by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read by name get params +func (o *ColumnFamilyMetricsReadByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read by name get params +func (o *ColumnFamilyMetricsReadByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read by name get params +func (o *ColumnFamilyMetricsReadByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read by name get params +func (o *ColumnFamilyMetricsReadByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read by name get params +func (o *ColumnFamilyMetricsReadByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read by name get params +func (o *ColumnFamilyMetricsReadByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics read by name get params +func (o *ColumnFamilyMetricsReadByNameGetParams) WithName(name string) *ColumnFamilyMetricsReadByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics read by name get params +func 
(o *ColumnFamilyMetricsReadByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_by_name_get_responses.go new file mode 100644 index 00000000000..c4bfe8a2c99 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadByNameGetReader is a Reader for the ColumnFamilyMetricsReadByNameGet structure. +type ColumnFamilyMetricsReadByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsReadByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadByNameGetOK creates a ColumnFamilyMetricsReadByNameGetOK with default headers values +func NewColumnFamilyMetricsReadByNameGetOK() *ColumnFamilyMetricsReadByNameGetOK { + return &ColumnFamilyMetricsReadByNameGetOK{} +} + +/* +ColumnFamilyMetricsReadByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsReadByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsReadByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsReadByNameGetDefault creates a ColumnFamilyMetricsReadByNameGetDefault with default headers values +func NewColumnFamilyMetricsReadByNameGetDefault(code int) *ColumnFamilyMetricsReadByNameGetDefault { + return &ColumnFamilyMetricsReadByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsReadByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read by name get default response +func (o *ColumnFamilyMetricsReadByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_get_parameters.go new file mode 100644 index 00000000000..1a542f72909 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadGetParams creates a new ColumnFamilyMetricsReadGetParams object +// with the default values initialized. 
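The payload types differ across these otherwise identical response files: the pending-flush and range-latency counters decode into int32, while the read metrics above decode into interface{} (the Swagger spec leaves the schema open), so callers have to narrow the value themselves. A hedged sketch; with encoding/json, untyped numbers arrive as float64, but the concrete shape ultimately depends on the server:

package example

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// readMetric narrows the untyped payload, falling back to an error when the
// server returns something other than a JSON number.
func readMetric(ok *operations.ColumnFamilyMetricsReadByNameGetOK) (float64, error) {
	switch v := ok.GetPayload().(type) {
	case float64:
		return v, nil
	default:
		return 0, fmt.Errorf("unexpected payload type %T", v)
	}
}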
+func NewColumnFamilyMetricsReadGetParams() *ColumnFamilyMetricsReadGetParams { + + return &ColumnFamilyMetricsReadGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadGetParamsWithTimeout creates a new ColumnFamilyMetricsReadGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadGetParams { + + return &ColumnFamilyMetricsReadGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadGetParamsWithContext creates a new ColumnFamilyMetricsReadGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadGetParams { + + return &ColumnFamilyMetricsReadGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadGetParams { + + return &ColumnFamilyMetricsReadGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadGetParams contains all the parameters to send to the API endpoint +for the column family metrics read get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read get params +func (o *ColumnFamilyMetricsReadGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read get params +func (o *ColumnFamilyMetricsReadGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read get params +func (o *ColumnFamilyMetricsReadGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read get params +func (o *ColumnFamilyMetricsReadGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read get params +func (o *ColumnFamilyMetricsReadGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read get params +func (o *ColumnFamilyMetricsReadGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_get_responses.go new file mode 100644 index 00000000000..269033bb5fd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadGetReader is a Reader for the ColumnFamilyMetricsReadGet structure. +type ColumnFamilyMetricsReadGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsReadGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadGetOK creates a ColumnFamilyMetricsReadGetOK with default headers values +func NewColumnFamilyMetricsReadGetOK() *ColumnFamilyMetricsReadGetOK { + return &ColumnFamilyMetricsReadGetOK{} +} + +/* +ColumnFamilyMetricsReadGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsReadGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsReadGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsReadGetDefault creates a ColumnFamilyMetricsReadGetDefault with default headers values +func NewColumnFamilyMetricsReadGetDefault(code int) *ColumnFamilyMetricsReadGetDefault { + return &ColumnFamilyMetricsReadGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsReadGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read get default response +func (o *ColumnFamilyMetricsReadGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_by_name_get_parameters.go new file mode 100644 index 00000000000..a41cb8b68b9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyByNameGetParams creates a new ColumnFamilyMetricsReadLatencyByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsReadLatencyByNameGetParams() *ColumnFamilyMetricsReadLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyByNameGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family 
metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) WithName(name string) *ColumnFamilyMetricsReadLatencyByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics read latency by name get params +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_by_name_get_responses.go new file mode 100644 index 00000000000..ed5703e546e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyByNameGetReader is a Reader for the ColumnFamilyMetricsReadLatencyByNameGet structure. +type ColumnFamilyMetricsReadLatencyByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsReadLatencyByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyByNameGetOK creates a ColumnFamilyMetricsReadLatencyByNameGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyByNameGetOK() *ColumnFamilyMetricsReadLatencyByNameGetOK { + return &ColumnFamilyMetricsReadLatencyByNameGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsReadLatencyByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsReadLatencyByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsReadLatencyByNameGetDefault creates a ColumnFamilyMetricsReadLatencyByNameGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyByNameGetDefault(code int) *ColumnFamilyMetricsReadLatencyByNameGetDefault { + return &ColumnFamilyMetricsReadLatencyByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsReadLatencyByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency by name get default response +func (o *ColumnFamilyMetricsReadLatencyByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..57ad249ddd7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams creates a new ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized. 
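For this endpoint the OK wrapper carries a typed int32 payload, while every non-2xx response comes back through the Default wrapper, whose Error() method makes it usable directly as a Go error. A hedged sketch of recovering the status code and server message at the call site — again assuming the standard generated client method, with errors.As matching because the reader returns the *Default value itself as the error:

	package metricsexample

	import (
		"errors"
		"fmt"

		"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
	)

	func readLatencyByName(ops *operations.Client, name string) (int32, error) {
		ok, err := ops.ColumnFamilyMetricsReadLatencyByNameGet(
			operations.NewColumnFamilyMetricsReadLatencyByNameGetParams().WithName(name),
		)
		if err != nil {
			// Non-2xx responses surface as the Default wrapper, which implements error.
			var dflt *operations.ColumnFamilyMetricsReadLatencyByNameGetDefault
			if errors.As(err, &dflt) {
				return 0, fmt.Errorf("scylla API: HTTP %d: %s", dflt.Code(), dflt.GetPayload().Message)
			}
			return 0, err
		}
		return ok.GetPayload(), nil
	}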
+func NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams() *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency estimated histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = 
ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics read latency estimated histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..0b0e791fea6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGet structure. +type ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK creates a ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK() *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK { + return &ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault creates a ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault { + return &ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency estimated histogram by name get default response +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_recent_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_recent_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..032a1938da9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_recent_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams creates a new ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams() *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency estimated recent histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family 
metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics read latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_recent_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_recent_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..4fbabd461ab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_estimated_recent_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGet structure. +type ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
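The ReadResponse dispatch that follows is the same in every responses file in this diff and is worth spelling out once: only HTTP 200 maps to the typed OK value; every other status builds the Default wrapper, and the response.Code()/100 == 2 guard decides whether that wrapper is returned as a success or as an error. A sketch of the resulting caller-side contract, assuming the standard go-swagger runtime drives the reader:

	// HTTP 200   → (*...GetOK, nil)          // typed success
	// other 2xx  → (*...GetDefault, nil)     // success, but via the catch-all wrapper
	// non-2xx    → (nil, *...GetDefault)     // the wrapper doubles as the error value
	//
	// The division check is what keeps unexpected-but-successful status
	// codes from being reported as failures.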
+func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK creates a ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK() *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK { + return &ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault creates a ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault(code int) *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault { + return &ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency estimated recent histogram by name get default response +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_get_parameters.go new file mode 100644 index 00000000000..b247af889bb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyGetParams creates a new ColumnFamilyMetricsReadLatencyGetParams object +// with the default values initialized. 
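Unlike the plain latency reads, the estimated-histogram OK types above are empty structs: their readResponse is a no-op, so a successful call decodes no body at all. What that means at the call site, in a short sketch (client method name assumed, as before):

	package metricsexample

	import (
		"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
	)

	func touchEstimatedHistogram(ops *operations.Client, name string) error {
		// The *OK result has no Payload field; only the error is meaningful here.
		_, err := ops.ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGet(
			operations.NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams().WithName(name),
		)
		return err
	}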
+func NewColumnFamilyMetricsReadLatencyGetParams() *ColumnFamilyMetricsReadLatencyGetParams { + + return &ColumnFamilyMetricsReadLatencyGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyGetParams { + + return &ColumnFamilyMetricsReadLatencyGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyGetParams { + + return &ColumnFamilyMetricsReadLatencyGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyGetParams { + + return &ColumnFamilyMetricsReadLatencyGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency get params +func (o *ColumnFamilyMetricsReadLatencyGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency get params +func (o *ColumnFamilyMetricsReadLatencyGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency get params +func (o *ColumnFamilyMetricsReadLatencyGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read latency get params +func (o *ColumnFamilyMetricsReadLatencyGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read latency get params +func (o *ColumnFamilyMetricsReadLatencyGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency get params +func (o *ColumnFamilyMetricsReadLatencyGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_get_responses.go new file mode 100644 index 00000000000..82aa36cb47f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyGetReader is a Reader for the ColumnFamilyMetricsReadLatencyGet structure. +type ColumnFamilyMetricsReadLatencyGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsReadLatencyGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyGetOK creates a ColumnFamilyMetricsReadLatencyGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyGetOK() *ColumnFamilyMetricsReadLatencyGetOK { + return &ColumnFamilyMetricsReadLatencyGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsReadLatencyGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsReadLatencyGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsReadLatencyGetDefault creates a ColumnFamilyMetricsReadLatencyGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyGetDefault(code int) *ColumnFamilyMetricsReadLatencyGetDefault { + return &ColumnFamilyMetricsReadLatencyGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsReadLatencyGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency get default response +func (o *ColumnFamilyMetricsReadLatencyGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..de88f7cdc2d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyHistogramByNameGetParams creates a new ColumnFamilyMetricsReadLatencyHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsReadLatencyHistogramByNameGetParams() *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + 
o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics read latency histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..815c3f90eb8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsReadLatencyHistogramByNameGet structure. +type ColumnFamilyMetricsReadLatencyHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramByNameGetOK creates a ColumnFamilyMetricsReadLatencyHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyHistogramByNameGetOK() *ColumnFamilyMetricsReadLatencyHistogramByNameGetOK { + return &ColumnFamilyMetricsReadLatencyHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsReadLatencyHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsReadLatencyHistogramByNameGetDefault creates a ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyHistogramByNameGetDefault(code int) *ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault { + return &ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency histogram by name get default response +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_get_parameters.go new file mode 100644 index 00000000000..5f8cb38dfbe --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyHistogramGetParams creates a new ColumnFamilyMetricsReadLatencyHistogramGetParams object +// with the default values initialized. +func NewColumnFamilyMetricsReadLatencyHistogramGetParams() *ColumnFamilyMetricsReadLatencyHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyHistogramGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyHistogramGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyHistogramGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyHistogramGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyHistogramGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyHistogramGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyHistogramGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency histogram get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency histogram get params +func (o *ColumnFamilyMetricsReadLatencyHistogramGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency histogram get params +func (o *ColumnFamilyMetricsReadLatencyHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency histogram get params +func (o *ColumnFamilyMetricsReadLatencyHistogramGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read latency histogram get params +func (o *ColumnFamilyMetricsReadLatencyHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family 
metrics read latency histogram get params +func (o *ColumnFamilyMetricsReadLatencyHistogramGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency histogram get params +func (o *ColumnFamilyMetricsReadLatencyHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_get_responses.go new file mode 100644 index 00000000000..8c7aa1b94c5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_histogram_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyHistogramGetReader is a Reader for the ColumnFamilyMetricsReadLatencyHistogramGet structure. +type ColumnFamilyMetricsReadLatencyHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsReadLatencyHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyHistogramGetOK creates a ColumnFamilyMetricsReadLatencyHistogramGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyHistogramGetOK() *ColumnFamilyMetricsReadLatencyHistogramGetOK { + return &ColumnFamilyMetricsReadLatencyHistogramGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyHistogramGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsReadLatencyHistogramGetOK struct { + Payload []*models.Histogram +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramGetOK) GetPayload() []*models.Histogram { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsReadLatencyHistogramGetDefault creates a ColumnFamilyMetricsReadLatencyHistogramGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyHistogramGetDefault(code int) *ColumnFamilyMetricsReadLatencyHistogramGetDefault { + return &ColumnFamilyMetricsReadLatencyHistogramGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyHistogramGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsReadLatencyHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency histogram get default response +func (o *ColumnFamilyMetricsReadLatencyHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..a9090f1a98c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized. 
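Taken together, the generated pieces follow the standard go-swagger flow: construct a *Params value with the fluent With* setters, pass it to the operations client, and read the typed payload off the *OK result. A minimal sketch, assuming the operations client exposes the conventional per-operation method (the method name and client wiring are assumptions, not part of this diff):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// printReadLatencyHistograms is a hypothetical helper. It assumes opsClient is
// the go-swagger generated operations.Client for this API and that the client
// exposes the conventional per-operation method (not shown in this diff).
func printReadLatencyHistograms(ctx context.Context, opsClient *operations.Client) error {
	// Build the params with the fluent setters defined above; WithTimeout
	// overrides the cr.DefaultTimeout installed by the constructor.
	params := operations.NewColumnFamilyMetricsReadLatencyHistogramGetParams().
		WithContext(ctx).
		WithTimeout(10 * time.Second)

	ok, err := opsClient.ColumnFamilyMetricsReadLatencyHistogramGet(params) // assumed generated method
	if err != nil {
		return err
	}
	// The 200 response carries []*models.Histogram, per the OK type above.
	for _, h := range ok.GetPayload() {
		fmt.Printf("read latency histogram: %+v\n", h)
	}
	return nil
}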
+func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams() *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency moving average histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency moving average histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency moving average histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency moving average histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read latency moving average histogram by name get params 
+func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read latency moving average histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency moving average histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics read latency moving average histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics read latency moving average histogram by name get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..645a9b896ff --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGet structure. +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK creates a ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK() *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK { + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault creates a ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault(code int) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault { + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency moving average histogram by name get default response +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_get_parameters.go new file mode 100644 index 00000000000..611e30134b6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams() *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParamsWithTimeout creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParamsWithContext creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParamsWithHTTPClient creates a new ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams contains all the parameters to send to the API endpoint +for the column family metrics read latency moving average histogram get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics read latency moving average histogram get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics read latency moving average histogram get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics read latency moving average histogram get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics read latency moving average histogram get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics read latency moving average histogram get params +func (o 
*ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics read latency moving average histogram get params +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_get_responses.go new file mode 100644 index 00000000000..7a1062fa43a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_read_latency_moving_average_histogram_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetReader is a Reader for the ColumnFamilyMetricsReadLatencyMovingAverageHistogramGet structure. +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK creates a ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK with default headers values +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK() *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK { + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK{} +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK struct { + Payload []*models.RateMovingAverageAndHistogram +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK) GetPayload() []*models.RateMovingAverageAndHistogram { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault creates a ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault with default headers values +func NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault(code int) *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault { + return &ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics read latency moving average histogram get default response +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_by_name_get_parameters.go new file mode 100644 index 00000000000..0c3d32eba76 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized. 
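Note the error path in ReadResponse above: any non-2xx status is decoded into the *Default type and returned as the error value (it implements error through the Error() method). A caller can therefore pull the status code and the models.ErrorModel payload back out of a failed call; a minimal sketch:

package main

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// reportAgentError is a hypothetical helper showing how the status code and
// server message can be recovered from a failed call.
func reportAgentError(err error) {
	var agentErr *operations.ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault
	if errors.As(err, &agentErr) {
		// Code() and GetPayload() are defined on the generated type above;
		// the payload is the decoded models.ErrorModel.
		fmt.Printf("agent returned HTTP %d: %s\n", agentErr.Code(), agentErr.GetPayload().Message)
		return
	}
	fmt.Printf("non-API error: %v\n", err)
}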
+func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams() *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParamsWithContext creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics recent bloom filter false positives by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics recent bloom filter false positives by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics recent bloom filter false positives by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics recent bloom filter false positives by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics recent bloom filter false positives by name get params +func (o 
*ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics recent bloom filter false positives by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics recent bloom filter false positives by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics recent bloom filter false positives by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) WithName(name string) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics recent bloom filter false positives by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_by_name_get_responses.go new file mode 100644 index 00000000000..1fdae9bad41 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetReader is a Reader for the ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGet structure. +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK creates a ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK() *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK { + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK{} +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault creates a ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault(code int) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault { + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics recent bloom filter false positives by name get default response +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_get_parameters.go new file mode 100644 index 00000000000..f9b63b0ddcc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams() *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParamsWithTimeout creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParamsWithContext creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams contains all the parameters to send to the API endpoint +for the column family metrics recent bloom filter false positives get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics recent bloom filter false positives get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics recent bloom filter false positives get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics recent bloom filter false positives get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics recent bloom filter false positives get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics recent bloom filter false positives get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) WithHTTPClient(client *http.Client) 
*ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics recent bloom filter false positives get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_get_responses.go new file mode 100644 index 00000000000..6005248b1db --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_positives_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetReader is a Reader for the ColumnFamilyMetricsRecentBloomFilterFalsePositivesGet structure. +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK creates a ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK() *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK { + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK{} +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault creates a ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault(code int) *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault { + return &ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics recent bloom filter false positives get default response +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_by_name_get_parameters.go new file mode 100644 index 00000000000..9762e1daafc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams object +// with the default values initialized. 
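Unlike the histogram endpoints, the bloom-filter counters above declare their 200 payload as a bare interface{} rather than a concrete model, so the caller has to inspect the decoded JSON value itself. A sketch under the assumptions flagged in the comments:

package main

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// recentBloomFilterFalsePositives is a hypothetical helper; it assumes the
// conventional generated client method and an agent that returns a JSON number.
func recentBloomFilterFalsePositives(opsClient *operations.Client) (float64, error) {
	params := operations.NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams()
	ok, err := opsClient.ColumnFamilyMetricsRecentBloomFilterFalsePositivesGet(params) // assumed generated method
	if err != nil {
		return 0, err
	}
	// GetPayload() is interface{} for this endpoint; encoding/json decodes a
	// bare JSON number into float64, so a type assertion recovers the value.
	n, isNumber := ok.GetPayload().(float64)
	if !isNumber {
		return 0, fmt.Errorf("unexpected payload type %T", ok.GetPayload())
	}
	return n, nil
}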
+func NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams() *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParamsWithContext creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + var () + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics recent bloom filter false ratio by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the 
column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) WithName(name string) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics recent bloom filter false ratio by name get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_by_name_get_responses.go new file mode 100644 index 00000000000..b84165a5604 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetReader is a Reader for the ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGet structure. +type ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK creates a ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK() *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK { + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK{} +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault creates a ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault(code int) *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault { + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics recent bloom filter false ratio by name get default response +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_get_parameters.go new file mode 100644 index 00000000000..3410082b5f8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams object +// with the default values initialized. 
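
A note on the Reader contract just defined, with a sketch of consuming its result (assumes an import of this operations package; raw and err are as returned by ReadResponse or by the generated client wrapper around it):

// The reader maps 200 to the OK type and everything else to the Default
// type; non-2xx Defaults are returned as the error, while an unexpected
// 2xx comes back as a Default value with a nil error.
func handleFalseRatioSketch(raw interface{}, err error) error {
	if err != nil {
		return err // 4xx/5xx surface here as *...Default, which implements error
	}
	switch v := raw.(type) {
	case *operations.ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK:
		fmt.Println("payload:", v.GetPayload()) // untyped JSON for this endpoint
	case *operations.ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault:
		fmt.Println("unexpected 2xx status:", v.Code())
	}
	return nil
}
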
+func NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams() *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParamsWithTimeout creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParamsWithContext creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams { + + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams contains all the parameters to send to the API endpoint +for the column family metrics recent bloom filter false ratio get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics recent bloom filter false ratio get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics recent bloom filter false ratio get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics recent bloom filter false ratio get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics recent bloom filter false ratio get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics recent bloom filter false ratio get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to 
the column family metrics recent bloom filter false ratio get params +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_get_responses.go new file mode 100644 index 00000000000..7d0faf4bc5e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_recent_bloom_filter_false_ratio_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRecentBloomFilterFalseRatioGetReader is a Reader for the ColumnFamilyMetricsRecentBloomFilterFalseRatioGet structure. +type ColumnFamilyMetricsRecentBloomFilterFalseRatioGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK creates a ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK() *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK { + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK{} +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault creates a ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault with default headers values +func NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault(code int) *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault { + return &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics recent bloom filter false ratio get default response +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_by_name_get_parameters.go new file mode 100644 index 00000000000..852178cc78a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRowCacheHitByNameGetParams creates a new ColumnFamilyMetricsRowCacheHitByNameGetParams object +// with the default values initialized. 
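
For orientation, these Params/Reader pairs are normally driven by the package's generated operations client, which reduces to a go-openapi transport submission. A sketch written as if inside this package, assuming the usual go-swagger Client type with transport and formats fields; only the operation ID is taken from this patch, and the path pattern, schemes, and media types are assumptions:

func (a *Client) falseRatioGetSketch(params *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) (interface{}, error) {
	if params == nil {
		params = NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams()
	}
	return a.transport.Submit(&runtime.ClientOperation{
		ID:                 "ColumnFamilyMetricsRecentBloomFilterFalseRatioGet",
		Method:             "GET",
		PathPattern:        "/column_family/metrics/recent_bloom_filter_false_ratio", // assumed
		ProducesMediaTypes: []string{"application/json"},
		ConsumesMediaTypes: []string{"application/json"},
		Schemes:            []string{"http"},
		Params:             params, // supplies WriteToRequest
		Reader:             &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetReader{formats: a.formats},
		Context:            params.Context,
		Client:             params.HTTPClient,
	})
}
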
+func NewColumnFamilyMetricsRowCacheHitByNameGetParams() *ColumnFamilyMetricsRowCacheHitByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRowCacheHitByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRowCacheHitByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitByNameGetParamsWithContext creates a new ColumnFamilyMetricsRowCacheHitByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRowCacheHitByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRowCacheHitByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRowCacheHitByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRowCacheHitByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheHitByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRowCacheHitByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics row cache hit by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRowCacheHitByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheHitByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family 
metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) WithName(name string) *ColumnFamilyMetricsRowCacheHitByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics row cache hit by name get params +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRowCacheHitByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_by_name_get_responses.go new file mode 100644 index 00000000000..aee068b9da2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRowCacheHitByNameGetReader is a Reader for the ColumnFamilyMetricsRowCacheHitByNameGet structure. +type ColumnFamilyMetricsRowCacheHitByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRowCacheHitByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRowCacheHitByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRowCacheHitByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRowCacheHitByNameGetOK creates a ColumnFamilyMetricsRowCacheHitByNameGetOK with default headers values +func NewColumnFamilyMetricsRowCacheHitByNameGetOK() *ColumnFamilyMetricsRowCacheHitByNameGetOK { + return &ColumnFamilyMetricsRowCacheHitByNameGetOK{} +} + +/* +ColumnFamilyMetricsRowCacheHitByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsRowCacheHitByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRowCacheHitByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRowCacheHitByNameGetDefault creates a ColumnFamilyMetricsRowCacheHitByNameGetDefault with default headers values +func NewColumnFamilyMetricsRowCacheHitByNameGetDefault(code int) *ColumnFamilyMetricsRowCacheHitByNameGetDefault { + return &ColumnFamilyMetricsRowCacheHitByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRowCacheHitByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsRowCacheHitByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics row cache hit by name get default response +func (o *ColumnFamilyMetricsRowCacheHitByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRowCacheHitByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRowCacheHitByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_get_parameters.go new file mode 100644 index 00000000000..d1287d9a315 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRowCacheHitGetParams creates a new ColumnFamilyMetricsRowCacheHitGetParams object +// with the default values initialized. 
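
Worth noting while reading these readers: the false-ratio endpoints above carry an untyped interface{} payload, whereas the row-cache counters decode into int32. In both cases readResponse tolerates io.EOF, so an empty body yields the zero value rather than an error. A minimal sketch of what the int32 path reduces to (needs io, strings, and github.com/go-openapi/runtime):

func decodeCounterSketch(body string) (int32, error) {
	var payload int32
	// runtime.JSONConsumer is what the transport normally passes to ReadResponse.
	consumer := runtime.JSONConsumer()
	if err := consumer.Consume(strings.NewReader(body), &payload); err != nil && err != io.EOF {
		return 0, err
	}
	return payload, nil // decodeCounterSketch("42") yields 42
}
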
+func NewColumnFamilyMetricsRowCacheHitGetParams() *ColumnFamilyMetricsRowCacheHitGetParams { + + return &ColumnFamilyMetricsRowCacheHitGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitGetParamsWithTimeout creates a new ColumnFamilyMetricsRowCacheHitGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRowCacheHitGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitGetParams { + + return &ColumnFamilyMetricsRowCacheHitGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitGetParamsWithContext creates a new ColumnFamilyMetricsRowCacheHitGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRowCacheHitGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitGetParams { + + return &ColumnFamilyMetricsRowCacheHitGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRowCacheHitGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRowCacheHitGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRowCacheHitGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheHitGetParams { + + return &ColumnFamilyMetricsRowCacheHitGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRowCacheHitGetParams contains all the parameters to send to the API endpoint +for the column family metrics row cache hit get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRowCacheHitGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics row cache hit get params +func (o *ColumnFamilyMetricsRowCacheHitGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics row cache hit get params +func (o *ColumnFamilyMetricsRowCacheHitGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics row cache hit get params +func (o *ColumnFamilyMetricsRowCacheHitGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics row cache hit get params +func (o *ColumnFamilyMetricsRowCacheHitGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics row cache hit get params +func (o *ColumnFamilyMetricsRowCacheHitGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheHitGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics row cache hit get params +func (o *ColumnFamilyMetricsRowCacheHitGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRowCacheHitGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_get_responses.go new file mode 100644 index 00000000000..edf71b23995 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRowCacheHitGetReader is a Reader for the ColumnFamilyMetricsRowCacheHitGet structure. +type ColumnFamilyMetricsRowCacheHitGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRowCacheHitGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRowCacheHitGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRowCacheHitGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRowCacheHitGetOK creates a ColumnFamilyMetricsRowCacheHitGetOK with default headers values +func NewColumnFamilyMetricsRowCacheHitGetOK() *ColumnFamilyMetricsRowCacheHitGetOK { + return &ColumnFamilyMetricsRowCacheHitGetOK{} +} + +/* +ColumnFamilyMetricsRowCacheHitGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRowCacheHitGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRowCacheHitGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRowCacheHitGetDefault creates a ColumnFamilyMetricsRowCacheHitGetDefault with default headers values +func NewColumnFamilyMetricsRowCacheHitGetDefault(code int) *ColumnFamilyMetricsRowCacheHitGetDefault { + return &ColumnFamilyMetricsRowCacheHitGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRowCacheHitGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsRowCacheHitGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics row cache hit get default response +func (o *ColumnFamilyMetricsRowCacheHitGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRowCacheHitGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRowCacheHitGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_by_name_get_parameters.go new file mode 100644 index 00000000000..66bff9e5d1f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams object +// with the default values initialized. 
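
Since each Default type implements error via its Error() method, callers usually only see it through the returned err. A sketch of recovering the structured models.ErrorModel payload; doRowCacheHitGet is a hypothetical wrapper around the reader and transport, not part of this patch:

func logRowCacheHitErrSketch() error {
	raw, err := doRowCacheHitGet() // hypothetical; returns (interface{}, error)
	if err != nil {
		var def *operations.ColumnFamilyMetricsRowCacheHitGetDefault
		if errors.As(err, &def) { // std errors package
			log.Printf("scylla agent error %d: %s", def.Code(), def.GetPayload().Message)
		}
		return err
	}
	_ = raw
	return nil
}
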
+func NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams() *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParamsWithContext creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics row cache hit out of range by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) WithHTTPClient(client *http.Client) 
*ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) WithName(name string) *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics row cache hit out of range by name get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_by_name_get_responses.go new file mode 100644 index 00000000000..3eb8d5b96b0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetReader is a Reader for the ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGet structure. +type ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK creates a ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK with default headers values +func NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK() *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK { + return &ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK{} +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault creates a ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault with default headers values +func NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault(code int) *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault { + return &ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics row cache hit out of range by name get default response +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_get_parameters.go new file mode 100644 index 00000000000..f0d4c6517d2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParams creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParams() *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams { + + return &ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParamsWithTimeout creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams { + + return &ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParamsWithContext creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams { + + return &ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams { + + return &ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams contains all the parameters to send to the API endpoint +for the column family metrics row cache hit out of range get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics row cache hit out of range get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics row cache hit out of range get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics row cache hit out of range get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics row cache hit out of range get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics row cache hit out of range get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics row cache hit out of range get params +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// 
WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_get_responses.go new file mode 100644 index 00000000000..a5ae7ed79cd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_hit_out_of_range_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRowCacheHitOutOfRangeGetReader is a Reader for the ColumnFamilyMetricsRowCacheHitOutOfRangeGet structure. +type ColumnFamilyMetricsRowCacheHitOutOfRangeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRowCacheHitOutOfRangeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeGetOK creates a ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK with default headers values +func NewColumnFamilyMetricsRowCacheHitOutOfRangeGetOK() *ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK { + return &ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK{} +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault creates a ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault with default headers values +func NewColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault(code int) *ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault { + return &ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics row cache hit out of range get default response +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_by_name_get_parameters.go new file mode 100644 index 00000000000..122c2dd8482 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRowCacheMissByNameGetParams creates a new ColumnFamilyMetricsRowCacheMissByNameGetParams object +// with the default values initialized. 
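
The params carry both a flat timeout and a context: WriteToRequest forwards only the timeout via r.SetTimeout, while the transport uses Context for cancellation. A hypothetical helper keeping the two aligned for the parameter type defined above (needs context and time):

func withDeadlineSketch(ctx context.Context, p *operations.ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) *operations.ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams {
	p = p.WithContext(ctx)
	if dl, ok := ctx.Deadline(); ok {
		p = p.WithTimeout(time.Until(dl)) // keep the flat timeout consistent with the ctx deadline
	}
	return p
}
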
+func NewColumnFamilyMetricsRowCacheMissByNameGetParams() *ColumnFamilyMetricsRowCacheMissByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheMissByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRowCacheMissByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsRowCacheMissByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRowCacheMissByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheMissByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheMissByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRowCacheMissByNameGetParamsWithContext creates a new ColumnFamilyMetricsRowCacheMissByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRowCacheMissByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheMissByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheMissByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRowCacheMissByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRowCacheMissByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRowCacheMissByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheMissByNameGetParams { + var () + return &ColumnFamilyMetricsRowCacheMissByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRowCacheMissByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics row cache miss by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRowCacheMissByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheMissByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheMissByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheMissByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// 
WithName adds the name to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) WithName(name string) *ColumnFamilyMetricsRowCacheMissByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics row cache miss by name get params +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRowCacheMissByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_by_name_get_responses.go new file mode 100644 index 00000000000..cc192fd2d0f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRowCacheMissByNameGetReader is a Reader for the ColumnFamilyMetricsRowCacheMissByNameGet structure. +type ColumnFamilyMetricsRowCacheMissByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRowCacheMissByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRowCacheMissByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRowCacheMissByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRowCacheMissByNameGetOK creates a ColumnFamilyMetricsRowCacheMissByNameGetOK with default headers values +func NewColumnFamilyMetricsRowCacheMissByNameGetOK() *ColumnFamilyMetricsRowCacheMissByNameGetOK { + return &ColumnFamilyMetricsRowCacheMissByNameGetOK{} +} + +/* +ColumnFamilyMetricsRowCacheMissByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsRowCacheMissByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRowCacheMissByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheMissByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRowCacheMissByNameGetDefault creates a ColumnFamilyMetricsRowCacheMissByNameGetDefault with default headers values +func NewColumnFamilyMetricsRowCacheMissByNameGetDefault(code int) *ColumnFamilyMetricsRowCacheMissByNameGetDefault { + return &ColumnFamilyMetricsRowCacheMissByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRowCacheMissByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsRowCacheMissByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics row cache miss by name get default response +func (o *ColumnFamilyMetricsRowCacheMissByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRowCacheMissByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheMissByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRowCacheMissByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_get_parameters.go new file mode 100644 index 00000000000..44f3e792627 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsRowCacheMissGetParams creates a new ColumnFamilyMetricsRowCacheMissGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsRowCacheMissGetParams() *ColumnFamilyMetricsRowCacheMissGetParams { + + return &ColumnFamilyMetricsRowCacheMissGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsRowCacheMissGetParamsWithTimeout creates a new ColumnFamilyMetricsRowCacheMissGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsRowCacheMissGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheMissGetParams { + + return &ColumnFamilyMetricsRowCacheMissGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsRowCacheMissGetParamsWithContext creates a new ColumnFamilyMetricsRowCacheMissGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsRowCacheMissGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheMissGetParams { + + return &ColumnFamilyMetricsRowCacheMissGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsRowCacheMissGetParamsWithHTTPClient creates a new ColumnFamilyMetricsRowCacheMissGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsRowCacheMissGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheMissGetParams { + + return &ColumnFamilyMetricsRowCacheMissGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsRowCacheMissGetParams contains all the parameters to send to the API endpoint +for the column family metrics row cache miss get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsRowCacheMissGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics row cache miss get params +func (o *ColumnFamilyMetricsRowCacheMissGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsRowCacheMissGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics row cache miss get params +func (o *ColumnFamilyMetricsRowCacheMissGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics row cache miss get params +func (o *ColumnFamilyMetricsRowCacheMissGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsRowCacheMissGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics row cache miss get params +func (o *ColumnFamilyMetricsRowCacheMissGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics row cache miss get params +func (o *ColumnFamilyMetricsRowCacheMissGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsRowCacheMissGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics row cache miss get params +func (o *ColumnFamilyMetricsRowCacheMissGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsRowCacheMissGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_get_responses.go new file mode 100644 index 00000000000..caa9d6048d4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_row_cache_miss_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsRowCacheMissGetReader is a Reader for the ColumnFamilyMetricsRowCacheMissGet structure. +type ColumnFamilyMetricsRowCacheMissGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsRowCacheMissGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsRowCacheMissGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsRowCacheMissGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsRowCacheMissGetOK creates a ColumnFamilyMetricsRowCacheMissGetOK with default headers values +func NewColumnFamilyMetricsRowCacheMissGetOK() *ColumnFamilyMetricsRowCacheMissGetOK { + return &ColumnFamilyMetricsRowCacheMissGetOK{} +} + +/* +ColumnFamilyMetricsRowCacheMissGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsRowCacheMissGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsRowCacheMissGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheMissGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsRowCacheMissGetDefault creates a ColumnFamilyMetricsRowCacheMissGetDefault with default headers values +func NewColumnFamilyMetricsRowCacheMissGetDefault(code int) *ColumnFamilyMetricsRowCacheMissGetDefault { + return &ColumnFamilyMetricsRowCacheMissGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsRowCacheMissGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsRowCacheMissGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics row cache miss get default response +func (o *ColumnFamilyMetricsRowCacheMissGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsRowCacheMissGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsRowCacheMissGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsRowCacheMissGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_snapshots_size_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_snapshots_size_by_name_get_parameters.go new file mode 100644 index 00000000000..9c0f7d1f631 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_snapshots_size_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsSnapshotsSizeByNameGetParams creates a new ColumnFamilyMetricsSnapshotsSizeByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsSnapshotsSizeByNameGetParams() *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + var () + return &ColumnFamilyMetricsSnapshotsSizeByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsSnapshotsSizeByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsSnapshotsSizeByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsSnapshotsSizeByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + var () + return &ColumnFamilyMetricsSnapshotsSizeByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsSnapshotsSizeByNameGetParamsWithContext creates a new ColumnFamilyMetricsSnapshotsSizeByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsSnapshotsSizeByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + var () + return &ColumnFamilyMetricsSnapshotsSizeByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsSnapshotsSizeByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsSnapshotsSizeByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsSnapshotsSizeByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + var () + return &ColumnFamilyMetricsSnapshotsSizeByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsSnapshotsSizeByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics snapshots size by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsSnapshotsSizeByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) SetHTTPClient(client *http.Client) { + 
o.HTTPClient = client +} + +// WithName adds the name to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) WithName(name string) *ColumnFamilyMetricsSnapshotsSizeByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics snapshots size by name get params +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_snapshots_size_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_snapshots_size_by_name_get_responses.go new file mode 100644 index 00000000000..296cd92284b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_snapshots_size_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsSnapshotsSizeByNameGetReader is a Reader for the ColumnFamilyMetricsSnapshotsSizeByNameGet structure. +type ColumnFamilyMetricsSnapshotsSizeByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsSnapshotsSizeByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsSnapshotsSizeByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsSnapshotsSizeByNameGetOK creates a ColumnFamilyMetricsSnapshotsSizeByNameGetOK with default headers values +func NewColumnFamilyMetricsSnapshotsSizeByNameGetOK() *ColumnFamilyMetricsSnapshotsSizeByNameGetOK { + return &ColumnFamilyMetricsSnapshotsSizeByNameGetOK{} +} + +/* +ColumnFamilyMetricsSnapshotsSizeByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsSnapshotsSizeByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsSnapshotsSizeByNameGetDefault creates a ColumnFamilyMetricsSnapshotsSizeByNameGetDefault with default headers values +func NewColumnFamilyMetricsSnapshotsSizeByNameGetDefault(code int) *ColumnFamilyMetricsSnapshotsSizeByNameGetDefault { + return &ColumnFamilyMetricsSnapshotsSizeByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsSnapshotsSizeByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsSnapshotsSizeByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics snapshots size by name get default response +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsSnapshotsSizeByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_by_name_get_parameters.go new file mode 100644 index 00000000000..a001c08fa52 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsSpeculativeRetriesByNameGetParams creates a new ColumnFamilyMetricsSpeculativeRetriesByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsSpeculativeRetriesByNameGetParams() *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + var () + return &ColumnFamilyMetricsSpeculativeRetriesByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsSpeculativeRetriesByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsSpeculativeRetriesByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + var () + return &ColumnFamilyMetricsSpeculativeRetriesByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesByNameGetParamsWithContext creates a new ColumnFamilyMetricsSpeculativeRetriesByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsSpeculativeRetriesByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + var () + return &ColumnFamilyMetricsSpeculativeRetriesByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsSpeculativeRetriesByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsSpeculativeRetriesByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + var () + return &ColumnFamilyMetricsSpeculativeRetriesByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsSpeculativeRetriesByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics speculative retries by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsSpeculativeRetriesByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient 
to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) WithName(name string) *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics speculative retries by name get params +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_by_name_get_responses.go new file mode 100644 index 00000000000..9e6c8f56c2d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsSpeculativeRetriesByNameGetReader is a Reader for the ColumnFamilyMetricsSpeculativeRetriesByNameGet structure. +type ColumnFamilyMetricsSpeculativeRetriesByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsSpeculativeRetriesByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsSpeculativeRetriesByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesByNameGetOK creates a ColumnFamilyMetricsSpeculativeRetriesByNameGetOK with default headers values +func NewColumnFamilyMetricsSpeculativeRetriesByNameGetOK() *ColumnFamilyMetricsSpeculativeRetriesByNameGetOK { + return &ColumnFamilyMetricsSpeculativeRetriesByNameGetOK{} +} + +/* +ColumnFamilyMetricsSpeculativeRetriesByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsSpeculativeRetriesByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsSpeculativeRetriesByNameGetDefault creates a ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault with default headers values +func NewColumnFamilyMetricsSpeculativeRetriesByNameGetDefault(code int) *ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault { + return &ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics speculative retries by name get default response +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_get_parameters.go new file mode 100644 index 00000000000..73e7f276eab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsSpeculativeRetriesGetParams creates a new ColumnFamilyMetricsSpeculativeRetriesGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsSpeculativeRetriesGetParams() *ColumnFamilyMetricsSpeculativeRetriesGetParams { + + return &ColumnFamilyMetricsSpeculativeRetriesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesGetParamsWithTimeout creates a new ColumnFamilyMetricsSpeculativeRetriesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsSpeculativeRetriesGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsSpeculativeRetriesGetParams { + + return &ColumnFamilyMetricsSpeculativeRetriesGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesGetParamsWithContext creates a new ColumnFamilyMetricsSpeculativeRetriesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsSpeculativeRetriesGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsSpeculativeRetriesGetParams { + + return &ColumnFamilyMetricsSpeculativeRetriesGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesGetParamsWithHTTPClient creates a new ColumnFamilyMetricsSpeculativeRetriesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsSpeculativeRetriesGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsSpeculativeRetriesGetParams { + + return &ColumnFamilyMetricsSpeculativeRetriesGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsSpeculativeRetriesGetParams contains all the parameters to send to the API endpoint +for the column family metrics speculative retries get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsSpeculativeRetriesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics speculative retries get params +func (o *ColumnFamilyMetricsSpeculativeRetriesGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsSpeculativeRetriesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics speculative retries get params +func (o *ColumnFamilyMetricsSpeculativeRetriesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics speculative retries get params +func (o *ColumnFamilyMetricsSpeculativeRetriesGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsSpeculativeRetriesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics speculative retries get params +func (o *ColumnFamilyMetricsSpeculativeRetriesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics speculative retries get params +func (o *ColumnFamilyMetricsSpeculativeRetriesGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsSpeculativeRetriesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics speculative retries get params +func (o *ColumnFamilyMetricsSpeculativeRetriesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsSpeculativeRetriesGetParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_get_responses.go new file mode 100644 index 00000000000..70ffa8690bc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_speculative_retries_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsSpeculativeRetriesGetReader is a Reader for the ColumnFamilyMetricsSpeculativeRetriesGet structure. +type ColumnFamilyMetricsSpeculativeRetriesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsSpeculativeRetriesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsSpeculativeRetriesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsSpeculativeRetriesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsSpeculativeRetriesGetOK creates a ColumnFamilyMetricsSpeculativeRetriesGetOK with default headers values +func NewColumnFamilyMetricsSpeculativeRetriesGetOK() *ColumnFamilyMetricsSpeculativeRetriesGetOK { + return &ColumnFamilyMetricsSpeculativeRetriesGetOK{} +} + +/* +ColumnFamilyMetricsSpeculativeRetriesGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsSpeculativeRetriesGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsSpeculativeRetriesGetDefault creates a ColumnFamilyMetricsSpeculativeRetriesGetDefault with default headers values +func NewColumnFamilyMetricsSpeculativeRetriesGetDefault(code int) *ColumnFamilyMetricsSpeculativeRetriesGetDefault { + return &ColumnFamilyMetricsSpeculativeRetriesGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsSpeculativeRetriesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsSpeculativeRetriesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics speculative retries get default response +func (o *ColumnFamilyMetricsSpeculativeRetriesGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsSpeculativeRetriesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_sstables_per_read_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_sstables_per_read_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..6f2758b37e4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_sstables_per_read_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams creates a new ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams() *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics sstables per read histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics sstables per read histogram by name get params +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics sstables per read histogram by name get params +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics sstables per read histogram by name get params +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics sstables per read histogram by name get params +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics sstables per read histogram by name get params +func (o 
*ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics sstables per read histogram by name get params +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics sstables per read histogram by name get params +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics sstables per read histogram by name get params +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_sstables_per_read_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_sstables_per_read_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..8838d481691 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_sstables_per_read_histogram_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsSstablesPerReadHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsSstablesPerReadHistogramByNameGet structure. +type ColumnFamilyMetricsSstablesPerReadHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK creates a ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK() *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK { + return &ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault creates a ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault(code int) *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault { + return &ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics sstables per read histogram by name get default response +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_tombstone_scanned_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_tombstone_scanned_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..3a257271d61 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_tombstone_scanned_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams creates a new ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams() *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics tombstone scanned histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics tombstone scanned histogram by name get params +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics tombstone scanned histogram by name get params +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics tombstone scanned histogram by name get params +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics tombstone scanned histogram by name get params +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics tombstone scanned histogram by name get params +func 
(o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics tombstone scanned histogram by name get params +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics tombstone scanned histogram by name get params +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics tombstone scanned histogram by name get params +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_tombstone_scanned_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_tombstone_scanned_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..9bd72f9fa5a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_tombstone_scanned_histogram_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsTombstoneScannedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsTombstoneScannedHistogramByNameGet structure. +type ColumnFamilyMetricsTombstoneScannedHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK creates a ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK() *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK { + return &ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault creates a ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault { + return &ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics tombstone scanned histogram by name get default response +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_by_name_get_parameters.go new file mode 100644 index 00000000000..7ac6906d6d2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams creates a new ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams object +// with the default values initialized. 
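All of the by-name variants in this hunk carry a single required path parameter, which WriteToRequest substitutes into the URL via SetPathParam. A small hedged helper for building the expected value (identifiers are illustrative):

```go
// cfName renders the "keyspace:name" value the by-name endpoints expect
// in their path parameter, e.g. cfName("ks1", "standard1") == "ks1:standard1".
func cfName(keyspace, table string) string {
	return keyspace + ":" + table
}

// Usage (hypothetical identifiers):
//   params := operations.NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams().
//       WithName(cfName("mykeyspace", "mytable"))
```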
+func NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams() *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParamsWithContext creates a new ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + var () + return &ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics total disk space used by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the 
HTTPClient to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WithName(name string) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics total disk space used by name get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_by_name_get_responses.go new file mode 100644 index 00000000000..a01ea0dc859 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetReader is a Reader for the ColumnFamilyMetricsTotalDiskSpaceUsedByNameGet structure. +type ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK creates a ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK with default headers values +func NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK() *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK { + return &ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK{} +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK struct { + Payload int64 +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK) GetPayload() int64 { + return o.Payload +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault creates a ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault with default headers values +func NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault(code int) *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault { + return &ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics total disk space used by name get default response +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_get_parameters.go new file mode 100644 index 00000000000..2eb161d6709 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsTotalDiskSpaceUsedGetParams creates a new ColumnFamilyMetricsTotalDiskSpaceUsedGetParams object +// with the default values initialized. 
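Each params type offers both chainable With* setters and plain Set* mutators with identical effect; the no-arg constructor already seeds the timeout with cr.DefaultTimeout. A sketch of the chainable style using the params type defined next:

```go
import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// buildParams shows the fluent style; both setters here are
// per-request overrides of the constructor defaults.
func buildParams(ctx context.Context) *operations.ColumnFamilyMetricsTotalDiskSpaceUsedGetParams {
	return operations.NewColumnFamilyMetricsTotalDiskSpaceUsedGetParams().
		WithTimeout(10 * time.Second). // overrides cr.DefaultTimeout
		WithContext(ctx)               // enables caller-side cancellation
}
```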
+func NewColumnFamilyMetricsTotalDiskSpaceUsedGetParams() *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsTotalDiskSpaceUsedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedGetParamsWithTimeout creates a new ColumnFamilyMetricsTotalDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsTotalDiskSpaceUsedGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsTotalDiskSpaceUsedGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedGetParamsWithContext creates a new ColumnFamilyMetricsTotalDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsTotalDiskSpaceUsedGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsTotalDiskSpaceUsedGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedGetParamsWithHTTPClient creates a new ColumnFamilyMetricsTotalDiskSpaceUsedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsTotalDiskSpaceUsedGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams { + + return &ColumnFamilyMetricsTotalDiskSpaceUsedGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedGetParams contains all the parameters to send to the API endpoint +for the column family metrics total disk space used get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsTotalDiskSpaceUsedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics total disk space used get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics total disk space used get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics total disk space used get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics total disk space used get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics total disk space used get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics total disk space used get params +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_get_responses.go new file mode 100644 index 00000000000..8929f10e442 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_total_disk_space_used_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsTotalDiskSpaceUsedGetReader is a Reader for the ColumnFamilyMetricsTotalDiskSpaceUsedGet structure. +type ColumnFamilyMetricsTotalDiskSpaceUsedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsTotalDiskSpaceUsedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsTotalDiskSpaceUsedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedGetOK creates a ColumnFamilyMetricsTotalDiskSpaceUsedGetOK with default headers values +func NewColumnFamilyMetricsTotalDiskSpaceUsedGetOK() *ColumnFamilyMetricsTotalDiskSpaceUsedGetOK { + return &ColumnFamilyMetricsTotalDiskSpaceUsedGetOK{} +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsTotalDiskSpaceUsedGetOK struct { + Payload int64 +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetOK) GetPayload() int64 { + return o.Payload +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsTotalDiskSpaceUsedGetDefault creates a ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault with default headers values +func NewColumnFamilyMetricsTotalDiskSpaceUsedGetDefault(code int) *ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault { + return &ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics total disk space used get default response +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_true_snapshots_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_true_snapshots_size_get_parameters.go new file mode 100644 index 00000000000..7e9bd8a8542 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_true_snapshots_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsTrueSnapshotsSizeGetParams creates a new ColumnFamilyMetricsTrueSnapshotsSizeGetParams object +// with the default values initialized. 
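Because the *...GetDefault types implement error through the Error() method shown above, a failed call can be unwrapped to recover the status code and ErrorModel payload. A hedged sketch against the Default type defined just above:

```go
import (
	"errors"
	"log"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// logAgentError distinguishes agent-side errors (non-2xx responses,
// returned by the reader as *...GetDefault) from transport failures.
func logAgentError(err error) {
	var d *operations.ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault
	if errors.As(err, &d) {
		log.Printf("agent returned HTTP %d: %s", d.Code(), d.GetPayload().Message)
		return
	}
	log.Printf("transport-level failure: %v", err)
}
```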
+func NewColumnFamilyMetricsTrueSnapshotsSizeGetParams() *ColumnFamilyMetricsTrueSnapshotsSizeGetParams { + + return &ColumnFamilyMetricsTrueSnapshotsSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsTrueSnapshotsSizeGetParamsWithTimeout creates a new ColumnFamilyMetricsTrueSnapshotsSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsTrueSnapshotsSizeGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsTrueSnapshotsSizeGetParams { + + return &ColumnFamilyMetricsTrueSnapshotsSizeGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsTrueSnapshotsSizeGetParamsWithContext creates a new ColumnFamilyMetricsTrueSnapshotsSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsTrueSnapshotsSizeGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsTrueSnapshotsSizeGetParams { + + return &ColumnFamilyMetricsTrueSnapshotsSizeGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsTrueSnapshotsSizeGetParamsWithHTTPClient creates a new ColumnFamilyMetricsTrueSnapshotsSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsTrueSnapshotsSizeGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsTrueSnapshotsSizeGetParams { + + return &ColumnFamilyMetricsTrueSnapshotsSizeGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsTrueSnapshotsSizeGetParams contains all the parameters to send to the API endpoint +for the column family metrics true snapshots size get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsTrueSnapshotsSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics true snapshots size get params +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsTrueSnapshotsSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics true snapshots size get params +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics true snapshots size get params +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsTrueSnapshotsSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics true snapshots size get params +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics true snapshots size get params +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsTrueSnapshotsSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics true snapshots size get params +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_true_snapshots_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_true_snapshots_size_get_responses.go new file mode 100644 index 00000000000..76fbce7e629 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_true_snapshots_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsTrueSnapshotsSizeGetReader is a Reader for the ColumnFamilyMetricsTrueSnapshotsSizeGet structure. +type ColumnFamilyMetricsTrueSnapshotsSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsTrueSnapshotsSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsTrueSnapshotsSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsTrueSnapshotsSizeGetOK creates a ColumnFamilyMetricsTrueSnapshotsSizeGetOK with default headers values +func NewColumnFamilyMetricsTrueSnapshotsSizeGetOK() *ColumnFamilyMetricsTrueSnapshotsSizeGetOK { + return &ColumnFamilyMetricsTrueSnapshotsSizeGetOK{} +} + +/* +ColumnFamilyMetricsTrueSnapshotsSizeGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsTrueSnapshotsSizeGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsTrueSnapshotsSizeGetDefault creates a ColumnFamilyMetricsTrueSnapshotsSizeGetDefault with default headers values +func NewColumnFamilyMetricsTrueSnapshotsSizeGetDefault(code int) *ColumnFamilyMetricsTrueSnapshotsSizeGetDefault { + return &ColumnFamilyMetricsTrueSnapshotsSizeGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsTrueSnapshotsSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsTrueSnapshotsSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics true snapshots size get default response +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsTrueSnapshotsSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_waiting_on_free_memtable_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_waiting_on_free_memtable_get_parameters.go new file mode 100644 index 00000000000..958d5da958b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_waiting_on_free_memtable_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWaitingOnFreeMemtableGetParams creates a new ColumnFamilyMetricsWaitingOnFreeMemtableGetParams object +// with the default values initialized. 
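Payload typing varies by endpoint: the disk-space responses decode into int64, while the histogram and snapshot-size responses above use interface{}. Assuming the default JSON consumer, bare numbers decoded into an interface{} arrive as float64, so a normalization step is usually needed; a sketch under that assumption:

```go
import "fmt"

// asInt64 normalizes a JSON-decoded interface{} payload to int64.
// Assumes the default JSON consumer, which decodes bare numbers into
// float64 when the decode target is interface{}.
func asInt64(payload interface{}) (int64, error) {
	switch v := payload.(type) {
	case float64:
		return int64(v), nil
	case int64: // endpoints with typed payloads can reuse this path
		return v, nil
	default:
		return 0, fmt.Errorf("unexpected payload type %T", payload)
	}
}
```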
+func NewColumnFamilyMetricsWaitingOnFreeMemtableGetParams() *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams { + + return &ColumnFamilyMetricsWaitingOnFreeMemtableGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWaitingOnFreeMemtableGetParamsWithTimeout creates a new ColumnFamilyMetricsWaitingOnFreeMemtableGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWaitingOnFreeMemtableGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams { + + return &ColumnFamilyMetricsWaitingOnFreeMemtableGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWaitingOnFreeMemtableGetParamsWithContext creates a new ColumnFamilyMetricsWaitingOnFreeMemtableGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWaitingOnFreeMemtableGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams { + + return &ColumnFamilyMetricsWaitingOnFreeMemtableGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWaitingOnFreeMemtableGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWaitingOnFreeMemtableGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWaitingOnFreeMemtableGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams { + + return &ColumnFamilyMetricsWaitingOnFreeMemtableGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWaitingOnFreeMemtableGetParams contains all the parameters to send to the API endpoint +for the column family metrics waiting on free memtable get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWaitingOnFreeMemtableGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics waiting on free memtable get params +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics waiting on free memtable get params +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics waiting on free memtable get params +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics waiting on free memtable get params +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics waiting on free memtable get params +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics waiting on free memtable get params +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest 
writes these params to a swagger request +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_waiting_on_free_memtable_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_waiting_on_free_memtable_get_responses.go new file mode 100644 index 00000000000..3664e557fd3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_waiting_on_free_memtable_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWaitingOnFreeMemtableGetReader is a Reader for the ColumnFamilyMetricsWaitingOnFreeMemtableGet structure. +type ColumnFamilyMetricsWaitingOnFreeMemtableGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWaitingOnFreeMemtableGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWaitingOnFreeMemtableGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWaitingOnFreeMemtableGetOK creates a ColumnFamilyMetricsWaitingOnFreeMemtableGetOK with default headers values +func NewColumnFamilyMetricsWaitingOnFreeMemtableGetOK() *ColumnFamilyMetricsWaitingOnFreeMemtableGetOK { + return &ColumnFamilyMetricsWaitingOnFreeMemtableGetOK{} +} + +/* +ColumnFamilyMetricsWaitingOnFreeMemtableGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsWaitingOnFreeMemtableGetOK struct { +} + +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsWaitingOnFreeMemtableGetDefault creates a ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault with default headers values +func NewColumnFamilyMetricsWaitingOnFreeMemtableGetDefault(code int) *ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault { + return &ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics waiting on free memtable get default response +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_by_name_get_parameters.go new file mode 100644 index 00000000000..95a95afcc61 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteByNameGetParams creates a new ColumnFamilyMetricsWriteByNameGetParams object +// with the default values initialized. 
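Note that ColumnFamilyMetricsWaitingOnFreeMemtableGetOK above is the one OK type in this hunk with no payload at all: its readResponse is a no-op, so a caller only learns that the request succeeded. A minimal sketch treating it as a success probe (the operations-client method name is assumed, as before):

```go
import "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"

// waitingOnFreeMemtableOK reports whether the agent answered 2xx; the
// OK value carries no data, so only the error result is informative.
func waitingOnFreeMemtableOK(ops *operations.Client) bool {
	_, err := ops.ColumnFamilyMetricsWaitingOnFreeMemtableGet(
		operations.NewColumnFamilyMetricsWaitingOnFreeMemtableGetParams())
	return err == nil
}
```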
+func NewColumnFamilyMetricsWriteByNameGetParams() *ColumnFamilyMetricsWriteByNameGetParams { + var () + return &ColumnFamilyMetricsWriteByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteByNameGetParams { + var () + return &ColumnFamilyMetricsWriteByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteByNameGetParamsWithContext creates a new ColumnFamilyMetricsWriteByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteByNameGetParams { + var () + return &ColumnFamilyMetricsWriteByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteByNameGetParams { + var () + return &ColumnFamilyMetricsWriteByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics write by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) WithName(name string) *ColumnFamilyMetricsWriteByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family 
metrics write by name get params +func (o *ColumnFamilyMetricsWriteByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_by_name_get_responses.go new file mode 100644 index 00000000000..a57fba546dd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteByNameGetReader is a Reader for the ColumnFamilyMetricsWriteByNameGet structure. +type ColumnFamilyMetricsWriteByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsWriteByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteByNameGetOK creates a ColumnFamilyMetricsWriteByNameGetOK with default headers values +func NewColumnFamilyMetricsWriteByNameGetOK() *ColumnFamilyMetricsWriteByNameGetOK { + return &ColumnFamilyMetricsWriteByNameGetOK{} +} + +/* +ColumnFamilyMetricsWriteByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsWriteByNameGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsWriteByNameGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsWriteByNameGetDefault creates a ColumnFamilyMetricsWriteByNameGetDefault with default headers values +func NewColumnFamilyMetricsWriteByNameGetDefault(code int) *ColumnFamilyMetricsWriteByNameGetDefault { + return &ColumnFamilyMetricsWriteByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsWriteByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write by name get default response +func (o *ColumnFamilyMetricsWriteByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_get_parameters.go new file mode 100644 index 00000000000..558bc37fd6b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteGetParams creates a new ColumnFamilyMetricsWriteGetParams object +// with the default values initialized. 
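WithHTTPClient (or the ...WithHTTPClient constructor) lets a caller supply a tuned *http.Client per request, e.g. for TLS or transport-level timeouts, instead of relying on the swagger runtime's default client. A sketch with illustrative settings, using the params type defined next:

```go
import (
	"crypto/tls"
	"net/http"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// paramsWithTunedClient swaps in a caller-owned *http.Client; the
// values below are illustrative, not recommendations.
func paramsWithTunedClient() *operations.ColumnFamilyMetricsWriteGetParams {
	hc := &http.Client{
		Timeout: 30 * time.Second,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12},
		},
	}
	return operations.NewColumnFamilyMetricsWriteGetParamsWithHTTPClient(hc)
}
```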
+func NewColumnFamilyMetricsWriteGetParams() *ColumnFamilyMetricsWriteGetParams { + + return &ColumnFamilyMetricsWriteGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteGetParams { + + return &ColumnFamilyMetricsWriteGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteGetParamsWithContext creates a new ColumnFamilyMetricsWriteGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteGetParams { + + return &ColumnFamilyMetricsWriteGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteGetParams { + + return &ColumnFamilyMetricsWriteGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteGetParams contains all the parameters to send to the API endpoint +for the column family metrics write get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write get params +func (o *ColumnFamilyMetricsWriteGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write get params +func (o *ColumnFamilyMetricsWriteGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write get params +func (o *ColumnFamilyMetricsWriteGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write get params +func (o *ColumnFamilyMetricsWriteGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write get params +func (o *ColumnFamilyMetricsWriteGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write get params +func (o *ColumnFamilyMetricsWriteGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_get_responses.go new file mode 100644 index 00000000000..4e95cc10438 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteGetReader is a Reader for the ColumnFamilyMetricsWriteGet structure. +type ColumnFamilyMetricsWriteGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsWriteGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteGetOK creates a ColumnFamilyMetricsWriteGetOK with default headers values +func NewColumnFamilyMetricsWriteGetOK() *ColumnFamilyMetricsWriteGetOK { + return &ColumnFamilyMetricsWriteGetOK{} +} + +/* +ColumnFamilyMetricsWriteGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsWriteGetOK struct { + Payload interface{} +} + +func (o *ColumnFamilyMetricsWriteGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsWriteGetDefault creates a ColumnFamilyMetricsWriteGetDefault with default headers values +func NewColumnFamilyMetricsWriteGetDefault(code int) *ColumnFamilyMetricsWriteGetDefault { + return &ColumnFamilyMetricsWriteGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsWriteGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write get default response +func (o *ColumnFamilyMetricsWriteGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_by_name_get_parameters.go new file mode 100644 index 00000000000..0bc18ec7f0f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyByNameGetParams creates a new ColumnFamilyMetricsWriteLatencyByNameGetParams object +// with the default values initialized. 
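+//
+// Example (illustrative sketch, not generator output): a typical call site
+// builds the params fluently and hands them to the generated operations
+// client; the client method name below is assumed from go-swagger naming
+// conventions.
+//
+//	params := NewColumnFamilyMetricsWriteLatencyByNameGetParams().
+//		WithTimeout(30 * time.Second).
+//		WithName("keyspace1:standard1") // keyspace:name format
+//	ok, err := client.ColumnFamilyMetricsWriteLatencyByNameGet(params) // assumed client wiring
+//	if err != nil {
+//		// non-2xx responses surface as the *Default type, which implements error
+//	}
+//	_ = ok.GetPayload() // int32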
+func NewColumnFamilyMetricsWriteLatencyByNameGetParams() *ColumnFamilyMetricsWriteLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyByNameGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds 
the name to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) WithName(name string) *ColumnFamilyMetricsWriteLatencyByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics write latency by name get params +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_by_name_get_responses.go new file mode 100644 index 00000000000..7c19b239dc5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyByNameGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyByNameGet structure. +type ColumnFamilyMetricsWriteLatencyByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsWriteLatencyByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyByNameGetOK creates a ColumnFamilyMetricsWriteLatencyByNameGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyByNameGetOK() *ColumnFamilyMetricsWriteLatencyByNameGetOK { + return &ColumnFamilyMetricsWriteLatencyByNameGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsWriteLatencyByNameGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsWriteLatencyByNameGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyByNameGetDefault creates a ColumnFamilyMetricsWriteLatencyByNameGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyByNameGetDefault(code int) *ColumnFamilyMetricsWriteLatencyByNameGetDefault { + return &ColumnFamilyMetricsWriteLatencyByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency by name get default response +func (o *ColumnFamilyMetricsWriteLatencyByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..89bd0d2ade9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams creates a new ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams() *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency estimated histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) SetContext(ctx 
context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics write latency estimated histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..3333a24cd67 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGet structure. +type ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
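+// A 200 response is decoded into the *GetOK value; any other status code is
+// decoded into the *GetDefault value, which is returned as an error unless the
+// code is still in the 2xx range.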
+func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK creates a ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK() *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK { + return &ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault creates a ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault(code int) *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault { + return &ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency estimated histogram by name get default response +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_recent_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_recent_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..1144aba7961 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_recent_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams creates a new ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams() *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency estimated recent histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the 
context to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics write latency estimated recent histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_recent_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_recent_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..a9d2aa83d49 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_estimated_recent_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGet structure. +type ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK creates a ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK() *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK { + return &ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault creates a ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault(code int) *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault { + return &ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency estimated recent histogram by name get default response +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_get_parameters.go new file mode 100644 index 00000000000..90a9f745b6f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyGetParams creates a new ColumnFamilyMetricsWriteLatencyGetParams object +// with the default values initialized. 
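+//
+// Example (illustrative sketch, not generator output): this operation takes no
+// path parameters, so the zero-argument constructor is usually sufficient; a
+// context can be attached for cancellation. The ctx variable and the client
+// method name are assumed here.
+//
+//	params := NewColumnFamilyMetricsWriteLatencyGetParamsWithContext(ctx)
+//	ok, err := client.ColumnFamilyMetricsWriteLatencyGet(params) // assumed client wiring
+//	if err == nil {
+//		_ = ok.GetPayload() // int32
+//	}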
+func NewColumnFamilyMetricsWriteLatencyGetParams() *ColumnFamilyMetricsWriteLatencyGetParams { + + return &ColumnFamilyMetricsWriteLatencyGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyGetParams { + + return &ColumnFamilyMetricsWriteLatencyGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyGetParams { + + return &ColumnFamilyMetricsWriteLatencyGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyGetParams { + + return &ColumnFamilyMetricsWriteLatencyGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency get params +func (o *ColumnFamilyMetricsWriteLatencyGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency get params +func (o *ColumnFamilyMetricsWriteLatencyGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency get params +func (o *ColumnFamilyMetricsWriteLatencyGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write latency get params +func (o *ColumnFamilyMetricsWriteLatencyGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write latency get params +func (o *ColumnFamilyMetricsWriteLatencyGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency get params +func (o *ColumnFamilyMetricsWriteLatencyGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_get_responses.go new file mode 100644 index 00000000000..283540d0db3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyGet structure. +type ColumnFamilyMetricsWriteLatencyGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsWriteLatencyGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyGetOK creates a ColumnFamilyMetricsWriteLatencyGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyGetOK() *ColumnFamilyMetricsWriteLatencyGetOK { + return &ColumnFamilyMetricsWriteLatencyGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsWriteLatencyGetOK struct { + Payload int32 +} + +func (o *ColumnFamilyMetricsWriteLatencyGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyGetDefault creates a ColumnFamilyMetricsWriteLatencyGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyGetDefault(code int) *ColumnFamilyMetricsWriteLatencyGetDefault { + return &ColumnFamilyMetricsWriteLatencyGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency get default response +func (o *ColumnFamilyMetricsWriteLatencyGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..0ce860536ab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParams creates a new ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams object +// with the default values initialized. 
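+//
+// Example (illustrative sketch, not generator output): a custom *http.Client,
+// e.g. one with its own transport-level timeout, can be injected through the
+// dedicated constructor or the WithHTTPClient setter.
+//
+//	hc := &http.Client{Timeout: 15 * time.Second}
+//	params := NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParamsWithHTTPClient(hc).
+//		WithName("keyspace1:standard1") // keyspace:name format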
+func NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParams() *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) WithHTTPClient(client *http.Client) 
*ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics write latency histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..be47259e613 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyHistogramByNameGet structure. +type ColumnFamilyMetricsWriteLatencyHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
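+// Note that the *GetOK result of this operation carries no payload: its
+// readResponse body is empty, so a successful call only signals that the
+// endpoint answered with a 2xx status.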
+func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramByNameGetOK creates a ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyHistogramByNameGetOK() *ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK { + return &ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault creates a ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault(code int) *ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault { + return &ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency histogram by name get default response +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_get_parameters.go new file mode 100644 index 00000000000..7036f3ed6b5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyHistogramGetParams creates a new ColumnFamilyMetricsWriteLatencyHistogramGetParams object +// with the default values initialized. +func NewColumnFamilyMetricsWriteLatencyHistogramGetParams() *ColumnFamilyMetricsWriteLatencyHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyHistogramGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyHistogramGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyHistogramGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyHistogramGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyHistogramGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyHistogramGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency histogram get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency histogram get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency histogram get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency histogram get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write latency histogram get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the 
HTTPClient to the column family metrics write latency histogram get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency histogram get params +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_get_responses.go new file mode 100644 index 00000000000..ac0abb0e6de --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_histogram_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyHistogramGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyHistogramGet structure. +type ColumnFamilyMetricsWriteLatencyHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyHistogramGetOK creates a ColumnFamilyMetricsWriteLatencyHistogramGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyHistogramGetOK() *ColumnFamilyMetricsWriteLatencyHistogramGetOK { + return &ColumnFamilyMetricsWriteLatencyHistogramGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsWriteLatencyHistogramGetOK struct { + Payload []*models.Histogram +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetOK) GetPayload() []*models.Histogram { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyHistogramGetDefault creates a ColumnFamilyMetricsWriteLatencyHistogramGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyHistogramGetDefault(code int) *ColumnFamilyMetricsWriteLatencyHistogramGetDefault { + return &ColumnFamilyMetricsWriteLatencyHistogramGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency histogram get default response +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_by_name_get_parameters.go new file mode 100644 index 00000000000..2a0703f103f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams() *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + var () + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency moving average histogram by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency moving average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency moving average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency moving average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write latency moving 
average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write latency moving average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency moving average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family metrics write latency moving average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) WithName(name string) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family metrics write latency moving average histogram by name get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_by_name_get_responses.go new file mode 100644 index 00000000000..dd2b1556d0b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_by_name_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGet structure. +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
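+//
+// Editorial sketch, not generator output: a 200 yields the OK type; any other
+// status is decoded into the Default type, which implements error and is
+// returned as the error value for non-2xx codes. Caller-side handling,
+// assuming the package's generated service client exposes the conventional
+// method for this operation:
+//
+//	_, err := client.ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGet(params)
+//	if err != nil {
+//		var def *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault
+//		if errors.As(err, &def) { // stdlib errors package
+//			log.Printf("HTTP %d: %s", def.Code(), def.GetPayload().Message)
+//		}
+//	}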
+func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK creates a ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK() *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK { + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK struct { +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault creates a ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault(code int) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault { + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency moving average histogram by name get default response +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_get_parameters.go new file mode 100644 index 00000000000..16c80314b05 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams object +// with the default values initialized. 
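+//
+// Editorial sketch, not generator output: a custom *http.Client can be
+// injected at construction time, e.g. to cap the transport-level timeout:
+//
+//	params := NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParamsWithHTTPClient(&http.Client{
+//		Timeout: 30 * time.Second,
+//	})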
+func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams() *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParamsWithTimeout creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParamsWithContext creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParamsWithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParamsWithHTTPClient creates a new ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams { + + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams contains all the parameters to send to the API endpoint +for the column family metrics write latency moving average histogram get operation typically these are written to a http.Request +*/ +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family metrics write latency moving average histogram get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family metrics write latency moving average histogram get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family metrics write latency moving average histogram get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) WithContext(ctx context.Context) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family metrics write latency moving average histogram get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family metrics write latency moving average histogram get params +func (o 
*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family metrics write latency moving average histogram get params +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_get_responses.go new file mode 100644 index 00000000000..9a90f03d231 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_metrics_write_latency_moving_average_histogram_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetReader is a Reader for the ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGet structure. +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK creates a ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK with default headers values +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK() *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK { + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK{} +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK struct { + Payload []*models.RateMovingAverageAndHistogram +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK) GetPayload() []*models.RateMovingAverageAndHistogram { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault creates a ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault with default headers values +func NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault(code int) *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault { + return &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family metrics write latency moving average histogram get default response +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_get_parameters.go new file mode 100644 index 00000000000..43b8084f7fb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyMinimumCompactionByNameGetParams creates a new ColumnFamilyMinimumCompactionByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilyMinimumCompactionByNameGetParams() *ColumnFamilyMinimumCompactionByNameGetParams { + var () + return &ColumnFamilyMinimumCompactionByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMinimumCompactionByNameGetParamsWithTimeout creates a new ColumnFamilyMinimumCompactionByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMinimumCompactionByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyMinimumCompactionByNameGetParams { + var () + return &ColumnFamilyMinimumCompactionByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMinimumCompactionByNameGetParamsWithContext creates a new ColumnFamilyMinimumCompactionByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMinimumCompactionByNameGetParamsWithContext(ctx context.Context) *ColumnFamilyMinimumCompactionByNameGetParams { + var () + return &ColumnFamilyMinimumCompactionByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMinimumCompactionByNameGetParamsWithHTTPClient creates a new ColumnFamilyMinimumCompactionByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMinimumCompactionByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyMinimumCompactionByNameGetParams { + var () + return &ColumnFamilyMinimumCompactionByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMinimumCompactionByNameGetParams contains all the parameters to send to the API endpoint +for the column family minimum compaction by name get operation typically these are written to a http.Request +*/ +type ColumnFamilyMinimumCompactionByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family minimum compaction by name get params +func (o *ColumnFamilyMinimumCompactionByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyMinimumCompactionByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family minimum compaction by name get params +func (o *ColumnFamilyMinimumCompactionByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family minimum compaction by name get params +func (o *ColumnFamilyMinimumCompactionByNameGetParams) WithContext(ctx context.Context) *ColumnFamilyMinimumCompactionByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family minimum compaction by name get params +func (o *ColumnFamilyMinimumCompactionByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family minimum compaction by name get params +func (o *ColumnFamilyMinimumCompactionByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyMinimumCompactionByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family minimum compaction by name get params +func (o *ColumnFamilyMinimumCompactionByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family minimum compaction by name get params +func (o 
*ColumnFamilyMinimumCompactionByNameGetParams) WithName(name string) *ColumnFamilyMinimumCompactionByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family minimum compaction by name get params +func (o *ColumnFamilyMinimumCompactionByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMinimumCompactionByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_get_responses.go new file mode 100644 index 00000000000..afcd2adf6dd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMinimumCompactionByNameGetReader is a Reader for the ColumnFamilyMinimumCompactionByNameGet structure. +type ColumnFamilyMinimumCompactionByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyMinimumCompactionByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMinimumCompactionByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMinimumCompactionByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMinimumCompactionByNameGetOK creates a ColumnFamilyMinimumCompactionByNameGetOK with default headers values +func NewColumnFamilyMinimumCompactionByNameGetOK() *ColumnFamilyMinimumCompactionByNameGetOK { + return &ColumnFamilyMinimumCompactionByNameGetOK{} +} + +/* +ColumnFamilyMinimumCompactionByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilyMinimumCompactionByNameGetOK struct { + Payload string +} + +func (o *ColumnFamilyMinimumCompactionByNameGetOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyMinimumCompactionByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMinimumCompactionByNameGetDefault creates a ColumnFamilyMinimumCompactionByNameGetDefault with default headers values +func NewColumnFamilyMinimumCompactionByNameGetDefault(code int) *ColumnFamilyMinimumCompactionByNameGetDefault { + return &ColumnFamilyMinimumCompactionByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMinimumCompactionByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMinimumCompactionByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family minimum compaction by name get default response +func (o *ColumnFamilyMinimumCompactionByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMinimumCompactionByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMinimumCompactionByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMinimumCompactionByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_post_parameters.go new file mode 100644 index 00000000000..dec22784264 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_post_parameters.go @@ -0,0 +1,162 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewColumnFamilyMinimumCompactionByNamePostParams creates a new ColumnFamilyMinimumCompactionByNamePostParams object +// with the default values initialized. 
+func NewColumnFamilyMinimumCompactionByNamePostParams() *ColumnFamilyMinimumCompactionByNamePostParams { + var () + return &ColumnFamilyMinimumCompactionByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyMinimumCompactionByNamePostParamsWithTimeout creates a new ColumnFamilyMinimumCompactionByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyMinimumCompactionByNamePostParamsWithTimeout(timeout time.Duration) *ColumnFamilyMinimumCompactionByNamePostParams { + var () + return &ColumnFamilyMinimumCompactionByNamePostParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyMinimumCompactionByNamePostParamsWithContext creates a new ColumnFamilyMinimumCompactionByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyMinimumCompactionByNamePostParamsWithContext(ctx context.Context) *ColumnFamilyMinimumCompactionByNamePostParams { + var () + return &ColumnFamilyMinimumCompactionByNamePostParams{ + + Context: ctx, + } +} + +// NewColumnFamilyMinimumCompactionByNamePostParamsWithHTTPClient creates a new ColumnFamilyMinimumCompactionByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyMinimumCompactionByNamePostParamsWithHTTPClient(client *http.Client) *ColumnFamilyMinimumCompactionByNamePostParams { + var () + return &ColumnFamilyMinimumCompactionByNamePostParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyMinimumCompactionByNamePostParams contains all the parameters to send to the API endpoint +for the column family minimum compaction by name post operation typically these are written to a http.Request +*/ +type ColumnFamilyMinimumCompactionByNamePostParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + /*Value + The minimum number of sstables in queue before compaction kicks off + + */ + Value int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) WithTimeout(timeout time.Duration) *ColumnFamilyMinimumCompactionByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) WithContext(ctx context.Context) *ColumnFamilyMinimumCompactionByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) WithHTTPClient(client *http.Client) *ColumnFamilyMinimumCompactionByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) SetHTTPClient(client 
*http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) WithName(name string) *ColumnFamilyMinimumCompactionByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) SetName(name string) { + o.Name = name +} + +// WithValue adds the value to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) WithValue(value int32) *ColumnFamilyMinimumCompactionByNamePostParams { + o.SetValue(value) + return o +} + +// SetValue adds the value to the column family minimum compaction by name post params +func (o *ColumnFamilyMinimumCompactionByNamePostParams) SetValue(value int32) { + o.Value = value +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyMinimumCompactionByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + // query param value + qrValue := o.Value + qValue := swag.FormatInt32(qrValue) + if qValue != "" { + if err := r.SetQueryParam("value", qValue); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_post_responses.go new file mode 100644 index 00000000000..3cdd5e6aa10 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_minimum_compaction_by_name_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyMinimumCompactionByNamePostReader is a Reader for the ColumnFamilyMinimumCompactionByNamePost structure. +type ColumnFamilyMinimumCompactionByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ColumnFamilyMinimumCompactionByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyMinimumCompactionByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyMinimumCompactionByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyMinimumCompactionByNamePostOK creates a ColumnFamilyMinimumCompactionByNamePostOK with default headers values +func NewColumnFamilyMinimumCompactionByNamePostOK() *ColumnFamilyMinimumCompactionByNamePostOK { + return &ColumnFamilyMinimumCompactionByNamePostOK{} +} + +/* +ColumnFamilyMinimumCompactionByNamePostOK handles this case with default header values. + +Success +*/ +type ColumnFamilyMinimumCompactionByNamePostOK struct { + Payload string +} + +func (o *ColumnFamilyMinimumCompactionByNamePostOK) GetPayload() string { + return o.Payload +} + +func (o *ColumnFamilyMinimumCompactionByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyMinimumCompactionByNamePostDefault creates a ColumnFamilyMinimumCompactionByNamePostDefault with default headers values +func NewColumnFamilyMinimumCompactionByNamePostDefault(code int) *ColumnFamilyMinimumCompactionByNamePostDefault { + return &ColumnFamilyMinimumCompactionByNamePostDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyMinimumCompactionByNamePostDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilyMinimumCompactionByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family minimum compaction by name post default response +func (o *ColumnFamilyMinimumCompactionByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyMinimumCompactionByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyMinimumCompactionByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyMinimumCompactionByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_get_parameters.go new file mode 100644 index 00000000000..56de4de4b22 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyNameGetParams creates a new ColumnFamilyNameGetParams object +// with the default values initialized. +func NewColumnFamilyNameGetParams() *ColumnFamilyNameGetParams { + + return &ColumnFamilyNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyNameGetParamsWithTimeout creates a new ColumnFamilyNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyNameGetParams { + + return &ColumnFamilyNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyNameGetParamsWithContext creates a new ColumnFamilyNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyNameGetParamsWithContext(ctx context.Context) *ColumnFamilyNameGetParams { + + return &ColumnFamilyNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyNameGetParamsWithHTTPClient creates a new ColumnFamilyNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyNameGetParams { + + return &ColumnFamilyNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyNameGetParams contains all the parameters to send to the API endpoint +for the column family name get operation typically these are written to a http.Request +*/ +type ColumnFamilyNameGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family name get params +func (o *ColumnFamilyNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family name get params +func (o *ColumnFamilyNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family name get params +func (o *ColumnFamilyNameGetParams) WithContext(ctx context.Context) *ColumnFamilyNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family name get params +func (o *ColumnFamilyNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family name get params +func (o *ColumnFamilyNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family name get params +func (o *ColumnFamilyNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_get_responses.go new file mode 100644 index 00000000000..b924823299c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyNameGetReader is a Reader for the ColumnFamilyNameGet structure. +type ColumnFamilyNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyNameGetOK creates a ColumnFamilyNameGetOK with default headers values +func NewColumnFamilyNameGetOK() *ColumnFamilyNameGetOK { + return &ColumnFamilyNameGetOK{} +} + +/* +ColumnFamilyNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyNameGetOK struct { + Payload []string +} + +func (o *ColumnFamilyNameGetOK) GetPayload() []string { + return o.Payload +} + +func (o *ColumnFamilyNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyNameGetDefault creates a ColumnFamilyNameGetDefault with default headers values +func NewColumnFamilyNameGetDefault(code int) *ColumnFamilyNameGetDefault { + return &ColumnFamilyNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family name get default response +func (o *ColumnFamilyNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_keyspace_get_parameters.go new file mode 100644 index 00000000000..b121be02876 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_keyspace_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilyNameKeyspaceGetParams creates a new ColumnFamilyNameKeyspaceGetParams object +// with the default values initialized. 
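+//
+// Editorial sketch, not generator output: this operation has no
+// operation-specific parameters, so only the transport knobs apply; ctx here
+// is a caller-supplied context.Context:
+//
+//	params := NewColumnFamilyNameKeyspaceGetParamsWithContext(ctx)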
+func NewColumnFamilyNameKeyspaceGetParams() *ColumnFamilyNameKeyspaceGetParams { + + return &ColumnFamilyNameKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilyNameKeyspaceGetParamsWithTimeout creates a new ColumnFamilyNameKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilyNameKeyspaceGetParamsWithTimeout(timeout time.Duration) *ColumnFamilyNameKeyspaceGetParams { + + return &ColumnFamilyNameKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilyNameKeyspaceGetParamsWithContext creates a new ColumnFamilyNameKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilyNameKeyspaceGetParamsWithContext(ctx context.Context) *ColumnFamilyNameKeyspaceGetParams { + + return &ColumnFamilyNameKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilyNameKeyspaceGetParamsWithHTTPClient creates a new ColumnFamilyNameKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilyNameKeyspaceGetParamsWithHTTPClient(client *http.Client) *ColumnFamilyNameKeyspaceGetParams { + + return &ColumnFamilyNameKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilyNameKeyspaceGetParams contains all the parameters to send to the API endpoint +for the column family name keyspace get operation typically these are written to a http.Request +*/ +type ColumnFamilyNameKeyspaceGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family name keyspace get params +func (o *ColumnFamilyNameKeyspaceGetParams) WithTimeout(timeout time.Duration) *ColumnFamilyNameKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family name keyspace get params +func (o *ColumnFamilyNameKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family name keyspace get params +func (o *ColumnFamilyNameKeyspaceGetParams) WithContext(ctx context.Context) *ColumnFamilyNameKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family name keyspace get params +func (o *ColumnFamilyNameKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family name keyspace get params +func (o *ColumnFamilyNameKeyspaceGetParams) WithHTTPClient(client *http.Client) *ColumnFamilyNameKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family name keyspace get params +func (o *ColumnFamilyNameKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilyNameKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_keyspace_get_responses.go new file mode 100644 index 00000000000..8738cb5ec90 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_name_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilyNameKeyspaceGetReader is a Reader for the ColumnFamilyNameKeyspaceGet structure. +type ColumnFamilyNameKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilyNameKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilyNameKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilyNameKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilyNameKeyspaceGetOK creates a ColumnFamilyNameKeyspaceGetOK with default headers values +func NewColumnFamilyNameKeyspaceGetOK() *ColumnFamilyNameKeyspaceGetOK { + return &ColumnFamilyNameKeyspaceGetOK{} +} + +/* +ColumnFamilyNameKeyspaceGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilyNameKeyspaceGetOK struct { + Payload []string +} + +func (o *ColumnFamilyNameKeyspaceGetOK) GetPayload() []string { + return o.Payload +} + +func (o *ColumnFamilyNameKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilyNameKeyspaceGetDefault creates a ColumnFamilyNameKeyspaceGetDefault with default headers values +func NewColumnFamilyNameKeyspaceGetDefault(code int) *ColumnFamilyNameKeyspaceGetDefault { + return &ColumnFamilyNameKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilyNameKeyspaceGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilyNameKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family name keyspace get default response +func (o *ColumnFamilyNameKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilyNameKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilyNameKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilyNameKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_by_key_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_by_key_by_name_get_parameters.go new file mode 100644 index 00000000000..63c5c8eb73d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_by_key_by_name_get_parameters.go @@ -0,0 +1,161 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilySstablesByKeyByNameGetParams creates a new ColumnFamilySstablesByKeyByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilySstablesByKeyByNameGetParams() *ColumnFamilySstablesByKeyByNameGetParams { + var () + return &ColumnFamilySstablesByKeyByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilySstablesByKeyByNameGetParamsWithTimeout creates a new ColumnFamilySstablesByKeyByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilySstablesByKeyByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilySstablesByKeyByNameGetParams { + var () + return &ColumnFamilySstablesByKeyByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilySstablesByKeyByNameGetParamsWithContext creates a new ColumnFamilySstablesByKeyByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilySstablesByKeyByNameGetParamsWithContext(ctx context.Context) *ColumnFamilySstablesByKeyByNameGetParams { + var () + return &ColumnFamilySstablesByKeyByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilySstablesByKeyByNameGetParamsWithHTTPClient creates a new ColumnFamilySstablesByKeyByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilySstablesByKeyByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilySstablesByKeyByNameGetParams { + var () + return &ColumnFamilySstablesByKeyByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilySstablesByKeyByNameGetParams contains all the parameters to send to the API endpoint +for the column family sstables by key by name get operation typically these are written to a http.Request +*/ +type ColumnFamilySstablesByKeyByNameGetParams struct { + + /*Key + The partition key. In a composite-key scenario, use ':' to separate the columns in the key. 
+ + */ + Key string + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilySstablesByKeyByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) WithContext(ctx context.Context) *ColumnFamilySstablesByKeyByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilySstablesByKeyByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithKey adds the key to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) WithKey(key string) *ColumnFamilySstablesByKeyByNameGetParams { + o.SetKey(key) + return o +} + +// SetKey adds the key to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) SetKey(key string) { + o.Key = key +} + +// WithName adds the name to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) WithName(name string) *ColumnFamilySstablesByKeyByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family sstables by key by name get params +func (o *ColumnFamilySstablesByKeyByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilySstablesByKeyByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param key + qrKey := o.Key + qKey := qrKey + if qKey != "" { + if err := r.SetQueryParam("key", qKey); err != nil { + return err + } + } + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_by_key_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_by_key_by_name_get_responses.go new file mode 100644 index 00000000000..471485187e3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_by_key_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilySstablesByKeyByNameGetReader is a Reader for the ColumnFamilySstablesByKeyByNameGet structure. +type ColumnFamilySstablesByKeyByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilySstablesByKeyByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilySstablesByKeyByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilySstablesByKeyByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilySstablesByKeyByNameGetOK creates a ColumnFamilySstablesByKeyByNameGetOK with default headers values +func NewColumnFamilySstablesByKeyByNameGetOK() *ColumnFamilySstablesByKeyByNameGetOK { + return &ColumnFamilySstablesByKeyByNameGetOK{} +} + +/* +ColumnFamilySstablesByKeyByNameGetOK handles this case with default header values. + +Success +*/ +type ColumnFamilySstablesByKeyByNameGetOK struct { + Payload []string +} + +func (o *ColumnFamilySstablesByKeyByNameGetOK) GetPayload() []string { + return o.Payload +} + +func (o *ColumnFamilySstablesByKeyByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilySstablesByKeyByNameGetDefault creates a ColumnFamilySstablesByKeyByNameGetDefault with default headers values +func NewColumnFamilySstablesByKeyByNameGetDefault(code int) *ColumnFamilySstablesByKeyByNameGetDefault { + return &ColumnFamilySstablesByKeyByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilySstablesByKeyByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type ColumnFamilySstablesByKeyByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family sstables by key by name get default response +func (o *ColumnFamilySstablesByKeyByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilySstablesByKeyByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilySstablesByKeyByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilySstablesByKeyByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_per_level_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_per_level_by_name_get_parameters.go new file mode 100644 index 00000000000..14530689c85 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_per_level_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilySstablesPerLevelByNameGetParams creates a new ColumnFamilySstablesPerLevelByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilySstablesPerLevelByNameGetParams() *ColumnFamilySstablesPerLevelByNameGetParams { + var () + return &ColumnFamilySstablesPerLevelByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilySstablesPerLevelByNameGetParamsWithTimeout creates a new ColumnFamilySstablesPerLevelByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilySstablesPerLevelByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilySstablesPerLevelByNameGetParams { + var () + return &ColumnFamilySstablesPerLevelByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilySstablesPerLevelByNameGetParamsWithContext creates a new ColumnFamilySstablesPerLevelByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilySstablesPerLevelByNameGetParamsWithContext(ctx context.Context) *ColumnFamilySstablesPerLevelByNameGetParams { + var () + return &ColumnFamilySstablesPerLevelByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilySstablesPerLevelByNameGetParamsWithHTTPClient creates a new ColumnFamilySstablesPerLevelByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilySstablesPerLevelByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilySstablesPerLevelByNameGetParams { + var () + return &ColumnFamilySstablesPerLevelByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilySstablesPerLevelByNameGetParams contains all the parameters to send to the API endpoint +for the column family sstables per level by name get operation typically these are written to a http.Request +*/ +type ColumnFamilySstablesPerLevelByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family sstables per level by name get params +func (o *ColumnFamilySstablesPerLevelByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilySstablesPerLevelByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family sstables per level by name get params +func (o *ColumnFamilySstablesPerLevelByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family sstables per level by name get params +func (o *ColumnFamilySstablesPerLevelByNameGetParams) WithContext(ctx context.Context) *ColumnFamilySstablesPerLevelByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family sstables per level by name get params +func (o *ColumnFamilySstablesPerLevelByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family sstables per level by name get params +func (o *ColumnFamilySstablesPerLevelByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilySstablesPerLevelByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family sstables per level by name get params +func (o *ColumnFamilySstablesPerLevelByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family sstables per level by name get params +func (o 
*ColumnFamilySstablesPerLevelByNameGetParams) WithName(name string) *ColumnFamilySstablesPerLevelByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family sstables per level by name get params +func (o *ColumnFamilySstablesPerLevelByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilySstablesPerLevelByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_per_level_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_per_level_by_name_get_responses.go new file mode 100644 index 00000000000..930f0a5393c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_per_level_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilySstablesPerLevelByNameGetReader is a Reader for the ColumnFamilySstablesPerLevelByNameGet structure. +type ColumnFamilySstablesPerLevelByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilySstablesPerLevelByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilySstablesPerLevelByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilySstablesPerLevelByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilySstablesPerLevelByNameGetOK creates a ColumnFamilySstablesPerLevelByNameGetOK with default headers values +func NewColumnFamilySstablesPerLevelByNameGetOK() *ColumnFamilySstablesPerLevelByNameGetOK { + return &ColumnFamilySstablesPerLevelByNameGetOK{} +} + +/* +ColumnFamilySstablesPerLevelByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilySstablesPerLevelByNameGetOK struct { + Payload []int32 +} + +func (o *ColumnFamilySstablesPerLevelByNameGetOK) GetPayload() []int32 { + return o.Payload +} + +func (o *ColumnFamilySstablesPerLevelByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilySstablesPerLevelByNameGetDefault creates a ColumnFamilySstablesPerLevelByNameGetDefault with default headers values +func NewColumnFamilySstablesPerLevelByNameGetDefault(code int) *ColumnFamilySstablesPerLevelByNameGetDefault { + return &ColumnFamilySstablesPerLevelByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilySstablesPerLevelByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilySstablesPerLevelByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family sstables per level by name get default response +func (o *ColumnFamilySstablesPerLevelByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilySstablesPerLevelByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilySstablesPerLevelByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilySstablesPerLevelByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_unleveled_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_unleveled_by_name_get_parameters.go new file mode 100644 index 00000000000..54db8c307bd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_unleveled_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewColumnFamilySstablesUnleveledByNameGetParams creates a new ColumnFamilySstablesUnleveledByNameGetParams object +// with the default values initialized. 
+func NewColumnFamilySstablesUnleveledByNameGetParams() *ColumnFamilySstablesUnleveledByNameGetParams { + var () + return &ColumnFamilySstablesUnleveledByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewColumnFamilySstablesUnleveledByNameGetParamsWithTimeout creates a new ColumnFamilySstablesUnleveledByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewColumnFamilySstablesUnleveledByNameGetParamsWithTimeout(timeout time.Duration) *ColumnFamilySstablesUnleveledByNameGetParams { + var () + return &ColumnFamilySstablesUnleveledByNameGetParams{ + + timeout: timeout, + } +} + +// NewColumnFamilySstablesUnleveledByNameGetParamsWithContext creates a new ColumnFamilySstablesUnleveledByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewColumnFamilySstablesUnleveledByNameGetParamsWithContext(ctx context.Context) *ColumnFamilySstablesUnleveledByNameGetParams { + var () + return &ColumnFamilySstablesUnleveledByNameGetParams{ + + Context: ctx, + } +} + +// NewColumnFamilySstablesUnleveledByNameGetParamsWithHTTPClient creates a new ColumnFamilySstablesUnleveledByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewColumnFamilySstablesUnleveledByNameGetParamsWithHTTPClient(client *http.Client) *ColumnFamilySstablesUnleveledByNameGetParams { + var () + return &ColumnFamilySstablesUnleveledByNameGetParams{ + HTTPClient: client, + } +} + +/* +ColumnFamilySstablesUnleveledByNameGetParams contains all the parameters to send to the API endpoint +for the column family sstables unleveled by name get operation typically these are written to a http.Request +*/ +type ColumnFamilySstablesUnleveledByNameGetParams struct { + + /*Name + The column family name in keyspace:name format + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the column family sstables unleveled by name get params +func (o *ColumnFamilySstablesUnleveledByNameGetParams) WithTimeout(timeout time.Duration) *ColumnFamilySstablesUnleveledByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the column family sstables unleveled by name get params +func (o *ColumnFamilySstablesUnleveledByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the column family sstables unleveled by name get params +func (o *ColumnFamilySstablesUnleveledByNameGetParams) WithContext(ctx context.Context) *ColumnFamilySstablesUnleveledByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the column family sstables unleveled by name get params +func (o *ColumnFamilySstablesUnleveledByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the column family sstables unleveled by name get params +func (o *ColumnFamilySstablesUnleveledByNameGetParams) WithHTTPClient(client *http.Client) *ColumnFamilySstablesUnleveledByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the column family sstables unleveled by name get params +func (o *ColumnFamilySstablesUnleveledByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the column family sstables unleveled by name get params +func (o 
*ColumnFamilySstablesUnleveledByNameGetParams) WithName(name string) *ColumnFamilySstablesUnleveledByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the column family sstables unleveled by name get params +func (o *ColumnFamilySstablesUnleveledByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *ColumnFamilySstablesUnleveledByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_unleveled_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_unleveled_by_name_get_responses.go new file mode 100644 index 00000000000..53e94c4f36d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/column_family_sstables_unleveled_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// ColumnFamilySstablesUnleveledByNameGetReader is a Reader for the ColumnFamilySstablesUnleveledByNameGet structure. +type ColumnFamilySstablesUnleveledByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *ColumnFamilySstablesUnleveledByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewColumnFamilySstablesUnleveledByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewColumnFamilySstablesUnleveledByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewColumnFamilySstablesUnleveledByNameGetOK creates a ColumnFamilySstablesUnleveledByNameGetOK with default headers values +func NewColumnFamilySstablesUnleveledByNameGetOK() *ColumnFamilySstablesUnleveledByNameGetOK { + return &ColumnFamilySstablesUnleveledByNameGetOK{} +} + +/* +ColumnFamilySstablesUnleveledByNameGetOK handles this case with default header values. 
+ +Success +*/ +type ColumnFamilySstablesUnleveledByNameGetOK struct { + Payload []string +} + +func (o *ColumnFamilySstablesUnleveledByNameGetOK) GetPayload() []string { + return o.Payload +} + +func (o *ColumnFamilySstablesUnleveledByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewColumnFamilySstablesUnleveledByNameGetDefault creates a ColumnFamilySstablesUnleveledByNameGetDefault with default headers values +func NewColumnFamilySstablesUnleveledByNameGetDefault(code int) *ColumnFamilySstablesUnleveledByNameGetDefault { + return &ColumnFamilySstablesUnleveledByNameGetDefault{ + _statusCode: code, + } +} + +/* +ColumnFamilySstablesUnleveledByNameGetDefault handles this case with default header values. + +internal server error +*/ +type ColumnFamilySstablesUnleveledByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the column family sstables unleveled by name get default response +func (o *ColumnFamilySstablesUnleveledByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *ColumnFamilySstablesUnleveledByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *ColumnFamilySstablesUnleveledByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *ColumnFamilySstablesUnleveledByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_commit_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_commit_get_parameters.go new file mode 100644 index 00000000000..cb2a3e04b7a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_commit_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitLogMetricsWaitingOnCommitGetParams creates a new CommitLogMetricsWaitingOnCommitGetParams object +// with the default values initialized. 
+func NewCommitLogMetricsWaitingOnCommitGetParams() *CommitLogMetricsWaitingOnCommitGetParams { + + return &CommitLogMetricsWaitingOnCommitGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitLogMetricsWaitingOnCommitGetParamsWithTimeout creates a new CommitLogMetricsWaitingOnCommitGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitLogMetricsWaitingOnCommitGetParamsWithTimeout(timeout time.Duration) *CommitLogMetricsWaitingOnCommitGetParams { + + return &CommitLogMetricsWaitingOnCommitGetParams{ + + timeout: timeout, + } +} + +// NewCommitLogMetricsWaitingOnCommitGetParamsWithContext creates a new CommitLogMetricsWaitingOnCommitGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitLogMetricsWaitingOnCommitGetParamsWithContext(ctx context.Context) *CommitLogMetricsWaitingOnCommitGetParams { + + return &CommitLogMetricsWaitingOnCommitGetParams{ + + Context: ctx, + } +} + +// NewCommitLogMetricsWaitingOnCommitGetParamsWithHTTPClient creates a new CommitLogMetricsWaitingOnCommitGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitLogMetricsWaitingOnCommitGetParamsWithHTTPClient(client *http.Client) *CommitLogMetricsWaitingOnCommitGetParams { + + return &CommitLogMetricsWaitingOnCommitGetParams{ + HTTPClient: client, + } +} + +/* +CommitLogMetricsWaitingOnCommitGetParams contains all the parameters to send to the API endpoint +for the commit log metrics waiting on commit get operation typically these are written to a http.Request +*/ +type CommitLogMetricsWaitingOnCommitGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commit log metrics waiting on commit get params +func (o *CommitLogMetricsWaitingOnCommitGetParams) WithTimeout(timeout time.Duration) *CommitLogMetricsWaitingOnCommitGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commit log metrics waiting on commit get params +func (o *CommitLogMetricsWaitingOnCommitGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commit log metrics waiting on commit get params +func (o *CommitLogMetricsWaitingOnCommitGetParams) WithContext(ctx context.Context) *CommitLogMetricsWaitingOnCommitGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commit log metrics waiting on commit get params +func (o *CommitLogMetricsWaitingOnCommitGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commit log metrics waiting on commit get params +func (o *CommitLogMetricsWaitingOnCommitGetParams) WithHTTPClient(client *http.Client) *CommitLogMetricsWaitingOnCommitGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commit log metrics waiting on commit get params +func (o *CommitLogMetricsWaitingOnCommitGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitLogMetricsWaitingOnCommitGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_commit_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_commit_get_responses.go new file mode 100644 index 00000000000..c44b5ba0711 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_commit_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitLogMetricsWaitingOnCommitGetReader is a Reader for the CommitLogMetricsWaitingOnCommitGet structure. +type CommitLogMetricsWaitingOnCommitGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitLogMetricsWaitingOnCommitGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitLogMetricsWaitingOnCommitGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitLogMetricsWaitingOnCommitGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitLogMetricsWaitingOnCommitGetOK creates a CommitLogMetricsWaitingOnCommitGetOK with default headers values +func NewCommitLogMetricsWaitingOnCommitGetOK() *CommitLogMetricsWaitingOnCommitGetOK { + return &CommitLogMetricsWaitingOnCommitGetOK{} +} + +/* +CommitLogMetricsWaitingOnCommitGetOK handles this case with default header values. + +Success +*/ +type CommitLogMetricsWaitingOnCommitGetOK struct { +} + +func (o *CommitLogMetricsWaitingOnCommitGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCommitLogMetricsWaitingOnCommitGetDefault creates a CommitLogMetricsWaitingOnCommitGetDefault with default headers values +func NewCommitLogMetricsWaitingOnCommitGetDefault(code int) *CommitLogMetricsWaitingOnCommitGetDefault { + return &CommitLogMetricsWaitingOnCommitGetDefault{ + _statusCode: code, + } +} + +/* +CommitLogMetricsWaitingOnCommitGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitLogMetricsWaitingOnCommitGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commit log metrics waiting on commit get default response +func (o *CommitLogMetricsWaitingOnCommitGetDefault) Code() int { + return o._statusCode +} + +func (o *CommitLogMetricsWaitingOnCommitGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitLogMetricsWaitingOnCommitGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitLogMetricsWaitingOnCommitGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_segment_allocation_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_segment_allocation_get_parameters.go new file mode 100644 index 00000000000..be54bc4b74e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_segment_allocation_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitLogMetricsWaitingOnSegmentAllocationGetParams creates a new CommitLogMetricsWaitingOnSegmentAllocationGetParams object +// with the default values initialized. 
+func NewCommitLogMetricsWaitingOnSegmentAllocationGetParams() *CommitLogMetricsWaitingOnSegmentAllocationGetParams { + + return &CommitLogMetricsWaitingOnSegmentAllocationGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitLogMetricsWaitingOnSegmentAllocationGetParamsWithTimeout creates a new CommitLogMetricsWaitingOnSegmentAllocationGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitLogMetricsWaitingOnSegmentAllocationGetParamsWithTimeout(timeout time.Duration) *CommitLogMetricsWaitingOnSegmentAllocationGetParams { + + return &CommitLogMetricsWaitingOnSegmentAllocationGetParams{ + + timeout: timeout, + } +} + +// NewCommitLogMetricsWaitingOnSegmentAllocationGetParamsWithContext creates a new CommitLogMetricsWaitingOnSegmentAllocationGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitLogMetricsWaitingOnSegmentAllocationGetParamsWithContext(ctx context.Context) *CommitLogMetricsWaitingOnSegmentAllocationGetParams { + + return &CommitLogMetricsWaitingOnSegmentAllocationGetParams{ + + Context: ctx, + } +} + +// NewCommitLogMetricsWaitingOnSegmentAllocationGetParamsWithHTTPClient creates a new CommitLogMetricsWaitingOnSegmentAllocationGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitLogMetricsWaitingOnSegmentAllocationGetParamsWithHTTPClient(client *http.Client) *CommitLogMetricsWaitingOnSegmentAllocationGetParams { + + return &CommitLogMetricsWaitingOnSegmentAllocationGetParams{ + HTTPClient: client, + } +} + +/* +CommitLogMetricsWaitingOnSegmentAllocationGetParams contains all the parameters to send to the API endpoint +for the commit log metrics waiting on segment allocation get operation typically these are written to a http.Request +*/ +type CommitLogMetricsWaitingOnSegmentAllocationGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commit log metrics waiting on segment allocation get params +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetParams) WithTimeout(timeout time.Duration) *CommitLogMetricsWaitingOnSegmentAllocationGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commit log metrics waiting on segment allocation get params +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commit log metrics waiting on segment allocation get params +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetParams) WithContext(ctx context.Context) *CommitLogMetricsWaitingOnSegmentAllocationGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commit log metrics waiting on segment allocation get params +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commit log metrics waiting on segment allocation get params +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetParams) WithHTTPClient(client *http.Client) *CommitLogMetricsWaitingOnSegmentAllocationGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commit log metrics waiting on segment allocation get params +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_segment_allocation_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_segment_allocation_get_responses.go new file mode 100644 index 00000000000..3c90867471e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commit_log_metrics_waiting_on_segment_allocation_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitLogMetricsWaitingOnSegmentAllocationGetReader is a Reader for the CommitLogMetricsWaitingOnSegmentAllocationGet structure. +type CommitLogMetricsWaitingOnSegmentAllocationGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitLogMetricsWaitingOnSegmentAllocationGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitLogMetricsWaitingOnSegmentAllocationGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitLogMetricsWaitingOnSegmentAllocationGetOK creates a CommitLogMetricsWaitingOnSegmentAllocationGetOK with default headers values +func NewCommitLogMetricsWaitingOnSegmentAllocationGetOK() *CommitLogMetricsWaitingOnSegmentAllocationGetOK { + return &CommitLogMetricsWaitingOnSegmentAllocationGetOK{} +} + +/* +CommitLogMetricsWaitingOnSegmentAllocationGetOK handles this case with default header values. + +Success +*/ +type CommitLogMetricsWaitingOnSegmentAllocationGetOK struct { +} + +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCommitLogMetricsWaitingOnSegmentAllocationGetDefault creates a CommitLogMetricsWaitingOnSegmentAllocationGetDefault with default headers values +func NewCommitLogMetricsWaitingOnSegmentAllocationGetDefault(code int) *CommitLogMetricsWaitingOnSegmentAllocationGetDefault { + return &CommitLogMetricsWaitingOnSegmentAllocationGetDefault{ + _statusCode: code, + } +} + +/* +CommitLogMetricsWaitingOnSegmentAllocationGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitLogMetricsWaitingOnSegmentAllocationGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commit log metrics waiting on segment allocation get default response +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetDefault) Code() int { + return o._statusCode +} + +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitLogMetricsWaitingOnSegmentAllocationGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_completed_tasks_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_completed_tasks_get_parameters.go new file mode 100644 index 00000000000..f76fad52da7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_completed_tasks_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitlogMetricsCompletedTasksGetParams creates a new CommitlogMetricsCompletedTasksGetParams object +// with the default values initialized. 
+func NewCommitlogMetricsCompletedTasksGetParams() *CommitlogMetricsCompletedTasksGetParams { + + return &CommitlogMetricsCompletedTasksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitlogMetricsCompletedTasksGetParamsWithTimeout creates a new CommitlogMetricsCompletedTasksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitlogMetricsCompletedTasksGetParamsWithTimeout(timeout time.Duration) *CommitlogMetricsCompletedTasksGetParams { + + return &CommitlogMetricsCompletedTasksGetParams{ + + timeout: timeout, + } +} + +// NewCommitlogMetricsCompletedTasksGetParamsWithContext creates a new CommitlogMetricsCompletedTasksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitlogMetricsCompletedTasksGetParamsWithContext(ctx context.Context) *CommitlogMetricsCompletedTasksGetParams { + + return &CommitlogMetricsCompletedTasksGetParams{ + + Context: ctx, + } +} + +// NewCommitlogMetricsCompletedTasksGetParamsWithHTTPClient creates a new CommitlogMetricsCompletedTasksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitlogMetricsCompletedTasksGetParamsWithHTTPClient(client *http.Client) *CommitlogMetricsCompletedTasksGetParams { + + return &CommitlogMetricsCompletedTasksGetParams{ + HTTPClient: client, + } +} + +/* +CommitlogMetricsCompletedTasksGetParams contains all the parameters to send to the API endpoint +for the commitlog metrics completed tasks get operation typically these are written to a http.Request +*/ +type CommitlogMetricsCompletedTasksGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commitlog metrics completed tasks get params +func (o *CommitlogMetricsCompletedTasksGetParams) WithTimeout(timeout time.Duration) *CommitlogMetricsCompletedTasksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commitlog metrics completed tasks get params +func (o *CommitlogMetricsCompletedTasksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commitlog metrics completed tasks get params +func (o *CommitlogMetricsCompletedTasksGetParams) WithContext(ctx context.Context) *CommitlogMetricsCompletedTasksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commitlog metrics completed tasks get params +func (o *CommitlogMetricsCompletedTasksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commitlog metrics completed tasks get params +func (o *CommitlogMetricsCompletedTasksGetParams) WithHTTPClient(client *http.Client) *CommitlogMetricsCompletedTasksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commitlog metrics completed tasks get params +func (o *CommitlogMetricsCompletedTasksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitlogMetricsCompletedTasksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_completed_tasks_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_completed_tasks_get_responses.go new file mode 100644 index 00000000000..dc05c56bd5c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_completed_tasks_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitlogMetricsCompletedTasksGetReader is a Reader for the CommitlogMetricsCompletedTasksGet structure. +type CommitlogMetricsCompletedTasksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitlogMetricsCompletedTasksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitlogMetricsCompletedTasksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitlogMetricsCompletedTasksGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitlogMetricsCompletedTasksGetOK creates a CommitlogMetricsCompletedTasksGetOK with default headers values +func NewCommitlogMetricsCompletedTasksGetOK() *CommitlogMetricsCompletedTasksGetOK { + return &CommitlogMetricsCompletedTasksGetOK{} +} + +/* +CommitlogMetricsCompletedTasksGetOK handles this case with default header values. + +Success +*/ +type CommitlogMetricsCompletedTasksGetOK struct { + Payload interface{} +} + +func (o *CommitlogMetricsCompletedTasksGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CommitlogMetricsCompletedTasksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCommitlogMetricsCompletedTasksGetDefault creates a CommitlogMetricsCompletedTasksGetDefault with default headers values +func NewCommitlogMetricsCompletedTasksGetDefault(code int) *CommitlogMetricsCompletedTasksGetDefault { + return &CommitlogMetricsCompletedTasksGetDefault{ + _statusCode: code, + } +} + +/* +CommitlogMetricsCompletedTasksGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitlogMetricsCompletedTasksGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commitlog metrics completed tasks get default response +func (o *CommitlogMetricsCompletedTasksGetDefault) Code() int { + return o._statusCode +} + +func (o *CommitlogMetricsCompletedTasksGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitlogMetricsCompletedTasksGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitlogMetricsCompletedTasksGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_pending_tasks_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_pending_tasks_get_parameters.go new file mode 100644 index 00000000000..ef280c9289b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_pending_tasks_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitlogMetricsPendingTasksGetParams creates a new CommitlogMetricsPendingTasksGetParams object +// with the default values initialized. 
+func NewCommitlogMetricsPendingTasksGetParams() *CommitlogMetricsPendingTasksGetParams { + + return &CommitlogMetricsPendingTasksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitlogMetricsPendingTasksGetParamsWithTimeout creates a new CommitlogMetricsPendingTasksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitlogMetricsPendingTasksGetParamsWithTimeout(timeout time.Duration) *CommitlogMetricsPendingTasksGetParams { + + return &CommitlogMetricsPendingTasksGetParams{ + + timeout: timeout, + } +} + +// NewCommitlogMetricsPendingTasksGetParamsWithContext creates a new CommitlogMetricsPendingTasksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitlogMetricsPendingTasksGetParamsWithContext(ctx context.Context) *CommitlogMetricsPendingTasksGetParams { + + return &CommitlogMetricsPendingTasksGetParams{ + + Context: ctx, + } +} + +// NewCommitlogMetricsPendingTasksGetParamsWithHTTPClient creates a new CommitlogMetricsPendingTasksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitlogMetricsPendingTasksGetParamsWithHTTPClient(client *http.Client) *CommitlogMetricsPendingTasksGetParams { + + return &CommitlogMetricsPendingTasksGetParams{ + HTTPClient: client, + } +} + +/* +CommitlogMetricsPendingTasksGetParams contains all the parameters to send to the API endpoint +for the commitlog metrics pending tasks get operation typically these are written to a http.Request +*/ +type CommitlogMetricsPendingTasksGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commitlog metrics pending tasks get params +func (o *CommitlogMetricsPendingTasksGetParams) WithTimeout(timeout time.Duration) *CommitlogMetricsPendingTasksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commitlog metrics pending tasks get params +func (o *CommitlogMetricsPendingTasksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commitlog metrics pending tasks get params +func (o *CommitlogMetricsPendingTasksGetParams) WithContext(ctx context.Context) *CommitlogMetricsPendingTasksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commitlog metrics pending tasks get params +func (o *CommitlogMetricsPendingTasksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commitlog metrics pending tasks get params +func (o *CommitlogMetricsPendingTasksGetParams) WithHTTPClient(client *http.Client) *CommitlogMetricsPendingTasksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commitlog metrics pending tasks get params +func (o *CommitlogMetricsPendingTasksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitlogMetricsPendingTasksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_pending_tasks_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_pending_tasks_get_responses.go new file mode 100644 index 00000000000..9323a0e1ff3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_pending_tasks_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitlogMetricsPendingTasksGetReader is a Reader for the CommitlogMetricsPendingTasksGet structure. +type CommitlogMetricsPendingTasksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitlogMetricsPendingTasksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitlogMetricsPendingTasksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitlogMetricsPendingTasksGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitlogMetricsPendingTasksGetOK creates a CommitlogMetricsPendingTasksGetOK with default headers values +func NewCommitlogMetricsPendingTasksGetOK() *CommitlogMetricsPendingTasksGetOK { + return &CommitlogMetricsPendingTasksGetOK{} +} + +/* +CommitlogMetricsPendingTasksGetOK handles this case with default header values. + +Success +*/ +type CommitlogMetricsPendingTasksGetOK struct { + Payload interface{} +} + +func (o *CommitlogMetricsPendingTasksGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CommitlogMetricsPendingTasksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCommitlogMetricsPendingTasksGetDefault creates a CommitlogMetricsPendingTasksGetDefault with default headers values +func NewCommitlogMetricsPendingTasksGetDefault(code int) *CommitlogMetricsPendingTasksGetDefault { + return &CommitlogMetricsPendingTasksGetDefault{ + _statusCode: code, + } +} + +/* +CommitlogMetricsPendingTasksGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitlogMetricsPendingTasksGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commitlog metrics pending tasks get default response +func (o *CommitlogMetricsPendingTasksGetDefault) Code() int { + return o._statusCode +} + +func (o *CommitlogMetricsPendingTasksGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitlogMetricsPendingTasksGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitlogMetricsPendingTasksGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_total_commit_log_size_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_total_commit_log_size_get_parameters.go new file mode 100644 index 00000000000..c1ddab72215 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_total_commit_log_size_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitlogMetricsTotalCommitLogSizeGetParams creates a new CommitlogMetricsTotalCommitLogSizeGetParams object +// with the default values initialized. 
+func NewCommitlogMetricsTotalCommitLogSizeGetParams() *CommitlogMetricsTotalCommitLogSizeGetParams { + + return &CommitlogMetricsTotalCommitLogSizeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitlogMetricsTotalCommitLogSizeGetParamsWithTimeout creates a new CommitlogMetricsTotalCommitLogSizeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitlogMetricsTotalCommitLogSizeGetParamsWithTimeout(timeout time.Duration) *CommitlogMetricsTotalCommitLogSizeGetParams { + + return &CommitlogMetricsTotalCommitLogSizeGetParams{ + + timeout: timeout, + } +} + +// NewCommitlogMetricsTotalCommitLogSizeGetParamsWithContext creates a new CommitlogMetricsTotalCommitLogSizeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitlogMetricsTotalCommitLogSizeGetParamsWithContext(ctx context.Context) *CommitlogMetricsTotalCommitLogSizeGetParams { + + return &CommitlogMetricsTotalCommitLogSizeGetParams{ + + Context: ctx, + } +} + +// NewCommitlogMetricsTotalCommitLogSizeGetParamsWithHTTPClient creates a new CommitlogMetricsTotalCommitLogSizeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitlogMetricsTotalCommitLogSizeGetParamsWithHTTPClient(client *http.Client) *CommitlogMetricsTotalCommitLogSizeGetParams { + + return &CommitlogMetricsTotalCommitLogSizeGetParams{ + HTTPClient: client, + } +} + +/* +CommitlogMetricsTotalCommitLogSizeGetParams contains all the parameters to send to the API endpoint +for the commitlog metrics total commit log size get operation typically these are written to a http.Request +*/ +type CommitlogMetricsTotalCommitLogSizeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commitlog metrics total commit log size get params +func (o *CommitlogMetricsTotalCommitLogSizeGetParams) WithTimeout(timeout time.Duration) *CommitlogMetricsTotalCommitLogSizeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commitlog metrics total commit log size get params +func (o *CommitlogMetricsTotalCommitLogSizeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commitlog metrics total commit log size get params +func (o *CommitlogMetricsTotalCommitLogSizeGetParams) WithContext(ctx context.Context) *CommitlogMetricsTotalCommitLogSizeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commitlog metrics total commit log size get params +func (o *CommitlogMetricsTotalCommitLogSizeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commitlog metrics total commit log size get params +func (o *CommitlogMetricsTotalCommitLogSizeGetParams) WithHTTPClient(client *http.Client) *CommitlogMetricsTotalCommitLogSizeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commitlog metrics total commit log size get params +func (o *CommitlogMetricsTotalCommitLogSizeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitlogMetricsTotalCommitLogSizeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return 
err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_total_commit_log_size_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_total_commit_log_size_get_responses.go new file mode 100644 index 00000000000..6f034a3ba5d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_metrics_total_commit_log_size_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitlogMetricsTotalCommitLogSizeGetReader is a Reader for the CommitlogMetricsTotalCommitLogSizeGet structure. +type CommitlogMetricsTotalCommitLogSizeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitlogMetricsTotalCommitLogSizeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitlogMetricsTotalCommitLogSizeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitlogMetricsTotalCommitLogSizeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitlogMetricsTotalCommitLogSizeGetOK creates a CommitlogMetricsTotalCommitLogSizeGetOK with default headers values +func NewCommitlogMetricsTotalCommitLogSizeGetOK() *CommitlogMetricsTotalCommitLogSizeGetOK { + return &CommitlogMetricsTotalCommitLogSizeGetOK{} +} + +/* +CommitlogMetricsTotalCommitLogSizeGetOK handles this case with default header values. + +Success +*/ +type CommitlogMetricsTotalCommitLogSizeGetOK struct { + Payload interface{} +} + +func (o *CommitlogMetricsTotalCommitLogSizeGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CommitlogMetricsTotalCommitLogSizeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCommitlogMetricsTotalCommitLogSizeGetDefault creates a CommitlogMetricsTotalCommitLogSizeGetDefault with default headers values +func NewCommitlogMetricsTotalCommitLogSizeGetDefault(code int) *CommitlogMetricsTotalCommitLogSizeGetDefault { + return &CommitlogMetricsTotalCommitLogSizeGetDefault{ + _statusCode: code, + } +} + +/* +CommitlogMetricsTotalCommitLogSizeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitlogMetricsTotalCommitLogSizeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commitlog metrics total commit log size get default response +func (o *CommitlogMetricsTotalCommitLogSizeGetDefault) Code() int { + return o._statusCode +} + +func (o *CommitlogMetricsTotalCommitLogSizeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitlogMetricsTotalCommitLogSizeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitlogMetricsTotalCommitLogSizeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_recover_by_path_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_recover_by_path_post_parameters.go new file mode 100644 index 00000000000..997f83771f4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_recover_by_path_post_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitlogRecoverByPathPostParams creates a new CommitlogRecoverByPathPostParams object +// with the default values initialized. 
+func NewCommitlogRecoverByPathPostParams() *CommitlogRecoverByPathPostParams { + var () + return &CommitlogRecoverByPathPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitlogRecoverByPathPostParamsWithTimeout creates a new CommitlogRecoverByPathPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitlogRecoverByPathPostParamsWithTimeout(timeout time.Duration) *CommitlogRecoverByPathPostParams { + var () + return &CommitlogRecoverByPathPostParams{ + + timeout: timeout, + } +} + +// NewCommitlogRecoverByPathPostParamsWithContext creates a new CommitlogRecoverByPathPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitlogRecoverByPathPostParamsWithContext(ctx context.Context) *CommitlogRecoverByPathPostParams { + var () + return &CommitlogRecoverByPathPostParams{ + + Context: ctx, + } +} + +// NewCommitlogRecoverByPathPostParamsWithHTTPClient creates a new CommitlogRecoverByPathPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitlogRecoverByPathPostParamsWithHTTPClient(client *http.Client) *CommitlogRecoverByPathPostParams { + var () + return &CommitlogRecoverByPathPostParams{ + HTTPClient: client, + } +} + +/* +CommitlogRecoverByPathPostParams contains all the parameters to send to the API endpoint +for the commitlog recover by path post operation typically these are written to a http.Request +*/ +type CommitlogRecoverByPathPostParams struct { + + /*Path + Full path of file or directory + + */ + Path string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) WithTimeout(timeout time.Duration) *CommitlogRecoverByPathPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) WithContext(ctx context.Context) *CommitlogRecoverByPathPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) WithHTTPClient(client *http.Client) *CommitlogRecoverByPathPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPath adds the path to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) WithPath(path string) *CommitlogRecoverByPathPostParams { + o.SetPath(path) + return o +} + +// SetPath adds the path to the commitlog recover by path post params +func (o *CommitlogRecoverByPathPostParams) SetPath(path string) { + o.Path = path +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitlogRecoverByPathPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param path + if err := r.SetPathParam("path", o.Path); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_recover_by_path_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_recover_by_path_post_responses.go new file mode 100644 index 00000000000..bdd28c39b2f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_recover_by_path_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitlogRecoverByPathPostReader is a Reader for the CommitlogRecoverByPathPost structure. +type CommitlogRecoverByPathPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitlogRecoverByPathPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitlogRecoverByPathPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitlogRecoverByPathPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitlogRecoverByPathPostOK creates a CommitlogRecoverByPathPostOK with default headers values +func NewCommitlogRecoverByPathPostOK() *CommitlogRecoverByPathPostOK { + return &CommitlogRecoverByPathPostOK{} +} + +/* +CommitlogRecoverByPathPostOK handles this case with default header values. + +Success +*/ +type CommitlogRecoverByPathPostOK struct { +} + +func (o *CommitlogRecoverByPathPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCommitlogRecoverByPathPostDefault creates a CommitlogRecoverByPathPostDefault with default headers values +func NewCommitlogRecoverByPathPostDefault(code int) *CommitlogRecoverByPathPostDefault { + return &CommitlogRecoverByPathPostDefault{ + _statusCode: code, + } +} + +/* +CommitlogRecoverByPathPostDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitlogRecoverByPathPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commitlog recover by path post default response +func (o *CommitlogRecoverByPathPostDefault) Code() int { + return o._statusCode +} + +func (o *CommitlogRecoverByPathPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitlogRecoverByPathPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitlogRecoverByPathPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_active_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_active_get_parameters.go new file mode 100644 index 00000000000..323b5adbb6b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_active_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitlogSegmentsActiveGetParams creates a new CommitlogSegmentsActiveGetParams object +// with the default values initialized. 
+func NewCommitlogSegmentsActiveGetParams() *CommitlogSegmentsActiveGetParams { + + return &CommitlogSegmentsActiveGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitlogSegmentsActiveGetParamsWithTimeout creates a new CommitlogSegmentsActiveGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitlogSegmentsActiveGetParamsWithTimeout(timeout time.Duration) *CommitlogSegmentsActiveGetParams { + + return &CommitlogSegmentsActiveGetParams{ + + timeout: timeout, + } +} + +// NewCommitlogSegmentsActiveGetParamsWithContext creates a new CommitlogSegmentsActiveGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitlogSegmentsActiveGetParamsWithContext(ctx context.Context) *CommitlogSegmentsActiveGetParams { + + return &CommitlogSegmentsActiveGetParams{ + + Context: ctx, + } +} + +// NewCommitlogSegmentsActiveGetParamsWithHTTPClient creates a new CommitlogSegmentsActiveGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitlogSegmentsActiveGetParamsWithHTTPClient(client *http.Client) *CommitlogSegmentsActiveGetParams { + + return &CommitlogSegmentsActiveGetParams{ + HTTPClient: client, + } +} + +/* +CommitlogSegmentsActiveGetParams contains all the parameters to send to the API endpoint +for the commitlog segments active get operation typically these are written to a http.Request +*/ +type CommitlogSegmentsActiveGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commitlog segments active get params +func (o *CommitlogSegmentsActiveGetParams) WithTimeout(timeout time.Duration) *CommitlogSegmentsActiveGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commitlog segments active get params +func (o *CommitlogSegmentsActiveGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commitlog segments active get params +func (o *CommitlogSegmentsActiveGetParams) WithContext(ctx context.Context) *CommitlogSegmentsActiveGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commitlog segments active get params +func (o *CommitlogSegmentsActiveGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commitlog segments active get params +func (o *CommitlogSegmentsActiveGetParams) WithHTTPClient(client *http.Client) *CommitlogSegmentsActiveGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commitlog segments active get params +func (o *CommitlogSegmentsActiveGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitlogSegmentsActiveGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_active_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_active_get_responses.go new file mode 100644 index 00000000000..71c4967ac20 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_active_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitlogSegmentsActiveGetReader is a Reader for the CommitlogSegmentsActiveGet structure. +type CommitlogSegmentsActiveGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitlogSegmentsActiveGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitlogSegmentsActiveGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitlogSegmentsActiveGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitlogSegmentsActiveGetOK creates a CommitlogSegmentsActiveGetOK with default headers values +func NewCommitlogSegmentsActiveGetOK() *CommitlogSegmentsActiveGetOK { + return &CommitlogSegmentsActiveGetOK{} +} + +/* +CommitlogSegmentsActiveGetOK handles this case with default header values. + +Success +*/ +type CommitlogSegmentsActiveGetOK struct { + Payload []string +} + +func (o *CommitlogSegmentsActiveGetOK) GetPayload() []string { + return o.Payload +} + +func (o *CommitlogSegmentsActiveGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCommitlogSegmentsActiveGetDefault creates a CommitlogSegmentsActiveGetDefault with default headers values +func NewCommitlogSegmentsActiveGetDefault(code int) *CommitlogSegmentsActiveGetDefault { + return &CommitlogSegmentsActiveGetDefault{ + _statusCode: code, + } +} + +/* +CommitlogSegmentsActiveGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitlogSegmentsActiveGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commitlog segments active get default response +func (o *CommitlogSegmentsActiveGetDefault) Code() int { + return o._statusCode +} + +func (o *CommitlogSegmentsActiveGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitlogSegmentsActiveGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitlogSegmentsActiveGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_archiving_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_archiving_get_parameters.go new file mode 100644 index 00000000000..be65a05c362 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_archiving_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCommitlogSegmentsArchivingGetParams creates a new CommitlogSegmentsArchivingGetParams object +// with the default values initialized. 
+func NewCommitlogSegmentsArchivingGetParams() *CommitlogSegmentsArchivingGetParams { + + return &CommitlogSegmentsArchivingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCommitlogSegmentsArchivingGetParamsWithTimeout creates a new CommitlogSegmentsArchivingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCommitlogSegmentsArchivingGetParamsWithTimeout(timeout time.Duration) *CommitlogSegmentsArchivingGetParams { + + return &CommitlogSegmentsArchivingGetParams{ + + timeout: timeout, + } +} + +// NewCommitlogSegmentsArchivingGetParamsWithContext creates a new CommitlogSegmentsArchivingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCommitlogSegmentsArchivingGetParamsWithContext(ctx context.Context) *CommitlogSegmentsArchivingGetParams { + + return &CommitlogSegmentsArchivingGetParams{ + + Context: ctx, + } +} + +// NewCommitlogSegmentsArchivingGetParamsWithHTTPClient creates a new CommitlogSegmentsArchivingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCommitlogSegmentsArchivingGetParamsWithHTTPClient(client *http.Client) *CommitlogSegmentsArchivingGetParams { + + return &CommitlogSegmentsArchivingGetParams{ + HTTPClient: client, + } +} + +/* +CommitlogSegmentsArchivingGetParams contains all the parameters to send to the API endpoint +for the commitlog segments archiving get operation typically these are written to a http.Request +*/ +type CommitlogSegmentsArchivingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the commitlog segments archiving get params +func (o *CommitlogSegmentsArchivingGetParams) WithTimeout(timeout time.Duration) *CommitlogSegmentsArchivingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the commitlog segments archiving get params +func (o *CommitlogSegmentsArchivingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the commitlog segments archiving get params +func (o *CommitlogSegmentsArchivingGetParams) WithContext(ctx context.Context) *CommitlogSegmentsArchivingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the commitlog segments archiving get params +func (o *CommitlogSegmentsArchivingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the commitlog segments archiving get params +func (o *CommitlogSegmentsArchivingGetParams) WithHTTPClient(client *http.Client) *CommitlogSegmentsArchivingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the commitlog segments archiving get params +func (o *CommitlogSegmentsArchivingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CommitlogSegmentsArchivingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_archiving_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_archiving_get_responses.go new file mode 100644 index 00000000000..4f2df544a02 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/commitlog_segments_archiving_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CommitlogSegmentsArchivingGetReader is a Reader for the CommitlogSegmentsArchivingGet structure. +type CommitlogSegmentsArchivingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CommitlogSegmentsArchivingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCommitlogSegmentsArchivingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCommitlogSegmentsArchivingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCommitlogSegmentsArchivingGetOK creates a CommitlogSegmentsArchivingGetOK with default headers values +func NewCommitlogSegmentsArchivingGetOK() *CommitlogSegmentsArchivingGetOK { + return &CommitlogSegmentsArchivingGetOK{} +} + +/* +CommitlogSegmentsArchivingGetOK handles this case with default header values. + +Success +*/ +type CommitlogSegmentsArchivingGetOK struct { + Payload []string +} + +func (o *CommitlogSegmentsArchivingGetOK) GetPayload() []string { + return o.Payload +} + +func (o *CommitlogSegmentsArchivingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCommitlogSegmentsArchivingGetDefault creates a CommitlogSegmentsArchivingGetDefault with default headers values +func NewCommitlogSegmentsArchivingGetDefault(code int) *CommitlogSegmentsArchivingGetDefault { + return &CommitlogSegmentsArchivingGetDefault{ + _statusCode: code, + } +} + +/* +CommitlogSegmentsArchivingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CommitlogSegmentsArchivingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the commitlog segments archiving get default response +func (o *CommitlogSegmentsArchivingGetDefault) Code() int { + return o._statusCode +} + +func (o *CommitlogSegmentsArchivingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CommitlogSegmentsArchivingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CommitlogSegmentsArchivingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_history_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_history_get_parameters.go new file mode 100644 index 00000000000..31580ab19a3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_history_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerCompactionHistoryGetParams creates a new CompactionManagerCompactionHistoryGetParams object +// with the default values initialized. 
+func NewCompactionManagerCompactionHistoryGetParams() *CompactionManagerCompactionHistoryGetParams { + + return &CompactionManagerCompactionHistoryGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerCompactionHistoryGetParamsWithTimeout creates a new CompactionManagerCompactionHistoryGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerCompactionHistoryGetParamsWithTimeout(timeout time.Duration) *CompactionManagerCompactionHistoryGetParams { + + return &CompactionManagerCompactionHistoryGetParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerCompactionHistoryGetParamsWithContext creates a new CompactionManagerCompactionHistoryGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerCompactionHistoryGetParamsWithContext(ctx context.Context) *CompactionManagerCompactionHistoryGetParams { + + return &CompactionManagerCompactionHistoryGetParams{ + + Context: ctx, + } +} + +// NewCompactionManagerCompactionHistoryGetParamsWithHTTPClient creates a new CompactionManagerCompactionHistoryGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerCompactionHistoryGetParamsWithHTTPClient(client *http.Client) *CompactionManagerCompactionHistoryGetParams { + + return &CompactionManagerCompactionHistoryGetParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerCompactionHistoryGetParams contains all the parameters to send to the API endpoint +for the compaction manager compaction history get operation typically these are written to a http.Request +*/ +type CompactionManagerCompactionHistoryGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager compaction history get params +func (o *CompactionManagerCompactionHistoryGetParams) WithTimeout(timeout time.Duration) *CompactionManagerCompactionHistoryGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager compaction history get params +func (o *CompactionManagerCompactionHistoryGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager compaction history get params +func (o *CompactionManagerCompactionHistoryGetParams) WithContext(ctx context.Context) *CompactionManagerCompactionHistoryGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager compaction history get params +func (o *CompactionManagerCompactionHistoryGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager compaction history get params +func (o *CompactionManagerCompactionHistoryGetParams) WithHTTPClient(client *http.Client) *CompactionManagerCompactionHistoryGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager compaction history get params +func (o *CompactionManagerCompactionHistoryGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CompactionManagerCompactionHistoryGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var 
res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_history_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_history_get_responses.go new file mode 100644 index 00000000000..207a1793aa2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_history_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerCompactionHistoryGetReader is a Reader for the CompactionManagerCompactionHistoryGet structure. +type CompactionManagerCompactionHistoryGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerCompactionHistoryGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerCompactionHistoryGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerCompactionHistoryGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerCompactionHistoryGetOK creates a CompactionManagerCompactionHistoryGetOK with default headers values +func NewCompactionManagerCompactionHistoryGetOK() *CompactionManagerCompactionHistoryGetOK { + return &CompactionManagerCompactionHistoryGetOK{} +} + +/* +CompactionManagerCompactionHistoryGetOK handles this case with default header values. + +Success +*/ +type CompactionManagerCompactionHistoryGetOK struct { + Payload []*models.History +} + +func (o *CompactionManagerCompactionHistoryGetOK) GetPayload() []*models.History { + return o.Payload +} + +func (o *CompactionManagerCompactionHistoryGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompactionManagerCompactionHistoryGetDefault creates a CompactionManagerCompactionHistoryGetDefault with default headers values +func NewCompactionManagerCompactionHistoryGetDefault(code int) *CompactionManagerCompactionHistoryGetDefault { + return &CompactionManagerCompactionHistoryGetDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerCompactionHistoryGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CompactionManagerCompactionHistoryGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager compaction history get default response +func (o *CompactionManagerCompactionHistoryGetDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerCompactionHistoryGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerCompactionHistoryGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerCompactionHistoryGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_info_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_info_get_parameters.go new file mode 100644 index 00000000000..67c8639f068 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_info_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerCompactionInfoGetParams creates a new CompactionManagerCompactionInfoGetParams object +// with the default values initialized. 
+func NewCompactionManagerCompactionInfoGetParams() *CompactionManagerCompactionInfoGetParams { + + return &CompactionManagerCompactionInfoGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerCompactionInfoGetParamsWithTimeout creates a new CompactionManagerCompactionInfoGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerCompactionInfoGetParamsWithTimeout(timeout time.Duration) *CompactionManagerCompactionInfoGetParams { + + return &CompactionManagerCompactionInfoGetParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerCompactionInfoGetParamsWithContext creates a new CompactionManagerCompactionInfoGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerCompactionInfoGetParamsWithContext(ctx context.Context) *CompactionManagerCompactionInfoGetParams { + + return &CompactionManagerCompactionInfoGetParams{ + + Context: ctx, + } +} + +// NewCompactionManagerCompactionInfoGetParamsWithHTTPClient creates a new CompactionManagerCompactionInfoGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerCompactionInfoGetParamsWithHTTPClient(client *http.Client) *CompactionManagerCompactionInfoGetParams { + + return &CompactionManagerCompactionInfoGetParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerCompactionInfoGetParams contains all the parameters to send to the API endpoint +for the compaction manager compaction info get operation typically these are written to a http.Request +*/ +type CompactionManagerCompactionInfoGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager compaction info get params +func (o *CompactionManagerCompactionInfoGetParams) WithTimeout(timeout time.Duration) *CompactionManagerCompactionInfoGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager compaction info get params +func (o *CompactionManagerCompactionInfoGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager compaction info get params +func (o *CompactionManagerCompactionInfoGetParams) WithContext(ctx context.Context) *CompactionManagerCompactionInfoGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager compaction info get params +func (o *CompactionManagerCompactionInfoGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager compaction info get params +func (o *CompactionManagerCompactionInfoGetParams) WithHTTPClient(client *http.Client) *CompactionManagerCompactionInfoGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager compaction info get params +func (o *CompactionManagerCompactionInfoGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CompactionManagerCompactionInfoGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_info_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_info_get_responses.go new file mode 100644 index 00000000000..19db4d23748 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compaction_info_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerCompactionInfoGetReader is a Reader for the CompactionManagerCompactionInfoGet structure. +type CompactionManagerCompactionInfoGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerCompactionInfoGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerCompactionInfoGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerCompactionInfoGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerCompactionInfoGetOK creates a CompactionManagerCompactionInfoGetOK with default headers values +func NewCompactionManagerCompactionInfoGetOK() *CompactionManagerCompactionInfoGetOK { + return &CompactionManagerCompactionInfoGetOK{} +} + +/* +CompactionManagerCompactionInfoGetOK handles this case with default header values. + +Success +*/ +type CompactionManagerCompactionInfoGetOK struct { + Payload []*models.CompactionInfo +} + +func (o *CompactionManagerCompactionInfoGetOK) GetPayload() []*models.CompactionInfo { + return o.Payload +} + +func (o *CompactionManagerCompactionInfoGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompactionManagerCompactionInfoGetDefault creates a CompactionManagerCompactionInfoGetDefault with default headers values +func NewCompactionManagerCompactionInfoGetDefault(code int) *CompactionManagerCompactionInfoGetDefault { + return &CompactionManagerCompactionInfoGetDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerCompactionInfoGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CompactionManagerCompactionInfoGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager compaction info get default response +func (o *CompactionManagerCompactionInfoGetDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerCompactionInfoGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerCompactionInfoGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerCompactionInfoGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compactions_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compactions_get_parameters.go new file mode 100644 index 00000000000..5ee958a9891 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compactions_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerCompactionsGetParams creates a new CompactionManagerCompactionsGetParams object +// with the default values initialized. 
+func NewCompactionManagerCompactionsGetParams() *CompactionManagerCompactionsGetParams { + + return &CompactionManagerCompactionsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerCompactionsGetParamsWithTimeout creates a new CompactionManagerCompactionsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerCompactionsGetParamsWithTimeout(timeout time.Duration) *CompactionManagerCompactionsGetParams { + + return &CompactionManagerCompactionsGetParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerCompactionsGetParamsWithContext creates a new CompactionManagerCompactionsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerCompactionsGetParamsWithContext(ctx context.Context) *CompactionManagerCompactionsGetParams { + + return &CompactionManagerCompactionsGetParams{ + + Context: ctx, + } +} + +// NewCompactionManagerCompactionsGetParamsWithHTTPClient creates a new CompactionManagerCompactionsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerCompactionsGetParamsWithHTTPClient(client *http.Client) *CompactionManagerCompactionsGetParams { + + return &CompactionManagerCompactionsGetParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerCompactionsGetParams contains all the parameters to send to the API endpoint +for the compaction manager compactions get operation typically these are written to a http.Request +*/ +type CompactionManagerCompactionsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager compactions get params +func (o *CompactionManagerCompactionsGetParams) WithTimeout(timeout time.Duration) *CompactionManagerCompactionsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager compactions get params +func (o *CompactionManagerCompactionsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager compactions get params +func (o *CompactionManagerCompactionsGetParams) WithContext(ctx context.Context) *CompactionManagerCompactionsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager compactions get params +func (o *CompactionManagerCompactionsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager compactions get params +func (o *CompactionManagerCompactionsGetParams) WithHTTPClient(client *http.Client) *CompactionManagerCompactionsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager compactions get params +func (o *CompactionManagerCompactionsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CompactionManagerCompactionsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compactions_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compactions_get_responses.go new file mode 100644 index 00000000000..97ee7e14b65 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_compactions_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerCompactionsGetReader is a Reader for the CompactionManagerCompactionsGet structure. +type CompactionManagerCompactionsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerCompactionsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerCompactionsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerCompactionsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerCompactionsGetOK creates a CompactionManagerCompactionsGetOK with default headers values +func NewCompactionManagerCompactionsGetOK() *CompactionManagerCompactionsGetOK { + return &CompactionManagerCompactionsGetOK{} +} + +/* +CompactionManagerCompactionsGetOK handles this case with default header values. + +Success +*/ +type CompactionManagerCompactionsGetOK struct { + Payload []*models.Summary +} + +func (o *CompactionManagerCompactionsGetOK) GetPayload() []*models.Summary { + return o.Payload +} + +func (o *CompactionManagerCompactionsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompactionManagerCompactionsGetDefault creates a CompactionManagerCompactionsGetDefault with default headers values +func NewCompactionManagerCompactionsGetDefault(code int) *CompactionManagerCompactionsGetDefault { + return &CompactionManagerCompactionsGetDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerCompactionsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CompactionManagerCompactionsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager compactions get default response +func (o *CompactionManagerCompactionsGetDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerCompactionsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerCompactionsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerCompactionsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_force_user_defined_compaction_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_force_user_defined_compaction_post_parameters.go new file mode 100644 index 00000000000..6348a41ad63 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_force_user_defined_compaction_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerForceUserDefinedCompactionPostParams creates a new CompactionManagerForceUserDefinedCompactionPostParams object +// with the default values initialized. 
+func NewCompactionManagerForceUserDefinedCompactionPostParams() *CompactionManagerForceUserDefinedCompactionPostParams { + var () + return &CompactionManagerForceUserDefinedCompactionPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerForceUserDefinedCompactionPostParamsWithTimeout creates a new CompactionManagerForceUserDefinedCompactionPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerForceUserDefinedCompactionPostParamsWithTimeout(timeout time.Duration) *CompactionManagerForceUserDefinedCompactionPostParams { + var () + return &CompactionManagerForceUserDefinedCompactionPostParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerForceUserDefinedCompactionPostParamsWithContext creates a new CompactionManagerForceUserDefinedCompactionPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerForceUserDefinedCompactionPostParamsWithContext(ctx context.Context) *CompactionManagerForceUserDefinedCompactionPostParams { + var () + return &CompactionManagerForceUserDefinedCompactionPostParams{ + + Context: ctx, + } +} + +// NewCompactionManagerForceUserDefinedCompactionPostParamsWithHTTPClient creates a new CompactionManagerForceUserDefinedCompactionPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerForceUserDefinedCompactionPostParamsWithHTTPClient(client *http.Client) *CompactionManagerForceUserDefinedCompactionPostParams { + var () + return &CompactionManagerForceUserDefinedCompactionPostParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerForceUserDefinedCompactionPostParams contains all the parameters to send to the API endpoint +for the compaction manager force user defined compaction post operation typically these are written to a http.Request +*/ +type CompactionManagerForceUserDefinedCompactionPostParams struct { + + /*DataFiles + a comma separated list of sstable file to compact. 
must contain keyspace and columnfamily name in path(for 2.1+) or file name itself + + */ + DataFiles string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) WithTimeout(timeout time.Duration) *CompactionManagerForceUserDefinedCompactionPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) WithContext(ctx context.Context) *CompactionManagerForceUserDefinedCompactionPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) WithHTTPClient(client *http.Client) *CompactionManagerForceUserDefinedCompactionPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithDataFiles adds the dataFiles to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) WithDataFiles(dataFiles string) *CompactionManagerForceUserDefinedCompactionPostParams { + o.SetDataFiles(dataFiles) + return o +} + +// SetDataFiles adds the dataFiles to the compaction manager force user defined compaction post params +func (o *CompactionManagerForceUserDefinedCompactionPostParams) SetDataFiles(dataFiles string) { + o.DataFiles = dataFiles +} + +// WriteToRequest writes these params to a swagger request +func (o *CompactionManagerForceUserDefinedCompactionPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param data_files + qrDataFiles := o.DataFiles + qDataFiles := qrDataFiles + if qDataFiles != "" { + if err := r.SetQueryParam("data_files", qDataFiles); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_force_user_defined_compaction_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_force_user_defined_compaction_post_responses.go new file mode 100644 index 00000000000..3bffaf621c7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_force_user_defined_compaction_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. 
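All of the generated parameters files in this patch follow go-swagger's fluent builder pattern: each `WithX` method delegates to the matching `SetX` setter and returns the receiver, so construction chains. As a minimal sketch (not part of the vendored code), here is how a caller might build the force-user-defined-compaction params defined just above; the sstable paths are made-up examples, and in real use the populated object would be passed to the generated client method for this operation, which lives outside this hunk:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Each WithX call delegates to the matching SetX setter and returns the
	// receiver, so construction chains. The sstable list below is invented.
	params := operations.NewCompactionManagerForceUserDefinedCompactionPostParamsWithTimeout(30 * time.Second).
		WithContext(ctx).
		WithDataFiles("ks/cf/ks-cf-ka-1-Data.db,ks/cf/ks-cf-ka-2-Data.db")

	// WriteToRequest will later serialize DataFiles as the data_files query
	// parameter; here we only inspect the built object.
	fmt.Println(params.DataFiles)
}
```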
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerForceUserDefinedCompactionPostReader is a Reader for the CompactionManagerForceUserDefinedCompactionPost structure. +type CompactionManagerForceUserDefinedCompactionPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerForceUserDefinedCompactionPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerForceUserDefinedCompactionPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerForceUserDefinedCompactionPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerForceUserDefinedCompactionPostOK creates a CompactionManagerForceUserDefinedCompactionPostOK with default headers values +func NewCompactionManagerForceUserDefinedCompactionPostOK() *CompactionManagerForceUserDefinedCompactionPostOK { + return &CompactionManagerForceUserDefinedCompactionPostOK{} +} + +/* +CompactionManagerForceUserDefinedCompactionPostOK handles this case with default header values. + +Success +*/ +type CompactionManagerForceUserDefinedCompactionPostOK struct { +} + +func (o *CompactionManagerForceUserDefinedCompactionPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCompactionManagerForceUserDefinedCompactionPostDefault creates a CompactionManagerForceUserDefinedCompactionPostDefault with default headers values +func NewCompactionManagerForceUserDefinedCompactionPostDefault(code int) *CompactionManagerForceUserDefinedCompactionPostDefault { + return &CompactionManagerForceUserDefinedCompactionPostDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerForceUserDefinedCompactionPostDefault handles this case with default header values. 
+ +internal server error +*/ +type CompactionManagerForceUserDefinedCompactionPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager force user defined compaction post default response +func (o *CompactionManagerForceUserDefinedCompactionPostDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerForceUserDefinedCompactionPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerForceUserDefinedCompactionPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerForceUserDefinedCompactionPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_bytes_compacted_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_bytes_compacted_get_parameters.go new file mode 100644 index 00000000000..1d92ae983d5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_bytes_compacted_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerMetricsBytesCompactedGetParams creates a new CompactionManagerMetricsBytesCompactedGetParams object +// with the default values initialized. 
+func NewCompactionManagerMetricsBytesCompactedGetParams() *CompactionManagerMetricsBytesCompactedGetParams { + + return &CompactionManagerMetricsBytesCompactedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerMetricsBytesCompactedGetParamsWithTimeout creates a new CompactionManagerMetricsBytesCompactedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerMetricsBytesCompactedGetParamsWithTimeout(timeout time.Duration) *CompactionManagerMetricsBytesCompactedGetParams { + + return &CompactionManagerMetricsBytesCompactedGetParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerMetricsBytesCompactedGetParamsWithContext creates a new CompactionManagerMetricsBytesCompactedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerMetricsBytesCompactedGetParamsWithContext(ctx context.Context) *CompactionManagerMetricsBytesCompactedGetParams { + + return &CompactionManagerMetricsBytesCompactedGetParams{ + + Context: ctx, + } +} + +// NewCompactionManagerMetricsBytesCompactedGetParamsWithHTTPClient creates a new CompactionManagerMetricsBytesCompactedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerMetricsBytesCompactedGetParamsWithHTTPClient(client *http.Client) *CompactionManagerMetricsBytesCompactedGetParams { + + return &CompactionManagerMetricsBytesCompactedGetParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerMetricsBytesCompactedGetParams contains all the parameters to send to the API endpoint +for the compaction manager metrics bytes compacted get operation typically these are written to a http.Request +*/ +type CompactionManagerMetricsBytesCompactedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager metrics bytes compacted get params +func (o *CompactionManagerMetricsBytesCompactedGetParams) WithTimeout(timeout time.Duration) *CompactionManagerMetricsBytesCompactedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager metrics bytes compacted get params +func (o *CompactionManagerMetricsBytesCompactedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager metrics bytes compacted get params +func (o *CompactionManagerMetricsBytesCompactedGetParams) WithContext(ctx context.Context) *CompactionManagerMetricsBytesCompactedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager metrics bytes compacted get params +func (o *CompactionManagerMetricsBytesCompactedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager metrics bytes compacted get params +func (o *CompactionManagerMetricsBytesCompactedGetParams) WithHTTPClient(client *http.Client) *CompactionManagerMetricsBytesCompactedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager metrics bytes compacted get params +func (o *CompactionManagerMetricsBytesCompactedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*CompactionManagerMetricsBytesCompactedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_bytes_compacted_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_bytes_compacted_get_responses.go new file mode 100644 index 00000000000..0413158101d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_bytes_compacted_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerMetricsBytesCompactedGetReader is a Reader for the CompactionManagerMetricsBytesCompactedGet structure. +type CompactionManagerMetricsBytesCompactedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerMetricsBytesCompactedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerMetricsBytesCompactedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerMetricsBytesCompactedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerMetricsBytesCompactedGetOK creates a CompactionManagerMetricsBytesCompactedGetOK with default headers values +func NewCompactionManagerMetricsBytesCompactedGetOK() *CompactionManagerMetricsBytesCompactedGetOK { + return &CompactionManagerMetricsBytesCompactedGetOK{} +} + +/* +CompactionManagerMetricsBytesCompactedGetOK handles this case with default header values. + +Success +*/ +type CompactionManagerMetricsBytesCompactedGetOK struct { + Payload int32 +} + +func (o *CompactionManagerMetricsBytesCompactedGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CompactionManagerMetricsBytesCompactedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompactionManagerMetricsBytesCompactedGetDefault creates a CompactionManagerMetricsBytesCompactedGetDefault with default headers values +func NewCompactionManagerMetricsBytesCompactedGetDefault(code int) *CompactionManagerMetricsBytesCompactedGetDefault { + return &CompactionManagerMetricsBytesCompactedGetDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerMetricsBytesCompactedGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CompactionManagerMetricsBytesCompactedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager metrics bytes compacted get default response +func (o *CompactionManagerMetricsBytesCompactedGetDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerMetricsBytesCompactedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerMetricsBytesCompactedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerMetricsBytesCompactedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_completed_tasks_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_completed_tasks_get_parameters.go new file mode 100644 index 00000000000..778934a81b8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_completed_tasks_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerMetricsCompletedTasksGetParams creates a new CompactionManagerMetricsCompletedTasksGetParams object +// with the default values initialized. 
+func NewCompactionManagerMetricsCompletedTasksGetParams() *CompactionManagerMetricsCompletedTasksGetParams { + + return &CompactionManagerMetricsCompletedTasksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerMetricsCompletedTasksGetParamsWithTimeout creates a new CompactionManagerMetricsCompletedTasksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerMetricsCompletedTasksGetParamsWithTimeout(timeout time.Duration) *CompactionManagerMetricsCompletedTasksGetParams { + + return &CompactionManagerMetricsCompletedTasksGetParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerMetricsCompletedTasksGetParamsWithContext creates a new CompactionManagerMetricsCompletedTasksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerMetricsCompletedTasksGetParamsWithContext(ctx context.Context) *CompactionManagerMetricsCompletedTasksGetParams { + + return &CompactionManagerMetricsCompletedTasksGetParams{ + + Context: ctx, + } +} + +// NewCompactionManagerMetricsCompletedTasksGetParamsWithHTTPClient creates a new CompactionManagerMetricsCompletedTasksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerMetricsCompletedTasksGetParamsWithHTTPClient(client *http.Client) *CompactionManagerMetricsCompletedTasksGetParams { + + return &CompactionManagerMetricsCompletedTasksGetParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerMetricsCompletedTasksGetParams contains all the parameters to send to the API endpoint +for the compaction manager metrics completed tasks get operation typically these are written to a http.Request +*/ +type CompactionManagerMetricsCompletedTasksGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager metrics completed tasks get params +func (o *CompactionManagerMetricsCompletedTasksGetParams) WithTimeout(timeout time.Duration) *CompactionManagerMetricsCompletedTasksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager metrics completed tasks get params +func (o *CompactionManagerMetricsCompletedTasksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager metrics completed tasks get params +func (o *CompactionManagerMetricsCompletedTasksGetParams) WithContext(ctx context.Context) *CompactionManagerMetricsCompletedTasksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager metrics completed tasks get params +func (o *CompactionManagerMetricsCompletedTasksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager metrics completed tasks get params +func (o *CompactionManagerMetricsCompletedTasksGetParams) WithHTTPClient(client *http.Client) *CompactionManagerMetricsCompletedTasksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager metrics completed tasks get params +func (o *CompactionManagerMetricsCompletedTasksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*CompactionManagerMetricsCompletedTasksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_completed_tasks_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_completed_tasks_get_responses.go new file mode 100644 index 00000000000..45ec1c2b6ea --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_completed_tasks_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerMetricsCompletedTasksGetReader is a Reader for the CompactionManagerMetricsCompletedTasksGet structure. +type CompactionManagerMetricsCompletedTasksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerMetricsCompletedTasksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerMetricsCompletedTasksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerMetricsCompletedTasksGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerMetricsCompletedTasksGetOK creates a CompactionManagerMetricsCompletedTasksGetOK with default headers values +func NewCompactionManagerMetricsCompletedTasksGetOK() *CompactionManagerMetricsCompletedTasksGetOK { + return &CompactionManagerMetricsCompletedTasksGetOK{} +} + +/* +CompactionManagerMetricsCompletedTasksGetOK handles this case with default header values. + +Success +*/ +type CompactionManagerMetricsCompletedTasksGetOK struct { + Payload interface{} +} + +func (o *CompactionManagerMetricsCompletedTasksGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CompactionManagerMetricsCompletedTasksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompactionManagerMetricsCompletedTasksGetDefault creates a CompactionManagerMetricsCompletedTasksGetDefault with default headers values +func NewCompactionManagerMetricsCompletedTasksGetDefault(code int) *CompactionManagerMetricsCompletedTasksGetDefault { + return &CompactionManagerMetricsCompletedTasksGetDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerMetricsCompletedTasksGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CompactionManagerMetricsCompletedTasksGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager metrics completed tasks get default response +func (o *CompactionManagerMetricsCompletedTasksGetDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerMetricsCompletedTasksGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerMetricsCompletedTasksGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerMetricsCompletedTasksGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_pending_tasks_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_pending_tasks_get_parameters.go new file mode 100644 index 00000000000..17297c4ab76 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_pending_tasks_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerMetricsPendingTasksGetParams creates a new CompactionManagerMetricsPendingTasksGetParams object +// with the default values initialized. 
+func NewCompactionManagerMetricsPendingTasksGetParams() *CompactionManagerMetricsPendingTasksGetParams { + + return &CompactionManagerMetricsPendingTasksGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerMetricsPendingTasksGetParamsWithTimeout creates a new CompactionManagerMetricsPendingTasksGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerMetricsPendingTasksGetParamsWithTimeout(timeout time.Duration) *CompactionManagerMetricsPendingTasksGetParams { + + return &CompactionManagerMetricsPendingTasksGetParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerMetricsPendingTasksGetParamsWithContext creates a new CompactionManagerMetricsPendingTasksGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerMetricsPendingTasksGetParamsWithContext(ctx context.Context) *CompactionManagerMetricsPendingTasksGetParams { + + return &CompactionManagerMetricsPendingTasksGetParams{ + + Context: ctx, + } +} + +// NewCompactionManagerMetricsPendingTasksGetParamsWithHTTPClient creates a new CompactionManagerMetricsPendingTasksGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerMetricsPendingTasksGetParamsWithHTTPClient(client *http.Client) *CompactionManagerMetricsPendingTasksGetParams { + + return &CompactionManagerMetricsPendingTasksGetParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerMetricsPendingTasksGetParams contains all the parameters to send to the API endpoint +for the compaction manager metrics pending tasks get operation typically these are written to a http.Request +*/ +type CompactionManagerMetricsPendingTasksGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager metrics pending tasks get params +func (o *CompactionManagerMetricsPendingTasksGetParams) WithTimeout(timeout time.Duration) *CompactionManagerMetricsPendingTasksGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager metrics pending tasks get params +func (o *CompactionManagerMetricsPendingTasksGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager metrics pending tasks get params +func (o *CompactionManagerMetricsPendingTasksGetParams) WithContext(ctx context.Context) *CompactionManagerMetricsPendingTasksGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager metrics pending tasks get params +func (o *CompactionManagerMetricsPendingTasksGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager metrics pending tasks get params +func (o *CompactionManagerMetricsPendingTasksGetParams) WithHTTPClient(client *http.Client) *CompactionManagerMetricsPendingTasksGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager metrics pending tasks get params +func (o *CompactionManagerMetricsPendingTasksGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CompactionManagerMetricsPendingTasksGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) 
error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_pending_tasks_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_pending_tasks_get_responses.go new file mode 100644 index 00000000000..d734e3d6524 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_pending_tasks_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerMetricsPendingTasksGetReader is a Reader for the CompactionManagerMetricsPendingTasksGet structure. +type CompactionManagerMetricsPendingTasksGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerMetricsPendingTasksGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerMetricsPendingTasksGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerMetricsPendingTasksGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerMetricsPendingTasksGetOK creates a CompactionManagerMetricsPendingTasksGetOK with default headers values +func NewCompactionManagerMetricsPendingTasksGetOK() *CompactionManagerMetricsPendingTasksGetOK { + return &CompactionManagerMetricsPendingTasksGetOK{} +} + +/* +CompactionManagerMetricsPendingTasksGetOK handles this case with default header values. + +Success +*/ +type CompactionManagerMetricsPendingTasksGetOK struct { + Payload int32 +} + +func (o *CompactionManagerMetricsPendingTasksGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *CompactionManagerMetricsPendingTasksGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompactionManagerMetricsPendingTasksGetDefault creates a CompactionManagerMetricsPendingTasksGetDefault with default headers values +func NewCompactionManagerMetricsPendingTasksGetDefault(code int) *CompactionManagerMetricsPendingTasksGetDefault { + return &CompactionManagerMetricsPendingTasksGetDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerMetricsPendingTasksGetDefault handles this case with default header values. 
+ +internal server error +*/ +type CompactionManagerMetricsPendingTasksGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager metrics pending tasks get default response +func (o *CompactionManagerMetricsPendingTasksGetDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerMetricsPendingTasksGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerMetricsPendingTasksGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerMetricsPendingTasksGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_total_compactions_completed_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_total_compactions_completed_get_parameters.go new file mode 100644 index 00000000000..141f74f760a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_total_compactions_completed_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerMetricsTotalCompactionsCompletedGetParams creates a new CompactionManagerMetricsTotalCompactionsCompletedGetParams object +// with the default values initialized. 
+func NewCompactionManagerMetricsTotalCompactionsCompletedGetParams() *CompactionManagerMetricsTotalCompactionsCompletedGetParams { + + return &CompactionManagerMetricsTotalCompactionsCompletedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerMetricsTotalCompactionsCompletedGetParamsWithTimeout creates a new CompactionManagerMetricsTotalCompactionsCompletedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerMetricsTotalCompactionsCompletedGetParamsWithTimeout(timeout time.Duration) *CompactionManagerMetricsTotalCompactionsCompletedGetParams { + + return &CompactionManagerMetricsTotalCompactionsCompletedGetParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerMetricsTotalCompactionsCompletedGetParamsWithContext creates a new CompactionManagerMetricsTotalCompactionsCompletedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerMetricsTotalCompactionsCompletedGetParamsWithContext(ctx context.Context) *CompactionManagerMetricsTotalCompactionsCompletedGetParams { + + return &CompactionManagerMetricsTotalCompactionsCompletedGetParams{ + + Context: ctx, + } +} + +// NewCompactionManagerMetricsTotalCompactionsCompletedGetParamsWithHTTPClient creates a new CompactionManagerMetricsTotalCompactionsCompletedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerMetricsTotalCompactionsCompletedGetParamsWithHTTPClient(client *http.Client) *CompactionManagerMetricsTotalCompactionsCompletedGetParams { + + return &CompactionManagerMetricsTotalCompactionsCompletedGetParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerMetricsTotalCompactionsCompletedGetParams contains all the parameters to send to the API endpoint +for the compaction manager metrics total compactions completed get operation typically these are written to a http.Request +*/ +type CompactionManagerMetricsTotalCompactionsCompletedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager metrics total compactions completed get params +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetParams) WithTimeout(timeout time.Duration) *CompactionManagerMetricsTotalCompactionsCompletedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager metrics total compactions completed get params +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager metrics total compactions completed get params +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetParams) WithContext(ctx context.Context) *CompactionManagerMetricsTotalCompactionsCompletedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager metrics total compactions completed get params +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager metrics total compactions completed get params +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetParams) WithHTTPClient(client *http.Client) 
*CompactionManagerMetricsTotalCompactionsCompletedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager metrics total compactions completed get params +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_total_compactions_completed_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_total_compactions_completed_get_responses.go new file mode 100644 index 00000000000..abe52c13e70 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_metrics_total_compactions_completed_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerMetricsTotalCompactionsCompletedGetReader is a Reader for the CompactionManagerMetricsTotalCompactionsCompletedGet structure. +type CompactionManagerMetricsTotalCompactionsCompletedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerMetricsTotalCompactionsCompletedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerMetricsTotalCompactionsCompletedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerMetricsTotalCompactionsCompletedGetOK creates a CompactionManagerMetricsTotalCompactionsCompletedGetOK with default headers values +func NewCompactionManagerMetricsTotalCompactionsCompletedGetOK() *CompactionManagerMetricsTotalCompactionsCompletedGetOK { + return &CompactionManagerMetricsTotalCompactionsCompletedGetOK{} +} + +/* +CompactionManagerMetricsTotalCompactionsCompletedGetOK handles this case with default header values. 
+ +Success +*/ +type CompactionManagerMetricsTotalCompactionsCompletedGetOK struct { + Payload interface{} +} + +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewCompactionManagerMetricsTotalCompactionsCompletedGetDefault creates a CompactionManagerMetricsTotalCompactionsCompletedGetDefault with default headers values +func NewCompactionManagerMetricsTotalCompactionsCompletedGetDefault(code int) *CompactionManagerMetricsTotalCompactionsCompletedGetDefault { + return &CompactionManagerMetricsTotalCompactionsCompletedGetDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerMetricsTotalCompactionsCompletedGetDefault handles this case with default header values. + +internal server error +*/ +type CompactionManagerMetricsTotalCompactionsCompletedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager metrics total compactions completed get default response +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerMetricsTotalCompactionsCompletedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_stop_compaction_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_stop_compaction_post_parameters.go new file mode 100644 index 00000000000..6a27b1d3298 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_stop_compaction_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewCompactionManagerStopCompactionPostParams creates a new CompactionManagerStopCompactionPostParams object +// with the default values initialized. 
+func NewCompactionManagerStopCompactionPostParams() *CompactionManagerStopCompactionPostParams { + var () + return &CompactionManagerStopCompactionPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewCompactionManagerStopCompactionPostParamsWithTimeout creates a new CompactionManagerStopCompactionPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewCompactionManagerStopCompactionPostParamsWithTimeout(timeout time.Duration) *CompactionManagerStopCompactionPostParams { + var () + return &CompactionManagerStopCompactionPostParams{ + + timeout: timeout, + } +} + +// NewCompactionManagerStopCompactionPostParamsWithContext creates a new CompactionManagerStopCompactionPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewCompactionManagerStopCompactionPostParamsWithContext(ctx context.Context) *CompactionManagerStopCompactionPostParams { + var () + return &CompactionManagerStopCompactionPostParams{ + + Context: ctx, + } +} + +// NewCompactionManagerStopCompactionPostParamsWithHTTPClient creates a new CompactionManagerStopCompactionPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewCompactionManagerStopCompactionPostParamsWithHTTPClient(client *http.Client) *CompactionManagerStopCompactionPostParams { + var () + return &CompactionManagerStopCompactionPostParams{ + HTTPClient: client, + } +} + +/* +CompactionManagerStopCompactionPostParams contains all the parameters to send to the API endpoint +for the compaction manager stop compaction post operation typically these are written to a http.Request +*/ +type CompactionManagerStopCompactionPostParams struct { + + /*Type + the type of compaction to stop. 
Can be one of: - COMPACTION - VALIDATION - CLEANUP - SCRUB - INDEX_BUILD + + */ + Type string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) WithTimeout(timeout time.Duration) *CompactionManagerStopCompactionPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) WithContext(ctx context.Context) *CompactionManagerStopCompactionPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) WithHTTPClient(client *http.Client) *CompactionManagerStopCompactionPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithType adds the typeVar to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) WithType(typeVar string) *CompactionManagerStopCompactionPostParams { + o.SetType(typeVar) + return o +} + +// SetType adds the type to the compaction manager stop compaction post params +func (o *CompactionManagerStopCompactionPostParams) SetType(typeVar string) { + o.Type = typeVar +} + +// WriteToRequest writes these params to a swagger request +func (o *CompactionManagerStopCompactionPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param type + qrType := o.Type + qType := qrType + if qType != "" { + if err := r.SetQueryParam("type", qType); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_stop_compaction_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_stop_compaction_post_responses.go new file mode 100644 index 00000000000..bba0091480c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/compaction_manager_stop_compaction_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// CompactionManagerStopCompactionPostReader is a Reader for the CompactionManagerStopCompactionPost structure. 
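For orientation, a minimal sketch of how the stop-compaction operation added above might be driven end to end. The params builder (NewCompactionManagerStopCompactionPostParams, WithContext, WithType) is part of this hunk; the transport wiring and the CompactionManagerStopCompactionPost method on the generated operations client are assumed from standard go-swagger codegen and do not appear in this diff.

package main

import (
	"context"
	"fmt"
	"time"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Assumed wiring: point a go-swagger runtime at a node's REST API address.
	transport := httptransport.New("localhost:10000", "/", []string{"http"})
	ops := operations.New(transport, strfmt.Default)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Stop COMPACTION-type compactions; WriteToRequest above serializes
	// Type as the "type" query parameter.
	params := operations.NewCompactionManagerStopCompactionPostParams().
		WithContext(ctx).
		WithType("COMPACTION")

	if _, err := ops.CompactionManagerStopCompactionPost(params); err != nil {
		// Non-2xx responses decode into the *Default type, whose Error()
		// method renders the agent's ErrorModel message.
		fmt.Println("stop compaction failed:", err)
	}
}

Per the parameter doc above, WithType also accepts VALIDATION, CLEANUP, SCRUB, and INDEX_BUILD.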
+type CompactionManagerStopCompactionPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *CompactionManagerStopCompactionPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewCompactionManagerStopCompactionPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewCompactionManagerStopCompactionPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewCompactionManagerStopCompactionPostOK creates a CompactionManagerStopCompactionPostOK with default headers values +func NewCompactionManagerStopCompactionPostOK() *CompactionManagerStopCompactionPostOK { + return &CompactionManagerStopCompactionPostOK{} +} + +/* +CompactionManagerStopCompactionPostOK handles this case with default header values. + +Success +*/ +type CompactionManagerStopCompactionPostOK struct { +} + +func (o *CompactionManagerStopCompactionPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewCompactionManagerStopCompactionPostDefault creates a CompactionManagerStopCompactionPostDefault with default headers values +func NewCompactionManagerStopCompactionPostDefault(code int) *CompactionManagerStopCompactionPostDefault { + return &CompactionManagerStopCompactionPostDefault{ + _statusCode: code, + } +} + +/* +CompactionManagerStopCompactionPostDefault handles this case with default header values. + +internal server error +*/ +type CompactionManagerStopCompactionPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the compaction manager stop compaction post default response +func (o *CompactionManagerStopCompactionPostDefault) Code() int { + return o._statusCode +} + +func (o *CompactionManagerStopCompactionPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *CompactionManagerStopCompactionPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *CompactionManagerStopCompactionPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_down_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_down_get_parameters.go new file mode 100644 index 00000000000..1fff6522690 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_down_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorCountEndpointDownGetParams creates a new FailureDetectorCountEndpointDownGetParams object +// with the default values initialized. +func NewFailureDetectorCountEndpointDownGetParams() *FailureDetectorCountEndpointDownGetParams { + + return &FailureDetectorCountEndpointDownGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorCountEndpointDownGetParamsWithTimeout creates a new FailureDetectorCountEndpointDownGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorCountEndpointDownGetParamsWithTimeout(timeout time.Duration) *FailureDetectorCountEndpointDownGetParams { + + return &FailureDetectorCountEndpointDownGetParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorCountEndpointDownGetParamsWithContext creates a new FailureDetectorCountEndpointDownGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorCountEndpointDownGetParamsWithContext(ctx context.Context) *FailureDetectorCountEndpointDownGetParams { + + return &FailureDetectorCountEndpointDownGetParams{ + + Context: ctx, + } +} + +// NewFailureDetectorCountEndpointDownGetParamsWithHTTPClient creates a new FailureDetectorCountEndpointDownGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorCountEndpointDownGetParamsWithHTTPClient(client *http.Client) *FailureDetectorCountEndpointDownGetParams { + + return &FailureDetectorCountEndpointDownGetParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorCountEndpointDownGetParams contains all the parameters to send to the API endpoint +for the failure detector count endpoint down get operation typically these are written to a http.Request +*/ +type FailureDetectorCountEndpointDownGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector count endpoint down get params +func (o *FailureDetectorCountEndpointDownGetParams) WithTimeout(timeout time.Duration) *FailureDetectorCountEndpointDownGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector count endpoint down get params +func (o *FailureDetectorCountEndpointDownGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector count endpoint down get params +func (o *FailureDetectorCountEndpointDownGetParams) WithContext(ctx context.Context) *FailureDetectorCountEndpointDownGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector count endpoint down get params +func (o *FailureDetectorCountEndpointDownGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector count endpoint down get params +func (o *FailureDetectorCountEndpointDownGetParams) WithHTTPClient(client *http.Client) *FailureDetectorCountEndpointDownGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector count endpoint down get params +func 
(o *FailureDetectorCountEndpointDownGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorCountEndpointDownGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_down_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_down_get_responses.go new file mode 100644 index 00000000000..88e2f92973e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_down_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorCountEndpointDownGetReader is a Reader for the FailureDetectorCountEndpointDownGet structure. +type FailureDetectorCountEndpointDownGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorCountEndpointDownGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorCountEndpointDownGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorCountEndpointDownGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorCountEndpointDownGetOK creates a FailureDetectorCountEndpointDownGetOK with default headers values +func NewFailureDetectorCountEndpointDownGetOK() *FailureDetectorCountEndpointDownGetOK { + return &FailureDetectorCountEndpointDownGetOK{} +} + +/* +FailureDetectorCountEndpointDownGetOK handles this case with default header values. + +Success +*/ +type FailureDetectorCountEndpointDownGetOK struct { + Payload int32 +} + +func (o *FailureDetectorCountEndpointDownGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *FailureDetectorCountEndpointDownGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorCountEndpointDownGetDefault creates a FailureDetectorCountEndpointDownGetDefault with default headers values +func NewFailureDetectorCountEndpointDownGetDefault(code int) *FailureDetectorCountEndpointDownGetDefault { + return &FailureDetectorCountEndpointDownGetDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorCountEndpointDownGetDefault handles this case with default header values. 
+ +internal server error +*/ +type FailureDetectorCountEndpointDownGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector count endpoint down get default response +func (o *FailureDetectorCountEndpointDownGetDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorCountEndpointDownGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorCountEndpointDownGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorCountEndpointDownGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_up_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_up_get_parameters.go new file mode 100644 index 00000000000..aae7d189b43 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_up_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorCountEndpointUpGetParams creates a new FailureDetectorCountEndpointUpGetParams object +// with the default values initialized. 
+func NewFailureDetectorCountEndpointUpGetParams() *FailureDetectorCountEndpointUpGetParams { + + return &FailureDetectorCountEndpointUpGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorCountEndpointUpGetParamsWithTimeout creates a new FailureDetectorCountEndpointUpGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorCountEndpointUpGetParamsWithTimeout(timeout time.Duration) *FailureDetectorCountEndpointUpGetParams { + + return &FailureDetectorCountEndpointUpGetParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorCountEndpointUpGetParamsWithContext creates a new FailureDetectorCountEndpointUpGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorCountEndpointUpGetParamsWithContext(ctx context.Context) *FailureDetectorCountEndpointUpGetParams { + + return &FailureDetectorCountEndpointUpGetParams{ + + Context: ctx, + } +} + +// NewFailureDetectorCountEndpointUpGetParamsWithHTTPClient creates a new FailureDetectorCountEndpointUpGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorCountEndpointUpGetParamsWithHTTPClient(client *http.Client) *FailureDetectorCountEndpointUpGetParams { + + return &FailureDetectorCountEndpointUpGetParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorCountEndpointUpGetParams contains all the parameters to send to the API endpoint +for the failure detector count endpoint up get operation typically these are written to a http.Request +*/ +type FailureDetectorCountEndpointUpGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector count endpoint up get params +func (o *FailureDetectorCountEndpointUpGetParams) WithTimeout(timeout time.Duration) *FailureDetectorCountEndpointUpGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector count endpoint up get params +func (o *FailureDetectorCountEndpointUpGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector count endpoint up get params +func (o *FailureDetectorCountEndpointUpGetParams) WithContext(ctx context.Context) *FailureDetectorCountEndpointUpGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector count endpoint up get params +func (o *FailureDetectorCountEndpointUpGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector count endpoint up get params +func (o *FailureDetectorCountEndpointUpGetParams) WithHTTPClient(client *http.Client) *FailureDetectorCountEndpointUpGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector count endpoint up get params +func (o *FailureDetectorCountEndpointUpGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorCountEndpointUpGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_up_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_up_get_responses.go new file mode 100644 index 00000000000..9d586c10502 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_count_endpoint_up_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorCountEndpointUpGetReader is a Reader for the FailureDetectorCountEndpointUpGet structure. +type FailureDetectorCountEndpointUpGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorCountEndpointUpGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorCountEndpointUpGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorCountEndpointUpGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorCountEndpointUpGetOK creates a FailureDetectorCountEndpointUpGetOK with default headers values +func NewFailureDetectorCountEndpointUpGetOK() *FailureDetectorCountEndpointUpGetOK { + return &FailureDetectorCountEndpointUpGetOK{} +} + +/* +FailureDetectorCountEndpointUpGetOK handles this case with default header values. + +Success +*/ +type FailureDetectorCountEndpointUpGetOK struct { + Payload int32 +} + +func (o *FailureDetectorCountEndpointUpGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *FailureDetectorCountEndpointUpGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorCountEndpointUpGetDefault creates a FailureDetectorCountEndpointUpGetDefault with default headers values +func NewFailureDetectorCountEndpointUpGetDefault(code int) *FailureDetectorCountEndpointUpGetDefault { + return &FailureDetectorCountEndpointUpGetDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorCountEndpointUpGetDefault handles this case with default header values. 
+ +internal server error +*/ +type FailureDetectorCountEndpointUpGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector count endpoint up get default response +func (o *FailureDetectorCountEndpointUpGetDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorCountEndpointUpGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorCountEndpointUpGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorCountEndpointUpGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoint_phi_values_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoint_phi_values_get_parameters.go new file mode 100644 index 00000000000..d0606c46498 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoint_phi_values_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorEndpointPhiValuesGetParams creates a new FailureDetectorEndpointPhiValuesGetParams object +// with the default values initialized. 
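The two counter endpoints above return a bare int32 payload. A small sketch reading both together, assuming the same generated operations client as in the earlier stop-compaction example (the two Get methods themselves live in the generated client file, outside this hunk):

import (
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// countEndpoints returns how many endpoints the node's failure detector
// currently considers up and down.
func countEndpoints(ops *operations.Client) (int32, int32, error) {
	up, err := ops.FailureDetectorCountEndpointUpGet(
		operations.NewFailureDetectorCountEndpointUpGetParams().WithTimeout(10 * time.Second),
	)
	if err != nil {
		return 0, 0, err
	}
	down, err := ops.FailureDetectorCountEndpointDownGet(
		operations.NewFailureDetectorCountEndpointDownGetParams().WithTimeout(10 * time.Second),
	)
	if err != nil {
		return 0, 0, err
	}
	return up.GetPayload(), down.GetPayload(), nil
}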
+func NewFailureDetectorEndpointPhiValuesGetParams() *FailureDetectorEndpointPhiValuesGetParams { + + return &FailureDetectorEndpointPhiValuesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorEndpointPhiValuesGetParamsWithTimeout creates a new FailureDetectorEndpointPhiValuesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorEndpointPhiValuesGetParamsWithTimeout(timeout time.Duration) *FailureDetectorEndpointPhiValuesGetParams { + + return &FailureDetectorEndpointPhiValuesGetParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorEndpointPhiValuesGetParamsWithContext creates a new FailureDetectorEndpointPhiValuesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorEndpointPhiValuesGetParamsWithContext(ctx context.Context) *FailureDetectorEndpointPhiValuesGetParams { + + return &FailureDetectorEndpointPhiValuesGetParams{ + + Context: ctx, + } +} + +// NewFailureDetectorEndpointPhiValuesGetParamsWithHTTPClient creates a new FailureDetectorEndpointPhiValuesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorEndpointPhiValuesGetParamsWithHTTPClient(client *http.Client) *FailureDetectorEndpointPhiValuesGetParams { + + return &FailureDetectorEndpointPhiValuesGetParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorEndpointPhiValuesGetParams contains all the parameters to send to the API endpoint +for the failure detector endpoint phi values get operation typically these are written to a http.Request +*/ +type FailureDetectorEndpointPhiValuesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector endpoint phi values get params +func (o *FailureDetectorEndpointPhiValuesGetParams) WithTimeout(timeout time.Duration) *FailureDetectorEndpointPhiValuesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector endpoint phi values get params +func (o *FailureDetectorEndpointPhiValuesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector endpoint phi values get params +func (o *FailureDetectorEndpointPhiValuesGetParams) WithContext(ctx context.Context) *FailureDetectorEndpointPhiValuesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector endpoint phi values get params +func (o *FailureDetectorEndpointPhiValuesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector endpoint phi values get params +func (o *FailureDetectorEndpointPhiValuesGetParams) WithHTTPClient(client *http.Client) *FailureDetectorEndpointPhiValuesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector endpoint phi values get params +func (o *FailureDetectorEndpointPhiValuesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorEndpointPhiValuesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoint_phi_values_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoint_phi_values_get_responses.go new file mode 100644 index 00000000000..e96171593db --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoint_phi_values_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorEndpointPhiValuesGetReader is a Reader for the FailureDetectorEndpointPhiValuesGet structure. +type FailureDetectorEndpointPhiValuesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorEndpointPhiValuesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorEndpointPhiValuesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorEndpointPhiValuesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorEndpointPhiValuesGetOK creates a FailureDetectorEndpointPhiValuesGetOK with default headers values +func NewFailureDetectorEndpointPhiValuesGetOK() *FailureDetectorEndpointPhiValuesGetOK { + return &FailureDetectorEndpointPhiValuesGetOK{} +} + +/* +FailureDetectorEndpointPhiValuesGetOK handles this case with default header values. + +Success +*/ +type FailureDetectorEndpointPhiValuesGetOK struct { + Payload interface{} +} + +func (o *FailureDetectorEndpointPhiValuesGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *FailureDetectorEndpointPhiValuesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorEndpointPhiValuesGetDefault creates a FailureDetectorEndpointPhiValuesGetDefault with default headers values +func NewFailureDetectorEndpointPhiValuesGetDefault(code int) *FailureDetectorEndpointPhiValuesGetDefault { + return &FailureDetectorEndpointPhiValuesGetDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorEndpointPhiValuesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type FailureDetectorEndpointPhiValuesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector endpoint phi values get default response +func (o *FailureDetectorEndpointPhiValuesGetDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorEndpointPhiValuesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorEndpointPhiValuesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorEndpointPhiValuesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_get_parameters.go new file mode 100644 index 00000000000..4e2db827c81 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorEndpointsGetParams creates a new FailureDetectorEndpointsGetParams object +// with the default values initialized. 
+func NewFailureDetectorEndpointsGetParams() *FailureDetectorEndpointsGetParams { + + return &FailureDetectorEndpointsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorEndpointsGetParamsWithTimeout creates a new FailureDetectorEndpointsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorEndpointsGetParamsWithTimeout(timeout time.Duration) *FailureDetectorEndpointsGetParams { + + return &FailureDetectorEndpointsGetParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorEndpointsGetParamsWithContext creates a new FailureDetectorEndpointsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorEndpointsGetParamsWithContext(ctx context.Context) *FailureDetectorEndpointsGetParams { + + return &FailureDetectorEndpointsGetParams{ + + Context: ctx, + } +} + +// NewFailureDetectorEndpointsGetParamsWithHTTPClient creates a new FailureDetectorEndpointsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorEndpointsGetParamsWithHTTPClient(client *http.Client) *FailureDetectorEndpointsGetParams { + + return &FailureDetectorEndpointsGetParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorEndpointsGetParams contains all the parameters to send to the API endpoint +for the failure detector endpoints get operation typically these are written to a http.Request +*/ +type FailureDetectorEndpointsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector endpoints get params +func (o *FailureDetectorEndpointsGetParams) WithTimeout(timeout time.Duration) *FailureDetectorEndpointsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector endpoints get params +func (o *FailureDetectorEndpointsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector endpoints get params +func (o *FailureDetectorEndpointsGetParams) WithContext(ctx context.Context) *FailureDetectorEndpointsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector endpoints get params +func (o *FailureDetectorEndpointsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector endpoints get params +func (o *FailureDetectorEndpointsGetParams) WithHTTPClient(client *http.Client) *FailureDetectorEndpointsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector endpoints get params +func (o *FailureDetectorEndpointsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorEndpointsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_get_responses.go new file mode 100644 index 00000000000..687837f9085 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorEndpointsGetReader is a Reader for the FailureDetectorEndpointsGet structure. +type FailureDetectorEndpointsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorEndpointsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorEndpointsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorEndpointsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorEndpointsGetOK creates a FailureDetectorEndpointsGetOK with default headers values +func NewFailureDetectorEndpointsGetOK() *FailureDetectorEndpointsGetOK { + return &FailureDetectorEndpointsGetOK{} +} + +/* +FailureDetectorEndpointsGetOK handles this case with default header values. + +Success +*/ +type FailureDetectorEndpointsGetOK struct { + Payload []*models.EndpointState +} + +func (o *FailureDetectorEndpointsGetOK) GetPayload() []*models.EndpointState { + return o.Payload +} + +func (o *FailureDetectorEndpointsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorEndpointsGetDefault creates a FailureDetectorEndpointsGetDefault with default headers values +func NewFailureDetectorEndpointsGetDefault(code int) *FailureDetectorEndpointsGetDefault { + return &FailureDetectorEndpointsGetDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorEndpointsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type FailureDetectorEndpointsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector endpoints get default response +func (o *FailureDetectorEndpointsGetDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorEndpointsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorEndpointsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorEndpointsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_states_by_addr_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_states_by_addr_get_parameters.go new file mode 100644 index 00000000000..ee78533eaa6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_states_by_addr_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorEndpointsStatesByAddrGetParams creates a new FailureDetectorEndpointsStatesByAddrGetParams object +// with the default values initialized. 
+func NewFailureDetectorEndpointsStatesByAddrGetParams() *FailureDetectorEndpointsStatesByAddrGetParams { + var () + return &FailureDetectorEndpointsStatesByAddrGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorEndpointsStatesByAddrGetParamsWithTimeout creates a new FailureDetectorEndpointsStatesByAddrGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorEndpointsStatesByAddrGetParamsWithTimeout(timeout time.Duration) *FailureDetectorEndpointsStatesByAddrGetParams { + var () + return &FailureDetectorEndpointsStatesByAddrGetParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorEndpointsStatesByAddrGetParamsWithContext creates a new FailureDetectorEndpointsStatesByAddrGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorEndpointsStatesByAddrGetParamsWithContext(ctx context.Context) *FailureDetectorEndpointsStatesByAddrGetParams { + var () + return &FailureDetectorEndpointsStatesByAddrGetParams{ + + Context: ctx, + } +} + +// NewFailureDetectorEndpointsStatesByAddrGetParamsWithHTTPClient creates a new FailureDetectorEndpointsStatesByAddrGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorEndpointsStatesByAddrGetParamsWithHTTPClient(client *http.Client) *FailureDetectorEndpointsStatesByAddrGetParams { + var () + return &FailureDetectorEndpointsStatesByAddrGetParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorEndpointsStatesByAddrGetParams contains all the parameters to send to the API endpoint +for the failure detector endpoints states by addr get operation typically these are written to a http.Request +*/ +type FailureDetectorEndpointsStatesByAddrGetParams struct { + + /*Addr + The endpoint address + + */ + Addr string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector endpoints states by addr get params +func (o *FailureDetectorEndpointsStatesByAddrGetParams) WithTimeout(timeout time.Duration) *FailureDetectorEndpointsStatesByAddrGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector endpoints states by addr get params +func (o *FailureDetectorEndpointsStatesByAddrGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector endpoints states by addr get params +func (o *FailureDetectorEndpointsStatesByAddrGetParams) WithContext(ctx context.Context) *FailureDetectorEndpointsStatesByAddrGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector endpoints states by addr get params +func (o *FailureDetectorEndpointsStatesByAddrGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector endpoints states by addr get params +func (o *FailureDetectorEndpointsStatesByAddrGetParams) WithHTTPClient(client *http.Client) *FailureDetectorEndpointsStatesByAddrGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector endpoints states by addr get params +func (o *FailureDetectorEndpointsStatesByAddrGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the failure detector endpoints states by addr get params 
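Two read paths for failure-detector endpoint state appear in this stretch: the bulk FailureDetectorEndpointsGet listing above (typed []*models.EndpointState) and the per-address lookup whose parameters file this is (path parameter addr, set via WithAddr declared just below; its responses file follows with a plain string payload). A sketch of both, with the client methods again assumed from standard go-swagger codegen:

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// dumpEndpointStates prints every endpoint the failure detector tracks,
// then re-fetches a single endpoint's state string by address.
func dumpEndpointStates(ops *operations.Client, addr string) error {
	all, err := ops.FailureDetectorEndpointsGet(operations.NewFailureDetectorEndpointsGetParams())
	if err != nil {
		return err
	}
	for _, ep := range all.GetPayload() {
		fmt.Printf("%+v\n", ep) // *models.EndpointState; see the models package for its fields
	}

	one, err := ops.FailureDetectorEndpointsStatesByAddrGet(
		operations.NewFailureDetectorEndpointsStatesByAddrGetParams().WithAddr(addr),
	)
	if err != nil {
		return err
	}
	fmt.Println(addr, "->", one.GetPayload())
	return nil
}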
+func (o *FailureDetectorEndpointsStatesByAddrGetParams) WithAddr(addr string) *FailureDetectorEndpointsStatesByAddrGetParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the failure detector endpoints states by addr get params +func (o *FailureDetectorEndpointsStatesByAddrGetParams) SetAddr(addr string) { + o.Addr = addr +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorEndpointsStatesByAddrGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param addr + if err := r.SetPathParam("addr", o.Addr); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_states_by_addr_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_states_by_addr_get_responses.go new file mode 100644 index 00000000000..b1a30a3159d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_endpoints_states_by_addr_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorEndpointsStatesByAddrGetReader is a Reader for the FailureDetectorEndpointsStatesByAddrGet structure. +type FailureDetectorEndpointsStatesByAddrGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorEndpointsStatesByAddrGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorEndpointsStatesByAddrGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorEndpointsStatesByAddrGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorEndpointsStatesByAddrGetOK creates a FailureDetectorEndpointsStatesByAddrGetOK with default headers values +func NewFailureDetectorEndpointsStatesByAddrGetOK() *FailureDetectorEndpointsStatesByAddrGetOK { + return &FailureDetectorEndpointsStatesByAddrGetOK{} +} + +/* +FailureDetectorEndpointsStatesByAddrGetOK handles this case with default header values. 
+ +Success +*/ +type FailureDetectorEndpointsStatesByAddrGetOK struct { + Payload string +} + +func (o *FailureDetectorEndpointsStatesByAddrGetOK) GetPayload() string { + return o.Payload +} + +func (o *FailureDetectorEndpointsStatesByAddrGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorEndpointsStatesByAddrGetDefault creates a FailureDetectorEndpointsStatesByAddrGetDefault with default headers values +func NewFailureDetectorEndpointsStatesByAddrGetDefault(code int) *FailureDetectorEndpointsStatesByAddrGetDefault { + return &FailureDetectorEndpointsStatesByAddrGetDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorEndpointsStatesByAddrGetDefault handles this case with default header values. + +internal server error +*/ +type FailureDetectorEndpointsStatesByAddrGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector endpoints states by addr get default response +func (o *FailureDetectorEndpointsStatesByAddrGetDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorEndpointsStatesByAddrGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorEndpointsStatesByAddrGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorEndpointsStatesByAddrGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_get_parameters.go new file mode 100644 index 00000000000..bf2175a1141 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorPhiGetParams creates a new FailureDetectorPhiGetParams object +// with the default values initialized. 
+func NewFailureDetectorPhiGetParams() *FailureDetectorPhiGetParams { + + return &FailureDetectorPhiGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorPhiGetParamsWithTimeout creates a new FailureDetectorPhiGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorPhiGetParamsWithTimeout(timeout time.Duration) *FailureDetectorPhiGetParams { + + return &FailureDetectorPhiGetParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorPhiGetParamsWithContext creates a new FailureDetectorPhiGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorPhiGetParamsWithContext(ctx context.Context) *FailureDetectorPhiGetParams { + + return &FailureDetectorPhiGetParams{ + + Context: ctx, + } +} + +// NewFailureDetectorPhiGetParamsWithHTTPClient creates a new FailureDetectorPhiGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorPhiGetParamsWithHTTPClient(client *http.Client) *FailureDetectorPhiGetParams { + + return &FailureDetectorPhiGetParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorPhiGetParams contains all the parameters to send to the API endpoint +for the failure detector phi get operation typically these are written to a http.Request +*/ +type FailureDetectorPhiGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector phi get params +func (o *FailureDetectorPhiGetParams) WithTimeout(timeout time.Duration) *FailureDetectorPhiGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector phi get params +func (o *FailureDetectorPhiGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector phi get params +func (o *FailureDetectorPhiGetParams) WithContext(ctx context.Context) *FailureDetectorPhiGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector phi get params +func (o *FailureDetectorPhiGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector phi get params +func (o *FailureDetectorPhiGetParams) WithHTTPClient(client *http.Client) *FailureDetectorPhiGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector phi get params +func (o *FailureDetectorPhiGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorPhiGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_get_responses.go new file mode 100644 index 00000000000..8ac26073ada --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorPhiGetReader is a Reader for the FailureDetectorPhiGet structure. +type FailureDetectorPhiGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorPhiGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorPhiGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorPhiGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorPhiGetOK creates a FailureDetectorPhiGetOK with default headers values +func NewFailureDetectorPhiGetOK() *FailureDetectorPhiGetOK { + return &FailureDetectorPhiGetOK{} +} + +/* +FailureDetectorPhiGetOK handles this case with default header values. + +Success +*/ +type FailureDetectorPhiGetOK struct { + Payload string +} + +func (o *FailureDetectorPhiGetOK) GetPayload() string { + return o.Payload +} + +func (o *FailureDetectorPhiGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorPhiGetDefault creates a FailureDetectorPhiGetDefault with default headers values +func NewFailureDetectorPhiGetDefault(code int) *FailureDetectorPhiGetDefault { + return &FailureDetectorPhiGetDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorPhiGetDefault handles this case with default header values. 
+ +internal server error +*/ +type FailureDetectorPhiGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector phi get default response +func (o *FailureDetectorPhiGetDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorPhiGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorPhiGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorPhiGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_post_parameters.go new file mode 100644 index 00000000000..76d817ace23 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorPhiPostParams creates a new FailureDetectorPhiPostParams object +// with the default values initialized. 
+func NewFailureDetectorPhiPostParams() *FailureDetectorPhiPostParams { + var () + return &FailureDetectorPhiPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorPhiPostParamsWithTimeout creates a new FailureDetectorPhiPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorPhiPostParamsWithTimeout(timeout time.Duration) *FailureDetectorPhiPostParams { + var () + return &FailureDetectorPhiPostParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorPhiPostParamsWithContext creates a new FailureDetectorPhiPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorPhiPostParamsWithContext(ctx context.Context) *FailureDetectorPhiPostParams { + var () + return &FailureDetectorPhiPostParams{ + + Context: ctx, + } +} + +// NewFailureDetectorPhiPostParamsWithHTTPClient creates a new FailureDetectorPhiPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorPhiPostParamsWithHTTPClient(client *http.Client) *FailureDetectorPhiPostParams { + var () + return &FailureDetectorPhiPostParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorPhiPostParams contains all the parameters to send to the API endpoint +for the failure detector phi post operation typically these are written to a http.Request +*/ +type FailureDetectorPhiPostParams struct { + + /*Phi + The new phi value + + */ + Phi string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) WithTimeout(timeout time.Duration) *FailureDetectorPhiPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) WithContext(ctx context.Context) *FailureDetectorPhiPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) WithHTTPClient(client *http.Client) *FailureDetectorPhiPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPhi adds the phi to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) WithPhi(phi string) *FailureDetectorPhiPostParams { + o.SetPhi(phi) + return o +} + +// SetPhi adds the phi to the failure detector phi post params +func (o *FailureDetectorPhiPostParams) SetPhi(phi string) { + o.Phi = phi +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorPhiPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param phi + qrPhi := o.Phi + qPhi := qrPhi + if qPhi != "" { + if err := r.SetQueryParam("phi", qPhi); err != nil { + 
return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_post_responses.go new file mode 100644 index 00000000000..58943ffccf8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_phi_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorPhiPostReader is a Reader for the FailureDetectorPhiPost structure. +type FailureDetectorPhiPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorPhiPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorPhiPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorPhiPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorPhiPostOK creates a FailureDetectorPhiPostOK with default headers values +func NewFailureDetectorPhiPostOK() *FailureDetectorPhiPostOK { + return &FailureDetectorPhiPostOK{} +} + +/* +FailureDetectorPhiPostOK handles this case with default header values. + +Success +*/ +type FailureDetectorPhiPostOK struct { + Payload interface{} +} + +func (o *FailureDetectorPhiPostOK) GetPayload() interface{} { + return o.Payload +} + +func (o *FailureDetectorPhiPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorPhiPostDefault creates a FailureDetectorPhiPostDefault with default headers values +func NewFailureDetectorPhiPostDefault(code int) *FailureDetectorPhiPostDefault { + return &FailureDetectorPhiPostDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorPhiPostDefault handles this case with default header values. 
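Note how the POST variant models Phi as a plain (non-pointer) required query parameter: WriteToRequest above only sets it when non-empty, so an empty string is silently omitted rather than sent. A hedged usage sketch, with the client built as in the earlier example and the FailureDetectorPhiPost method name assumed from the Reader naming:

import (
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// setPhi updates the failure detector's phi convict threshold on one node.
func setPhi(ops *operations.Client) error {
	params := operations.NewFailureDetectorPhiPostParamsWithTimeout(5 * time.Second).
		WithPhi("8") // sent as ?phi=8; "" would be dropped by WriteToRequest

	_, err := ops.FailureDetectorPhiPost(params) // assumed generated method
	return err
}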
+ +internal server error +*/ +type FailureDetectorPhiPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector phi post default response +func (o *FailureDetectorPhiPostDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorPhiPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorPhiPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorPhiPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_simple_states_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_simple_states_get_parameters.go new file mode 100644 index 00000000000..ca8a22361fe --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_simple_states_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFailureDetectorSimpleStatesGetParams creates a new FailureDetectorSimpleStatesGetParams object +// with the default values initialized. 
+func NewFailureDetectorSimpleStatesGetParams() *FailureDetectorSimpleStatesGetParams { + + return &FailureDetectorSimpleStatesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFailureDetectorSimpleStatesGetParamsWithTimeout creates a new FailureDetectorSimpleStatesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFailureDetectorSimpleStatesGetParamsWithTimeout(timeout time.Duration) *FailureDetectorSimpleStatesGetParams { + + return &FailureDetectorSimpleStatesGetParams{ + + timeout: timeout, + } +} + +// NewFailureDetectorSimpleStatesGetParamsWithContext creates a new FailureDetectorSimpleStatesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewFailureDetectorSimpleStatesGetParamsWithContext(ctx context.Context) *FailureDetectorSimpleStatesGetParams { + + return &FailureDetectorSimpleStatesGetParams{ + + Context: ctx, + } +} + +// NewFailureDetectorSimpleStatesGetParamsWithHTTPClient creates a new FailureDetectorSimpleStatesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFailureDetectorSimpleStatesGetParamsWithHTTPClient(client *http.Client) *FailureDetectorSimpleStatesGetParams { + + return &FailureDetectorSimpleStatesGetParams{ + HTTPClient: client, + } +} + +/* +FailureDetectorSimpleStatesGetParams contains all the parameters to send to the API endpoint +for the failure detector simple states get operation typically these are written to a http.Request +*/ +type FailureDetectorSimpleStatesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the failure detector simple states get params +func (o *FailureDetectorSimpleStatesGetParams) WithTimeout(timeout time.Duration) *FailureDetectorSimpleStatesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the failure detector simple states get params +func (o *FailureDetectorSimpleStatesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the failure detector simple states get params +func (o *FailureDetectorSimpleStatesGetParams) WithContext(ctx context.Context) *FailureDetectorSimpleStatesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the failure detector simple states get params +func (o *FailureDetectorSimpleStatesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the failure detector simple states get params +func (o *FailureDetectorSimpleStatesGetParams) WithHTTPClient(client *http.Client) *FailureDetectorSimpleStatesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the failure detector simple states get params +func (o *FailureDetectorSimpleStatesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FailureDetectorSimpleStatesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_simple_states_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_simple_states_get_responses.go new file mode 100644 index 00000000000..f321a09e6fd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/failure_detector_simple_states_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// FailureDetectorSimpleStatesGetReader is a Reader for the FailureDetectorSimpleStatesGet structure. +type FailureDetectorSimpleStatesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FailureDetectorSimpleStatesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFailureDetectorSimpleStatesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFailureDetectorSimpleStatesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFailureDetectorSimpleStatesGetOK creates a FailureDetectorSimpleStatesGetOK with default headers values +func NewFailureDetectorSimpleStatesGetOK() *FailureDetectorSimpleStatesGetOK { + return &FailureDetectorSimpleStatesGetOK{} +} + +/* +FailureDetectorSimpleStatesGetOK handles this case with default header values. + +Success +*/ +type FailureDetectorSimpleStatesGetOK struct { + Payload []*models.Mapper +} + +func (o *FailureDetectorSimpleStatesGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *FailureDetectorSimpleStatesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFailureDetectorSimpleStatesGetDefault creates a FailureDetectorSimpleStatesGetDefault with default headers values +func NewFailureDetectorSimpleStatesGetDefault(code int) *FailureDetectorSimpleStatesGetDefault { + return &FailureDetectorSimpleStatesGetDefault{ + _statusCode: code, + } +} + +/* +FailureDetectorSimpleStatesGetDefault handles this case with default header values. 
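Here the 200 payload is a slice of *models.Mapper, a generated key/value model defined elsewhere in this diff; its fields are not shown in this hunk, so the sketch below prints the entries generically rather than assuming field names. Method name assumed as before:

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// printSimpleStates dumps the failure detector's per-endpoint state list.
func printSimpleStates(ops *operations.Client) error {
	ok, err := ops.FailureDetectorSimpleStatesGet(operations.NewFailureDetectorSimpleStatesGetParams())
	if err != nil {
		return err
	}
	for _, m := range ok.GetPayload() {
		fmt.Printf("%+v\n", m) // Mapper fields not shown in this hunk
	}
	return nil
}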
+ +internal server error +*/ +type FailureDetectorSimpleStatesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the failure detector simple states get default response +func (o *FailureDetectorSimpleStatesGetDefault) Code() int { + return o._statusCode +} + +func (o *FailureDetectorSimpleStatesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FailureDetectorSimpleStatesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FailureDetectorSimpleStatesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_assassinate_by_addr_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_assassinate_by_addr_post_parameters.go new file mode 100644 index 00000000000..8665150b70f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_assassinate_by_addr_post_parameters.go @@ -0,0 +1,169 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewGossiperAssassinateByAddrPostParams creates a new GossiperAssassinateByAddrPostParams object +// with the default values initialized. 
+func NewGossiperAssassinateByAddrPostParams() *GossiperAssassinateByAddrPostParams { + var () + return &GossiperAssassinateByAddrPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGossiperAssassinateByAddrPostParamsWithTimeout creates a new GossiperAssassinateByAddrPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGossiperAssassinateByAddrPostParamsWithTimeout(timeout time.Duration) *GossiperAssassinateByAddrPostParams { + var () + return &GossiperAssassinateByAddrPostParams{ + + timeout: timeout, + } +} + +// NewGossiperAssassinateByAddrPostParamsWithContext creates a new GossiperAssassinateByAddrPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewGossiperAssassinateByAddrPostParamsWithContext(ctx context.Context) *GossiperAssassinateByAddrPostParams { + var () + return &GossiperAssassinateByAddrPostParams{ + + Context: ctx, + } +} + +// NewGossiperAssassinateByAddrPostParamsWithHTTPClient creates a new GossiperAssassinateByAddrPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGossiperAssassinateByAddrPostParamsWithHTTPClient(client *http.Client) *GossiperAssassinateByAddrPostParams { + var () + return &GossiperAssassinateByAddrPostParams{ + HTTPClient: client, + } +} + +/* +GossiperAssassinateByAddrPostParams contains all the parameters to send to the API endpoint +for the gossiper assassinate by addr post operation typically these are written to a http.Request +*/ +type GossiperAssassinateByAddrPostParams struct { + + /*Addr + The endpoint address + + */ + Addr string + /*Unsafe + Set to True to perform an unsafe assassination + + */ + Unsafe *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) WithTimeout(timeout time.Duration) *GossiperAssassinateByAddrPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) WithContext(ctx context.Context) *GossiperAssassinateByAddrPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) WithHTTPClient(client *http.Client) *GossiperAssassinateByAddrPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) WithAddr(addr string) *GossiperAssassinateByAddrPostParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) SetAddr(addr string) { + o.Addr = addr +} 
+ +// WithUnsafe adds the unsafe to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) WithUnsafe(unsafe *bool) *GossiperAssassinateByAddrPostParams { + o.SetUnsafe(unsafe) + return o +} + +// SetUnsafe adds the unsafe to the gossiper assassinate by addr post params +func (o *GossiperAssassinateByAddrPostParams) SetUnsafe(unsafe *bool) { + o.Unsafe = unsafe +} + +// WriteToRequest writes these params to a swagger request +func (o *GossiperAssassinateByAddrPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param addr + if err := r.SetPathParam("addr", o.Addr); err != nil { + return err + } + + if o.Unsafe != nil { + + // query param unsafe + var qrUnsafe bool + if o.Unsafe != nil { + qrUnsafe = *o.Unsafe + } + qUnsafe := swag.FormatBool(qrUnsafe) + if qUnsafe != "" { + if err := r.SetQueryParam("unsafe", qUnsafe); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_assassinate_by_addr_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_assassinate_by_addr_post_responses.go new file mode 100644 index 00000000000..188d9703133 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_assassinate_by_addr_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// GossiperAssassinateByAddrPostReader is a Reader for the GossiperAssassinateByAddrPost structure. +type GossiperAssassinateByAddrPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GossiperAssassinateByAddrPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGossiperAssassinateByAddrPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGossiperAssassinateByAddrPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGossiperAssassinateByAddrPostOK creates a GossiperAssassinateByAddrPostOK with default headers values +func NewGossiperAssassinateByAddrPostOK() *GossiperAssassinateByAddrPostOK { + return &GossiperAssassinateByAddrPostOK{} +} + +/* +GossiperAssassinateByAddrPostOK handles this case with default header values. 
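This operation illustrates the generator's two parameter conventions side by side: Addr is a required path parameter held by value, while Unsafe is an optional query parameter held by pointer so that nil means "omit". The go-openapi swag package supplies the pointer helpers. A sketch under the same assumptions as above:

import (
	"github.com/go-openapi/swag"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// assassinate force-removes a dead endpoint from gossip.
func assassinate(ops *operations.Client, addr string) error {
	params := operations.NewGossiperAssassinateByAddrPostParams().
		WithAddr(addr).              // required: path param, plain string
		WithUnsafe(swag.Bool(false)) // optional: pointer, nil would omit ?unsafe=

	_, err := ops.GossiperAssassinateByAddrPost(params) // assumed generated method
	return err
}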
+ +Success +*/ +type GossiperAssassinateByAddrPostOK struct { +} + +func (o *GossiperAssassinateByAddrPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewGossiperAssassinateByAddrPostDefault creates a GossiperAssassinateByAddrPostDefault with default headers values +func NewGossiperAssassinateByAddrPostDefault(code int) *GossiperAssassinateByAddrPostDefault { + return &GossiperAssassinateByAddrPostDefault{ + _statusCode: code, + } +} + +/* +GossiperAssassinateByAddrPostDefault handles this case with default header values. + +internal server error +*/ +type GossiperAssassinateByAddrPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the gossiper assassinate by addr post default response +func (o *GossiperAssassinateByAddrPostDefault) Code() int { + return o._statusCode +} + +func (o *GossiperAssassinateByAddrPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *GossiperAssassinateByAddrPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *GossiperAssassinateByAddrPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_downtime_by_addr_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_downtime_by_addr_get_parameters.go new file mode 100644 index 00000000000..02a33e659d7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_downtime_by_addr_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGossiperDowntimeByAddrGetParams creates a new GossiperDowntimeByAddrGetParams object +// with the default values initialized. 
+func NewGossiperDowntimeByAddrGetParams() *GossiperDowntimeByAddrGetParams { + var () + return &GossiperDowntimeByAddrGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGossiperDowntimeByAddrGetParamsWithTimeout creates a new GossiperDowntimeByAddrGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGossiperDowntimeByAddrGetParamsWithTimeout(timeout time.Duration) *GossiperDowntimeByAddrGetParams { + var () + return &GossiperDowntimeByAddrGetParams{ + + timeout: timeout, + } +} + +// NewGossiperDowntimeByAddrGetParamsWithContext creates a new GossiperDowntimeByAddrGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewGossiperDowntimeByAddrGetParamsWithContext(ctx context.Context) *GossiperDowntimeByAddrGetParams { + var () + return &GossiperDowntimeByAddrGetParams{ + + Context: ctx, + } +} + +// NewGossiperDowntimeByAddrGetParamsWithHTTPClient creates a new GossiperDowntimeByAddrGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGossiperDowntimeByAddrGetParamsWithHTTPClient(client *http.Client) *GossiperDowntimeByAddrGetParams { + var () + return &GossiperDowntimeByAddrGetParams{ + HTTPClient: client, + } +} + +/* +GossiperDowntimeByAddrGetParams contains all the parameters to send to the API endpoint +for the gossiper downtime by addr get operation typically these are written to a http.Request +*/ +type GossiperDowntimeByAddrGetParams struct { + + /*Addr + The endpoint address + + */ + Addr string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) WithTimeout(timeout time.Duration) *GossiperDowntimeByAddrGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) WithContext(ctx context.Context) *GossiperDowntimeByAddrGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) WithHTTPClient(client *http.Client) *GossiperDowntimeByAddrGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) WithAddr(addr string) *GossiperDowntimeByAddrGetParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the gossiper downtime by addr get params +func (o *GossiperDowntimeByAddrGetParams) SetAddr(addr string) { + o.Addr = addr +} + +// WriteToRequest writes these params to a swagger request +func (o *GossiperDowntimeByAddrGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + 
var res []error + + // path param addr + if err := r.SetPathParam("addr", o.Addr); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_downtime_by_addr_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_downtime_by_addr_get_responses.go new file mode 100644 index 00000000000..8274e4c61d4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_downtime_by_addr_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// GossiperDowntimeByAddrGetReader is a Reader for the GossiperDowntimeByAddrGet structure. +type GossiperDowntimeByAddrGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GossiperDowntimeByAddrGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGossiperDowntimeByAddrGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGossiperDowntimeByAddrGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGossiperDowntimeByAddrGetOK creates a GossiperDowntimeByAddrGetOK with default headers values +func NewGossiperDowntimeByAddrGetOK() *GossiperDowntimeByAddrGetOK { + return &GossiperDowntimeByAddrGetOK{} +} + +/* +GossiperDowntimeByAddrGetOK handles this case with default header values. + +Success +*/ +type GossiperDowntimeByAddrGetOK struct { + Payload interface{} +} + +func (o *GossiperDowntimeByAddrGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *GossiperDowntimeByAddrGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGossiperDowntimeByAddrGetDefault creates a GossiperDowntimeByAddrGetDefault with default headers values +func NewGossiperDowntimeByAddrGetDefault(code int) *GossiperDowntimeByAddrGetDefault { + return &GossiperDowntimeByAddrGetDefault{ + _statusCode: code, + } +} + +/* +GossiperDowntimeByAddrGetDefault handles this case with default header values. 
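GossiperDowntimeByAddrGetOK.Payload is interface{}, so the decoded body needs inspection on the caller's side; with a default JSON consumer a numeric body typically arrives as float64 or json.Number, though the consumer configuration sits outside this hunk. A hedged sketch:

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// printDowntime prints the downtime reported for one endpoint. The payload is
// untyped in the swagger model, so it is printed as-is.
func printDowntime(ops *operations.Client, addr string) error {
	ok, err := ops.GossiperDowntimeByAddrGet(
		operations.NewGossiperDowntimeByAddrGetParams().WithAddr(addr))
	if err != nil {
		return err
	}
	fmt.Println("downtime:", ok.GetPayload())
	return nil
}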
+ +internal server error +*/ +type GossiperDowntimeByAddrGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the gossiper downtime by addr get default response +func (o *GossiperDowntimeByAddrGetDefault) Code() int { + return o._statusCode +} + +func (o *GossiperDowntimeByAddrGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *GossiperDowntimeByAddrGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *GossiperDowntimeByAddrGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_down_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_down_get_parameters.go new file mode 100644 index 00000000000..f7a3d069d5d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_down_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGossiperEndpointDownGetParams creates a new GossiperEndpointDownGetParams object +// with the default values initialized. 
+func NewGossiperEndpointDownGetParams() *GossiperEndpointDownGetParams { + + return &GossiperEndpointDownGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGossiperEndpointDownGetParamsWithTimeout creates a new GossiperEndpointDownGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGossiperEndpointDownGetParamsWithTimeout(timeout time.Duration) *GossiperEndpointDownGetParams { + + return &GossiperEndpointDownGetParams{ + + timeout: timeout, + } +} + +// NewGossiperEndpointDownGetParamsWithContext creates a new GossiperEndpointDownGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewGossiperEndpointDownGetParamsWithContext(ctx context.Context) *GossiperEndpointDownGetParams { + + return &GossiperEndpointDownGetParams{ + + Context: ctx, + } +} + +// NewGossiperEndpointDownGetParamsWithHTTPClient creates a new GossiperEndpointDownGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGossiperEndpointDownGetParamsWithHTTPClient(client *http.Client) *GossiperEndpointDownGetParams { + + return &GossiperEndpointDownGetParams{ + HTTPClient: client, + } +} + +/* +GossiperEndpointDownGetParams contains all the parameters to send to the API endpoint +for the gossiper endpoint down get operation typically these are written to a http.Request +*/ +type GossiperEndpointDownGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the gossiper endpoint down get params +func (o *GossiperEndpointDownGetParams) WithTimeout(timeout time.Duration) *GossiperEndpointDownGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the gossiper endpoint down get params +func (o *GossiperEndpointDownGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the gossiper endpoint down get params +func (o *GossiperEndpointDownGetParams) WithContext(ctx context.Context) *GossiperEndpointDownGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the gossiper endpoint down get params +func (o *GossiperEndpointDownGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the gossiper endpoint down get params +func (o *GossiperEndpointDownGetParams) WithHTTPClient(client *http.Client) *GossiperEndpointDownGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the gossiper endpoint down get params +func (o *GossiperEndpointDownGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GossiperEndpointDownGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_down_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_down_get_responses.go new file mode 100644 index 00000000000..4b86c35f466 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_down_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// GossiperEndpointDownGetReader is a Reader for the GossiperEndpointDownGet structure. +type GossiperEndpointDownGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GossiperEndpointDownGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGossiperEndpointDownGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGossiperEndpointDownGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGossiperEndpointDownGetOK creates a GossiperEndpointDownGetOK with default headers values +func NewGossiperEndpointDownGetOK() *GossiperEndpointDownGetOK { + return &GossiperEndpointDownGetOK{} +} + +/* +GossiperEndpointDownGetOK handles this case with default header values. + +Success +*/ +type GossiperEndpointDownGetOK struct { + Payload []string +} + +func (o *GossiperEndpointDownGetOK) GetPayload() []string { + return o.Payload +} + +func (o *GossiperEndpointDownGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGossiperEndpointDownGetDefault creates a GossiperEndpointDownGetDefault with default headers values +func NewGossiperEndpointDownGetDefault(code int) *GossiperEndpointDownGetDefault { + return &GossiperEndpointDownGetDefault{ + _statusCode: code, + } +} + +/* +GossiperEndpointDownGetDefault handles this case with default header values. 
+ +internal server error +*/ +type GossiperEndpointDownGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the gossiper endpoint down get default response +func (o *GossiperEndpointDownGetDefault) Code() int { + return o._statusCode +} + +func (o *GossiperEndpointDownGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *GossiperEndpointDownGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *GossiperEndpointDownGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_live_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_live_get_parameters.go new file mode 100644 index 00000000000..4b66665fe23 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_live_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGossiperEndpointLiveGetParams creates a new GossiperEndpointLiveGetParams object +// with the default values initialized. 
+func NewGossiperEndpointLiveGetParams() *GossiperEndpointLiveGetParams { + + return &GossiperEndpointLiveGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGossiperEndpointLiveGetParamsWithTimeout creates a new GossiperEndpointLiveGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGossiperEndpointLiveGetParamsWithTimeout(timeout time.Duration) *GossiperEndpointLiveGetParams { + + return &GossiperEndpointLiveGetParams{ + + timeout: timeout, + } +} + +// NewGossiperEndpointLiveGetParamsWithContext creates a new GossiperEndpointLiveGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewGossiperEndpointLiveGetParamsWithContext(ctx context.Context) *GossiperEndpointLiveGetParams { + + return &GossiperEndpointLiveGetParams{ + + Context: ctx, + } +} + +// NewGossiperEndpointLiveGetParamsWithHTTPClient creates a new GossiperEndpointLiveGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGossiperEndpointLiveGetParamsWithHTTPClient(client *http.Client) *GossiperEndpointLiveGetParams { + + return &GossiperEndpointLiveGetParams{ + HTTPClient: client, + } +} + +/* +GossiperEndpointLiveGetParams contains all the parameters to send to the API endpoint +for the gossiper endpoint live get operation typically these are written to a http.Request +*/ +type GossiperEndpointLiveGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the gossiper endpoint live get params +func (o *GossiperEndpointLiveGetParams) WithTimeout(timeout time.Duration) *GossiperEndpointLiveGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the gossiper endpoint live get params +func (o *GossiperEndpointLiveGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the gossiper endpoint live get params +func (o *GossiperEndpointLiveGetParams) WithContext(ctx context.Context) *GossiperEndpointLiveGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the gossiper endpoint live get params +func (o *GossiperEndpointLiveGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the gossiper endpoint live get params +func (o *GossiperEndpointLiveGetParams) WithHTTPClient(client *http.Client) *GossiperEndpointLiveGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the gossiper endpoint live get params +func (o *GossiperEndpointLiveGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *GossiperEndpointLiveGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_live_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_live_get_responses.go new file mode 100644 index 00000000000..d8cba3d279b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_endpoint_live_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// GossiperEndpointLiveGetReader is a Reader for the GossiperEndpointLiveGet structure. +type GossiperEndpointLiveGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GossiperEndpointLiveGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGossiperEndpointLiveGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGossiperEndpointLiveGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGossiperEndpointLiveGetOK creates a GossiperEndpointLiveGetOK with default headers values +func NewGossiperEndpointLiveGetOK() *GossiperEndpointLiveGetOK { + return &GossiperEndpointLiveGetOK{} +} + +/* +GossiperEndpointLiveGetOK handles this case with default header values. + +Success +*/ +type GossiperEndpointLiveGetOK struct { + Payload []string +} + +func (o *GossiperEndpointLiveGetOK) GetPayload() []string { + return o.Payload +} + +func (o *GossiperEndpointLiveGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGossiperEndpointLiveGetDefault creates a GossiperEndpointLiveGetDefault with default headers values +func NewGossiperEndpointLiveGetDefault(code int) *GossiperEndpointLiveGetDefault { + return &GossiperEndpointLiveGetDefault{ + _statusCode: code, + } +} + +/* +GossiperEndpointLiveGetDefault handles this case with default header values. 
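The endpoint-down and endpoint-live operations are a natural pair; both take no parameters and both return a plain []string payload, as the OK types above show. A combined sketch, method names assumed from the Reader naming as before:

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// printEndpointStates lists the endpoints gossip currently considers down and live.
func printEndpointStates(ops *operations.Client) error {
	down, err := ops.GossiperEndpointDownGet(operations.NewGossiperEndpointDownGetParams())
	if err != nil {
		return err
	}
	live, err := ops.GossiperEndpointLiveGet(operations.NewGossiperEndpointLiveGetParams())
	if err != nil {
		return err
	}
	fmt.Println("down:", down.GetPayload())
	fmt.Println("live:", live.GetPayload())
	return nil
}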
+ +internal server error +*/ +type GossiperEndpointLiveGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the gossiper endpoint live get default response +func (o *GossiperEndpointLiveGetDefault) Code() int { + return o._statusCode +} + +func (o *GossiperEndpointLiveGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *GossiperEndpointLiveGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *GossiperEndpointLiveGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_generation_number_by_addr_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_generation_number_by_addr_get_parameters.go new file mode 100644 index 00000000000..7e8c6e7aba0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_generation_number_by_addr_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGossiperGenerationNumberByAddrGetParams creates a new GossiperGenerationNumberByAddrGetParams object +// with the default values initialized. 
+func NewGossiperGenerationNumberByAddrGetParams() *GossiperGenerationNumberByAddrGetParams { + var () + return &GossiperGenerationNumberByAddrGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGossiperGenerationNumberByAddrGetParamsWithTimeout creates a new GossiperGenerationNumberByAddrGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGossiperGenerationNumberByAddrGetParamsWithTimeout(timeout time.Duration) *GossiperGenerationNumberByAddrGetParams { + var () + return &GossiperGenerationNumberByAddrGetParams{ + + timeout: timeout, + } +} + +// NewGossiperGenerationNumberByAddrGetParamsWithContext creates a new GossiperGenerationNumberByAddrGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewGossiperGenerationNumberByAddrGetParamsWithContext(ctx context.Context) *GossiperGenerationNumberByAddrGetParams { + var () + return &GossiperGenerationNumberByAddrGetParams{ + + Context: ctx, + } +} + +// NewGossiperGenerationNumberByAddrGetParamsWithHTTPClient creates a new GossiperGenerationNumberByAddrGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGossiperGenerationNumberByAddrGetParamsWithHTTPClient(client *http.Client) *GossiperGenerationNumberByAddrGetParams { + var () + return &GossiperGenerationNumberByAddrGetParams{ + HTTPClient: client, + } +} + +/* +GossiperGenerationNumberByAddrGetParams contains all the parameters to send to the API endpoint +for the gossiper generation number by addr get operation typically these are written to a http.Request +*/ +type GossiperGenerationNumberByAddrGetParams struct { + + /*Addr + The endpoint address + + */ + Addr string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the gossiper generation number by addr get params +func (o *GossiperGenerationNumberByAddrGetParams) WithTimeout(timeout time.Duration) *GossiperGenerationNumberByAddrGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the gossiper generation number by addr get params +func (o *GossiperGenerationNumberByAddrGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the gossiper generation number by addr get params +func (o *GossiperGenerationNumberByAddrGetParams) WithContext(ctx context.Context) *GossiperGenerationNumberByAddrGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the gossiper generation number by addr get params +func (o *GossiperGenerationNumberByAddrGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the gossiper generation number by addr get params +func (o *GossiperGenerationNumberByAddrGetParams) WithHTTPClient(client *http.Client) *GossiperGenerationNumberByAddrGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the gossiper generation number by addr get params +func (o *GossiperGenerationNumberByAddrGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the gossiper generation number by addr get params +func (o *GossiperGenerationNumberByAddrGetParams) WithAddr(addr string) *GossiperGenerationNumberByAddrGetParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the gossiper generation number by addr get params 
+func (o *GossiperGenerationNumberByAddrGetParams) SetAddr(addr string) { + o.Addr = addr +} + +// WriteToRequest writes these params to a swagger request +func (o *GossiperGenerationNumberByAddrGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param addr + if err := r.SetPathParam("addr", o.Addr); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_generation_number_by_addr_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_generation_number_by_addr_get_responses.go new file mode 100644 index 00000000000..90420a7d798 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_generation_number_by_addr_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// GossiperGenerationNumberByAddrGetReader is a Reader for the GossiperGenerationNumberByAddrGet structure. +type GossiperGenerationNumberByAddrGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GossiperGenerationNumberByAddrGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGossiperGenerationNumberByAddrGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGossiperGenerationNumberByAddrGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGossiperGenerationNumberByAddrGetOK creates a GossiperGenerationNumberByAddrGetOK with default headers values +func NewGossiperGenerationNumberByAddrGetOK() *GossiperGenerationNumberByAddrGetOK { + return &GossiperGenerationNumberByAddrGetOK{} +} + +/* +GossiperGenerationNumberByAddrGetOK handles this case with default header values. 
+ +Success +*/ +type GossiperGenerationNumberByAddrGetOK struct { + Payload int32 +} + +func (o *GossiperGenerationNumberByAddrGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *GossiperGenerationNumberByAddrGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGossiperGenerationNumberByAddrGetDefault creates a GossiperGenerationNumberByAddrGetDefault with default headers values +func NewGossiperGenerationNumberByAddrGetDefault(code int) *GossiperGenerationNumberByAddrGetDefault { + return &GossiperGenerationNumberByAddrGetDefault{ + _statusCode: code, + } +} + +/* +GossiperGenerationNumberByAddrGetDefault handles this case with default header values. + +internal server error +*/ +type GossiperGenerationNumberByAddrGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the gossiper generation number by addr get default response +func (o *GossiperGenerationNumberByAddrGetDefault) Code() int { + return o._statusCode +} + +func (o *GossiperGenerationNumberByAddrGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *GossiperGenerationNumberByAddrGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *GossiperGenerationNumberByAddrGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_heart_beat_version_by_addr_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_heart_beat_version_by_addr_get_parameters.go new file mode 100644 index 00000000000..a8b2c19c9a8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_heart_beat_version_by_addr_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewGossiperHeartBeatVersionByAddrGetParams creates a new GossiperHeartBeatVersionByAddrGetParams object +// with the default values initialized. 
+func NewGossiperHeartBeatVersionByAddrGetParams() *GossiperHeartBeatVersionByAddrGetParams { + var () + return &GossiperHeartBeatVersionByAddrGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewGossiperHeartBeatVersionByAddrGetParamsWithTimeout creates a new GossiperHeartBeatVersionByAddrGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewGossiperHeartBeatVersionByAddrGetParamsWithTimeout(timeout time.Duration) *GossiperHeartBeatVersionByAddrGetParams { + var () + return &GossiperHeartBeatVersionByAddrGetParams{ + + timeout: timeout, + } +} + +// NewGossiperHeartBeatVersionByAddrGetParamsWithContext creates a new GossiperHeartBeatVersionByAddrGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewGossiperHeartBeatVersionByAddrGetParamsWithContext(ctx context.Context) *GossiperHeartBeatVersionByAddrGetParams { + var () + return &GossiperHeartBeatVersionByAddrGetParams{ + + Context: ctx, + } +} + +// NewGossiperHeartBeatVersionByAddrGetParamsWithHTTPClient creates a new GossiperHeartBeatVersionByAddrGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewGossiperHeartBeatVersionByAddrGetParamsWithHTTPClient(client *http.Client) *GossiperHeartBeatVersionByAddrGetParams { + var () + return &GossiperHeartBeatVersionByAddrGetParams{ + HTTPClient: client, + } +} + +/* +GossiperHeartBeatVersionByAddrGetParams contains all the parameters to send to the API endpoint +for the gossiper heart beat version by addr get operation typically these are written to a http.Request +*/ +type GossiperHeartBeatVersionByAddrGetParams struct { + + /*Addr + The endpoint address + + */ + Addr string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the gossiper heart beat version by addr get params +func (o *GossiperHeartBeatVersionByAddrGetParams) WithTimeout(timeout time.Duration) *GossiperHeartBeatVersionByAddrGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the gossiper heart beat version by addr get params +func (o *GossiperHeartBeatVersionByAddrGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the gossiper heart beat version by addr get params +func (o *GossiperHeartBeatVersionByAddrGetParams) WithContext(ctx context.Context) *GossiperHeartBeatVersionByAddrGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the gossiper heart beat version by addr get params +func (o *GossiperHeartBeatVersionByAddrGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the gossiper heart beat version by addr get params +func (o *GossiperHeartBeatVersionByAddrGetParams) WithHTTPClient(client *http.Client) *GossiperHeartBeatVersionByAddrGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the gossiper heart beat version by addr get params +func (o *GossiperHeartBeatVersionByAddrGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the gossiper heart beat version by addr get params +func (o *GossiperHeartBeatVersionByAddrGetParams) WithAddr(addr string) *GossiperHeartBeatVersionByAddrGetParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the gossiper heart beat version by addr get 
params +func (o *GossiperHeartBeatVersionByAddrGetParams) SetAddr(addr string) { + o.Addr = addr +} + +// WriteToRequest writes these params to a swagger request +func (o *GossiperHeartBeatVersionByAddrGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param addr + if err := r.SetPathParam("addr", o.Addr); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_heart_beat_version_by_addr_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_heart_beat_version_by_addr_get_responses.go new file mode 100644 index 00000000000..4966b4b09d0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/gossiper_heart_beat_version_by_addr_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// GossiperHeartBeatVersionByAddrGetReader is a Reader for the GossiperHeartBeatVersionByAddrGet structure. +type GossiperHeartBeatVersionByAddrGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *GossiperHeartBeatVersionByAddrGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewGossiperHeartBeatVersionByAddrGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewGossiperHeartBeatVersionByAddrGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewGossiperHeartBeatVersionByAddrGetOK creates a GossiperHeartBeatVersionByAddrGetOK with default headers values +func NewGossiperHeartBeatVersionByAddrGetOK() *GossiperHeartBeatVersionByAddrGetOK { + return &GossiperHeartBeatVersionByAddrGetOK{} +} + +/* +GossiperHeartBeatVersionByAddrGetOK handles this case with default header values. 
+ +Success +*/ +type GossiperHeartBeatVersionByAddrGetOK struct { + Payload int32 +} + +func (o *GossiperHeartBeatVersionByAddrGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *GossiperHeartBeatVersionByAddrGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewGossiperHeartBeatVersionByAddrGetDefault creates a GossiperHeartBeatVersionByAddrGetDefault with default headers values +func NewGossiperHeartBeatVersionByAddrGetDefault(code int) *GossiperHeartBeatVersionByAddrGetDefault { + return &GossiperHeartBeatVersionByAddrGetDefault{ + _statusCode: code, + } +} + +/* +GossiperHeartBeatVersionByAddrGetDefault handles this case with default header values. + +internal server error +*/ +type GossiperHeartBeatVersionByAddrGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the gossiper heart beat version by addr get default response +func (o *GossiperHeartBeatVersionByAddrGetDefault) Code() int { + return o._statusCode +} + +func (o *GossiperHeartBeatVersionByAddrGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *GossiperHeartBeatVersionByAddrGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *GossiperHeartBeatVersionByAddrGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_delete_parameters.go new file mode 100644 index 00000000000..7fa2f1a6760 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_delete_parameters.go @@ -0,0 +1,147 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewHintedHandoffHintsDeleteParams creates a new HintedHandoffHintsDeleteParams object +// with the default values initialized. 
+func NewHintedHandoffHintsDeleteParams() *HintedHandoffHintsDeleteParams { + var () + return &HintedHandoffHintsDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewHintedHandoffHintsDeleteParamsWithTimeout creates a new HintedHandoffHintsDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewHintedHandoffHintsDeleteParamsWithTimeout(timeout time.Duration) *HintedHandoffHintsDeleteParams { + var () + return &HintedHandoffHintsDeleteParams{ + + timeout: timeout, + } +} + +// NewHintedHandoffHintsDeleteParamsWithContext creates a new HintedHandoffHintsDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewHintedHandoffHintsDeleteParamsWithContext(ctx context.Context) *HintedHandoffHintsDeleteParams { + var () + return &HintedHandoffHintsDeleteParams{ + + Context: ctx, + } +} + +// NewHintedHandoffHintsDeleteParamsWithHTTPClient creates a new HintedHandoffHintsDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewHintedHandoffHintsDeleteParamsWithHTTPClient(client *http.Client) *HintedHandoffHintsDeleteParams { + var () + return &HintedHandoffHintsDeleteParams{ + HTTPClient: client, + } +} + +/* +HintedHandoffHintsDeleteParams contains all the parameters to send to the API endpoint +for the hinted handoff hints delete operation typically these are written to a http.Request +*/ +type HintedHandoffHintsDeleteParams struct { + + /*Host + Optional String rep. of endpoint address to delete hints for + + */ + Host *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) WithTimeout(timeout time.Duration) *HintedHandoffHintsDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) WithContext(ctx context.Context) *HintedHandoffHintsDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) WithHTTPClient(client *http.Client) *HintedHandoffHintsDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithHost adds the host to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) WithHost(host *string) *HintedHandoffHintsDeleteParams { + o.SetHost(host) + return o +} + +// SetHost adds the host to the hinted handoff hints delete params +func (o *HintedHandoffHintsDeleteParams) SetHost(host *string) { + o.Host = host +} + +// WriteToRequest writes these params to a swagger request +func (o *HintedHandoffHintsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res 
[]error + + if o.Host != nil { + + // query param host + var qrHost string + if o.Host != nil { + qrHost = *o.Host + } + qHost := qrHost + if qHost != "" { + if err := r.SetQueryParam("host", qHost); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_delete_responses.go new file mode 100644 index 00000000000..020d75199ba --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_delete_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// HintedHandoffHintsDeleteReader is a Reader for the HintedHandoffHintsDelete structure. +type HintedHandoffHintsDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *HintedHandoffHintsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewHintedHandoffHintsDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewHintedHandoffHintsDeleteDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewHintedHandoffHintsDeleteOK creates a HintedHandoffHintsDeleteOK with default headers values +func NewHintedHandoffHintsDeleteOK() *HintedHandoffHintsDeleteOK { + return &HintedHandoffHintsDeleteOK{} +} + +/* +HintedHandoffHintsDeleteOK handles this case with default header values. + +Success +*/ +type HintedHandoffHintsDeleteOK struct { +} + +func (o *HintedHandoffHintsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewHintedHandoffHintsDeleteDefault creates a HintedHandoffHintsDeleteDefault with default headers values +func NewHintedHandoffHintsDeleteDefault(code int) *HintedHandoffHintsDeleteDefault { + return &HintedHandoffHintsDeleteDefault{ + _statusCode: code, + } +} + +/* +HintedHandoffHintsDeleteDefault handles this case with default header values. 
+ +internal server error +*/ +type HintedHandoffHintsDeleteDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the hinted handoff hints delete default response +func (o *HintedHandoffHintsDeleteDefault) Code() int { + return o._statusCode +} + +func (o *HintedHandoffHintsDeleteDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *HintedHandoffHintsDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *HintedHandoffHintsDeleteDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_get_parameters.go new file mode 100644 index 00000000000..e0e946ebae6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewHintedHandoffHintsGetParams creates a new HintedHandoffHintsGetParams object +// with the default values initialized. 
+func NewHintedHandoffHintsGetParams() *HintedHandoffHintsGetParams { + + return &HintedHandoffHintsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewHintedHandoffHintsGetParamsWithTimeout creates a new HintedHandoffHintsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewHintedHandoffHintsGetParamsWithTimeout(timeout time.Duration) *HintedHandoffHintsGetParams { + + return &HintedHandoffHintsGetParams{ + + timeout: timeout, + } +} + +// NewHintedHandoffHintsGetParamsWithContext creates a new HintedHandoffHintsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewHintedHandoffHintsGetParamsWithContext(ctx context.Context) *HintedHandoffHintsGetParams { + + return &HintedHandoffHintsGetParams{ + + Context: ctx, + } +} + +// NewHintedHandoffHintsGetParamsWithHTTPClient creates a new HintedHandoffHintsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewHintedHandoffHintsGetParamsWithHTTPClient(client *http.Client) *HintedHandoffHintsGetParams { + + return &HintedHandoffHintsGetParams{ + HTTPClient: client, + } +} + +/* +HintedHandoffHintsGetParams contains all the parameters to send to the API endpoint +for the hinted handoff hints get operation typically these are written to a http.Request +*/ +type HintedHandoffHintsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the hinted handoff hints get params +func (o *HintedHandoffHintsGetParams) WithTimeout(timeout time.Duration) *HintedHandoffHintsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the hinted handoff hints get params +func (o *HintedHandoffHintsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the hinted handoff hints get params +func (o *HintedHandoffHintsGetParams) WithContext(ctx context.Context) *HintedHandoffHintsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the hinted handoff hints get params +func (o *HintedHandoffHintsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the hinted handoff hints get params +func (o *HintedHandoffHintsGetParams) WithHTTPClient(client *http.Client) *HintedHandoffHintsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the hinted handoff hints get params +func (o *HintedHandoffHintsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *HintedHandoffHintsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_get_responses.go new file mode 100644 index 00000000000..d823709e539 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_hints_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// HintedHandoffHintsGetReader is a Reader for the HintedHandoffHintsGet structure. +type HintedHandoffHintsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *HintedHandoffHintsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewHintedHandoffHintsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewHintedHandoffHintsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewHintedHandoffHintsGetOK creates a HintedHandoffHintsGetOK with default headers values +func NewHintedHandoffHintsGetOK() *HintedHandoffHintsGetOK { + return &HintedHandoffHintsGetOK{} +} + +/* +HintedHandoffHintsGetOK handles this case with default header values. + +Success +*/ +type HintedHandoffHintsGetOK struct { + Payload []string +} + +func (o *HintedHandoffHintsGetOK) GetPayload() []string { + return o.Payload +} + +func (o *HintedHandoffHintsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewHintedHandoffHintsGetDefault creates a HintedHandoffHintsGetDefault with default headers values +func NewHintedHandoffHintsGetDefault(code int) *HintedHandoffHintsGetDefault { + return &HintedHandoffHintsGetDefault{ + _statusCode: code, + } +} + +/* +HintedHandoffHintsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type HintedHandoffHintsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the hinted handoff hints get default response +func (o *HintedHandoffHintsGetDefault) Code() int { + return o._statusCode +} + +func (o *HintedHandoffHintsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *HintedHandoffHintsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *HintedHandoffHintsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_create_hint_by_addr_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_create_hint_by_addr_get_parameters.go new file mode 100644 index 00000000000..cbc3477990d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_create_hint_by_addr_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewHintedHandoffMetricsCreateHintByAddrGetParams creates a new HintedHandoffMetricsCreateHintByAddrGetParams object +// with the default values initialized. 
+func NewHintedHandoffMetricsCreateHintByAddrGetParams() *HintedHandoffMetricsCreateHintByAddrGetParams { + var () + return &HintedHandoffMetricsCreateHintByAddrGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewHintedHandoffMetricsCreateHintByAddrGetParamsWithTimeout creates a new HintedHandoffMetricsCreateHintByAddrGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewHintedHandoffMetricsCreateHintByAddrGetParamsWithTimeout(timeout time.Duration) *HintedHandoffMetricsCreateHintByAddrGetParams { + var () + return &HintedHandoffMetricsCreateHintByAddrGetParams{ + + timeout: timeout, + } +} + +// NewHintedHandoffMetricsCreateHintByAddrGetParamsWithContext creates a new HintedHandoffMetricsCreateHintByAddrGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewHintedHandoffMetricsCreateHintByAddrGetParamsWithContext(ctx context.Context) *HintedHandoffMetricsCreateHintByAddrGetParams { + var () + return &HintedHandoffMetricsCreateHintByAddrGetParams{ + + Context: ctx, + } +} + +// NewHintedHandoffMetricsCreateHintByAddrGetParamsWithHTTPClient creates a new HintedHandoffMetricsCreateHintByAddrGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewHintedHandoffMetricsCreateHintByAddrGetParamsWithHTTPClient(client *http.Client) *HintedHandoffMetricsCreateHintByAddrGetParams { + var () + return &HintedHandoffMetricsCreateHintByAddrGetParams{ + HTTPClient: client, + } +} + +/* +HintedHandoffMetricsCreateHintByAddrGetParams contains all the parameters to send to the API endpoint +for the hinted handoff metrics create hint by addr get operation typically these are written to a http.Request +*/ +type HintedHandoffMetricsCreateHintByAddrGetParams struct { + + /*Addr + The peer address + + */ + Addr string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the hinted handoff metrics create hint by addr get params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) WithTimeout(timeout time.Duration) *HintedHandoffMetricsCreateHintByAddrGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the hinted handoff metrics create hint by addr get params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the hinted handoff metrics create hint by addr get params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) WithContext(ctx context.Context) *HintedHandoffMetricsCreateHintByAddrGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the hinted handoff metrics create hint by addr get params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the hinted handoff metrics create hint by addr get params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) WithHTTPClient(client *http.Client) *HintedHandoffMetricsCreateHintByAddrGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the hinted handoff metrics create hint by addr get params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the hinted handoff metrics create hint by addr get 
params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) WithAddr(addr string) *HintedHandoffMetricsCreateHintByAddrGetParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the hinted handoff metrics create hint by addr get params +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) SetAddr(addr string) { + o.Addr = addr +} + +// WriteToRequest writes these params to a swagger request +func (o *HintedHandoffMetricsCreateHintByAddrGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param addr + if err := r.SetPathParam("addr", o.Addr); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_create_hint_by_addr_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_create_hint_by_addr_get_responses.go new file mode 100644 index 00000000000..4a1a95d150a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_create_hint_by_addr_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// HintedHandoffMetricsCreateHintByAddrGetReader is a Reader for the HintedHandoffMetricsCreateHintByAddrGet structure. +type HintedHandoffMetricsCreateHintByAddrGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *HintedHandoffMetricsCreateHintByAddrGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewHintedHandoffMetricsCreateHintByAddrGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewHintedHandoffMetricsCreateHintByAddrGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewHintedHandoffMetricsCreateHintByAddrGetOK creates a HintedHandoffMetricsCreateHintByAddrGetOK with default headers values +func NewHintedHandoffMetricsCreateHintByAddrGetOK() *HintedHandoffMetricsCreateHintByAddrGetOK { + return &HintedHandoffMetricsCreateHintByAddrGetOK{} +} + +/* +HintedHandoffMetricsCreateHintByAddrGetOK handles this case with default header values. 
+ +Success +*/ +type HintedHandoffMetricsCreateHintByAddrGetOK struct { + Payload int32 +} + +func (o *HintedHandoffMetricsCreateHintByAddrGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *HintedHandoffMetricsCreateHintByAddrGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewHintedHandoffMetricsCreateHintByAddrGetDefault creates a HintedHandoffMetricsCreateHintByAddrGetDefault with default headers values +func NewHintedHandoffMetricsCreateHintByAddrGetDefault(code int) *HintedHandoffMetricsCreateHintByAddrGetDefault { + return &HintedHandoffMetricsCreateHintByAddrGetDefault{ + _statusCode: code, + } +} + +/* +HintedHandoffMetricsCreateHintByAddrGetDefault handles this case with default header values. + +internal server error +*/ +type HintedHandoffMetricsCreateHintByAddrGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the hinted handoff metrics create hint by addr get default response +func (o *HintedHandoffMetricsCreateHintByAddrGetDefault) Code() int { + return o._statusCode +} + +func (o *HintedHandoffMetricsCreateHintByAddrGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *HintedHandoffMetricsCreateHintByAddrGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *HintedHandoffMetricsCreateHintByAddrGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_not_stored_hints_by_addr_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_not_stored_hints_by_addr_get_parameters.go new file mode 100644 index 00000000000..d18c0e820d4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_not_stored_hints_by_addr_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewHintedHandoffMetricsNotStoredHintsByAddrGetParams creates a new HintedHandoffMetricsNotStoredHintsByAddrGetParams object +// with the default values initialized. 
+func NewHintedHandoffMetricsNotStoredHintsByAddrGetParams() *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + var () + return &HintedHandoffMetricsNotStoredHintsByAddrGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewHintedHandoffMetricsNotStoredHintsByAddrGetParamsWithTimeout creates a new HintedHandoffMetricsNotStoredHintsByAddrGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewHintedHandoffMetricsNotStoredHintsByAddrGetParamsWithTimeout(timeout time.Duration) *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + var () + return &HintedHandoffMetricsNotStoredHintsByAddrGetParams{ + + timeout: timeout, + } +} + +// NewHintedHandoffMetricsNotStoredHintsByAddrGetParamsWithContext creates a new HintedHandoffMetricsNotStoredHintsByAddrGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewHintedHandoffMetricsNotStoredHintsByAddrGetParamsWithContext(ctx context.Context) *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + var () + return &HintedHandoffMetricsNotStoredHintsByAddrGetParams{ + + Context: ctx, + } +} + +// NewHintedHandoffMetricsNotStoredHintsByAddrGetParamsWithHTTPClient creates a new HintedHandoffMetricsNotStoredHintsByAddrGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewHintedHandoffMetricsNotStoredHintsByAddrGetParamsWithHTTPClient(client *http.Client) *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + var () + return &HintedHandoffMetricsNotStoredHintsByAddrGetParams{ + HTTPClient: client, + } +} + +/* +HintedHandoffMetricsNotStoredHintsByAddrGetParams contains all the parameters to send to the API endpoint +for the hinted handoff metrics not stored hints by addr get operation typically these are written to a http.Request +*/ +type HintedHandoffMetricsNotStoredHintsByAddrGetParams struct { + + /*Addr + The peer address + + */ + Addr string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) WithTimeout(timeout time.Duration) *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) WithContext(ctx context.Context) *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) WithHTTPClient(client *http.Client) *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) WithAddr(addr string) *HintedHandoffMetricsNotStoredHintsByAddrGetParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the hinted handoff metrics not stored hints by addr get params +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) SetAddr(addr string) { + o.Addr = addr +} + +// WriteToRequest writes these params to a swagger request +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param addr + if err := r.SetPathParam("addr", o.Addr); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_not_stored_hints_by_addr_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_not_stored_hints_by_addr_get_responses.go new file mode 100644 index 00000000000..153dfebdb44 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_metrics_not_stored_hints_by_addr_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// HintedHandoffMetricsNotStoredHintsByAddrGetReader is a Reader for the HintedHandoffMetricsNotStoredHintsByAddrGet structure. +type HintedHandoffMetricsNotStoredHintsByAddrGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewHintedHandoffMetricsNotStoredHintsByAddrGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewHintedHandoffMetricsNotStoredHintsByAddrGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewHintedHandoffMetricsNotStoredHintsByAddrGetOK creates a HintedHandoffMetricsNotStoredHintsByAddrGetOK with default headers values +func NewHintedHandoffMetricsNotStoredHintsByAddrGetOK() *HintedHandoffMetricsNotStoredHintsByAddrGetOK { + return &HintedHandoffMetricsNotStoredHintsByAddrGetOK{} +} + +/* +HintedHandoffMetricsNotStoredHintsByAddrGetOK handles this case with default header values. 
+ +Success +*/ +type HintedHandoffMetricsNotStoredHintsByAddrGetOK struct { + Payload int32 +} + +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewHintedHandoffMetricsNotStoredHintsByAddrGetDefault creates a HintedHandoffMetricsNotStoredHintsByAddrGetDefault with default headers values +func NewHintedHandoffMetricsNotStoredHintsByAddrGetDefault(code int) *HintedHandoffMetricsNotStoredHintsByAddrGetDefault { + return &HintedHandoffMetricsNotStoredHintsByAddrGetDefault{ + _statusCode: code, + } +} + +/* +HintedHandoffMetricsNotStoredHintsByAddrGetDefault handles this case with default header values. + +internal server error +*/ +type HintedHandoffMetricsNotStoredHintsByAddrGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the hinted handoff metrics not stored hints by addr get default response +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetDefault) Code() int { + return o._statusCode +} + +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *HintedHandoffMetricsNotStoredHintsByAddrGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_pause_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_pause_post_parameters.go new file mode 100644 index 00000000000..51c6d796b91 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_pause_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewHintedHandoffPausePostParams creates a new HintedHandoffPausePostParams object +// with the default values initialized. 
+func NewHintedHandoffPausePostParams() *HintedHandoffPausePostParams { + var () + return &HintedHandoffPausePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewHintedHandoffPausePostParamsWithTimeout creates a new HintedHandoffPausePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewHintedHandoffPausePostParamsWithTimeout(timeout time.Duration) *HintedHandoffPausePostParams { + var () + return &HintedHandoffPausePostParams{ + + timeout: timeout, + } +} + +// NewHintedHandoffPausePostParamsWithContext creates a new HintedHandoffPausePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewHintedHandoffPausePostParamsWithContext(ctx context.Context) *HintedHandoffPausePostParams { + var () + return &HintedHandoffPausePostParams{ + + Context: ctx, + } +} + +// NewHintedHandoffPausePostParamsWithHTTPClient creates a new HintedHandoffPausePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewHintedHandoffPausePostParamsWithHTTPClient(client *http.Client) *HintedHandoffPausePostParams { + var () + return &HintedHandoffPausePostParams{ + HTTPClient: client, + } +} + +/* +HintedHandoffPausePostParams contains all the parameters to send to the API endpoint +for the hinted handoff pause post operation typically these are written to a http.Request +*/ +type HintedHandoffPausePostParams struct { + + /*Pause + pause status + + */ + Pause bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) WithTimeout(timeout time.Duration) *HintedHandoffPausePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) WithContext(ctx context.Context) *HintedHandoffPausePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) WithHTTPClient(client *http.Client) *HintedHandoffPausePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPause adds the pause to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) WithPause(pause bool) *HintedHandoffPausePostParams { + o.SetPause(pause) + return o +} + +// SetPause adds the pause to the hinted handoff pause post params +func (o *HintedHandoffPausePostParams) SetPause(pause bool) { + o.Pause = pause +} + +// WriteToRequest writes these params to a swagger request +func (o *HintedHandoffPausePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param pause + qrPause := o.Pause + qPause := swag.FormatBool(qrPause) + if qPause != "" { + if err := 
r.SetQueryParam("pause", qPause); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_pause_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_pause_post_responses.go new file mode 100644 index 00000000000..d6fc4a9acc6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_pause_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// HintedHandoffPausePostReader is a Reader for the HintedHandoffPausePost structure. +type HintedHandoffPausePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *HintedHandoffPausePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewHintedHandoffPausePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewHintedHandoffPausePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewHintedHandoffPausePostOK creates a HintedHandoffPausePostOK with default headers values +func NewHintedHandoffPausePostOK() *HintedHandoffPausePostOK { + return &HintedHandoffPausePostOK{} +} + +/* +HintedHandoffPausePostOK handles this case with default header values. + +Success +*/ +type HintedHandoffPausePostOK struct { +} + +func (o *HintedHandoffPausePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewHintedHandoffPausePostDefault creates a HintedHandoffPausePostDefault with default headers values +func NewHintedHandoffPausePostDefault(code int) *HintedHandoffPausePostDefault { + return &HintedHandoffPausePostDefault{ + _statusCode: code, + } +} + +/* +HintedHandoffPausePostDefault handles this case with default header values. 
+ +internal server error +*/ +type HintedHandoffPausePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the hinted handoff pause post default response +func (o *HintedHandoffPausePostDefault) Code() int { + return o._statusCode +} + +func (o *HintedHandoffPausePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *HintedHandoffPausePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *HintedHandoffPausePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_schedule_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_schedule_post_parameters.go new file mode 100644 index 00000000000..faf770ff552 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_schedule_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewHintedHandoffSchedulePostParams creates a new HintedHandoffSchedulePostParams object +// with the default values initialized. +func NewHintedHandoffSchedulePostParams() *HintedHandoffSchedulePostParams { + var () + return &HintedHandoffSchedulePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewHintedHandoffSchedulePostParamsWithTimeout creates a new HintedHandoffSchedulePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewHintedHandoffSchedulePostParamsWithTimeout(timeout time.Duration) *HintedHandoffSchedulePostParams { + var () + return &HintedHandoffSchedulePostParams{ + + timeout: timeout, + } +} + +// NewHintedHandoffSchedulePostParamsWithContext creates a new HintedHandoffSchedulePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewHintedHandoffSchedulePostParamsWithContext(ctx context.Context) *HintedHandoffSchedulePostParams { + var () + return &HintedHandoffSchedulePostParams{ + + Context: ctx, + } +} + +// NewHintedHandoffSchedulePostParamsWithHTTPClient creates a new HintedHandoffSchedulePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewHintedHandoffSchedulePostParamsWithHTTPClient(client *http.Client) *HintedHandoffSchedulePostParams { + var () + return &HintedHandoffSchedulePostParams{ + HTTPClient: client, + } +} + +/* +HintedHandoffSchedulePostParams contains all the parameters to send to the API endpoint +for the hinted handoff schedule post operation typically these are written to a http.Request +*/ +type HintedHandoffSchedulePostParams struct { + + /*Host + String rep. 
of endpoint address + + */ + Host string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) WithTimeout(timeout time.Duration) *HintedHandoffSchedulePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) WithContext(ctx context.Context) *HintedHandoffSchedulePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) WithHTTPClient(client *http.Client) *HintedHandoffSchedulePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithHost adds the host to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) WithHost(host string) *HintedHandoffSchedulePostParams { + o.SetHost(host) + return o +} + +// SetHost adds the host to the hinted handoff schedule post params +func (o *HintedHandoffSchedulePostParams) SetHost(host string) { + o.Host = host +} + +// WriteToRequest writes these params to a swagger request +func (o *HintedHandoffSchedulePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param host + qrHost := o.Host + qHost := qrHost + if qHost != "" { + if err := r.SetQueryParam("host", qHost); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_schedule_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_schedule_post_responses.go new file mode 100644 index 00000000000..bf04e97d606 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/hinted_handoff_schedule_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// HintedHandoffSchedulePostReader is a Reader for the HintedHandoffSchedulePost structure. +type HintedHandoffSchedulePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
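Taken together, the hinted-handoff files above follow the standard go-swagger calling convention: build a *Params value with the fluent With* setters, hand it to the generated operations client, and treat the *Default response as the error path. A minimal, hypothetical sketch of that flow; the client interface below is assumed from go-swagger conventions and is not part of this diff:

    import (
        "context"
        "time"

        "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
    )

    // hintedHandoffClient captures the assumed surface of the generated operations client.
    type hintedHandoffClient interface {
        HintedHandoffSchedulePost(*operations.HintedHandoffSchedulePostParams) (*operations.HintedHandoffSchedulePostOK, error)
    }

    // scheduleHints is a hypothetical helper showing the params builder in use.
    func scheduleHints(ctx context.Context, ops hintedHandoffClient, host string) error {
        params := operations.NewHintedHandoffSchedulePostParamsWithContext(ctx).
            WithHost(host). // serialized as the "host" query parameter by WriteToRequest
            WithTimeout(30 * time.Second)
        // Non-2xx replies surface as *HintedHandoffSchedulePostDefault, whose
        // Error() renders "agent [HTTP <code>] <message>".
        _, err := ops.HintedHandoffSchedulePost(params)
        return err
    }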
+func (o *HintedHandoffSchedulePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewHintedHandoffSchedulePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewHintedHandoffSchedulePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewHintedHandoffSchedulePostOK creates a HintedHandoffSchedulePostOK with default headers values +func NewHintedHandoffSchedulePostOK() *HintedHandoffSchedulePostOK { + return &HintedHandoffSchedulePostOK{} +} + +/* +HintedHandoffSchedulePostOK handles this case with default header values. + +Success +*/ +type HintedHandoffSchedulePostOK struct { +} + +func (o *HintedHandoffSchedulePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewHintedHandoffSchedulePostDefault creates a HintedHandoffSchedulePostDefault with default headers values +func NewHintedHandoffSchedulePostDefault(code int) *HintedHandoffSchedulePostDefault { + return &HintedHandoffSchedulePostDefault{ + _statusCode: code, + } +} + +/* +HintedHandoffSchedulePostDefault handles this case with default header values. + +internal server error +*/ +type HintedHandoffSchedulePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the hinted handoff schedule post default response +func (o *HintedHandoffSchedulePostDefault) Code() int { + return o._statusCode +} + +func (o *HintedHandoffSchedulePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *HintedHandoffSchedulePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *HintedHandoffSchedulePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/lsa_compact_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/lsa_compact_post_parameters.go new file mode 100644 index 00000000000..8388b51b969 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/lsa_compact_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewLsaCompactPostParams creates a new LsaCompactPostParams object +// with the default values initialized. 
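LsaCompactPost carries no operation-specific parameters, so the zero-argument constructor (which applies cr.DefaultTimeout) is all a caller needs. A hypothetical fragment, with the client method assumed as above:

    // Trigger LSA compaction; ops is the assumed generated operations client.
    params := operations.NewLsaCompactPostParams()
    if _, err := ops.LsaCompactPost(params); err != nil {
        return err // non-2xx agent replies arrive as *LsaCompactPostDefault
    }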
+func NewLsaCompactPostParams() *LsaCompactPostParams { + + return &LsaCompactPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewLsaCompactPostParamsWithTimeout creates a new LsaCompactPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewLsaCompactPostParamsWithTimeout(timeout time.Duration) *LsaCompactPostParams { + + return &LsaCompactPostParams{ + + timeout: timeout, + } +} + +// NewLsaCompactPostParamsWithContext creates a new LsaCompactPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewLsaCompactPostParamsWithContext(ctx context.Context) *LsaCompactPostParams { + + return &LsaCompactPostParams{ + + Context: ctx, + } +} + +// NewLsaCompactPostParamsWithHTTPClient creates a new LsaCompactPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewLsaCompactPostParamsWithHTTPClient(client *http.Client) *LsaCompactPostParams { + + return &LsaCompactPostParams{ + HTTPClient: client, + } +} + +/* +LsaCompactPostParams contains all the parameters to send to the API endpoint +for the lsa compact post operation typically these are written to a http.Request +*/ +type LsaCompactPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the lsa compact post params +func (o *LsaCompactPostParams) WithTimeout(timeout time.Duration) *LsaCompactPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the lsa compact post params +func (o *LsaCompactPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the lsa compact post params +func (o *LsaCompactPostParams) WithContext(ctx context.Context) *LsaCompactPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the lsa compact post params +func (o *LsaCompactPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the lsa compact post params +func (o *LsaCompactPostParams) WithHTTPClient(client *http.Client) *LsaCompactPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the lsa compact post params +func (o *LsaCompactPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *LsaCompactPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/lsa_compact_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/lsa_compact_post_responses.go new file mode 100644 index 00000000000..8b48d65bdc2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/lsa_compact_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// LsaCompactPostReader is a Reader for the LsaCompactPost structure. +type LsaCompactPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *LsaCompactPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewLsaCompactPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewLsaCompactPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewLsaCompactPostOK creates a LsaCompactPostOK with default headers values +func NewLsaCompactPostOK() *LsaCompactPostOK { + return &LsaCompactPostOK{} +} + +/* +LsaCompactPostOK handles this case with default header values. + +Success +*/ +type LsaCompactPostOK struct { +} + +func (o *LsaCompactPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewLsaCompactPostDefault creates a LsaCompactPostDefault with default headers values +func NewLsaCompactPostDefault(code int) *LsaCompactPostDefault { + return &LsaCompactPostDefault{ + _statusCode: code, + } +} + +/* +LsaCompactPostDefault handles this case with default header values. + +internal server error +*/ +type LsaCompactPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the lsa compact post default response +func (o *LsaCompactPostDefault) Code() int { + return o._statusCode +} + +func (o *LsaCompactPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *LsaCompactPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *LsaCompactPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_by_ver_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_by_ver_get_parameters.go new file mode 100644 index 00000000000..4b18dfb114b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_by_ver_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesDroppedByVerGetParams creates a new MessagingServiceMessagesDroppedByVerGetParams object +// with the default values initialized. +func NewMessagingServiceMessagesDroppedByVerGetParams() *MessagingServiceMessagesDroppedByVerGetParams { + + return &MessagingServiceMessagesDroppedByVerGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesDroppedByVerGetParamsWithTimeout creates a new MessagingServiceMessagesDroppedByVerGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesDroppedByVerGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesDroppedByVerGetParams { + + return &MessagingServiceMessagesDroppedByVerGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesDroppedByVerGetParamsWithContext creates a new MessagingServiceMessagesDroppedByVerGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesDroppedByVerGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesDroppedByVerGetParams { + + return &MessagingServiceMessagesDroppedByVerGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesDroppedByVerGetParamsWithHTTPClient creates a new MessagingServiceMessagesDroppedByVerGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesDroppedByVerGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesDroppedByVerGetParams { + + return &MessagingServiceMessagesDroppedByVerGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesDroppedByVerGetParams contains all the parameters to send to the API endpoint +for the messaging service messages dropped by ver get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesDroppedByVerGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages dropped by ver get params +func (o *MessagingServiceMessagesDroppedByVerGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesDroppedByVerGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages dropped by ver get params +func (o *MessagingServiceMessagesDroppedByVerGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages dropped by ver get params +func (o *MessagingServiceMessagesDroppedByVerGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesDroppedByVerGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages dropped by ver get params +func (o *MessagingServiceMessagesDroppedByVerGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages dropped by ver get params +func (o *MessagingServiceMessagesDroppedByVerGetParams) WithHTTPClient(client *http.Client) 
*MessagingServiceMessagesDroppedByVerGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages dropped by ver get params +func (o *MessagingServiceMessagesDroppedByVerGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceMessagesDroppedByVerGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_by_ver_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_by_ver_get_responses.go new file mode 100644 index 00000000000..38b22c332b1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_by_ver_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesDroppedByVerGetReader is a Reader for the MessagingServiceMessagesDroppedByVerGet structure. +type MessagingServiceMessagesDroppedByVerGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesDroppedByVerGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesDroppedByVerGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesDroppedByVerGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesDroppedByVerGetOK creates a MessagingServiceMessagesDroppedByVerGetOK with default headers values +func NewMessagingServiceMessagesDroppedByVerGetOK() *MessagingServiceMessagesDroppedByVerGetOK { + return &MessagingServiceMessagesDroppedByVerGetOK{} +} + +/* +MessagingServiceMessagesDroppedByVerGetOK handles this case with default header values. 
+ +Success +*/ +type MessagingServiceMessagesDroppedByVerGetOK struct { + Payload []*models.VerbCounter +} + +func (o *MessagingServiceMessagesDroppedByVerGetOK) GetPayload() []*models.VerbCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesDroppedByVerGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesDroppedByVerGetDefault creates a MessagingServiceMessagesDroppedByVerGetDefault with default headers values +func NewMessagingServiceMessagesDroppedByVerGetDefault(code int) *MessagingServiceMessagesDroppedByVerGetDefault { + return &MessagingServiceMessagesDroppedByVerGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesDroppedByVerGetDefault handles this case with default header values. + +internal server error +*/ +type MessagingServiceMessagesDroppedByVerGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages dropped by ver get default response +func (o *MessagingServiceMessagesDroppedByVerGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesDroppedByVerGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesDroppedByVerGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesDroppedByVerGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_get_parameters.go new file mode 100644 index 00000000000..4fe7283252a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesDroppedGetParams creates a new MessagingServiceMessagesDroppedGetParams object +// with the default values initialized. 
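The messaging-service counter GETs are parameterless reads whose OK payloads differ only in element type: []*models.VerbCounter for the by-ver variant, []*models.MessageCounter for the rest. A hypothetical read, again assuming the generated client method exists:

    // Fetch and print the dropped-message counters.
    res, err := ops.MessagingServiceMessagesDroppedGet(
        operations.NewMessagingServiceMessagesDroppedGetParamsWithContext(ctx))
    if err != nil {
        return err
    }
    for _, c := range res.GetPayload() {
        fmt.Printf("%+v\n", c) // MessageCounter fields live in the models package, not this diff
    }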
+func NewMessagingServiceMessagesDroppedGetParams() *MessagingServiceMessagesDroppedGetParams { + + return &MessagingServiceMessagesDroppedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesDroppedGetParamsWithTimeout creates a new MessagingServiceMessagesDroppedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesDroppedGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesDroppedGetParams { + + return &MessagingServiceMessagesDroppedGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesDroppedGetParamsWithContext creates a new MessagingServiceMessagesDroppedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesDroppedGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesDroppedGetParams { + + return &MessagingServiceMessagesDroppedGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesDroppedGetParamsWithHTTPClient creates a new MessagingServiceMessagesDroppedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesDroppedGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesDroppedGetParams { + + return &MessagingServiceMessagesDroppedGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesDroppedGetParams contains all the parameters to send to the API endpoint +for the messaging service messages dropped get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesDroppedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages dropped get params +func (o *MessagingServiceMessagesDroppedGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesDroppedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages dropped get params +func (o *MessagingServiceMessagesDroppedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages dropped get params +func (o *MessagingServiceMessagesDroppedGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesDroppedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages dropped get params +func (o *MessagingServiceMessagesDroppedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages dropped get params +func (o *MessagingServiceMessagesDroppedGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesDroppedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages dropped get params +func (o *MessagingServiceMessagesDroppedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceMessagesDroppedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_get_responses.go new file mode 100644 index 00000000000..35f97b05dca --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_dropped_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesDroppedGetReader is a Reader for the MessagingServiceMessagesDroppedGet structure. +type MessagingServiceMessagesDroppedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesDroppedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesDroppedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesDroppedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesDroppedGetOK creates a MessagingServiceMessagesDroppedGetOK with default headers values +func NewMessagingServiceMessagesDroppedGetOK() *MessagingServiceMessagesDroppedGetOK { + return &MessagingServiceMessagesDroppedGetOK{} +} + +/* +MessagingServiceMessagesDroppedGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceMessagesDroppedGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesDroppedGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesDroppedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesDroppedGetDefault creates a MessagingServiceMessagesDroppedGetDefault with default headers values +func NewMessagingServiceMessagesDroppedGetDefault(code int) *MessagingServiceMessagesDroppedGetDefault { + return &MessagingServiceMessagesDroppedGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesDroppedGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceMessagesDroppedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages dropped get default response +func (o *MessagingServiceMessagesDroppedGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesDroppedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesDroppedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesDroppedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_exception_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_exception_get_parameters.go new file mode 100644 index 00000000000..5c59ac986ef --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_exception_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesExceptionGetParams creates a new MessagingServiceMessagesExceptionGetParams object +// with the default values initialized. 
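Because each *Params type implements WriteToRequest, it satisfies the go-openapi runtime.ClientRequestWriter interface and can be submitted through any runtime transport even without a generated wrapper method. A sketch under that assumption; the PathPattern is illustrative rather than taken from the spec:

    // Direct submission through a runtime.ClientTransport rt (hypothetical).
    raw, err := rt.Submit(&runtime.ClientOperation{
        ID:          "MessagingServiceMessagesExceptionGet",
        Method:      "GET",
        PathPattern: "/messaging_service/messages/exception", // illustrative path
        Params:      operations.NewMessagingServiceMessagesExceptionGetParams(),
        Reader:      &operations.MessagingServiceMessagesExceptionGetReader{},
    })
    if err != nil {
        return err
    }
    ok := raw.(*operations.MessagingServiceMessagesExceptionGetOK) // 200 path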
+func NewMessagingServiceMessagesExceptionGetParams() *MessagingServiceMessagesExceptionGetParams { + + return &MessagingServiceMessagesExceptionGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesExceptionGetParamsWithTimeout creates a new MessagingServiceMessagesExceptionGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesExceptionGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesExceptionGetParams { + + return &MessagingServiceMessagesExceptionGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesExceptionGetParamsWithContext creates a new MessagingServiceMessagesExceptionGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesExceptionGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesExceptionGetParams { + + return &MessagingServiceMessagesExceptionGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesExceptionGetParamsWithHTTPClient creates a new MessagingServiceMessagesExceptionGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesExceptionGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesExceptionGetParams { + + return &MessagingServiceMessagesExceptionGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesExceptionGetParams contains all the parameters to send to the API endpoint +for the messaging service messages exception get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesExceptionGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages exception get params +func (o *MessagingServiceMessagesExceptionGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesExceptionGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages exception get params +func (o *MessagingServiceMessagesExceptionGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages exception get params +func (o *MessagingServiceMessagesExceptionGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesExceptionGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages exception get params +func (o *MessagingServiceMessagesExceptionGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages exception get params +func (o *MessagingServiceMessagesExceptionGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesExceptionGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages exception get params +func (o *MessagingServiceMessagesExceptionGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceMessagesExceptionGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + 
return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_exception_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_exception_get_responses.go new file mode 100644 index 00000000000..3338be820b4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_exception_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesExceptionGetReader is a Reader for the MessagingServiceMessagesExceptionGet structure. +type MessagingServiceMessagesExceptionGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesExceptionGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesExceptionGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesExceptionGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesExceptionGetOK creates a MessagingServiceMessagesExceptionGetOK with default headers values +func NewMessagingServiceMessagesExceptionGetOK() *MessagingServiceMessagesExceptionGetOK { + return &MessagingServiceMessagesExceptionGetOK{} +} + +/* +MessagingServiceMessagesExceptionGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceMessagesExceptionGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesExceptionGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesExceptionGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesExceptionGetDefault creates a MessagingServiceMessagesExceptionGetDefault with default headers values +func NewMessagingServiceMessagesExceptionGetDefault(code int) *MessagingServiceMessagesExceptionGetDefault { + return &MessagingServiceMessagesExceptionGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesExceptionGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceMessagesExceptionGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages exception get default response +func (o *MessagingServiceMessagesExceptionGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesExceptionGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesExceptionGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesExceptionGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_pending_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_pending_get_parameters.go new file mode 100644 index 00000000000..8607e73da29 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_pending_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesPendingGetParams creates a new MessagingServiceMessagesPendingGetParams object +// with the default values initialized. 
+func NewMessagingServiceMessagesPendingGetParams() *MessagingServiceMessagesPendingGetParams { + + return &MessagingServiceMessagesPendingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesPendingGetParamsWithTimeout creates a new MessagingServiceMessagesPendingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesPendingGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesPendingGetParams { + + return &MessagingServiceMessagesPendingGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesPendingGetParamsWithContext creates a new MessagingServiceMessagesPendingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesPendingGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesPendingGetParams { + + return &MessagingServiceMessagesPendingGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesPendingGetParamsWithHTTPClient creates a new MessagingServiceMessagesPendingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesPendingGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesPendingGetParams { + + return &MessagingServiceMessagesPendingGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesPendingGetParams contains all the parameters to send to the API endpoint +for the messaging service messages pending get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesPendingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages pending get params +func (o *MessagingServiceMessagesPendingGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesPendingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages pending get params +func (o *MessagingServiceMessagesPendingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages pending get params +func (o *MessagingServiceMessagesPendingGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesPendingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages pending get params +func (o *MessagingServiceMessagesPendingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages pending get params +func (o *MessagingServiceMessagesPendingGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesPendingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages pending get params +func (o *MessagingServiceMessagesPendingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceMessagesPendingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_pending_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_pending_get_responses.go new file mode 100644 index 00000000000..cbcd5cae0d9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_pending_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesPendingGetReader is a Reader for the MessagingServiceMessagesPendingGet structure. +type MessagingServiceMessagesPendingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesPendingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesPendingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesPendingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesPendingGetOK creates a MessagingServiceMessagesPendingGetOK with default headers values +func NewMessagingServiceMessagesPendingGetOK() *MessagingServiceMessagesPendingGetOK { + return &MessagingServiceMessagesPendingGetOK{} +} + +/* +MessagingServiceMessagesPendingGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceMessagesPendingGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesPendingGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesPendingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesPendingGetDefault creates a MessagingServiceMessagesPendingGetDefault with default headers values +func NewMessagingServiceMessagesPendingGetDefault(code int) *MessagingServiceMessagesPendingGetDefault { + return &MessagingServiceMessagesPendingGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesPendingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceMessagesPendingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages pending get default response +func (o *MessagingServiceMessagesPendingGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesPendingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesPendingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesPendingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_replied_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_replied_get_parameters.go new file mode 100644 index 00000000000..7212e474170 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_replied_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesRepliedGetParams creates a new MessagingServiceMessagesRepliedGetParams object +// with the default values initialized. 
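Every reader in this batch shares the same dispatch: HTTP 200 yields the OK type, any other 2xx returns the Default value as a success, and everything else returns it as an error. On the caller side the agent's error payload can be recovered with errors.As (hypothetical sketch; the client method is assumed as before):

    // Inspect the models.ErrorModel behind a failed call.
    _, err := ops.MessagingServiceMessagesRepliedGet(
        operations.NewMessagingServiceMessagesRepliedGetParams())
    var agentErr *operations.MessagingServiceMessagesRepliedGetDefault
    if errors.As(err, &agentErr) {
        fmt.Println("status:", agentErr.Code(), "message:", agentErr.GetPayload().Message)
    }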
+func NewMessagingServiceMessagesRepliedGetParams() *MessagingServiceMessagesRepliedGetParams { + + return &MessagingServiceMessagesRepliedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesRepliedGetParamsWithTimeout creates a new MessagingServiceMessagesRepliedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesRepliedGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesRepliedGetParams { + + return &MessagingServiceMessagesRepliedGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesRepliedGetParamsWithContext creates a new MessagingServiceMessagesRepliedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesRepliedGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesRepliedGetParams { + + return &MessagingServiceMessagesRepliedGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesRepliedGetParamsWithHTTPClient creates a new MessagingServiceMessagesRepliedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesRepliedGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesRepliedGetParams { + + return &MessagingServiceMessagesRepliedGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesRepliedGetParams contains all the parameters to send to the API endpoint +for the messaging service messages replied get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesRepliedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages replied get params +func (o *MessagingServiceMessagesRepliedGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesRepliedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages replied get params +func (o *MessagingServiceMessagesRepliedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages replied get params +func (o *MessagingServiceMessagesRepliedGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesRepliedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages replied get params +func (o *MessagingServiceMessagesRepliedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages replied get params +func (o *MessagingServiceMessagesRepliedGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesRepliedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages replied get params +func (o *MessagingServiceMessagesRepliedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceMessagesRepliedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_replied_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_replied_get_responses.go new file mode 100644 index 00000000000..b028987d9a6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_replied_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesRepliedGetReader is a Reader for the MessagingServiceMessagesRepliedGet structure. +type MessagingServiceMessagesRepliedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesRepliedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesRepliedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesRepliedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesRepliedGetOK creates a MessagingServiceMessagesRepliedGetOK with default headers values +func NewMessagingServiceMessagesRepliedGetOK() *MessagingServiceMessagesRepliedGetOK { + return &MessagingServiceMessagesRepliedGetOK{} +} + +/* +MessagingServiceMessagesRepliedGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceMessagesRepliedGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesRepliedGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesRepliedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesRepliedGetDefault creates a MessagingServiceMessagesRepliedGetDefault with default headers values +func NewMessagingServiceMessagesRepliedGetDefault(code int) *MessagingServiceMessagesRepliedGetDefault { + return &MessagingServiceMessagesRepliedGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesRepliedGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceMessagesRepliedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages replied get default response +func (o *MessagingServiceMessagesRepliedGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesRepliedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesRepliedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesRepliedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_completed_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_completed_get_parameters.go new file mode 100644 index 00000000000..9c0d4231b06 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_completed_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesRespondCompletedGetParams creates a new MessagingServiceMessagesRespondCompletedGetParams object +// with the default values initialized. 
+func NewMessagingServiceMessagesRespondCompletedGetParams() *MessagingServiceMessagesRespondCompletedGetParams { + + return &MessagingServiceMessagesRespondCompletedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesRespondCompletedGetParamsWithTimeout creates a new MessagingServiceMessagesRespondCompletedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesRespondCompletedGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesRespondCompletedGetParams { + + return &MessagingServiceMessagesRespondCompletedGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesRespondCompletedGetParamsWithContext creates a new MessagingServiceMessagesRespondCompletedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesRespondCompletedGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesRespondCompletedGetParams { + + return &MessagingServiceMessagesRespondCompletedGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesRespondCompletedGetParamsWithHTTPClient creates a new MessagingServiceMessagesRespondCompletedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesRespondCompletedGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesRespondCompletedGetParams { + + return &MessagingServiceMessagesRespondCompletedGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesRespondCompletedGetParams contains all the parameters to send to the API endpoint +for the messaging service messages respond completed get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesRespondCompletedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages respond completed get params +func (o *MessagingServiceMessagesRespondCompletedGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesRespondCompletedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages respond completed get params +func (o *MessagingServiceMessagesRespondCompletedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages respond completed get params +func (o *MessagingServiceMessagesRespondCompletedGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesRespondCompletedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages respond completed get params +func (o *MessagingServiceMessagesRespondCompletedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages respond completed get params +func (o *MessagingServiceMessagesRespondCompletedGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesRespondCompletedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages respond completed get params +func (o *MessagingServiceMessagesRespondCompletedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these 
params to a swagger request +func (o *MessagingServiceMessagesRespondCompletedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_completed_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_completed_get_responses.go new file mode 100644 index 00000000000..68f1374756a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_completed_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesRespondCompletedGetReader is a Reader for the MessagingServiceMessagesRespondCompletedGet structure. +type MessagingServiceMessagesRespondCompletedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesRespondCompletedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesRespondCompletedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesRespondCompletedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesRespondCompletedGetOK creates a MessagingServiceMessagesRespondCompletedGetOK with default headers values +func NewMessagingServiceMessagesRespondCompletedGetOK() *MessagingServiceMessagesRespondCompletedGetOK { + return &MessagingServiceMessagesRespondCompletedGetOK{} +} + +/* +MessagingServiceMessagesRespondCompletedGetOK handles this case with default header values. 
+ +Success +*/ +type MessagingServiceMessagesRespondCompletedGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesRespondCompletedGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesRespondCompletedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesRespondCompletedGetDefault creates a MessagingServiceMessagesRespondCompletedGetDefault with default headers values +func NewMessagingServiceMessagesRespondCompletedGetDefault(code int) *MessagingServiceMessagesRespondCompletedGetDefault { + return &MessagingServiceMessagesRespondCompletedGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesRespondCompletedGetDefault handles this case with default header values. + +internal server error +*/ +type MessagingServiceMessagesRespondCompletedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages respond completed get default response +func (o *MessagingServiceMessagesRespondCompletedGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesRespondCompletedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesRespondCompletedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesRespondCompletedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_pending_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_pending_get_parameters.go new file mode 100644 index 00000000000..f8d8512484b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_pending_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesRespondPendingGetParams creates a new MessagingServiceMessagesRespondPendingGetParams object +// with the default values initialized. 
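Each generated *Params type pairs With*/Set* methods: the Set* form mutates in place, while the With* form returns the receiver so constructor variants and setters chain fluently. A minimal sketch using the respond-completed parameters defined above (illustrative, not part of the vendored files):

package main

import (
	"context"
	"net/http"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// buildParams shows the fluent style: start from a constructor variant and
// chain With* setters for the remaining fields.
func buildParams(ctx context.Context) *operations.MessagingServiceMessagesRespondCompletedGetParams {
	return operations.NewMessagingServiceMessagesRespondCompletedGetParamsWithContext(ctx).
		WithTimeout(30 * time.Second).
		WithHTTPClient(&http.Client{})
}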
+func NewMessagingServiceMessagesRespondPendingGetParams() *MessagingServiceMessagesRespondPendingGetParams { + + return &MessagingServiceMessagesRespondPendingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesRespondPendingGetParamsWithTimeout creates a new MessagingServiceMessagesRespondPendingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesRespondPendingGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesRespondPendingGetParams { + + return &MessagingServiceMessagesRespondPendingGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesRespondPendingGetParamsWithContext creates a new MessagingServiceMessagesRespondPendingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesRespondPendingGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesRespondPendingGetParams { + + return &MessagingServiceMessagesRespondPendingGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesRespondPendingGetParamsWithHTTPClient creates a new MessagingServiceMessagesRespondPendingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesRespondPendingGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesRespondPendingGetParams { + + return &MessagingServiceMessagesRespondPendingGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesRespondPendingGetParams contains all the parameters to send to the API endpoint +for the messaging service messages respond pending get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesRespondPendingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages respond pending get params +func (o *MessagingServiceMessagesRespondPendingGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesRespondPendingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages respond pending get params +func (o *MessagingServiceMessagesRespondPendingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages respond pending get params +func (o *MessagingServiceMessagesRespondPendingGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesRespondPendingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages respond pending get params +func (o *MessagingServiceMessagesRespondPendingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages respond pending get params +func (o *MessagingServiceMessagesRespondPendingGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesRespondPendingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages respond pending get params +func (o *MessagingServiceMessagesRespondPendingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*MessagingServiceMessagesRespondPendingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_pending_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_pending_get_responses.go new file mode 100644 index 00000000000..8ad1fcfc83e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_respond_pending_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesRespondPendingGetReader is a Reader for the MessagingServiceMessagesRespondPendingGet structure. +type MessagingServiceMessagesRespondPendingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesRespondPendingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesRespondPendingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesRespondPendingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesRespondPendingGetOK creates a MessagingServiceMessagesRespondPendingGetOK with default headers values +func NewMessagingServiceMessagesRespondPendingGetOK() *MessagingServiceMessagesRespondPendingGetOK { + return &MessagingServiceMessagesRespondPendingGetOK{} +} + +/* +MessagingServiceMessagesRespondPendingGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceMessagesRespondPendingGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesRespondPendingGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesRespondPendingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesRespondPendingGetDefault creates a MessagingServiceMessagesRespondPendingGetDefault with default headers values +func NewMessagingServiceMessagesRespondPendingGetDefault(code int) *MessagingServiceMessagesRespondPendingGetDefault { + return &MessagingServiceMessagesRespondPendingGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesRespondPendingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceMessagesRespondPendingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages respond pending get default response +func (o *MessagingServiceMessagesRespondPendingGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesRespondPendingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesRespondPendingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesRespondPendingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_sent_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_sent_get_parameters.go new file mode 100644 index 00000000000..7691e84b0cd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_sent_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesSentGetParams creates a new MessagingServiceMessagesSentGetParams object +// with the default values initialized. 
+func NewMessagingServiceMessagesSentGetParams() *MessagingServiceMessagesSentGetParams { + + return &MessagingServiceMessagesSentGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesSentGetParamsWithTimeout creates a new MessagingServiceMessagesSentGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesSentGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesSentGetParams { + + return &MessagingServiceMessagesSentGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesSentGetParamsWithContext creates a new MessagingServiceMessagesSentGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesSentGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesSentGetParams { + + return &MessagingServiceMessagesSentGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesSentGetParamsWithHTTPClient creates a new MessagingServiceMessagesSentGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesSentGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesSentGetParams { + + return &MessagingServiceMessagesSentGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesSentGetParams contains all the parameters to send to the API endpoint +for the messaging service messages sent get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesSentGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages sent get params +func (o *MessagingServiceMessagesSentGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesSentGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages sent get params +func (o *MessagingServiceMessagesSentGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages sent get params +func (o *MessagingServiceMessagesSentGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesSentGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages sent get params +func (o *MessagingServiceMessagesSentGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages sent get params +func (o *MessagingServiceMessagesSentGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesSentGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages sent get params +func (o *MessagingServiceMessagesSentGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceMessagesSentGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_sent_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_sent_get_responses.go new file mode 100644 index 00000000000..bbaaa4807f0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_sent_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesSentGetReader is a Reader for the MessagingServiceMessagesSentGet structure. +type MessagingServiceMessagesSentGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesSentGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesSentGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesSentGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesSentGetOK creates a MessagingServiceMessagesSentGetOK with default headers values +func NewMessagingServiceMessagesSentGetOK() *MessagingServiceMessagesSentGetOK { + return &MessagingServiceMessagesSentGetOK{} +} + +/* +MessagingServiceMessagesSentGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceMessagesSentGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesSentGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesSentGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesSentGetDefault creates a MessagingServiceMessagesSentGetDefault with default headers values +func NewMessagingServiceMessagesSentGetDefault(code int) *MessagingServiceMessagesSentGetDefault { + return &MessagingServiceMessagesSentGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesSentGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceMessagesSentGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages sent get default response +func (o *MessagingServiceMessagesSentGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesSentGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesSentGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesSentGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_timeout_get_parameters.go new file mode 100644 index 00000000000..031ec2aa81d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_timeout_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceMessagesTimeoutGetParams creates a new MessagingServiceMessagesTimeoutGetParams object +// with the default values initialized. 
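WriteToRequest is what ties these parameter types to the go-openapi transport: before sending, the runtime hands each params object a runtime.ClientRequest and the object serializes its own timeout, query, and path values into it. Every generated *Params type therefore satisfies go-openapi's runtime.ClientRequestWriter interface, which compile-time assertions can make explicit. A minimal sketch (illustrative, not part of the vendored files), referencing MessagingServiceMessagesSentGetParams above and MessagingServiceMessagesTimeoutGetParams defined just below:

package main

import (
	"github.com/go-openapi/runtime"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// Compile-time checks: the generated params implement the ClientRequestWriter
// contract the transport invokes before sending the request.
var (
	_ runtime.ClientRequestWriter = (*operations.MessagingServiceMessagesSentGetParams)(nil)
	_ runtime.ClientRequestWriter = (*operations.MessagingServiceMessagesTimeoutGetParams)(nil)
)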
+func NewMessagingServiceMessagesTimeoutGetParams() *MessagingServiceMessagesTimeoutGetParams { + + return &MessagingServiceMessagesTimeoutGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceMessagesTimeoutGetParamsWithTimeout creates a new MessagingServiceMessagesTimeoutGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceMessagesTimeoutGetParamsWithTimeout(timeout time.Duration) *MessagingServiceMessagesTimeoutGetParams { + + return &MessagingServiceMessagesTimeoutGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceMessagesTimeoutGetParamsWithContext creates a new MessagingServiceMessagesTimeoutGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceMessagesTimeoutGetParamsWithContext(ctx context.Context) *MessagingServiceMessagesTimeoutGetParams { + + return &MessagingServiceMessagesTimeoutGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceMessagesTimeoutGetParamsWithHTTPClient creates a new MessagingServiceMessagesTimeoutGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceMessagesTimeoutGetParamsWithHTTPClient(client *http.Client) *MessagingServiceMessagesTimeoutGetParams { + + return &MessagingServiceMessagesTimeoutGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceMessagesTimeoutGetParams contains all the parameters to send to the API endpoint +for the messaging service messages timeout get operation typically these are written to a http.Request +*/ +type MessagingServiceMessagesTimeoutGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service messages timeout get params +func (o *MessagingServiceMessagesTimeoutGetParams) WithTimeout(timeout time.Duration) *MessagingServiceMessagesTimeoutGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service messages timeout get params +func (o *MessagingServiceMessagesTimeoutGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service messages timeout get params +func (o *MessagingServiceMessagesTimeoutGetParams) WithContext(ctx context.Context) *MessagingServiceMessagesTimeoutGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service messages timeout get params +func (o *MessagingServiceMessagesTimeoutGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service messages timeout get params +func (o *MessagingServiceMessagesTimeoutGetParams) WithHTTPClient(client *http.Client) *MessagingServiceMessagesTimeoutGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service messages timeout get params +func (o *MessagingServiceMessagesTimeoutGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceMessagesTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_timeout_get_responses.go new file mode 100644 index 00000000000..df6b542640f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_messages_timeout_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceMessagesTimeoutGetReader is a Reader for the MessagingServiceMessagesTimeoutGet structure. +type MessagingServiceMessagesTimeoutGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceMessagesTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceMessagesTimeoutGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceMessagesTimeoutGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceMessagesTimeoutGetOK creates a MessagingServiceMessagesTimeoutGetOK with default headers values +func NewMessagingServiceMessagesTimeoutGetOK() *MessagingServiceMessagesTimeoutGetOK { + return &MessagingServiceMessagesTimeoutGetOK{} +} + +/* +MessagingServiceMessagesTimeoutGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceMessagesTimeoutGetOK struct { + Payload []*models.MessageCounter +} + +func (o *MessagingServiceMessagesTimeoutGetOK) GetPayload() []*models.MessageCounter { + return o.Payload +} + +func (o *MessagingServiceMessagesTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceMessagesTimeoutGetDefault creates a MessagingServiceMessagesTimeoutGetDefault with default headers values +func NewMessagingServiceMessagesTimeoutGetDefault(code int) *MessagingServiceMessagesTimeoutGetDefault { + return &MessagingServiceMessagesTimeoutGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceMessagesTimeoutGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceMessagesTimeoutGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service messages timeout get default response +func (o *MessagingServiceMessagesTimeoutGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceMessagesTimeoutGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceMessagesTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceMessagesTimeoutGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_version_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_version_get_parameters.go new file mode 100644 index 00000000000..1da5af95515 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_version_get_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewMessagingServiceVersionGetParams creates a new MessagingServiceVersionGetParams object +// with the default values initialized. 
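Unlike the preceding zero-parameter operations, the version endpoint takes an addr ("Address") query parameter, so its params struct grows an Addr field with a matching WithAddr/SetAddr pair, and its WriteToRequest (shown further down) serializes the value via r.SetQueryParam, skipping the empty string. A minimal sketch of building such a request (illustrative; the address value is a placeholder, not taken from this diff):

package main

import (
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// versionParams builds parameters for the messaging service version endpoint;
// "192.168.100.11" is a placeholder node address.
func versionParams() *operations.MessagingServiceVersionGetParams {
	return operations.NewMessagingServiceVersionGetParamsWithTimeout(10 * time.Second).
		WithAddr("192.168.100.11")
}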
+func NewMessagingServiceVersionGetParams() *MessagingServiceVersionGetParams { + var () + return &MessagingServiceVersionGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewMessagingServiceVersionGetParamsWithTimeout creates a new MessagingServiceVersionGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewMessagingServiceVersionGetParamsWithTimeout(timeout time.Duration) *MessagingServiceVersionGetParams { + var () + return &MessagingServiceVersionGetParams{ + + timeout: timeout, + } +} + +// NewMessagingServiceVersionGetParamsWithContext creates a new MessagingServiceVersionGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewMessagingServiceVersionGetParamsWithContext(ctx context.Context) *MessagingServiceVersionGetParams { + var () + return &MessagingServiceVersionGetParams{ + + Context: ctx, + } +} + +// NewMessagingServiceVersionGetParamsWithHTTPClient creates a new MessagingServiceVersionGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewMessagingServiceVersionGetParamsWithHTTPClient(client *http.Client) *MessagingServiceVersionGetParams { + var () + return &MessagingServiceVersionGetParams{ + HTTPClient: client, + } +} + +/* +MessagingServiceVersionGetParams contains all the parameters to send to the API endpoint +for the messaging service version get operation typically these are written to a http.Request +*/ +type MessagingServiceVersionGetParams struct { + + /*Addr + Address + + */ + Addr string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the messaging service version get params +func (o *MessagingServiceVersionGetParams) WithTimeout(timeout time.Duration) *MessagingServiceVersionGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the messaging service version get params +func (o *MessagingServiceVersionGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the messaging service version get params +func (o *MessagingServiceVersionGetParams) WithContext(ctx context.Context) *MessagingServiceVersionGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the messaging service version get params +func (o *MessagingServiceVersionGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the messaging service version get params +func (o *MessagingServiceVersionGetParams) WithHTTPClient(client *http.Client) *MessagingServiceVersionGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the messaging service version get params +func (o *MessagingServiceVersionGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithAddr adds the addr to the messaging service version get params +func (o *MessagingServiceVersionGetParams) WithAddr(addr string) *MessagingServiceVersionGetParams { + o.SetAddr(addr) + return o +} + +// SetAddr adds the addr to the messaging service version get params +func (o *MessagingServiceVersionGetParams) SetAddr(addr string) { + o.Addr = addr +} + +// WriteToRequest writes these params to a swagger request +func (o *MessagingServiceVersionGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { 
+ return err + } + var res []error + + // query param addr + qrAddr := o.Addr + qAddr := qrAddr + if qAddr != "" { + if err := r.SetQueryParam("addr", qAddr); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_version_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_version_get_responses.go new file mode 100644 index 00000000000..5cb4d307893 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/messaging_service_version_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// MessagingServiceVersionGetReader is a Reader for the MessagingServiceVersionGet structure. +type MessagingServiceVersionGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *MessagingServiceVersionGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewMessagingServiceVersionGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewMessagingServiceVersionGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewMessagingServiceVersionGetOK creates a MessagingServiceVersionGetOK with default headers values +func NewMessagingServiceVersionGetOK() *MessagingServiceVersionGetOK { + return &MessagingServiceVersionGetOK{} +} + +/* +MessagingServiceVersionGetOK handles this case with default header values. + +Success +*/ +type MessagingServiceVersionGetOK struct { + Payload int32 +} + +func (o *MessagingServiceVersionGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *MessagingServiceVersionGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewMessagingServiceVersionGetDefault creates a MessagingServiceVersionGetDefault with default headers values +func NewMessagingServiceVersionGetDefault(code int) *MessagingServiceVersionGetDefault { + return &MessagingServiceVersionGetDefault{ + _statusCode: code, + } +} + +/* +MessagingServiceVersionGetDefault handles this case with default header values. 
+ +internal server error +*/ +type MessagingServiceVersionGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the messaging service version get default response +func (o *MessagingServiceVersionGetDefault) Code() int { + return o._statusCode +} + +func (o *MessagingServiceVersionGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *MessagingServiceVersionGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *MessagingServiceVersionGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/operations_client.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/operations_client.go new file mode 100644 index 00000000000..08e45b63371 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/operations_client.go @@ -0,0 +1,14713 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new operations API client. +func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for operations API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientService is the interface for Client methods +type ClientService interface { + CacheServiceCounterCacheCapacityPost(params *CacheServiceCounterCacheCapacityPostParams) (*CacheServiceCounterCacheCapacityPostOK, error) + + CacheServiceCounterCacheKeysToSaveGet(params *CacheServiceCounterCacheKeysToSaveGetParams) (*CacheServiceCounterCacheKeysToSaveGetOK, error) + + CacheServiceCounterCacheKeysToSavePost(params *CacheServiceCounterCacheKeysToSavePostParams) (*CacheServiceCounterCacheKeysToSavePostOK, error) + + CacheServiceCounterCacheSavePeriodGet(params *CacheServiceCounterCacheSavePeriodGetParams) (*CacheServiceCounterCacheSavePeriodGetOK, error) + + CacheServiceCounterCacheSavePeriodPost(params *CacheServiceCounterCacheSavePeriodPostParams) (*CacheServiceCounterCacheSavePeriodPostOK, error) + + CacheServiceInvalidateCounterCachePost(params *CacheServiceInvalidateCounterCachePostParams) (*CacheServiceInvalidateCounterCachePostOK, error) + + CacheServiceInvalidateKeyCachePost(params *CacheServiceInvalidateKeyCachePostParams) (*CacheServiceInvalidateKeyCachePostOK, error) + + CacheServiceKeyCacheCapacityPost(params *CacheServiceKeyCacheCapacityPostParams) (*CacheServiceKeyCacheCapacityPostOK, error) + + CacheServiceKeyCacheKeysToSaveGet(params *CacheServiceKeyCacheKeysToSaveGetParams) (*CacheServiceKeyCacheKeysToSaveGetOK, error) + + CacheServiceKeyCacheKeysToSavePost(params *CacheServiceKeyCacheKeysToSavePostParams) (*CacheServiceKeyCacheKeysToSavePostOK, error) + + CacheServiceKeyCacheSavePeriodGet(params *CacheServiceKeyCacheSavePeriodGetParams) 
(*CacheServiceKeyCacheSavePeriodGetOK, error) + + CacheServiceKeyCacheSavePeriodPost(params *CacheServiceKeyCacheSavePeriodPostParams) (*CacheServiceKeyCacheSavePeriodPostOK, error) + + CacheServiceMetricsCounterCapacityGet(params *CacheServiceMetricsCounterCapacityGetParams) (*CacheServiceMetricsCounterCapacityGetOK, error) + + CacheServiceMetricsCounterEntriesGet(params *CacheServiceMetricsCounterEntriesGetParams) (*CacheServiceMetricsCounterEntriesGetOK, error) + + CacheServiceMetricsCounterHitRateGet(params *CacheServiceMetricsCounterHitRateGetParams) (*CacheServiceMetricsCounterHitRateGetOK, error) + + CacheServiceMetricsCounterHitsGet(params *CacheServiceMetricsCounterHitsGetParams) (*CacheServiceMetricsCounterHitsGetOK, error) + + CacheServiceMetricsCounterHitsMovingAvrageGet(params *CacheServiceMetricsCounterHitsMovingAvrageGetParams) (*CacheServiceMetricsCounterHitsMovingAvrageGetOK, error) + + CacheServiceMetricsCounterRequestsGet(params *CacheServiceMetricsCounterRequestsGetParams) (*CacheServiceMetricsCounterRequestsGetOK, error) + + CacheServiceMetricsCounterRequestsMovingAvrageGet(params *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) (*CacheServiceMetricsCounterRequestsMovingAvrageGetOK, error) + + CacheServiceMetricsCounterSizeGet(params *CacheServiceMetricsCounterSizeGetParams) (*CacheServiceMetricsCounterSizeGetOK, error) + + CacheServiceMetricsKeyCapacityGet(params *CacheServiceMetricsKeyCapacityGetParams) (*CacheServiceMetricsKeyCapacityGetOK, error) + + CacheServiceMetricsKeyEntriesGet(params *CacheServiceMetricsKeyEntriesGetParams) (*CacheServiceMetricsKeyEntriesGetOK, error) + + CacheServiceMetricsKeyHitRateGet(params *CacheServiceMetricsKeyHitRateGetParams) (*CacheServiceMetricsKeyHitRateGetOK, error) + + CacheServiceMetricsKeyHitsGet(params *CacheServiceMetricsKeyHitsGetParams) (*CacheServiceMetricsKeyHitsGetOK, error) + + CacheServiceMetricsKeyHitsMovingAvrageGet(params *CacheServiceMetricsKeyHitsMovingAvrageGetParams) (*CacheServiceMetricsKeyHitsMovingAvrageGetOK, error) + + CacheServiceMetricsKeyRequestsGet(params *CacheServiceMetricsKeyRequestsGetParams) (*CacheServiceMetricsKeyRequestsGetOK, error) + + CacheServiceMetricsKeyRequestsMovingAvrageGet(params *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) (*CacheServiceMetricsKeyRequestsMovingAvrageGetOK, error) + + CacheServiceMetricsKeySizeGet(params *CacheServiceMetricsKeySizeGetParams) (*CacheServiceMetricsKeySizeGetOK, error) + + CacheServiceMetricsRowCapacityGet(params *CacheServiceMetricsRowCapacityGetParams) (*CacheServiceMetricsRowCapacityGetOK, error) + + CacheServiceMetricsRowEntriesGet(params *CacheServiceMetricsRowEntriesGetParams) (*CacheServiceMetricsRowEntriesGetOK, error) + + CacheServiceMetricsRowHitRateGet(params *CacheServiceMetricsRowHitRateGetParams) (*CacheServiceMetricsRowHitRateGetOK, error) + + CacheServiceMetricsRowHitsGet(params *CacheServiceMetricsRowHitsGetParams) (*CacheServiceMetricsRowHitsGetOK, error) + + CacheServiceMetricsRowHitsMovingAvrageGet(params *CacheServiceMetricsRowHitsMovingAvrageGetParams) (*CacheServiceMetricsRowHitsMovingAvrageGetOK, error) + + CacheServiceMetricsRowRequestsGet(params *CacheServiceMetricsRowRequestsGetParams) (*CacheServiceMetricsRowRequestsGetOK, error) + + CacheServiceMetricsRowRequestsMovingAvrageGet(params *CacheServiceMetricsRowRequestsMovingAvrageGetParams) (*CacheServiceMetricsRowRequestsMovingAvrageGetOK, error) + + CacheServiceMetricsRowSizeGet(params *CacheServiceMetricsRowSizeGetParams) (*CacheServiceMetricsRowSizeGetOK, 
error) + + CacheServiceRowCacheCapacityPost(params *CacheServiceRowCacheCapacityPostParams) (*CacheServiceRowCacheCapacityPostOK, error) + + CacheServiceRowCacheKeysToSaveGet(params *CacheServiceRowCacheKeysToSaveGetParams) (*CacheServiceRowCacheKeysToSaveGetOK, error) + + CacheServiceRowCacheKeysToSavePost(params *CacheServiceRowCacheKeysToSavePostParams) (*CacheServiceRowCacheKeysToSavePostOK, error) + + CacheServiceRowCacheSavePeriodGet(params *CacheServiceRowCacheSavePeriodGetParams) (*CacheServiceRowCacheSavePeriodGetOK, error) + + CacheServiceRowCacheSavePeriodPost(params *CacheServiceRowCacheSavePeriodPostParams) (*CacheServiceRowCacheSavePeriodPostOK, error) + + CacheServiceSaveCachesPost(params *CacheServiceSaveCachesPostParams) (*CacheServiceSaveCachesPostOK, error) + + CollectdByPluginidGet(params *CollectdByPluginidGetParams) (*CollectdByPluginidGetOK, error) + + CollectdByPluginidPost(params *CollectdByPluginidPostParams) (*CollectdByPluginidPostOK, error) + + CollectdGet(params *CollectdGetParams) (*CollectdGetOK, error) + + CollectdPost(params *CollectdPostParams) (*CollectdPostOK, error) + + ColumnFamilyAutocompactionByNameDelete(params *ColumnFamilyAutocompactionByNameDeleteParams) (*ColumnFamilyAutocompactionByNameDeleteOK, error) + + ColumnFamilyAutocompactionByNameGet(params *ColumnFamilyAutocompactionByNameGetParams) (*ColumnFamilyAutocompactionByNameGetOK, error) + + ColumnFamilyAutocompactionByNamePost(params *ColumnFamilyAutocompactionByNamePostParams) (*ColumnFamilyAutocompactionByNamePostOK, error) + + ColumnFamilyBuiltIndexesByNameGet(params *ColumnFamilyBuiltIndexesByNameGetParams) (*ColumnFamilyBuiltIndexesByNameGetOK, error) + + ColumnFamilyCompactionByNamePost(params *ColumnFamilyCompactionByNamePostParams) (*ColumnFamilyCompactionByNamePostOK, error) + + ColumnFamilyCompactionStrategyByNameGet(params *ColumnFamilyCompactionStrategyByNameGetParams) (*ColumnFamilyCompactionStrategyByNameGetOK, error) + + ColumnFamilyCompactionStrategyByNamePost(params *ColumnFamilyCompactionStrategyByNamePostParams) (*ColumnFamilyCompactionStrategyByNamePostOK, error) + + ColumnFamilyCompressionParametersByNameGet(params *ColumnFamilyCompressionParametersByNameGetParams) (*ColumnFamilyCompressionParametersByNameGetOK, error) + + ColumnFamilyCompressionParametersByNamePost(params *ColumnFamilyCompressionParametersByNamePostParams) (*ColumnFamilyCompressionParametersByNamePostOK, error) + + ColumnFamilyCrcCheckChanceByNamePost(params *ColumnFamilyCrcCheckChanceByNamePostParams) (*ColumnFamilyCrcCheckChanceByNamePostOK, error) + + ColumnFamilyDroppableRatioByNameGet(params *ColumnFamilyDroppableRatioByNameGetParams) (*ColumnFamilyDroppableRatioByNameGetOK, error) + + ColumnFamilyEstimateKeysByNameGet(params *ColumnFamilyEstimateKeysByNameGetParams) (*ColumnFamilyEstimateKeysByNameGetOK, error) + + ColumnFamilyGet(params *ColumnFamilyGetParams) (*ColumnFamilyGetOK, error) + + ColumnFamilyLoadSstableByNamePost(params *ColumnFamilyLoadSstableByNamePostParams) (*ColumnFamilyLoadSstableByNamePostOK, error) + + ColumnFamilyMajorCompactionByNamePost(params *ColumnFamilyMajorCompactionByNamePostParams) (*ColumnFamilyMajorCompactionByNamePostOK, error) + + ColumnFamilyMaximumCompactionByNameGet(params *ColumnFamilyMaximumCompactionByNameGetParams) (*ColumnFamilyMaximumCompactionByNameGetOK, error) + + ColumnFamilyMaximumCompactionByNamePost(params *ColumnFamilyMaximumCompactionByNamePostParams) (*ColumnFamilyMaximumCompactionByNamePostOK, error) + + 
ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGet(params *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) (*ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsAllMemtablesLiveDataSizeGet(params *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) (*ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK, error)
+
+	ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGet(params *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) (*ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsAllMemtablesOffHeapSizeGet(params *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) (*ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK, error)
+
+	ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGet(params *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) (*ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsAllMemtablesOnHeapSizeGet(params *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) (*ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGet(params *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) (*ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterDiskSpaceUsedGet(params *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) (*ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterFalsePositivesByNameGet(params *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) (*ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterFalsePositivesGet(params *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) (*ColumnFamilyMetricsBloomFilterFalsePositivesGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterFalseRatioByNameGet(params *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) (*ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterFalseRatioGet(params *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) (*ColumnFamilyMetricsBloomFilterFalseRatioGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGet(params *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) (*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK, error)
+
+	ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGet(params *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) (*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK, error)
+
+	ColumnFamilyMetricsCasCommitByNameGet(params *ColumnFamilyMetricsCasCommitByNameGetParams) (*ColumnFamilyMetricsCasCommitByNameGetOK, error)
+
+	ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGet(params *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsCasPrepareByNameGet(params *ColumnFamilyMetricsCasPrepareByNameGetParams) (*ColumnFamilyMetricsCasPrepareByNameGetOK, error)
+
+	ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGet(params *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsCasProposeByNameGet(params *ColumnFamilyMetricsCasProposeByNameGetParams) (*ColumnFamilyMetricsCasProposeByNameGetOK, error)
+
+	ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGet(params *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGet(params *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) (*ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGet(params *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) (*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK, error)
+
+	ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGet(params *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) (*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK, error)
+
+	ColumnFamilyMetricsCompressionRatioByNameGet(params *ColumnFamilyMetricsCompressionRatioByNameGetParams) (*ColumnFamilyMetricsCompressionRatioByNameGetOK, error)
+
+	ColumnFamilyMetricsCompressionRatioGet(params *ColumnFamilyMetricsCompressionRatioGetParams) (*ColumnFamilyMetricsCompressionRatioGetOK, error)
+
+	ColumnFamilyMetricsCoordinatorReadGet(params *ColumnFamilyMetricsCoordinatorReadGetParams) (*ColumnFamilyMetricsCoordinatorReadGetOK, error)
+
+	ColumnFamilyMetricsCoordinatorScanGet(params *ColumnFamilyMetricsCoordinatorScanGetParams) (*ColumnFamilyMetricsCoordinatorScanGetOK, error)
+
+	ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGet(params *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) (*ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsEstimatedRowCountByNameGet(params *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) (*ColumnFamilyMetricsEstimatedRowCountByNameGetOK, error)
+
+	ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGet(params *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) (*ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGet(params *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) (*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK, error)
+
+	ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGet(params *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) (*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK, error)
+
+	ColumnFamilyMetricsKeyCacheHitRateByNameGet(params *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) (*ColumnFamilyMetricsKeyCacheHitRateByNameGetOK, error)
+
+	ColumnFamilyMetricsLiveDiskSpaceUsedByNameGet(params *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) (*ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK, error)
+
+	ColumnFamilyMetricsLiveDiskSpaceUsedGet(params *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) (*ColumnFamilyMetricsLiveDiskSpaceUsedGetOK, error)
+
+	ColumnFamilyMetricsLiveScannedHistogramByNameGet(params *ColumnFamilyMetricsLiveScannedHistogramByNameGetParams) (*ColumnFamilyMetricsLiveScannedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsLiveSsTableCountByNameGet(params *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) (*ColumnFamilyMetricsLiveSsTableCountByNameGetOK, error)
+
+	ColumnFamilyMetricsLiveSsTableCountGet(params *ColumnFamilyMetricsLiveSsTableCountGetParams) (*ColumnFamilyMetricsLiveSsTableCountGetOK, error)
+
+	ColumnFamilyMetricsMaxRowSizeByNameGet(params *ColumnFamilyMetricsMaxRowSizeByNameGetParams) (*ColumnFamilyMetricsMaxRowSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsMaxRowSizeGet(params *ColumnFamilyMetricsMaxRowSizeGetParams) (*ColumnFamilyMetricsMaxRowSizeGetOK, error)
+
+	ColumnFamilyMetricsMeanRowSizeByNameGet(params *ColumnFamilyMetricsMeanRowSizeByNameGetParams) (*ColumnFamilyMetricsMeanRowSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsMeanRowSizeGet(params *ColumnFamilyMetricsMeanRowSizeGetParams) (*ColumnFamilyMetricsMeanRowSizeGetOK, error)
+
+	ColumnFamilyMetricsMemtableColumnsCountByNameGet(params *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) (*ColumnFamilyMetricsMemtableColumnsCountByNameGetOK, error)
+
+	ColumnFamilyMetricsMemtableColumnsCountGet(params *ColumnFamilyMetricsMemtableColumnsCountGetParams) (*ColumnFamilyMetricsMemtableColumnsCountGetOK, error)
+
+	ColumnFamilyMetricsMemtableLiveDataSizeByNameGet(params *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) (*ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsMemtableLiveDataSizeGet(params *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) (*ColumnFamilyMetricsMemtableLiveDataSizeGetOK, error)
+
+	ColumnFamilyMetricsMemtableOffHeapSizeByNameGet(params *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) (*ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsMemtableOffHeapSizeGet(params *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) (*ColumnFamilyMetricsMemtableOffHeapSizeGetOK, error)
+
+	ColumnFamilyMetricsMemtableOnHeapSizeByNameGet(params *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) (*ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsMemtableOnHeapSizeGet(params *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) (*ColumnFamilyMetricsMemtableOnHeapSizeGetOK, error)
+
+	ColumnFamilyMetricsMemtableSwitchCountByNameGet(params *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) (*ColumnFamilyMetricsMemtableSwitchCountByNameGetOK, error)
+
+	ColumnFamilyMetricsMemtableSwitchCountGet(params *ColumnFamilyMetricsMemtableSwitchCountGetParams) (*ColumnFamilyMetricsMemtableSwitchCountGetOK, error)
+
+	ColumnFamilyMetricsMinRowSizeByNameGet(params *ColumnFamilyMetricsMinRowSizeByNameGetParams) (*ColumnFamilyMetricsMinRowSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsMinRowSizeGet(params *ColumnFamilyMetricsMinRowSizeGetParams) (*ColumnFamilyMetricsMinRowSizeGetOK, error)
+
+	ColumnFamilyMetricsPendingCompactionsByNameGet(params *ColumnFamilyMetricsPendingCompactionsByNameGetParams) (*ColumnFamilyMetricsPendingCompactionsByNameGetOK, error)
+
+	ColumnFamilyMetricsPendingCompactionsGet(params *ColumnFamilyMetricsPendingCompactionsGetParams) (*ColumnFamilyMetricsPendingCompactionsGetOK, error)
+
+	ColumnFamilyMetricsPendingFlushesByNameGet(params *ColumnFamilyMetricsPendingFlushesByNameGetParams) (*ColumnFamilyMetricsPendingFlushesByNameGetOK, error)
+
+	ColumnFamilyMetricsPendingFlushesGet(params *ColumnFamilyMetricsPendingFlushesGetParams) (*ColumnFamilyMetricsPendingFlushesGetOK, error)
+
+	ColumnFamilyMetricsRangeLatencyByNameGet(params *ColumnFamilyMetricsRangeLatencyByNameGetParams) (*ColumnFamilyMetricsRangeLatencyByNameGetOK, error)
+
+	ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGet(params *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsRangeLatencyGet(params *ColumnFamilyMetricsRangeLatencyGetParams) (*ColumnFamilyMetricsRangeLatencyGetOK, error)
+
+	ColumnFamilyMetricsReadByNameGet(params *ColumnFamilyMetricsReadByNameGetParams) (*ColumnFamilyMetricsReadByNameGetOK, error)
+
+	ColumnFamilyMetricsReadGet(params *ColumnFamilyMetricsReadGetParams) (*ColumnFamilyMetricsReadGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyByNameGet(params *ColumnFamilyMetricsReadLatencyByNameGetParams) (*ColumnFamilyMetricsReadLatencyByNameGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyGet(params *ColumnFamilyMetricsReadLatencyGetParams) (*ColumnFamilyMetricsReadLatencyGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyHistogramGet(params *ColumnFamilyMetricsReadLatencyHistogramGetParams) (*ColumnFamilyMetricsReadLatencyHistogramGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsReadLatencyMovingAverageHistogramGet(params *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) (*ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK, error)
+
+	ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGet(params *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK, error)
+
+	ColumnFamilyMetricsRecentBloomFilterFalsePositivesGet(params *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK, error)
+
+	ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGet(params *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK, error)
+
+	ColumnFamilyMetricsRecentBloomFilterFalseRatioGet(params *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK, error)
+
+	ColumnFamilyMetricsRowCacheHitByNameGet(params *ColumnFamilyMetricsRowCacheHitByNameGetParams) (*ColumnFamilyMetricsRowCacheHitByNameGetOK, error)
+
+	ColumnFamilyMetricsRowCacheHitGet(params *ColumnFamilyMetricsRowCacheHitGetParams) (*ColumnFamilyMetricsRowCacheHitGetOK, error)
+
+	ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGet(params *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) (*ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK, error)
+
+	ColumnFamilyMetricsRowCacheHitOutOfRangeGet(params *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) (*ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK, error)
+
+	ColumnFamilyMetricsRowCacheMissByNameGet(params *ColumnFamilyMetricsRowCacheMissByNameGetParams) (*ColumnFamilyMetricsRowCacheMissByNameGetOK, error)
+
+	ColumnFamilyMetricsRowCacheMissGet(params *ColumnFamilyMetricsRowCacheMissGetParams) (*ColumnFamilyMetricsRowCacheMissGetOK, error)
+
+	ColumnFamilyMetricsSnapshotsSizeByNameGet(params *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) (*ColumnFamilyMetricsSnapshotsSizeByNameGetOK, error)
+
+	ColumnFamilyMetricsSpeculativeRetriesByNameGet(params *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) (*ColumnFamilyMetricsSpeculativeRetriesByNameGetOK, error)
+
+	ColumnFamilyMetricsSpeculativeRetriesGet(params *ColumnFamilyMetricsSpeculativeRetriesGetParams) (*ColumnFamilyMetricsSpeculativeRetriesGetOK, error)
+
+	ColumnFamilyMetricsSstablesPerReadHistogramByNameGet(params *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) (*ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsTombstoneScannedHistogramByNameGet(params *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) (*ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsTotalDiskSpaceUsedByNameGet(params *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) (*ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK, error)
+
+	ColumnFamilyMetricsTotalDiskSpaceUsedGet(params *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) (*ColumnFamilyMetricsTotalDiskSpaceUsedGetOK, error)
+
+	ColumnFamilyMetricsTrueSnapshotsSizeGet(params *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) (*ColumnFamilyMetricsTrueSnapshotsSizeGetOK, error)
+
+	ColumnFamilyMetricsWaitingOnFreeMemtableGet(params *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) (*ColumnFamilyMetricsWaitingOnFreeMemtableGetOK, error)
+
+	ColumnFamilyMetricsWriteByNameGet(params *ColumnFamilyMetricsWriteByNameGetParams) (*ColumnFamilyMetricsWriteByNameGetOK, error)
+
+	ColumnFamilyMetricsWriteGet(params *ColumnFamilyMetricsWriteGetParams) (*ColumnFamilyMetricsWriteGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyByNameGet(params *ColumnFamilyMetricsWriteLatencyByNameGetParams) (*ColumnFamilyMetricsWriteLatencyByNameGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyGet(params *ColumnFamilyMetricsWriteLatencyGetParams) (*ColumnFamilyMetricsWriteLatencyGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyHistogramGet(params *ColumnFamilyMetricsWriteLatencyHistogramGetParams) (*ColumnFamilyMetricsWriteLatencyHistogramGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK, error)
+
+	ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGet(params *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) (*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK, error)
+
+	ColumnFamilyMinimumCompactionByNameGet(params *ColumnFamilyMinimumCompactionByNameGetParams) (*ColumnFamilyMinimumCompactionByNameGetOK, error)
+
+	ColumnFamilyMinimumCompactionByNamePost(params *ColumnFamilyMinimumCompactionByNamePostParams) (*ColumnFamilyMinimumCompactionByNamePostOK, error)
+
+	ColumnFamilyNameGet(params *ColumnFamilyNameGetParams) (*ColumnFamilyNameGetOK, error)
+
+	ColumnFamilyNameKeyspaceGet(params *ColumnFamilyNameKeyspaceGetParams) (*ColumnFamilyNameKeyspaceGetOK, error)
+
+	ColumnFamilySstablesByKeyByNameGet(params *ColumnFamilySstablesByKeyByNameGetParams) (*ColumnFamilySstablesByKeyByNameGetOK, error)
+
+	ColumnFamilySstablesPerLevelByNameGet(params *ColumnFamilySstablesPerLevelByNameGetParams) (*ColumnFamilySstablesPerLevelByNameGetOK, error)
+
+	ColumnFamilySstablesUnleveledByNameGet(params *ColumnFamilySstablesUnleveledByNameGetParams) (*ColumnFamilySstablesUnleveledByNameGetOK, error)
+
+	CommitLogMetricsWaitingOnCommitGet(params *CommitLogMetricsWaitingOnCommitGetParams) (*CommitLogMetricsWaitingOnCommitGetOK, error)
+
+	CommitLogMetricsWaitingOnSegmentAllocationGet(params *CommitLogMetricsWaitingOnSegmentAllocationGetParams) (*CommitLogMetricsWaitingOnSegmentAllocationGetOK, error)
+
+	CommitlogMetricsCompletedTasksGet(params *CommitlogMetricsCompletedTasksGetParams) (*CommitlogMetricsCompletedTasksGetOK, error)
+
+	CommitlogMetricsPendingTasksGet(params *CommitlogMetricsPendingTasksGetParams) (*CommitlogMetricsPendingTasksGetOK, error)
+
+	CommitlogMetricsTotalCommitLogSizeGet(params *CommitlogMetricsTotalCommitLogSizeGetParams) (*CommitlogMetricsTotalCommitLogSizeGetOK, error)
+
+	CommitlogRecoverByPathPost(params *CommitlogRecoverByPathPostParams) (*CommitlogRecoverByPathPostOK, error)
+
+	CommitlogSegmentsActiveGet(params *CommitlogSegmentsActiveGetParams) (*CommitlogSegmentsActiveGetOK, error)
+
+	CommitlogSegmentsArchivingGet(params *CommitlogSegmentsArchivingGetParams) (*CommitlogSegmentsArchivingGetOK, error)
+
+	CompactionManagerCompactionHistoryGet(params *CompactionManagerCompactionHistoryGetParams) (*CompactionManagerCompactionHistoryGetOK, error)
+
+	CompactionManagerCompactionInfoGet(params *CompactionManagerCompactionInfoGetParams) (*CompactionManagerCompactionInfoGetOK, error)
+
+	CompactionManagerCompactionsGet(params *CompactionManagerCompactionsGetParams) (*CompactionManagerCompactionsGetOK, error)
+
+	CompactionManagerForceUserDefinedCompactionPost(params *CompactionManagerForceUserDefinedCompactionPostParams) (*CompactionManagerForceUserDefinedCompactionPostOK, error)
+
+	CompactionManagerMetricsBytesCompactedGet(params *CompactionManagerMetricsBytesCompactedGetParams) (*CompactionManagerMetricsBytesCompactedGetOK, error)
+
+	CompactionManagerMetricsCompletedTasksGet(params *CompactionManagerMetricsCompletedTasksGetParams) (*CompactionManagerMetricsCompletedTasksGetOK, error)
+
+	CompactionManagerMetricsPendingTasksGet(params *CompactionManagerMetricsPendingTasksGetParams) (*CompactionManagerMetricsPendingTasksGetOK, error)
+
+	CompactionManagerMetricsTotalCompactionsCompletedGet(params *CompactionManagerMetricsTotalCompactionsCompletedGetParams) (*CompactionManagerMetricsTotalCompactionsCompletedGetOK, error)
+
+	CompactionManagerStopCompactionPost(params *CompactionManagerStopCompactionPostParams) (*CompactionManagerStopCompactionPostOK, error)
+
+	FailureDetectorCountEndpointDownGet(params *FailureDetectorCountEndpointDownGetParams) (*FailureDetectorCountEndpointDownGetOK, error)
+
+	FailureDetectorCountEndpointUpGet(params *FailureDetectorCountEndpointUpGetParams) (*FailureDetectorCountEndpointUpGetOK, error)
+
+	FailureDetectorEndpointPhiValuesGet(params *FailureDetectorEndpointPhiValuesGetParams) (*FailureDetectorEndpointPhiValuesGetOK, error)
+
+	FailureDetectorEndpointsGet(params *FailureDetectorEndpointsGetParams) (*FailureDetectorEndpointsGetOK, error)
+
+	FailureDetectorEndpointsStatesByAddrGet(params *FailureDetectorEndpointsStatesByAddrGetParams) (*FailureDetectorEndpointsStatesByAddrGetOK, error)
+
+	FailureDetectorPhiGet(params *FailureDetectorPhiGetParams) (*FailureDetectorPhiGetOK, error)
+
+	FailureDetectorPhiPost(params *FailureDetectorPhiPostParams) (*FailureDetectorPhiPostOK, error)
+
+	FailureDetectorSimpleStatesGet(params *FailureDetectorSimpleStatesGetParams) (*FailureDetectorSimpleStatesGetOK, error)
+
+	GossiperAssassinateByAddrPost(params *GossiperAssassinateByAddrPostParams) (*GossiperAssassinateByAddrPostOK, error)
+
+	GossiperDowntimeByAddrGet(params *GossiperDowntimeByAddrGetParams) (*GossiperDowntimeByAddrGetOK, error)
+
+	GossiperEndpointDownGet(params *GossiperEndpointDownGetParams) (*GossiperEndpointDownGetOK, error)
+
+	GossiperEndpointLiveGet(params *GossiperEndpointLiveGetParams) (*GossiperEndpointLiveGetOK, error)
+
+	GossiperGenerationNumberByAddrGet(params *GossiperGenerationNumberByAddrGetParams) (*GossiperGenerationNumberByAddrGetOK, error)
+
+	GossiperHeartBeatVersionByAddrGet(params *GossiperHeartBeatVersionByAddrGetParams) (*GossiperHeartBeatVersionByAddrGetOK, error)
+
+	HintedHandoffHintsDelete(params *HintedHandoffHintsDeleteParams) (*HintedHandoffHintsDeleteOK, error)
+
+	HintedHandoffHintsGet(params *HintedHandoffHintsGetParams) (*HintedHandoffHintsGetOK, error)
+
+	HintedHandoffMetricsCreateHintByAddrGet(params *HintedHandoffMetricsCreateHintByAddrGetParams) (*HintedHandoffMetricsCreateHintByAddrGetOK, error)
+
+	HintedHandoffMetricsNotStoredHintsByAddrGet(params *HintedHandoffMetricsNotStoredHintsByAddrGetParams) (*HintedHandoffMetricsNotStoredHintsByAddrGetOK, error)
+
+	HintedHandoffPausePost(params *HintedHandoffPausePostParams) (*HintedHandoffPausePostOK, error)
+
+	HintedHandoffSchedulePost(params *HintedHandoffSchedulePostParams) (*HintedHandoffSchedulePostOK, error)
+
+	LsaCompactPost(params *LsaCompactPostParams) (*LsaCompactPostOK, error)
+
+	MessagingServiceMessagesDroppedByVerGet(params *MessagingServiceMessagesDroppedByVerGetParams) (*MessagingServiceMessagesDroppedByVerGetOK, error)
+
+	MessagingServiceMessagesDroppedGet(params *MessagingServiceMessagesDroppedGetParams) (*MessagingServiceMessagesDroppedGetOK, error)
+
+	MessagingServiceMessagesExceptionGet(params *MessagingServiceMessagesExceptionGetParams) (*MessagingServiceMessagesExceptionGetOK, error)
+
+	MessagingServiceMessagesPendingGet(params *MessagingServiceMessagesPendingGetParams) (*MessagingServiceMessagesPendingGetOK, error)
+
+	MessagingServiceMessagesRepliedGet(params *MessagingServiceMessagesRepliedGetParams) (*MessagingServiceMessagesRepliedGetOK, error)
+
+	MessagingServiceMessagesRespondCompletedGet(params *MessagingServiceMessagesRespondCompletedGetParams) (*MessagingServiceMessagesRespondCompletedGetOK, error)
+
+	MessagingServiceMessagesRespondPendingGet(params *MessagingServiceMessagesRespondPendingGetParams) (*MessagingServiceMessagesRespondPendingGetOK, error)
+
+	MessagingServiceMessagesSentGet(params *MessagingServiceMessagesSentGetParams) (*MessagingServiceMessagesSentGetOK, error)
+
+	MessagingServiceMessagesTimeoutGet(params *MessagingServiceMessagesTimeoutGetParams) (*MessagingServiceMessagesTimeoutGetOK, error)
+
+	MessagingServiceVersionGet(params *MessagingServiceVersionGetParams) (*MessagingServiceVersionGetOK, error)
+
+	SnitchDatacenterGet(params *SnitchDatacenterGetParams) (*SnitchDatacenterGetOK, error)
+
+	SnitchNameGet(params *SnitchNameGetParams) (*SnitchNameGetOK, error)
+
+	SnitchRackGet(params *SnitchRackGetParams) (*SnitchRackGetOK, error)
+
+	StorageProxyCasContentionTimeoutGet(params *StorageProxyCasContentionTimeoutGetParams) (*StorageProxyCasContentionTimeoutGetOK, error)
+
+	StorageProxyCasContentionTimeoutPost(params *StorageProxyCasContentionTimeoutPostParams) (*StorageProxyCasContentionTimeoutPostOK, error)
+
+	StorageProxyCounterWriteRPCTimeoutGet(params *StorageProxyCounterWriteRPCTimeoutGetParams) (*StorageProxyCounterWriteRPCTimeoutGetOK, error)
+
+	StorageProxyCounterWriteRPCTimeoutPost(params *StorageProxyCounterWriteRPCTimeoutPostParams) (*StorageProxyCounterWriteRPCTimeoutPostOK, error)
+
+	StorageProxyHintedHandoffEnabledByDcGet(params *StorageProxyHintedHandoffEnabledByDcGetParams) (*StorageProxyHintedHandoffEnabledByDcGetOK, error)
+
+	StorageProxyHintedHandoffEnabledByDcPost(params *StorageProxyHintedHandoffEnabledByDcPostParams) (*StorageProxyHintedHandoffEnabledByDcPostOK, error)
+
+	StorageProxyHintedHandoffEnabledGet(params *StorageProxyHintedHandoffEnabledGetParams) (*StorageProxyHintedHandoffEnabledGetOK, error)
+
+	StorageProxyHintedHandoffEnabledPost(params *StorageProxyHintedHandoffEnabledPostParams) (*StorageProxyHintedHandoffEnabledPostOK, error)
+
+	StorageProxyHintsInProgressGet(params *StorageProxyHintsInProgressGetParams) (*StorageProxyHintsInProgressGetOK, error)
+
+	StorageProxyMaxHintWindowGet(params *StorageProxyMaxHintWindowGetParams) (*StorageProxyMaxHintWindowGetOK, error)
+
+	StorageProxyMaxHintWindowPost(params *StorageProxyMaxHintWindowPostParams) (*StorageProxyMaxHintWindowPostOK, error)
+
+	StorageProxyMaxHintsInProgressGet(params *StorageProxyMaxHintsInProgressGetParams) (*StorageProxyMaxHintsInProgressGetOK, error)
+
+	StorageProxyMaxHintsInProgressPost(params *StorageProxyMaxHintsInProgressPostParams) (*StorageProxyMaxHintsInProgressPostOK, error)
+
+	StorageProxyMetricsCasReadConditionNotMetGet(params *StorageProxyMetricsCasReadConditionNotMetGetParams) (*StorageProxyMetricsCasReadConditionNotMetGetOK, error)
+
+	StorageProxyMetricsCasReadContentionGet(params *StorageProxyMetricsCasReadContentionGetParams) (*StorageProxyMetricsCasReadContentionGetOK, error)
+
+	StorageProxyMetricsCasReadTimeoutsGet(params *StorageProxyMetricsCasReadTimeoutsGetParams) (*StorageProxyMetricsCasReadTimeoutsGetOK, error)
+
+	StorageProxyMetricsCasReadUnavailablesGet(params *StorageProxyMetricsCasReadUnavailablesGetParams) (*StorageProxyMetricsCasReadUnavailablesGetOK, error)
+
+	StorageProxyMetricsCasReadUnfinishedCommitGet(params *StorageProxyMetricsCasReadUnfinishedCommitGetParams) (*StorageProxyMetricsCasReadUnfinishedCommitGetOK, error)
+
+	StorageProxyMetricsCasWriteConditionNotMetGet(params *StorageProxyMetricsCasWriteConditionNotMetGetParams) (*StorageProxyMetricsCasWriteConditionNotMetGetOK, error)
+
+	StorageProxyMetricsCasWriteContentionGet(params *StorageProxyMetricsCasWriteContentionGetParams) (*StorageProxyMetricsCasWriteContentionGetOK, error)
+
+	StorageProxyMetricsCasWriteTimeoutsGet(params *StorageProxyMetricsCasWriteTimeoutsGetParams) (*StorageProxyMetricsCasWriteTimeoutsGetOK, error)
+
+	StorageProxyMetricsCasWriteUnavailablesGet(params *StorageProxyMetricsCasWriteUnavailablesGetParams) (*StorageProxyMetricsCasWriteUnavailablesGetOK, error)
+
+	StorageProxyMetricsCasWriteUnfinishedCommitGet(params *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) (*StorageProxyMetricsCasWriteUnfinishedCommitGetOK, error)
+
+	StorageProxyMetricsRangeEstimatedHistogramGet(params *StorageProxyMetricsRangeEstimatedHistogramGetParams) (*StorageProxyMetricsRangeEstimatedHistogramGetOK, error)
+
+	StorageProxyMetricsRangeGet(params *StorageProxyMetricsRangeGetParams) (*StorageProxyMetricsRangeGetOK, error)
+
+	StorageProxyMetricsRangeHistogramGet(params *StorageProxyMetricsRangeHistogramGetParams) (*StorageProxyMetricsRangeHistogramGetOK, error)
+
+	StorageProxyMetricsRangeMovingAverageHistogramGet(params *StorageProxyMetricsRangeMovingAverageHistogramGetParams) (*StorageProxyMetricsRangeMovingAverageHistogramGetOK, error)
+
+	StorageProxyMetricsRangeTimeoutsGet(params *StorageProxyMetricsRangeTimeoutsGetParams) (*StorageProxyMetricsRangeTimeoutsGetOK, error)
+
+	StorageProxyMetricsRangeTimeoutsRatesGet(params *StorageProxyMetricsRangeTimeoutsRatesGetParams) (*StorageProxyMetricsRangeTimeoutsRatesGetOK, error)
+
+	StorageProxyMetricsRangeUnavailablesGet(params *StorageProxyMetricsRangeUnavailablesGetParams) (*StorageProxyMetricsRangeUnavailablesGetOK, error)
+
+	StorageProxyMetricsRangeUnavailablesRatesGet(params *StorageProxyMetricsRangeUnavailablesRatesGetParams) (*StorageProxyMetricsRangeUnavailablesRatesGetOK, error)
+
+	StorageProxyMetricsReadEstimatedHistogramGet(params *StorageProxyMetricsReadEstimatedHistogramGetParams) (*StorageProxyMetricsReadEstimatedHistogramGetOK, error)
+
+	StorageProxyMetricsReadGet(params *StorageProxyMetricsReadGetParams) (*StorageProxyMetricsReadGetOK, error)
+
+	StorageProxyMetricsReadHistogramGet(params *StorageProxyMetricsReadHistogramGetParams) (*StorageProxyMetricsReadHistogramGetOK, error)
+
+	StorageProxyMetricsReadMovingAverageHistogramGet(params *StorageProxyMetricsReadMovingAverageHistogramGetParams) (*StorageProxyMetricsReadMovingAverageHistogramGetOK, error)
+
+	StorageProxyMetricsReadTimeoutsGet(params *StorageProxyMetricsReadTimeoutsGetParams) (*StorageProxyMetricsReadTimeoutsGetOK, error)
+
+	StorageProxyMetricsReadTimeoutsRatesGet(params *StorageProxyMetricsReadTimeoutsRatesGetParams) (*StorageProxyMetricsReadTimeoutsRatesGetOK, error)
+
+	StorageProxyMetricsReadUnavailablesGet(params *StorageProxyMetricsReadUnavailablesGetParams) (*StorageProxyMetricsReadUnavailablesGetOK, error)
+
+	StorageProxyMetricsReadUnavailablesRatesGet(params *StorageProxyMetricsReadUnavailablesRatesGetParams) (*StorageProxyMetricsReadUnavailablesRatesGetOK, error)
+
+	StorageProxyMetricsWriteEstimatedHistogramGet(params *StorageProxyMetricsWriteEstimatedHistogramGetParams) (*StorageProxyMetricsWriteEstimatedHistogramGetOK, error)
+
+	StorageProxyMetricsWriteGet(params *StorageProxyMetricsWriteGetParams) (*StorageProxyMetricsWriteGetOK, error)
+
+	StorageProxyMetricsWriteHistogramGet(params *StorageProxyMetricsWriteHistogramGetParams) (*StorageProxyMetricsWriteHistogramGetOK, error)
+
+	StorageProxyMetricsWriteMovingAverageHistogramGet(params *StorageProxyMetricsWriteMovingAverageHistogramGetParams) (*StorageProxyMetricsWriteMovingAverageHistogramGetOK, error)
+
+	StorageProxyMetricsWriteTimeoutsGet(params *StorageProxyMetricsWriteTimeoutsGetParams) (*StorageProxyMetricsWriteTimeoutsGetOK, error)
+
+	StorageProxyMetricsWriteTimeoutsRatesGet(params *StorageProxyMetricsWriteTimeoutsRatesGetParams) (*StorageProxyMetricsWriteTimeoutsRatesGetOK, error)
+
+	StorageProxyMetricsWriteUnavailablesGet(params *StorageProxyMetricsWriteUnavailablesGetParams) (*StorageProxyMetricsWriteUnavailablesGetOK, error)
+
+	StorageProxyMetricsWriteUnavailablesRatesGet(params *StorageProxyMetricsWriteUnavailablesRatesGetParams) (*StorageProxyMetricsWriteUnavailablesRatesGetOK, error)
+
+	StorageProxyRangeRPCTimeoutGet(params *StorageProxyRangeRPCTimeoutGetParams) (*StorageProxyRangeRPCTimeoutGetOK, error)
+
+	StorageProxyRangeRPCTimeoutPost(params *StorageProxyRangeRPCTimeoutPostParams) (*StorageProxyRangeRPCTimeoutPostOK, error)
+
+	StorageProxyReadRepairAttemptedGet(params *StorageProxyReadRepairAttemptedGetParams) (*StorageProxyReadRepairAttemptedGetOK, error)
+
+	StorageProxyReadRepairRepairedBackgroundGet(params *StorageProxyReadRepairRepairedBackgroundGetParams) (*StorageProxyReadRepairRepairedBackgroundGetOK, error)
+
+	StorageProxyReadRepairRepairedBlockingGet(params *StorageProxyReadRepairRepairedBlockingGetParams) (*StorageProxyReadRepairRepairedBlockingGetOK, error)
+
+	StorageProxyReadRPCTimeoutGet(params *StorageProxyReadRPCTimeoutGetParams) (*StorageProxyReadRPCTimeoutGetOK, error)
+
+	StorageProxyReadRPCTimeoutPost(params *StorageProxyReadRPCTimeoutPostParams) (*StorageProxyReadRPCTimeoutPostOK, error)
+
+	StorageProxyReloadTriggerClassesPost(params *StorageProxyReloadTriggerClassesPostParams) (*StorageProxyReloadTriggerClassesPostOK, error)
+
+	StorageProxyRPCTimeoutGet(params *StorageProxyRPCTimeoutGetParams) (*StorageProxyRPCTimeoutGetOK, error)
+
+	StorageProxyRPCTimeoutPost(params *StorageProxyRPCTimeoutPostParams) (*StorageProxyRPCTimeoutPostOK, error)
+
+	StorageProxySchemaVersionsGet(params *StorageProxySchemaVersionsGetParams) (*StorageProxySchemaVersionsGetOK, error)
+
+	StorageProxyTotalHintsGet(params *StorageProxyTotalHintsGetParams) (*StorageProxyTotalHintsGetOK, error)
+
+	StorageProxyTruncateRPCTimeoutGet(params *StorageProxyTruncateRPCTimeoutGetParams) (*StorageProxyTruncateRPCTimeoutGetOK, error)
+
+	StorageProxyTruncateRPCTimeoutPost(params *StorageProxyTruncateRPCTimeoutPostParams) (*StorageProxyTruncateRPCTimeoutPostOK, error)
+
+	StorageProxyWriteRPCTimeoutGet(params *StorageProxyWriteRPCTimeoutGetParams) (*StorageProxyWriteRPCTimeoutGetOK, error)
+
+	StorageProxyWriteRPCTimeoutPost(params *StorageProxyWriteRPCTimeoutPostParams) (*StorageProxyWriteRPCTimeoutPostOK, error)
+
+	StorageServiceActiveRepairGet(params *StorageServiceActiveRepairGetParams) (*StorageServiceActiveRepairGetOK, error)
+
+	StorageServiceAutoCompactionByKeyspaceDelete(params *StorageServiceAutoCompactionByKeyspaceDeleteParams) (*StorageServiceAutoCompactionByKeyspaceDeleteOK, error)
+
+	StorageServiceAutoCompactionByKeyspacePost(params *StorageServiceAutoCompactionByKeyspacePostParams) (*StorageServiceAutoCompactionByKeyspacePostOK, error)
+
+	StorageServiceBatchSizeFailureThresholdGet(params *StorageServiceBatchSizeFailureThresholdGetParams) (*StorageServiceBatchSizeFailureThresholdGetOK, error)
+
+	StorageServiceBatchSizeFailureThresholdPost(params *StorageServiceBatchSizeFailureThresholdPostParams) (*StorageServiceBatchSizeFailureThresholdPostOK, error)
+
+	StorageServiceBulkLoadAsyncByPathGet(params *StorageServiceBulkLoadAsyncByPathGetParams) (*StorageServiceBulkLoadAsyncByPathGetOK, error)
+
+	StorageServiceBulkLoadByPathPost(params *StorageServiceBulkLoadByPathPostParams) (*StorageServiceBulkLoadByPathPostOK, error)
+
+	StorageServiceClusterNameGet(params *StorageServiceClusterNameGetParams) (*StorageServiceClusterNameGetOK, error)
+
+	StorageServiceCommitlogGet(params *StorageServiceCommitlogGetParams) (*StorageServiceCommitlogGetOK, error)
+
+	StorageServiceCompactionThroughputGet(params *StorageServiceCompactionThroughputGetParams) (*StorageServiceCompactionThroughputGetOK, error)
+
+	StorageServiceCompactionThroughputPost(params *StorageServiceCompactionThroughputPostParams) (*StorageServiceCompactionThroughputPostOK, error)
+
+	StorageServiceDataFileLocationsGet(params *StorageServiceDataFileLocationsGetParams) (*StorageServiceDataFileLocationsGetOK, error)
+
+	StorageServiceDecommissionPost(params *StorageServiceDecommissionPostParams) (*StorageServiceDecommissionPostOK, error)
+
+	StorageServiceDeliverHintsPost(params *StorageServiceDeliverHintsPostParams) (*StorageServiceDeliverHintsPostOK, error)
+
+	StorageServiceDescribeRingByKeyspaceGet(params *StorageServiceDescribeRingByKeyspaceGetParams) (*StorageServiceDescribeRingByKeyspaceGetOK, error)
+
+	StorageServiceDrainGet(params *StorageServiceDrainGetParams) (*StorageServiceDrainGetOK, error)
+
+	StorageServiceDrainPost(params *StorageServiceDrainPostParams) (*StorageServiceDrainPostOK, error)
+
+	StorageServiceForceRemoveCompletionPost(params *StorageServiceForceRemoveCompletionPostParams) (*StorageServiceForceRemoveCompletionPostOK, error)
+
+	StorageServiceForceTerminatePost(params *StorageServiceForceTerminatePostParams) (*StorageServiceForceTerminatePostOK, error)
+
+	StorageServiceForceTerminateRepairPost(params *StorageServiceForceTerminateRepairPostParams) (*StorageServiceForceTerminateRepairPostOK, error)
+
+	StorageServiceGenerationNumberGet(params *StorageServiceGenerationNumberGetParams) (*StorageServiceGenerationNumberGetOK, error)
+
+	StorageServiceGossipingDelete(params *StorageServiceGossipingDeleteParams) (*StorageServiceGossipingDeleteOK, error)
+
+	StorageServiceGossipingGet(params *StorageServiceGossipingGetParams) (*StorageServiceGossipingGetOK, error)
+
+	StorageServiceGossipingPost(params *StorageServiceGossipingPostParams) (*StorageServiceGossipingPostOK, error)
+
+	StorageServiceHintedHandoffPost(params *StorageServiceHintedHandoffPostParams) (*StorageServiceHintedHandoffPostOK, error)
+
+	StorageServiceHostIDGet(params *StorageServiceHostIDGetParams) (*StorageServiceHostIDGetOK, error)
+
+	StorageServiceHostidLocalGet(params *StorageServiceHostidLocalGetParams) (*StorageServiceHostidLocalGetOK, error)
+
+	StorageServiceIncrementalBackupsGet(params *StorageServiceIncrementalBackupsGetParams) (*StorageServiceIncrementalBackupsGetOK, error)
+
+	StorageServiceIncrementalBackupsPost(params *StorageServiceIncrementalBackupsPostParams) (*StorageServiceIncrementalBackupsPostOK, error)
+
+	StorageServiceIsInitializedGet(params *StorageServiceIsInitializedGetParams) (*StorageServiceIsInitializedGetOK, error)
+
+	StorageServiceIsStartingGet(params *StorageServiceIsStartingGetParams) (*StorageServiceIsStartingGetOK, error)
+
+	StorageServiceJoinRingGet(params *StorageServiceJoinRingGetParams) (*StorageServiceJoinRingGetOK, error)
+
+	StorageServiceJoinRingPost(params *StorageServiceJoinRingPostParams) (*StorageServiceJoinRingPostOK, error)
+
+	StorageServiceKeyspaceCleanupByKeyspacePost(params *StorageServiceKeyspaceCleanupByKeyspacePostParams) (*StorageServiceKeyspaceCleanupByKeyspacePostOK, error)
+
+	StorageServiceKeyspaceCompactionByKeyspacePost(params *StorageServiceKeyspaceCompactionByKeyspacePostParams) (*StorageServiceKeyspaceCompactionByKeyspacePostOK, error)
+
+	StorageServiceKeyspaceFlushByKeyspacePost(params *StorageServiceKeyspaceFlushByKeyspacePostParams) (*StorageServiceKeyspaceFlushByKeyspacePostOK, error)
+
+	StorageServiceKeyspaceScrubByKeyspaceGet(params *StorageServiceKeyspaceScrubByKeyspaceGetParams) (*StorageServiceKeyspaceScrubByKeyspaceGetOK, error)
+
+	StorageServiceKeyspaceUpgradeSstablesByKeyspaceGet(params *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) (*StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK, error)
+
+	StorageServiceKeyspacesGet(params *StorageServiceKeyspacesGetParams) (*StorageServiceKeyspacesGetOK, error)
+
+	StorageServiceLoadGet(params *StorageServiceLoadGetParams) (*StorageServiceLoadGetOK, error)
+
+	StorageServiceLoadMapGet(params *StorageServiceLoadMapGetParams) (*StorageServiceLoadMapGetOK, error)
+
+	StorageServiceLoggingLevelGet(params *StorageServiceLoggingLevelGetParams) (*StorageServiceLoggingLevelGetOK, error)
+
+	StorageServiceLoggingLevelPost(params *StorageServiceLoggingLevelPostParams) (*StorageServiceLoggingLevelPostOK, error)
+
+	StorageServiceMetricsExceptionsGet(params *StorageServiceMetricsExceptionsGetParams) (*StorageServiceMetricsExceptionsGetOK, error)
+
+	StorageServiceMetricsHintsInProgressGet(params *StorageServiceMetricsHintsInProgressGetParams) (*StorageServiceMetricsHintsInProgressGetOK, error)
+
+	StorageServiceMetricsLoadGet(params *StorageServiceMetricsLoadGetParams) (*StorageServiceMetricsLoadGetOK, error)
+
+	StorageServiceMetricsTotalHintsGet(params *StorageServiceMetricsTotalHintsGetParams) (*StorageServiceMetricsTotalHintsGetOK, error)
+
+	StorageServiceMovePost(params *StorageServiceMovePostParams) (*StorageServiceMovePostOK, error)
+
+	StorageServiceNativeTransportDelete(params *StorageServiceNativeTransportDeleteParams) (*StorageServiceNativeTransportDeleteOK, error)
+
+	StorageServiceNativeTransportGet(params *StorageServiceNativeTransportGetParams) (*StorageServiceNativeTransportGetOK, error)
+
+	StorageServiceNativeTransportPost(params *StorageServiceNativeTransportPostParams) (*StorageServiceNativeTransportPostOK, error)
+
+	StorageServiceNaturalEndpointsByKeyspaceGet(params *StorageServiceNaturalEndpointsByKeyspaceGetParams) (*StorageServiceNaturalEndpointsByKeyspaceGetOK, error)
+
+	StorageServiceNodesJoiningGet(params *StorageServiceNodesJoiningGetParams) (*StorageServiceNodesJoiningGetOK, error)
+
+	StorageServiceNodesLeavingGet(params *StorageServiceNodesLeavingGetParams) (*StorageServiceNodesLeavingGetOK, error)
+
+	StorageServiceNodesMovingGet(params *StorageServiceNodesMovingGetParams) (*StorageServiceNodesMovingGetOK, error)
+
+	StorageServiceOperationModeGet(params *StorageServiceOperationModeGetParams) (*StorageServiceOperationModeGetOK, error)
+
+	StorageServiceOwnershipByKeyspaceGet(params *StorageServiceOwnershipByKeyspaceGetParams) (*StorageServiceOwnershipByKeyspaceGetOK, error)
+
+	StorageServiceOwnershipGet(params *StorageServiceOwnershipGetParams) (*StorageServiceOwnershipGetOK, error)
+
+	StorageServicePartitionerNameGet(params *StorageServicePartitionerNameGetParams) (*StorageServicePartitionerNameGetOK, error)
+
+	StorageServicePendingRangeByKeyspaceGet(params *StorageServicePendingRangeByKeyspaceGetParams) (*StorageServicePendingRangeByKeyspaceGetOK, error)
+
+	StorageServiceRangeToEndpointMapByKeyspaceGet(params *StorageServiceRangeToEndpointMapByKeyspaceGetParams) (*StorageServiceRangeToEndpointMapByKeyspaceGetOK, error)
+
+	StorageServiceRebuildPost(params *StorageServiceRebuildPostParams) (*StorageServiceRebuildPostOK, error)
+
+	StorageServiceReleaseVersionGet(params *StorageServiceReleaseVersionGetParams) (*StorageServiceReleaseVersionGetOK, error)
+
+	StorageServiceRelocalSchemaPost(params *StorageServiceRelocalSchemaPostParams) (*StorageServiceRelocalSchemaPostOK, error)
+
+	StorageServiceRemovalStatusGet(params *StorageServiceRemovalStatusGetParams) (*StorageServiceRemovalStatusGetOK, error)
+
+	StorageServiceRemoveNodePost(params *StorageServiceRemoveNodePostParams) (*StorageServiceRemoveNodePostOK, error)
+
+	StorageServiceRepairAsyncByKeyspaceGet(params *StorageServiceRepairAsyncByKeyspaceGetParams) (*StorageServiceRepairAsyncByKeyspaceGetOK, error)
+
+	StorageServiceRepairAsyncByKeyspacePost(params *StorageServiceRepairAsyncByKeyspacePostParams) (*StorageServiceRepairAsyncByKeyspacePostOK, error)
+
+	StorageServiceRepairStatus(params *StorageServiceRepairStatusParams) (*StorageServiceRepairStatusOK, error)
+
+	StorageServiceRescheduleFailedDeletionsPost(params *StorageServiceRescheduleFailedDeletionsPostParams) (*StorageServiceRescheduleFailedDeletionsPostOK, error)
+
+	StorageServiceRPCServerDelete(params *StorageServiceRPCServerDeleteParams) (*StorageServiceRPCServerDeleteOK, error)
+
+	StorageServiceRPCServerGet(params *StorageServiceRPCServerGetParams) (*StorageServiceRPCServerGetOK, error)
+
+	StorageServiceRPCServerPost(params *StorageServiceRPCServerPostParams) (*StorageServiceRPCServerPostOK, error)
+
+	StorageServiceSampleKeyRangeGet(params *StorageServiceSampleKeyRangeGetParams) (*StorageServiceSampleKeyRangeGetOK, error)
+
+	StorageServiceSavedCachesLocationGet(params *StorageServiceSavedCachesLocationGetParams) (*StorageServiceSavedCachesLocationGetOK, error)
+
+	StorageServiceSchemaVersionGet(params *StorageServiceSchemaVersionGetParams) (*StorageServiceSchemaVersionGetOK, error)
+
+	StorageServiceScyllaReleaseVersionGet(params *StorageServiceScyllaReleaseVersionGetParams) (*StorageServiceScyllaReleaseVersionGetOK, error)
+
+	StorageServiceSlowQueryGet(params *StorageServiceSlowQueryGetParams) (*StorageServiceSlowQueryGetOK, error)
+
+	StorageServiceSlowQueryPost(params *StorageServiceSlowQueryPostParams) (*StorageServiceSlowQueryPostOK, error)
+
+	StorageServiceSnapshotsDelete(params *StorageServiceSnapshotsDeleteParams) (*StorageServiceSnapshotsDeleteOK, error)
+
+	StorageServiceSnapshotsGet(params *StorageServiceSnapshotsGetParams) (*StorageServiceSnapshotsGetOK, error)
+
+	StorageServiceSnapshotsPost(params *StorageServiceSnapshotsPostParams) (*StorageServiceSnapshotsPostOK, error)
+
+	StorageServiceSnapshotsSizeTrueGet(params *StorageServiceSnapshotsSizeTrueGetParams) (*StorageServiceSnapshotsSizeTrueGetOK, error)
+
+	StorageServiceSstablesByKeyspacePost(params *StorageServiceSstablesByKeyspacePostParams) (*StorageServiceSstablesByKeyspacePostOK, error)
+
+	StorageServiceStopDaemonPost(params *StorageServiceStopDaemonPostParams) (*StorageServiceStopDaemonPostOK, error)
+
+	StorageServiceStreamThroughputGet(params *StorageServiceStreamThroughputGetParams) (*StorageServiceStreamThroughputGetOK, error)
+
StorageServiceStreamThroughputPost(params *StorageServiceStreamThroughputPostParams) (*StorageServiceStreamThroughputPostOK, error) + + StorageServiceTokensByEndpointGet(params *StorageServiceTokensByEndpointGetParams) (*StorageServiceTokensByEndpointGetOK, error) + + StorageServiceTokensEndpointGet(params *StorageServiceTokensEndpointGetParams) (*StorageServiceTokensEndpointGetOK, error) + + StorageServiceTokensGet(params *StorageServiceTokensGetParams) (*StorageServiceTokensGetOK, error) + + StorageServiceTombstoneFailureThresholdGet(params *StorageServiceTombstoneFailureThresholdGetParams) (*StorageServiceTombstoneFailureThresholdGetOK, error) + + StorageServiceTombstoneFailureThresholdPost(params *StorageServiceTombstoneFailureThresholdPostParams) (*StorageServiceTombstoneFailureThresholdPostOK, error) + + StorageServiceTombstoneWarnThresholdGet(params *StorageServiceTombstoneWarnThresholdGetParams) (*StorageServiceTombstoneWarnThresholdGetOK, error) + + StorageServiceTombstoneWarnThresholdPost(params *StorageServiceTombstoneWarnThresholdPostParams) (*StorageServiceTombstoneWarnThresholdPostOK, error) + + StorageServiceTraceProbabilityGet(params *StorageServiceTraceProbabilityGetParams) (*StorageServiceTraceProbabilityGetOK, error) + + StorageServiceTraceProbabilityPost(params *StorageServiceTraceProbabilityPostParams) (*StorageServiceTraceProbabilityPostOK, error) + + StorageServiceTruncateByKeyspacePost(params *StorageServiceTruncateByKeyspacePostParams) (*StorageServiceTruncateByKeyspacePostOK, error) + + StorageServiceUpdateSnitchPost(params *StorageServiceUpdateSnitchPostParams) (*StorageServiceUpdateSnitchPostOK, error) + + StorageServiceViewBuildStatusesByKeyspaceAndViewGet(params *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) (*StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK, error) + + StreamManagerGet(params *StreamManagerGetParams) (*StreamManagerGetOK, error) + + StreamManagerMetricsIncomingByPeerGet(params *StreamManagerMetricsIncomingByPeerGetParams) (*StreamManagerMetricsIncomingByPeerGetOK, error) + + StreamManagerMetricsIncomingGet(params *StreamManagerMetricsIncomingGetParams) (*StreamManagerMetricsIncomingGetOK, error) + + StreamManagerMetricsOutboundGet(params *StreamManagerMetricsOutboundGetParams) (*StreamManagerMetricsOutboundGetOK, error) + + StreamManagerMetricsOutgoingByPeerGet(params *StreamManagerMetricsOutgoingByPeerGetParams) (*StreamManagerMetricsOutgoingByPeerGetOK, error) + + StreamManagerMetricsOutgoingGet(params *StreamManagerMetricsOutgoingGetParams) (*StreamManagerMetricsOutgoingGetOK, error) + + SystemLoggerByNameGet(params *SystemLoggerByNameGetParams) (*SystemLoggerByNameGetOK, error) + + SystemLoggerByNamePost(params *SystemLoggerByNamePostParams) (*SystemLoggerByNamePostOK, error) + + SystemLoggerGet(params *SystemLoggerGetParams) (*SystemLoggerGetOK, error) + + SystemLoggerPost(params *SystemLoggerPostParams) (*SystemLoggerPostOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +CacheServiceCounterCacheCapacityPost sets counter cache capacity in mb + +set counter cache capacity in mb +*/ +func (a *Client) CacheServiceCounterCacheCapacityPost(params *CacheServiceCounterCacheCapacityPostParams) (*CacheServiceCounterCacheCapacityPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceCounterCacheCapacityPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceCounterCacheCapacityPost", + Method: "POST", + 
PathPattern: "/cache_service/counter_cache_capacity", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceCounterCacheCapacityPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceCounterCacheCapacityPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceCounterCacheCapacityPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceCounterCacheKeysToSaveGet gets counter cache keys to save + +get counter cache keys to save +*/ +func (a *Client) CacheServiceCounterCacheKeysToSaveGet(params *CacheServiceCounterCacheKeysToSaveGetParams) (*CacheServiceCounterCacheKeysToSaveGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceCounterCacheKeysToSaveGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceCounterCacheKeysToSaveGet", + Method: "GET", + PathPattern: "/cache_service/counter_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceCounterCacheKeysToSaveGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceCounterCacheKeysToSaveGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceCounterCacheKeysToSaveGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceCounterCacheKeysToSavePost sets counter cache keys to save + +set counter cache keys to save +*/ +func (a *Client) CacheServiceCounterCacheKeysToSavePost(params *CacheServiceCounterCacheKeysToSavePostParams) (*CacheServiceCounterCacheKeysToSavePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceCounterCacheKeysToSavePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceCounterCacheKeysToSavePost", + Method: "POST", + PathPattern: "/cache_service/counter_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceCounterCacheKeysToSavePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceCounterCacheKeysToSavePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceCounterCacheKeysToSavePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceCounterCacheSavePeriodGet gets counter cache save period in seconds + +get counter cache save period in seconds +*/ +func (a *Client) 
CacheServiceCounterCacheSavePeriodGet(params *CacheServiceCounterCacheSavePeriodGetParams) (*CacheServiceCounterCacheSavePeriodGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceCounterCacheSavePeriodGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceCounterCacheSavePeriodGet", + Method: "GET", + PathPattern: "/cache_service/counter_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceCounterCacheSavePeriodGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceCounterCacheSavePeriodGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceCounterCacheSavePeriodGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceCounterCacheSavePeriodPost sets counter cache save period in seconds + +set counter cache save period in seconds +*/ +func (a *Client) CacheServiceCounterCacheSavePeriodPost(params *CacheServiceCounterCacheSavePeriodPostParams) (*CacheServiceCounterCacheSavePeriodPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceCounterCacheSavePeriodPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceCounterCacheSavePeriodPost", + Method: "POST", + PathPattern: "/cache_service/counter_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceCounterCacheSavePeriodPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceCounterCacheSavePeriodPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceCounterCacheSavePeriodPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceInvalidateCounterCachePost invalidates counter cache + +invalidate counter cache +*/ +func (a *Client) CacheServiceInvalidateCounterCachePost(params *CacheServiceInvalidateCounterCachePostParams) (*CacheServiceInvalidateCounterCachePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceInvalidateCounterCachePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceInvalidateCounterCachePost", + Method: "POST", + PathPattern: "/cache_service/invalidate_counter_cache", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceInvalidateCounterCachePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceInvalidateCounterCachePostOK) + if ok { + return success, nil + } + // unexpected success response + 
unexpectedSuccess := result.(*CacheServiceInvalidateCounterCachePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceInvalidateKeyCachePost invalidates key cache + +invalidate the key cache; for use after invalidating row cache +*/ +func (a *Client) CacheServiceInvalidateKeyCachePost(params *CacheServiceInvalidateKeyCachePostParams) (*CacheServiceInvalidateKeyCachePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceInvalidateKeyCachePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceInvalidateKeyCachePost", + Method: "POST", + PathPattern: "/cache_service/invalidate_key_cache", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceInvalidateKeyCachePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceInvalidateKeyCachePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceInvalidateKeyCachePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceKeyCacheCapacityPost sets key cache capacity in mb + +set key cache capacity in mb +*/ +func (a *Client) CacheServiceKeyCacheCapacityPost(params *CacheServiceKeyCacheCapacityPostParams) (*CacheServiceKeyCacheCapacityPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceKeyCacheCapacityPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceKeyCacheCapacityPost", + Method: "POST", + PathPattern: "/cache_service/key_cache_capacity", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceKeyCacheCapacityPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceKeyCacheCapacityPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceKeyCacheCapacityPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceKeyCacheKeysToSaveGet gets key cache keys to save + +get key cache keys to save +*/ +func (a *Client) CacheServiceKeyCacheKeysToSaveGet(params *CacheServiceKeyCacheKeysToSaveGetParams) (*CacheServiceKeyCacheKeysToSaveGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceKeyCacheKeysToSaveGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceKeyCacheKeysToSaveGet", + Method: "GET", + PathPattern: "/cache_service/key_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceKeyCacheKeysToSaveGetReader{formats: a.formats}, + 
Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceKeyCacheKeysToSaveGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceKeyCacheKeysToSaveGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceKeyCacheKeysToSavePost sets key cache keys to save + +set key cache keys to save +*/ +func (a *Client) CacheServiceKeyCacheKeysToSavePost(params *CacheServiceKeyCacheKeysToSavePostParams) (*CacheServiceKeyCacheKeysToSavePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceKeyCacheKeysToSavePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceKeyCacheKeysToSavePost", + Method: "POST", + PathPattern: "/cache_service/key_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceKeyCacheKeysToSavePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceKeyCacheKeysToSavePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceKeyCacheKeysToSavePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceKeyCacheSavePeriodGet gets key cache save period in seconds + +get key cache save period in seconds +*/ +func (a *Client) CacheServiceKeyCacheSavePeriodGet(params *CacheServiceKeyCacheSavePeriodGetParams) (*CacheServiceKeyCacheSavePeriodGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceKeyCacheSavePeriodGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceKeyCacheSavePeriodGet", + Method: "GET", + PathPattern: "/cache_service/key_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceKeyCacheSavePeriodGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceKeyCacheSavePeriodGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceKeyCacheSavePeriodGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceKeyCacheSavePeriodPost sets key cache save period in seconds + +set key cache save period in seconds +*/ +func (a *Client) CacheServiceKeyCacheSavePeriodPost(params *CacheServiceKeyCacheSavePeriodPostParams) (*CacheServiceKeyCacheSavePeriodPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceKeyCacheSavePeriodPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceKeyCacheSavePeriodPost", + Method: "POST", + PathPattern: 
"/cache_service/key_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceKeyCacheSavePeriodPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceKeyCacheSavePeriodPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceKeyCacheSavePeriodPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterCapacityGet gets counter capacity + +Get counter capacity +*/ +func (a *Client) CacheServiceMetricsCounterCapacityGet(params *CacheServiceMetricsCounterCapacityGetParams) (*CacheServiceMetricsCounterCapacityGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsCounterCapacityGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterCapacityGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/capacity", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterCapacityGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterCapacityGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterCapacityGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterEntriesGet gets counter entries + +Get counter entries +*/ +func (a *Client) CacheServiceMetricsCounterEntriesGet(params *CacheServiceMetricsCounterEntriesGetParams) (*CacheServiceMetricsCounterEntriesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsCounterEntriesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterEntriesGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterEntriesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterEntriesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterEntriesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterHitRateGet gets counter hit rate + +Get counter hit rate +*/ +func (a *Client) CacheServiceMetricsCounterHitRateGet(params *CacheServiceMetricsCounterHitRateGetParams) (*CacheServiceMetricsCounterHitRateGetOK, error) { + // TODO: Validate the 
params before sending + if params == nil { + params = NewCacheServiceMetricsCounterHitRateGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterHitRateGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/hit_rate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterHitRateGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterHitRateGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterHitRateGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterHitsGet gets counter hits + +Get counter hits +*/ +func (a *Client) CacheServiceMetricsCounterHitsGet(params *CacheServiceMetricsCounterHitsGetParams) (*CacheServiceMetricsCounterHitsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsCounterHitsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterHitsGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/hits", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterHitsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterHitsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterHitsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterHitsMovingAvrageGet gets counter hits moving average + +Get counter hits moving average +*/ +func (a *Client) CacheServiceMetricsCounterHitsMovingAvrageGet(params *CacheServiceMetricsCounterHitsMovingAvrageGetParams) (*CacheServiceMetricsCounterHitsMovingAvrageGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsCounterHitsMovingAvrageGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterHitsMovingAvrageGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/hits_moving_avrage", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterHitsMovingAvrageGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterHitsMovingAvrageGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterHitsMovingAvrageGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error",
unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterRequestsGet gets counter requests + +Get counter requests +*/ +func (a *Client) CacheServiceMetricsCounterRequestsGet(params *CacheServiceMetricsCounterRequestsGetParams) (*CacheServiceMetricsCounterRequestsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsCounterRequestsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterRequestsGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/requests", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterRequestsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterRequestsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterRequestsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterRequestsMovingAvrageGet gets counter requests moving average + +Get counter requests moving average +*/ +func (a *Client) CacheServiceMetricsCounterRequestsMovingAvrageGet(params *CacheServiceMetricsCounterRequestsMovingAvrageGetParams) (*CacheServiceMetricsCounterRequestsMovingAvrageGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsCounterRequestsMovingAvrageGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterRequestsMovingAvrageGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/requests_moving_avrage", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterRequestsMovingAvrageGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterRequestsMovingAvrageGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterRequestsMovingAvrageGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsCounterSizeGet gets counter size + +Get counter cache weighted size +*/ +func (a *Client) CacheServiceMetricsCounterSizeGet(params *CacheServiceMetricsCounterSizeGetParams) (*CacheServiceMetricsCounterSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsCounterSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsCounterSizeGet", + Method: "GET", + PathPattern: "/cache_service/metrics/counter/size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsCounterSizeGetReader{formats: a.formats}, + Context: params.Context, +
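+ // Context and Client are taken from the caller-supplied params, so each
+ // request can carry its own deadline/cancellation and a custom *http.Client
+ // (editorial note, not generated output).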
Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsCounterSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsCounterSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeyCapacityGet gets key capacity + +Get key capacity +*/ +func (a *Client) CacheServiceMetricsKeyCapacityGet(params *CacheServiceMetricsKeyCapacityGetParams) (*CacheServiceMetricsKeyCapacityGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeyCapacityGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeyCapacityGet", + Method: "GET", + PathPattern: "/cache_service/metrics/key/capacity", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsKeyCapacityGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeyCapacityGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeyCapacityGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeyEntriesGet gets key entries + +Get key entries +*/ +func (a *Client) CacheServiceMetricsKeyEntriesGet(params *CacheServiceMetricsKeyEntriesGetParams) (*CacheServiceMetricsKeyEntriesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeyEntriesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeyEntriesGet", + Method: "GET", + PathPattern: "/cache_service/metrics/key/entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsKeyEntriesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeyEntriesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeyEntriesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeyHitRateGet gets key hit rate + +Get key hit rate +*/ +func (a *Client) CacheServiceMetricsKeyHitRateGet(params *CacheServiceMetricsKeyHitRateGetParams) (*CacheServiceMetricsKeyHitRateGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeyHitRateGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeyHitRateGet", + Method: "GET", + PathPattern: "/cache_service/metrics/key/hit_rate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + 
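+ // Editorial sketch, not generated output: a minimal way to call one of these
+ // accessors, assuming the usual go-swagger wiring (the package-level New
+ // constructor, a transport from github.com/go-openapi/runtime/client imported
+ // as httptransport, and strfmt.Default); the host and variable names below
+ // are illustrative only:
+ //
+ //	transport := httptransport.New("localhost:10000", "/", []string{"http"})
+ //	client := New(transport, strfmt.Default)
+ //	resp, err := client.CacheServiceMetricsKeyHitRateGet(NewCacheServiceMetricsKeyHitRateGetParams())
+ //	if err != nil {
+ //		return err // non-2xx responses come back as errors from Submit
+ //	}
+ //	_ = resp.Payload // decoded body of the 200 response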
Params: params, + Reader: &CacheServiceMetricsKeyHitRateGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeyHitRateGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeyHitRateGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeyHitsGet gets key hits + +Get key hits +*/ +func (a *Client) CacheServiceMetricsKeyHitsGet(params *CacheServiceMetricsKeyHitsGetParams) (*CacheServiceMetricsKeyHitsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeyHitsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeyHitsGet", + Method: "GET", + PathPattern: "/cache_service/metrics/key/hits", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsKeyHitsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeyHitsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeyHitsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeyHitsMovingAvrageGet gets key hits moving average + +Get key hits moving average +*/ +func (a *Client) CacheServiceMetricsKeyHitsMovingAvrageGet(params *CacheServiceMetricsKeyHitsMovingAvrageGetParams) (*CacheServiceMetricsKeyHitsMovingAvrageGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeyHitsMovingAvrageGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeyHitsMovingAvrageGet", + Method: "GET", + PathPattern: "/cache_service/metrics/key/hits_moving_avrage", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsKeyHitsMovingAvrageGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeyHitsMovingAvrageGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeyHitsMovingAvrageGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeyRequestsGet gets key requests + +Get key requests +*/ +func (a *Client) CacheServiceMetricsKeyRequestsGet(params *CacheServiceMetricsKeyRequestsGetParams) (*CacheServiceMetricsKeyRequestsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeyRequestsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeyRequestsGet", + Method: "GET", +
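+ // PathPattern, the media types, and Schemes below mirror the swagger spec
+ // for this operation, and Reader decodes the HTTP response into the typed
+ // OK/Default values that the type assertion afterwards distinguishes
+ // (editorial note, not generated output).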
PathPattern: "/cache_service/metrics/key/requests", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsKeyRequestsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeyRequestsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeyRequestsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeyRequestsMovingAvrageGet gets key requests moving avrage + +Get key requests moving avrage +*/ +func (a *Client) CacheServiceMetricsKeyRequestsMovingAvrageGet(params *CacheServiceMetricsKeyRequestsMovingAvrageGetParams) (*CacheServiceMetricsKeyRequestsMovingAvrageGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeyRequestsMovingAvrageGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeyRequestsMovingAvrageGet", + Method: "GET", + PathPattern: "/cache_service/metrics/key/requests_moving_avrage", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsKeyRequestsMovingAvrageGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeyRequestsMovingAvrageGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeyRequestsMovingAvrageGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsKeySizeGet gets key size + +Get key cache waited size +*/ +func (a *Client) CacheServiceMetricsKeySizeGet(params *CacheServiceMetricsKeySizeGetParams) (*CacheServiceMetricsKeySizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsKeySizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsKeySizeGet", + Method: "GET", + PathPattern: "/cache_service/metrics/key/size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsKeySizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsKeySizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsKeySizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowCapacityGet gets row capacity + +Get row capacity +*/ +func (a *Client) CacheServiceMetricsRowCapacityGet(params *CacheServiceMetricsRowCapacityGetParams) (*CacheServiceMetricsRowCapacityGetOK, error) { + // 
TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowCapacityGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowCapacityGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/capacity", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowCapacityGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowCapacityGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowCapacityGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowEntriesGet gets row entries + +Get row entries +*/ +func (a *Client) CacheServiceMetricsRowEntriesGet(params *CacheServiceMetricsRowEntriesGetParams) (*CacheServiceMetricsRowEntriesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowEntriesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowEntriesGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowEntriesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowEntriesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowEntriesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowHitRateGet gets row hit rate + +Get row hit rate +*/ +func (a *Client) CacheServiceMetricsRowHitRateGet(params *CacheServiceMetricsRowHitRateGetParams) (*CacheServiceMetricsRowHitRateGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowHitRateGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowHitRateGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/hit_rate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowHitRateGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowHitRateGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowHitRateGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowHitsGet gets row hits + +Get row hits +*/ +func (a *Client) CacheServiceMetricsRowHitsGet(params 
*CacheServiceMetricsRowHitsGetParams) (*CacheServiceMetricsRowHitsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowHitsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowHitsGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/hits", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowHitsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowHitsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowHitsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowHitsMovingAvrageGet gets row hits moving average + +Get row hits moving average +*/ +func (a *Client) CacheServiceMetricsRowHitsMovingAvrageGet(params *CacheServiceMetricsRowHitsMovingAvrageGetParams) (*CacheServiceMetricsRowHitsMovingAvrageGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowHitsMovingAvrageGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowHitsMovingAvrageGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/hits_moving_avrage", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowHitsMovingAvrageGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowHitsMovingAvrageGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowHitsMovingAvrageGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowRequestsGet gets row requests + +Get row requests +*/ +func (a *Client) CacheServiceMetricsRowRequestsGet(params *CacheServiceMetricsRowRequestsGetParams) (*CacheServiceMetricsRowRequestsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowRequestsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowRequestsGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/requests", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowRequestsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowRequestsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowRequestsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response
in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowRequestsMovingAvrageGet gets row requests moving avrage + +Get row requests moving avrage +*/ +func (a *Client) CacheServiceMetricsRowRequestsMovingAvrageGet(params *CacheServiceMetricsRowRequestsMovingAvrageGetParams) (*CacheServiceMetricsRowRequestsMovingAvrageGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowRequestsMovingAvrageGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowRequestsMovingAvrageGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/requests_moving_avrage", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowRequestsMovingAvrageGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowRequestsMovingAvrageGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowRequestsMovingAvrageGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceMetricsRowSizeGet gets row size + +Get row cache waited size +*/ +func (a *Client) CacheServiceMetricsRowSizeGet(params *CacheServiceMetricsRowSizeGetParams) (*CacheServiceMetricsRowSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceMetricsRowSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceMetricsRowSizeGet", + Method: "GET", + PathPattern: "/cache_service/metrics/row/size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceMetricsRowSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceMetricsRowSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceMetricsRowSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceRowCacheCapacityPost sets row cache capacity in mb + +set row cache capacity in mb +*/ +func (a *Client) CacheServiceRowCacheCapacityPost(params *CacheServiceRowCacheCapacityPostParams) (*CacheServiceRowCacheCapacityPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceRowCacheCapacityPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceRowCacheCapacityPost", + Method: "POST", + PathPattern: "/cache_service/row_cache_capacity", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceRowCacheCapacityPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*CacheServiceRowCacheCapacityPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceRowCacheCapacityPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceRowCacheKeysToSaveGet gets row cache keys to save + +get row cache keys to save +*/ +func (a *Client) CacheServiceRowCacheKeysToSaveGet(params *CacheServiceRowCacheKeysToSaveGetParams) (*CacheServiceRowCacheKeysToSaveGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceRowCacheKeysToSaveGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceRowCacheKeysToSaveGet", + Method: "GET", + PathPattern: "/cache_service/row_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceRowCacheKeysToSaveGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceRowCacheKeysToSaveGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceRowCacheKeysToSaveGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceRowCacheKeysToSavePost sets row cache keys to save + +set row cache keys to save +*/ +func (a *Client) CacheServiceRowCacheKeysToSavePost(params *CacheServiceRowCacheKeysToSavePostParams) (*CacheServiceRowCacheKeysToSavePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceRowCacheKeysToSavePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceRowCacheKeysToSavePost", + Method: "POST", + PathPattern: "/cache_service/row_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceRowCacheKeysToSavePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceRowCacheKeysToSavePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceRowCacheKeysToSavePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceRowCacheSavePeriodGet gets row cache save period in seconds + +get row cache save period in seconds +*/ +func (a *Client) CacheServiceRowCacheSavePeriodGet(params *CacheServiceRowCacheSavePeriodGetParams) (*CacheServiceRowCacheSavePeriodGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceRowCacheSavePeriodGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceRowCacheSavePeriodGet", + Method: "GET", + PathPattern: "/cache_service/row_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: 
[]string{"http"}, + Params: params, + Reader: &CacheServiceRowCacheSavePeriodGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceRowCacheSavePeriodGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceRowCacheSavePeriodGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceRowCacheSavePeriodPost sets row cache save period in seconds + +set row cache save period in seconds +*/ +func (a *Client) CacheServiceRowCacheSavePeriodPost(params *CacheServiceRowCacheSavePeriodPostParams) (*CacheServiceRowCacheSavePeriodPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceRowCacheSavePeriodPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceRowCacheSavePeriodPost", + Method: "POST", + PathPattern: "/cache_service/row_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceRowCacheSavePeriodPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceRowCacheSavePeriodPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceRowCacheSavePeriodPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CacheServiceSaveCachesPost saves caches + +save row and key caches +*/ +func (a *Client) CacheServiceSaveCachesPost(params *CacheServiceSaveCachesPostParams) (*CacheServiceSaveCachesPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCacheServiceSaveCachesPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CacheServiceSaveCachesPost", + Method: "POST", + PathPattern: "/cache_service/save_caches", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CacheServiceSaveCachesPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CacheServiceSaveCachesPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CacheServiceSaveCachesPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CollectdByPluginidGet gets collectd + +Get a collectd value +*/ +func (a *Client) CollectdByPluginidGet(params *CollectdByPluginidGetParams) (*CollectdByPluginidGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCollectdByPluginidGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CollectdByPluginidGet", + Method: "GET", + PathPattern: "/collectd/{pluginid}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: 
[]string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CollectdByPluginidGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CollectdByPluginidGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CollectdByPluginidGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CollectdByPluginidPost enables collectd + +Start reporting on one or more collectd metric +*/ +func (a *Client) CollectdByPluginidPost(params *CollectdByPluginidPostParams) (*CollectdByPluginidPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCollectdByPluginidPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CollectdByPluginidPost", + Method: "POST", + PathPattern: "/collectd/{pluginid}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CollectdByPluginidPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CollectdByPluginidPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CollectdByPluginidPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CollectdGet gets collectd items + +Get a list of all collectd metrics and their status +*/ +func (a *Client) CollectdGet(params *CollectdGetParams) (*CollectdGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCollectdGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CollectdGet", + Method: "GET", + PathPattern: "/collectd/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CollectdGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CollectdGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CollectdGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CollectdPost enables all collectd + +Enable or disable all collectd metrics +*/ +func (a *Client) CollectdPost(params *CollectdPostParams) (*CollectdPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCollectdPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CollectdPost", + Method: "POST", + PathPattern: "/collectd/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CollectdPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CollectdPostOK) + if ok { + return success, 
nil + } + // unexpected success response + unexpectedSuccess := result.(*CollectdPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyAutocompactionByNameDelete disables auto compaction + +Disable table auto compaction +*/ +func (a *Client) ColumnFamilyAutocompactionByNameDelete(params *ColumnFamilyAutocompactionByNameDeleteParams) (*ColumnFamilyAutocompactionByNameDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyAutocompactionByNameDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyAutocompactionByNameDelete", + Method: "DELETE", + PathPattern: "/column_family/autocompaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyAutocompactionByNameDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyAutocompactionByNameDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyAutocompactionByNameDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyAutocompactionByNameGet is auto compaction enabled + +check if the auto_compaction property is enabled for a given table +*/ +func (a *Client) ColumnFamilyAutocompactionByNameGet(params *ColumnFamilyAutocompactionByNameGetParams) (*ColumnFamilyAutocompactionByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyAutocompactionByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyAutocompactionByNameGet", + Method: "GET", + PathPattern: "/column_family/autocompaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyAutocompactionByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyAutocompactionByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyAutocompactionByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyAutocompactionByNamePost enables auto compaction + +Enable table auto compaction +*/ +func (a *Client) ColumnFamilyAutocompactionByNamePost(params *ColumnFamilyAutocompactionByNamePostParams) (*ColumnFamilyAutocompactionByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyAutocompactionByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyAutocompactionByNamePost", + Method: "POST", + PathPattern: "/column_family/autocompaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: 
[]string{"http"}, + Params: params, + Reader: &ColumnFamilyAutocompactionByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyAutocompactionByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyAutocompactionByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyBuiltIndexesByNameGet gets built indexes + +Returns a list of the names of the built column indexes for current store +*/ +func (a *Client) ColumnFamilyBuiltIndexesByNameGet(params *ColumnFamilyBuiltIndexesByNameGetParams) (*ColumnFamilyBuiltIndexesByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyBuiltIndexesByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyBuiltIndexesByNameGet", + Method: "GET", + PathPattern: "/column_family/built_indexes/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyBuiltIndexesByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyBuiltIndexesByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyBuiltIndexesByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyCompactionByNamePost sets compaction threshold + +Sets the minumum and maximum number of sstables in queue before compaction kicks off +*/ +func (a *Client) ColumnFamilyCompactionByNamePost(params *ColumnFamilyCompactionByNamePostParams) (*ColumnFamilyCompactionByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyCompactionByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyCompactionByNamePost", + Method: "POST", + PathPattern: "/column_family/compaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyCompactionByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyCompactionByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyCompactionByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyCompactionStrategyByNameGet gets compaction strategy class + +Gets the compaction strategy class name +*/ +func (a *Client) ColumnFamilyCompactionStrategyByNameGet(params *ColumnFamilyCompactionStrategyByNameGetParams) (*ColumnFamilyCompactionStrategyByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = 
NewColumnFamilyCompactionStrategyByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyCompactionStrategyByNameGet", + Method: "GET", + PathPattern: "/column_family/compaction_strategy/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyCompactionStrategyByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyCompactionStrategyByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyCompactionStrategyByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyCompactionStrategyByNamePost sets compaction strategy class + +Sets the compaction strategy by class name +*/ +func (a *Client) ColumnFamilyCompactionStrategyByNamePost(params *ColumnFamilyCompactionStrategyByNamePostParams) (*ColumnFamilyCompactionStrategyByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyCompactionStrategyByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyCompactionStrategyByNamePost", + Method: "POST", + PathPattern: "/column_family/compaction_strategy/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyCompactionStrategyByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyCompactionStrategyByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyCompactionStrategyByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyCompressionParametersByNameGet gets compression parameters + +get the compression parameters +*/ +func (a *Client) ColumnFamilyCompressionParametersByNameGet(params *ColumnFamilyCompressionParametersByNameGetParams) (*ColumnFamilyCompressionParametersByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyCompressionParametersByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyCompressionParametersByNameGet", + Method: "GET", + PathPattern: "/column_family/compression_parameters/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyCompressionParametersByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyCompressionParametersByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyCompressionParametersByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available 
as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyCompressionParametersByNamePost sets compression parameters + +Sets the compression parameters +*/ +func (a *Client) ColumnFamilyCompressionParametersByNamePost(params *ColumnFamilyCompressionParametersByNamePostParams) (*ColumnFamilyCompressionParametersByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyCompressionParametersByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyCompressionParametersByNamePost", + Method: "POST", + PathPattern: "/column_family/compression_parameters/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyCompressionParametersByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyCompressionParametersByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyCompressionParametersByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyCrcCheckChanceByNamePost sets crc check chance + +Set new crc check chance +*/ +func (a *Client) ColumnFamilyCrcCheckChanceByNamePost(params *ColumnFamilyCrcCheckChanceByNamePostParams) (*ColumnFamilyCrcCheckChanceByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyCrcCheckChanceByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyCrcCheckChanceByNamePost", + Method: "POST", + PathPattern: "/column_family/crc_check_chance/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyCrcCheckChanceByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyCrcCheckChanceByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyCrcCheckChanceByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyDroppableRatioByNameGet gets droppable tombstone ratio + +Get the ratio of droppable tombstones to real columns (and non-droppable tombstones) +*/ +func (a *Client) ColumnFamilyDroppableRatioByNameGet(params *ColumnFamilyDroppableRatioByNameGetParams) (*ColumnFamilyDroppableRatioByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyDroppableRatioByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyDroppableRatioByNameGet", + Method: "GET", + PathPattern: "/column_family/droppable_ratio/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyDroppableRatioByNameGetReader{formats: 
a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyDroppableRatioByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyDroppableRatioByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyEstimateKeysByNameGet estimates keys + +Get the estimated keys +*/ +func (a *Client) ColumnFamilyEstimateKeysByNameGet(params *ColumnFamilyEstimateKeysByNameGetParams) (*ColumnFamilyEstimateKeysByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyEstimateKeysByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyEstimateKeysByNameGet", + Method: "GET", + PathPattern: "/column_family/estimate_keys/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyEstimateKeysByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyEstimateKeysByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyEstimateKeysByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyGet gets column family + +Get a list of all column family info +*/ +func (a *Client) ColumnFamilyGet(params *ColumnFamilyGetParams) (*ColumnFamilyGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyGet", + Method: "GET", + PathPattern: "/column_family/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyLoadSstableByNamePost loads new sstables + +Scan through Keyspace/ColumnFamily's data directory to determine which SSTables should be loaded and load them +*/ +func (a *Client) ColumnFamilyLoadSstableByNamePost(params *ColumnFamilyLoadSstableByNamePostParams) (*ColumnFamilyLoadSstableByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyLoadSstableByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyLoadSstableByNamePost", + Method: "POST", + PathPattern: "/column_family/load/sstable/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params:
params, + Reader: &ColumnFamilyLoadSstableByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyLoadSstableByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyLoadSstableByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMajorCompactionByNamePost forces major compaction + +Force a major compaction of this column family +*/ +func (a *Client) ColumnFamilyMajorCompactionByNamePost(params *ColumnFamilyMajorCompactionByNamePostParams) (*ColumnFamilyMajorCompactionByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMajorCompactionByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMajorCompactionByNamePost", + Method: "POST", + PathPattern: "/column_family/major_compaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMajorCompactionByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMajorCompactionByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMajorCompactionByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMaximumCompactionByNameGet gets maximum compaction threshold + +get the maximum number of sstables in queue before compaction kicks off +*/ +func (a *Client) ColumnFamilyMaximumCompactionByNameGet(params *ColumnFamilyMaximumCompactionByNameGetParams) (*ColumnFamilyMaximumCompactionByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMaximumCompactionByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMaximumCompactionByNameGet", + Method: "GET", + PathPattern: "/column_family/maximum_compaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMaximumCompactionByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMaximumCompactionByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMaximumCompactionByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMaximumCompactionByNamePost sets maximum compaction threshold + +Sets the maximum number of sstables in queue before compaction kicks off +*/ +func (a *Client) ColumnFamilyMaximumCompactionByNamePost(params *ColumnFamilyMaximumCompactionByNamePostParams) (*ColumnFamilyMaximumCompactionByNamePostOK, error) { + // TODO: Validate the 
+/*
+ColumnFamilyMaximumCompactionByNamePost sets maximum compaction threshold
+
+Sets the maximum number of sstables in queue before compaction kicks off
+*/
+func (a *Client) ColumnFamilyMaximumCompactionByNamePost(params *ColumnFamilyMaximumCompactionByNamePostParams) (*ColumnFamilyMaximumCompactionByNamePostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMaximumCompactionByNamePostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMaximumCompactionByNamePost",
+		Method:             "POST",
+		PathPattern:        "/column_family/maximum_compaction/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMaximumCompactionByNamePostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMaximumCompactionByNamePostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMaximumCompactionByNamePostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGet gets cf all memtables live data size
+
+Get all of the column family active and not memtables live data size
+*/
+func (a *Client) ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGet(params *ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams) (*ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/all_memtables_live_data_size/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsAllMemtablesLiveDataSizeByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsAllMemtablesLiveDataSizeGet gets all cf all memtables live data size
+
+Get all memtables active and not of all column family live data size
+*/
+func (a *Client) ColumnFamilyMetricsAllMemtablesLiveDataSizeGet(params *ColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams) (*ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsAllMemtablesLiveDataSizeGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsAllMemtablesLiveDataSizeGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/all_memtables_live_data_size",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsAllMemtablesLiveDataSizeGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsAllMemtablesLiveDataSizeGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsAllMemtablesLiveDataSizeGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGet gets cf all memtables off heap size
+
+Get all of the column family active and not memtables off heap size
+*/
+func (a *Client) ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGet(params *ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams) (*ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/all_memtables_off_heap_size/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsAllMemtablesOffHeapSizeByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsAllMemtablesOffHeapSizeGet gets all cf all memtables off heap size
+
+Get all memtables active and not of all column family off heap size
+*/
+func (a *Client) ColumnFamilyMetricsAllMemtablesOffHeapSizeGet(params *ColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams) (*ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsAllMemtablesOffHeapSizeGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsAllMemtablesOffHeapSizeGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/all_memtables_off_heap_size",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsAllMemtablesOffHeapSizeGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsAllMemtablesOffHeapSizeGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsAllMemtablesOffHeapSizeGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGet gets cf all memtables on heap size
+
+Get all of the column family active and not memtables on heap size
+*/
+func (a *Client) ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGet(params *ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams) (*ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/all_memtables_on_heap_size/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsAllMemtablesOnHeapSizeByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsAllMemtablesOnHeapSizeGet gets all cf all memtables on heap size
+
+Get all memtables active and not of all column family on heap size
+*/
+func (a *Client) ColumnFamilyMetricsAllMemtablesOnHeapSizeGet(params *ColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams) (*ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsAllMemtablesOnHeapSizeGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsAllMemtablesOnHeapSizeGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/all_memtables_on_heap_size",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsAllMemtablesOnHeapSizeGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsAllMemtablesOnHeapSizeGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsAllMemtablesOnHeapSizeGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGet gets bloom filter disk space used
+
+Get bloom filter disk space used
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGet(params *ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams) (*ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_disk_space_used/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterDiskSpaceUsedByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterDiskSpaceUsedGet gets all bloom filter disk space used
+
+Get all bloom filter disk space used
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterDiskSpaceUsedGet(params *ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams) (*ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterDiskSpaceUsedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterDiskSpaceUsedGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_disk_space_used",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterDiskSpaceUsedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterFalsePositivesByNameGet gets bloom filter false positives
+
+Get bloom filter false positives
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterFalsePositivesByNameGet(params *ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams) (*ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterFalsePositivesByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterFalsePositivesByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_false_positives/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterFalsePositivesByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterFalsePositivesGet gets all bloom filter false positives
+
+Get all bloom filter false positives
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterFalsePositivesGet(params *ColumnFamilyMetricsBloomFilterFalsePositivesGetParams) (*ColumnFamilyMetricsBloomFilterFalsePositivesGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterFalsePositivesGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterFalsePositivesGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_false_positives",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterFalsePositivesGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterFalsePositivesGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterFalsePositivesGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterFalseRatioByNameGet gets bloom filter false ratio
+
+Get bloom filter false ratio
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterFalseRatioByNameGet(params *ColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams) (*ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterFalseRatioByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterFalseRatioByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_false_ratio/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterFalseRatioByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterFalseRatioByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterFalseRatioByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterFalseRatioGet gets all bloom filter false ratio
+
+Get all bloom filter false ratio
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterFalseRatioGet(params *ColumnFamilyMetricsBloomFilterFalseRatioGetParams) (*ColumnFamilyMetricsBloomFilterFalseRatioGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterFalseRatioGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterFalseRatioGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_false_ratio",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterFalseRatioGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterFalseRatioGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterFalseRatioGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGet gets bloom filter off heap memory used
+
+Get bloom filter off heap memory used
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGet(params *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams) (*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_off_heap_memory_used/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGet gets all bloom filter off heap memory used
+
+Get all bloom filter off heap memory used
+*/
+func (a *Client) ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGet(params *ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams) (*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/bloom_filter_off_heap_memory_used",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsBloomFilterOffHeapMemoryUsedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
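+// NOTE (editorial example, not generated code): each operation returns the
+// typed ...OK value on success; otherwise err is expected to carry the
+// operation's ...Default response, the API's catch-all error payload. A
+// plausible way to recover the HTTP status from such an error:
+//
+//	_, err := client.ColumnFamilyMetricsBloomFilterFalseRatioGet(nil)
+//	if d, ok := err.(*ColumnFamilyMetricsBloomFilterFalseRatioGetDefault); ok {
+//		fmt.Println(d.Code()) // status code of the error response
+//	}
+//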
+/*
+ColumnFamilyMetricsCasCommitByNameGet gets cas commit
+
+Get cas commit
+*/
+func (a *Client) ColumnFamilyMetricsCasCommitByNameGet(params *ColumnFamilyMetricsCasCommitByNameGetParams) (*ColumnFamilyMetricsCasCommitByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasCommitByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasCommitByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_commit/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasCommitByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasCommitByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasCommitByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGet gets cas commit estimated histogram
+
+Get cas commit
+*/
+func (a *Client) ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGet(params *ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_commit/estimated_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasCommitEstimatedHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGet gets cas commit estimated recent histogram
+
+Get cas commit
+*/
+func (a *Client) ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_commit/estimated_recent_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasCommitEstimatedRecentHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasPrepareByNameGet gets cas prepare
+
+Get cas prepare
+*/
+func (a *Client) ColumnFamilyMetricsCasPrepareByNameGet(params *ColumnFamilyMetricsCasPrepareByNameGetParams) (*ColumnFamilyMetricsCasPrepareByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasPrepareByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasPrepareByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_prepare/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasPrepareByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasPrepareByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasPrepareByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGet gets cas prepare estimated histogram
+
+Get cas prepare
+*/
+func (a *Client) ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGet(params *ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_prepare/estimated_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasPrepareEstimatedHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGet gets cas prepare estimated recent histogram
+
+Get cas prepare
+*/
+func (a *Client) ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_prepare/estimated_recent_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasPrepareEstimatedRecentHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasProposeByNameGet gets cas propose
+
+Get cas propose
+*/
+func (a *Client) ColumnFamilyMetricsCasProposeByNameGet(params *ColumnFamilyMetricsCasProposeByNameGetParams) (*ColumnFamilyMetricsCasProposeByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasProposeByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasProposeByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_propose/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasProposeByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasProposeByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasProposeByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGet gets cas propose estimated histogram
+
+Get cas propose
+*/
+func (a *Client) ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGet(params *ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_propose/estimated_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasProposeEstimatedHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGet gets cas propose estimated recent histogram
+
+Get cas propose
+*/
+func (a *Client) ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/cas_propose/estimated_recent_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCasProposeEstimatedRecentHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGet gets col update time delta histogram
+
+Get col update time delta histogram
+*/
+func (a *Client) ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGet(params *ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams) (*ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/col_update_time_delta_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsColUpdateTimeDeltaHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGet gets compression metadata off heap memory used
+
+Get compression metadata off heap memory used
+*/
+func (a *Client) ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGet(params *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams) (*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/compression_metadata_off_heap_memory_used/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGet gets all compression metadata off heap memory used
+
+Get all compression metadata off heap memory used
+*/
+func (a *Client) ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGet(params *ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams) (*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/compression_metadata_off_heap_memory_used",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCompressionMetadataOffHeapMemoryUsedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioByNameGet gets compression ratio
+
+Get compression ratio
+*/
+func (a *Client) ColumnFamilyMetricsCompressionRatioByNameGet(params *ColumnFamilyMetricsCompressionRatioByNameGetParams) (*ColumnFamilyMetricsCompressionRatioByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCompressionRatioByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCompressionRatioByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/compression_ratio/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCompressionRatioByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCompressionRatioByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCompressionRatioByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCompressionRatioGet gets all compression ratio
+
+Get all compression ratio
+*/
+func (a *Client) ColumnFamilyMetricsCompressionRatioGet(params *ColumnFamilyMetricsCompressionRatioGetParams) (*ColumnFamilyMetricsCompressionRatioGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCompressionRatioGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCompressionRatioGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/compression_ratio",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCompressionRatioGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCompressionRatioGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCompressionRatioGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCoordinatorReadGet gets coordinator read latency
+
+Get coordinator read latency
+*/
+func (a *Client) ColumnFamilyMetricsCoordinatorReadGet(params *ColumnFamilyMetricsCoordinatorReadGetParams) (*ColumnFamilyMetricsCoordinatorReadGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCoordinatorReadGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCoordinatorReadGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/coordinator/read",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCoordinatorReadGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCoordinatorReadGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCoordinatorReadGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsCoordinatorScanGet gets coordinator scan latency
+
+Get coordinator scan latency
+*/
+func (a *Client) ColumnFamilyMetricsCoordinatorScanGet(params *ColumnFamilyMetricsCoordinatorScanGetParams) (*ColumnFamilyMetricsCoordinatorScanGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsCoordinatorScanGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsCoordinatorScanGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/coordinator/scan",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsCoordinatorScanGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsCoordinatorScanGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsCoordinatorScanGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGet gets estimated column count histogram
+
+Get estimated column count histogram
+*/
+func (a *Client) ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGet(params *ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams) (*ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/estimated_column_count_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
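+// NOTE (editorial example, not generated code): the ...ByNameGet variants take
+// the target table through the {name} path parameter; the sketch below assumes
+// the generated WithName setter and Scylla's "<keyspace>:<table>" naming.
+//
+//	p := NewColumnFamilyMetricsEstimatedColumnCountHistogramByNameGetParams().
+//		WithName("myks:mytable")
+//	hist, err := client.ColumnFamilyMetricsEstimatedColumnCountHistogramByNameGet(p)
+//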
+/*
+ColumnFamilyMetricsEstimatedRowCountByNameGet gets estimated row count
+
+Get estimated row count
+*/
+func (a *Client) ColumnFamilyMetricsEstimatedRowCountByNameGet(params *ColumnFamilyMetricsEstimatedRowCountByNameGetParams) (*ColumnFamilyMetricsEstimatedRowCountByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsEstimatedRowCountByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsEstimatedRowCountByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/estimated_row_count/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsEstimatedRowCountByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsEstimatedRowCountByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsEstimatedRowCountByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGet gets estimated row size histogram
+
+Get estimated row size histogram
+*/
+func (a *Client) ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGet(params *ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams) (*ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/estimated_row_size_histogram/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsEstimatedRowSizeHistogramByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGet gets index summary off heap memory used
+
+Get index summary off heap memory used
+*/
+func (a *Client) ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGet(params *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams) (*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/index_summary_off_heap_memory_used/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGet gets all index summary off heap memory used
+
+Get all index summary off heap memory used
+*/
+func (a *Client) ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGet(params *ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams) (*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/index_summary_off_heap_memory_used",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsIndexSummaryOffHeapMemoryUsedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsKeyCacheHitRateByNameGet gets key cache hit rate
+
+Get key cache hit rate
+*/
+func (a *Client) ColumnFamilyMetricsKeyCacheHitRateByNameGet(params *ColumnFamilyMetricsKeyCacheHitRateByNameGetParams) (*ColumnFamilyMetricsKeyCacheHitRateByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsKeyCacheHitRateByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsKeyCacheHitRateByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/key_cache_hit_rate/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsKeyCacheHitRateByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsKeyCacheHitRateByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsKeyCacheHitRateByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsLiveDiskSpaceUsedByNameGet gets live disk space used
+
+Get live disk space used
+*/
+func (a *Client) ColumnFamilyMetricsLiveDiskSpaceUsedByNameGet(params *ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams) (*ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsLiveDiskSpaceUsedByNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsLiveDiskSpaceUsedByNameGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/live_disk_space_used/{name}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsLiveDiskSpaceUsedByNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+ColumnFamilyMetricsLiveDiskSpaceUsedGet gets all live disk space used
+
+Get all live disk space used
+*/
+func (a *Client) ColumnFamilyMetricsLiveDiskSpaceUsedGet(params *ColumnFamilyMetricsLiveDiskSpaceUsedGetParams) (*ColumnFamilyMetricsLiveDiskSpaceUsedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewColumnFamilyMetricsLiveDiskSpaceUsedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "ColumnFamilyMetricsLiveDiskSpaceUsedGet",
+		Method:             "GET",
+		PathPattern:        "/column_family/metrics/live_disk_space_used",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &ColumnFamilyMetricsLiveDiskSpaceUsedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ColumnFamilyMetricsLiveDiskSpaceUsedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ColumnFamilyMetricsLiveDiskSpaceUsedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
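+// NOTE (editorial example, not generated code): the Context and HTTPClient
+// fields forwarded above come from the params object; assuming the generated
+// WithTimeout/WithContext setters, a bounded call could look like:
+//
+//	p := NewColumnFamilyMetricsLiveDiskSpaceUsedGetParams().
+//		WithTimeout(5 * time.Second).
+//		WithContext(ctx)
+//	used, err := client.ColumnFamilyMetricsLiveDiskSpaceUsedGet(p)
+//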
result.(*ColumnFamilyMetricsLiveScannedHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsLiveSsTableCountByNameGet gets live ss table count + +Get live ss table count +*/ +func (a *Client) ColumnFamilyMetricsLiveSsTableCountByNameGet(params *ColumnFamilyMetricsLiveSsTableCountByNameGetParams) (*ColumnFamilyMetricsLiveSsTableCountByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsLiveSsTableCountByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsLiveSsTableCountByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/live_ss_table_count/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsLiveSsTableCountByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsLiveSsTableCountByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsLiveSsTableCountByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsLiveSsTableCountGet gets all live ss table count + +Get all live ss table count +*/ +func (a *Client) ColumnFamilyMetricsLiveSsTableCountGet(params *ColumnFamilyMetricsLiveSsTableCountGetParams) (*ColumnFamilyMetricsLiveSsTableCountGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsLiveSsTableCountGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsLiveSsTableCountGet", + Method: "GET", + PathPattern: "/column_family/metrics/live_ss_table_count", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsLiveSsTableCountGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsLiveSsTableCountGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsLiveSsTableCountGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMaxRowSizeByNameGet gets max row size + +Get max row size +*/ +func (a *Client) ColumnFamilyMetricsMaxRowSizeByNameGet(params *ColumnFamilyMetricsMaxRowSizeByNameGetParams) (*ColumnFamilyMetricsMaxRowSizeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMaxRowSizeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMaxRowSizeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/max_row_size/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: 
[]string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMaxRowSizeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMaxRowSizeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMaxRowSizeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMaxRowSizeGet gets all max row size + +Get all max row size +*/ +func (a *Client) ColumnFamilyMetricsMaxRowSizeGet(params *ColumnFamilyMetricsMaxRowSizeGetParams) (*ColumnFamilyMetricsMaxRowSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMaxRowSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMaxRowSizeGet", + Method: "GET", + PathPattern: "/column_family/metrics/max_row_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMaxRowSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMaxRowSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMaxRowSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMeanRowSizeByNameGet gets mean row size + +Get mean row size +*/ +func (a *Client) ColumnFamilyMetricsMeanRowSizeByNameGet(params *ColumnFamilyMetricsMeanRowSizeByNameGetParams) (*ColumnFamilyMetricsMeanRowSizeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMeanRowSizeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMeanRowSizeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/mean_row_size/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMeanRowSizeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMeanRowSizeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMeanRowSizeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMeanRowSizeGet gets all mean row size + +Get all mean row size +*/ +func (a *Client) ColumnFamilyMetricsMeanRowSizeGet(params *ColumnFamilyMetricsMeanRowSizeGetParams) (*ColumnFamilyMetricsMeanRowSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMeanRowSizeGetParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMeanRowSizeGet", + Method: "GET", + PathPattern: "/column_family/metrics/mean_row_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMeanRowSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMeanRowSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMeanRowSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableColumnsCountByNameGet gets memtable columns count + +Get memtable columns count +*/ +func (a *Client) ColumnFamilyMetricsMemtableColumnsCountByNameGet(params *ColumnFamilyMetricsMemtableColumnsCountByNameGetParams) (*ColumnFamilyMetricsMemtableColumnsCountByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableColumnsCountByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableColumnsCountByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_columns_count/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableColumnsCountByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableColumnsCountByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableColumnsCountByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableColumnsCountGet gets all memtable columns count + +Get all memtable columns count +*/ +func (a *Client) ColumnFamilyMetricsMemtableColumnsCountGet(params *ColumnFamilyMetricsMemtableColumnsCountGetParams) (*ColumnFamilyMetricsMemtableColumnsCountGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableColumnsCountGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableColumnsCountGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_columns_count/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableColumnsCountGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableColumnsCountGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableColumnsCountGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error",
unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeByNameGet gets memtable live data size + +Get the column family active memtable live data size +*/ +func (a *Client) ColumnFamilyMetricsMemtableLiveDataSizeByNameGet(params *ColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams) (*ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableLiveDataSizeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableLiveDataSizeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_live_data_size/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableLiveDataSizeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableLiveDataSizeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableLiveDataSizeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableLiveDataSizeGet gets all memtable live data size + +Get the active memtable live data size of all column families +*/ +func (a *Client) ColumnFamilyMetricsMemtableLiveDataSizeGet(params *ColumnFamilyMetricsMemtableLiveDataSizeGetParams) (*ColumnFamilyMetricsMemtableLiveDataSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableLiveDataSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableLiveDataSizeGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_live_data_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableLiveDataSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableLiveDataSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableLiveDataSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeByNameGet gets memtable off heap size + +Get the column family active memtable off heap size +*/ +func (a *Client) ColumnFamilyMetricsMemtableOffHeapSizeByNameGet(params *ColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams) (*ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableOffHeapSizeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableOffHeapSizeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_off_heap_size/{name}", + 
ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableOffHeapSizeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableOffHeapSizeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableOffHeapSizeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableOffHeapSizeGet gets all memtable off heap size + +Get the active memtable off heap size of all column families +*/ +func (a *Client) ColumnFamilyMetricsMemtableOffHeapSizeGet(params *ColumnFamilyMetricsMemtableOffHeapSizeGetParams) (*ColumnFamilyMetricsMemtableOffHeapSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableOffHeapSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableOffHeapSizeGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_off_heap_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableOffHeapSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableOffHeapSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableOffHeapSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeByNameGet gets memtable on heap size + +Get the column family active memtable on heap size +*/ +func (a *Client) ColumnFamilyMetricsMemtableOnHeapSizeByNameGet(params *ColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams) (*ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableOnHeapSizeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableOnHeapSizeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_on_heap_size/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableOnHeapSizeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableOnHeapSizeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableOnHeapSizeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableOnHeapSizeGet gets 
all memtable on heap size + +Get the active memtable on heap size of all column families +*/ +func (a *Client) ColumnFamilyMetricsMemtableOnHeapSizeGet(params *ColumnFamilyMetricsMemtableOnHeapSizeGetParams) (*ColumnFamilyMetricsMemtableOnHeapSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableOnHeapSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableOnHeapSizeGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_on_heap_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableOnHeapSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableOnHeapSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableOnHeapSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableSwitchCountByNameGet gets memtable switch count + +Get memtable switch count +*/ +func (a *Client) ColumnFamilyMetricsMemtableSwitchCountByNameGet(params *ColumnFamilyMetricsMemtableSwitchCountByNameGetParams) (*ColumnFamilyMetricsMemtableSwitchCountByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableSwitchCountByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableSwitchCountByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_switch_count/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableSwitchCountByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableSwitchCountByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableSwitchCountByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMemtableSwitchCountGet gets all memtable switch count + +Get all memtable switch count +*/ +func (a *Client) ColumnFamilyMetricsMemtableSwitchCountGet(params *ColumnFamilyMetricsMemtableSwitchCountGetParams) (*ColumnFamilyMetricsMemtableSwitchCountGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMemtableSwitchCountGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMemtableSwitchCountGet", + Method: "GET", + PathPattern: "/column_family/metrics/memtable_switch_count", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMemtableSwitchCountGetReader{formats: a.formats}, + Context: 
params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMemtableSwitchCountGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMemtableSwitchCountGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMinRowSizeByNameGet gets min row size + +Get min row size +*/ +func (a *Client) ColumnFamilyMetricsMinRowSizeByNameGet(params *ColumnFamilyMetricsMinRowSizeByNameGetParams) (*ColumnFamilyMetricsMinRowSizeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMinRowSizeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMinRowSizeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/min_row_size/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMinRowSizeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMinRowSizeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMinRowSizeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsMinRowSizeGet gets all min row size + +Get all min row size +*/ +func (a *Client) ColumnFamilyMetricsMinRowSizeGet(params *ColumnFamilyMetricsMinRowSizeGetParams) (*ColumnFamilyMetricsMinRowSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsMinRowSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsMinRowSizeGet", + Method: "GET", + PathPattern: "/column_family/metrics/min_row_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsMinRowSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsMinRowSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsMinRowSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsPendingCompactionsByNameGet gets pending compactions + +Get pending compactions +*/ +func (a *Client) ColumnFamilyMetricsPendingCompactionsByNameGet(params *ColumnFamilyMetricsPendingCompactionsByNameGetParams) (*ColumnFamilyMetricsPendingCompactionsByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsPendingCompactionsByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsPendingCompactionsByNameGet", + Method: "GET", 
+ PathPattern: "/column_family/metrics/pending_compactions/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsPendingCompactionsByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsPendingCompactionsByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsPendingCompactionsByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsPendingCompactionsGet gets all pending compactions + +Get all pending compactions +*/ +func (a *Client) ColumnFamilyMetricsPendingCompactionsGet(params *ColumnFamilyMetricsPendingCompactionsGetParams) (*ColumnFamilyMetricsPendingCompactionsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsPendingCompactionsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsPendingCompactionsGet", + Method: "GET", + PathPattern: "/column_family/metrics/pending_compactions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsPendingCompactionsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsPendingCompactionsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsPendingCompactionsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsPendingFlushesByNameGet gets pending flushes + +Get pending flushes +*/ +func (a *Client) ColumnFamilyMetricsPendingFlushesByNameGet(params *ColumnFamilyMetricsPendingFlushesByNameGetParams) (*ColumnFamilyMetricsPendingFlushesByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsPendingFlushesByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsPendingFlushesByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/pending_flushes/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsPendingFlushesByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsPendingFlushesByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsPendingFlushesByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsPendingFlushesGet gets all pending flushes + +Get all pending flushes +*/ +func (a 
*Client) ColumnFamilyMetricsPendingFlushesGet(params *ColumnFamilyMetricsPendingFlushesGetParams) (*ColumnFamilyMetricsPendingFlushesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsPendingFlushesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsPendingFlushesGet", + Method: "GET", + PathPattern: "/column_family/metrics/pending_flushes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsPendingFlushesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsPendingFlushesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsPendingFlushesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRangeLatencyByNameGet gets range latency + +Get range latency +*/ +func (a *Client) ColumnFamilyMetricsRangeLatencyByNameGet(params *ColumnFamilyMetricsRangeLatencyByNameGetParams) (*ColumnFamilyMetricsRangeLatencyByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRangeLatencyByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRangeLatencyByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/range_latency/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRangeLatencyByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRangeLatencyByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRangeLatencyByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGet gets range latency estimated histogram + +Get range latency +*/ +func (a *Client) ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGet(params *ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/range_latency/estimated_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + 
success, ok := result.(*ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRangeLatencyEstimatedHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGet gets range latency estimated recent histogram + +Get range latency +*/ +func (a *Client) ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/range_latency/estimated_recent_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRangeLatencyEstimatedRecentHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRangeLatencyGet gets all range latency + +Get all range latency +*/ +func (a *Client) ColumnFamilyMetricsRangeLatencyGet(params *ColumnFamilyMetricsRangeLatencyGetParams) (*ColumnFamilyMetricsRangeLatencyGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRangeLatencyGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRangeLatencyGet", + Method: "GET", + PathPattern: "/column_family/metrics/range_latency", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRangeLatencyGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRangeLatencyGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRangeLatencyGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadByNameGet gets read + +Get number of reads +*/ +func (a *Client) ColumnFamilyMetricsReadByNameGet(params *ColumnFamilyMetricsReadByNameGetParams) (*ColumnFamilyMetricsReadByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = 
NewColumnFamilyMetricsReadByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/read/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadGet gets all read + +Get number of reads from all column families, per shard +*/ +func (a *Client) ColumnFamilyMetricsReadGet(params *ColumnFamilyMetricsReadGetParams) (*ColumnFamilyMetricsReadGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadGet", + Method: "GET", + PathPattern: "/column_family/metrics/read/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyByNameGet gets read latency + +Get read latency +*/ +func (a *Client) ColumnFamilyMetricsReadLatencyByNameGet(params *ColumnFamilyMetricsReadLatencyByNameGetParams) (*ColumnFamilyMetricsReadLatencyByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGet gets read latency estimated histogram + +Get read latency +*/ +func (a *Client) 
ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency/estimated_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyEstimatedHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGet gets read latency estimated recent histogram + +Get read latency +*/ +func (a *Client) ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency/estimated_recent_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyEstimatedRecentHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyGet gets all read latency + +Get all read latency +*/ +func (a *Client) ColumnFamilyMetricsReadLatencyGet(params *ColumnFamilyMetricsReadLatencyGetParams) (*ColumnFamilyMetricsReadLatencyGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, 
+ Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyHistogramByNameGet gets read latency histogram (deprecated) + +Get read latency histogram +*/ +func (a *Client) ColumnFamilyMetricsReadLatencyHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency/histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyHistogramGet gets all read latency histogram (deprecated) + +Get read latency histogram from all column families +*/ +func (a *Client) ColumnFamilyMetricsReadLatencyHistogramGet(params *ColumnFamilyMetricsReadLatencyHistogramGetParams) (*ColumnFamilyMetricsReadLatencyHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyHistogramGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency/histogram/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGet gets read latency histogram + +Get read latency moving average histogram +*/ +func (a *Client) 
ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGet(params *ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams) (*ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency/moving_average_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyMovingAverageHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsReadLatencyMovingAverageHistogramGet gets all read latency histogram + +Get read latency moving average histogram from all column families +*/ +func (a *Client) ColumnFamilyMetricsReadLatencyMovingAverageHistogramGet(params *ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams) (*ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsReadLatencyMovingAverageHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsReadLatencyMovingAverageHistogramGet", + Method: "GET", + PathPattern: "/column_family/metrics/read_latency/moving_average_histogram/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsReadLatencyMovingAverageHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGet gets recent bloom filter false positives + +Get recent bloom filter false positives +*/ +func (a *Client) ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGet(params *ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: 
"ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/recent_bloom_filter_false_positives/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRecentBloomFilterFalsePositivesByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalsePositivesGet gets all recent bloom filter false positives + +Get all recent bloom filter false positives +*/ +func (a *Client) ColumnFamilyMetricsRecentBloomFilterFalsePositivesGet(params *ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRecentBloomFilterFalsePositivesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRecentBloomFilterFalsePositivesGet", + Method: "GET", + PathPattern: "/column_family/metrics/recent_bloom_filter_false_positives", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRecentBloomFilterFalsePositivesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGet gets recent bloom filter false ratio + +Get recent bloom filter false ratio +*/ +func (a *Client) ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGet(params *ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/recent_bloom_filter_false_ratio/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRecentBloomFilterFalseRatioByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRecentBloomFilterFalseRatioGet gets all recent bloom filter false ratio + +Get all recent bloom filter false ratio +*/ +func (a *Client) ColumnFamilyMetricsRecentBloomFilterFalseRatioGet(params *ColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams) (*ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRecentBloomFilterFalseRatioGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRecentBloomFilterFalseRatioGet", + Method: "GET", + PathPattern: "/column_family/metrics/recent_bloom_filter_false_ratio", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRecentBloomFilterFalseRatioGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRecentBloomFilterFalseRatioGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRecentBloomFilterFalseRatioGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRowCacheHitByNameGet gets row cache hit + +Get row cache hit +*/ +func (a *Client) ColumnFamilyMetricsRowCacheHitByNameGet(params *ColumnFamilyMetricsRowCacheHitByNameGetParams) (*ColumnFamilyMetricsRowCacheHitByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRowCacheHitByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRowCacheHitByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/row_cache_hit/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRowCacheHitByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRowCacheHitByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRowCacheHitByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRowCacheHitGet gets all row cache hit + +Get all row cache hit +*/ +func (a *Client) ColumnFamilyMetricsRowCacheHitGet(params *ColumnFamilyMetricsRowCacheHitGetParams) (*ColumnFamilyMetricsRowCacheHitGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRowCacheHitGetParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRowCacheHitGet", + Method: "GET", + PathPattern: "/column_family/metrics/row_cache_hit", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRowCacheHitGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRowCacheHitGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRowCacheHitGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGet gets row cache hit out of range + +Get row cache hit out of range +*/ +func (a *Client) ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGet(params *ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams) (*ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/row_cache_hit_out_of_range/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRowCacheHitOutOfRangeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRowCacheHitOutOfRangeGet gets all row cache hit out of range + +Get all row cache hit out of range +*/ +func (a *Client) ColumnFamilyMetricsRowCacheHitOutOfRangeGet(params *ColumnFamilyMetricsRowCacheHitOutOfRangeGetParams) (*ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRowCacheHitOutOfRangeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRowCacheHitOutOfRangeGet", + Method: "GET", + PathPattern: "/column_family/metrics/row_cache_hit_out_of_range", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRowCacheHitOutOfRangeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRowCacheHitOutOfRangeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRowCacheHitOutOfRangeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content 
available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRowCacheMissByNameGet gets row cache miss + +Get row cache miss +*/ +func (a *Client) ColumnFamilyMetricsRowCacheMissByNameGet(params *ColumnFamilyMetricsRowCacheMissByNameGetParams) (*ColumnFamilyMetricsRowCacheMissByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRowCacheMissByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRowCacheMissByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/row_cache_miss/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRowCacheMissByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRowCacheMissByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRowCacheMissByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsRowCacheMissGet gets all row cache miss + +Get all row cache miss +*/ +func (a *Client) ColumnFamilyMetricsRowCacheMissGet(params *ColumnFamilyMetricsRowCacheMissGetParams) (*ColumnFamilyMetricsRowCacheMissGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsRowCacheMissGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsRowCacheMissGet", + Method: "GET", + PathPattern: "/column_family/metrics/row_cache_miss", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsRowCacheMissGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsRowCacheMissGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsRowCacheMissGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsSnapshotsSizeByNameGet gets true snapshots size + +Get true snapshots size +*/ +func (a *Client) ColumnFamilyMetricsSnapshotsSizeByNameGet(params *ColumnFamilyMetricsSnapshotsSizeByNameGetParams) (*ColumnFamilyMetricsSnapshotsSizeByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsSnapshotsSizeByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsSnapshotsSizeByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/snapshots_size/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsSnapshotsSizeByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err 
!= nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsSnapshotsSizeByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsSnapshotsSizeByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsSpeculativeRetriesByNameGet gets speculative retries + +Get speculative retries +*/ +func (a *Client) ColumnFamilyMetricsSpeculativeRetriesByNameGet(params *ColumnFamilyMetricsSpeculativeRetriesByNameGetParams) (*ColumnFamilyMetricsSpeculativeRetriesByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsSpeculativeRetriesByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsSpeculativeRetriesByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/speculative_retries/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsSpeculativeRetriesByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsSpeculativeRetriesByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsSpeculativeRetriesByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsSpeculativeRetriesGet gets all speculative retries + +Get all speculative retries +*/ +func (a *Client) ColumnFamilyMetricsSpeculativeRetriesGet(params *ColumnFamilyMetricsSpeculativeRetriesGetParams) (*ColumnFamilyMetricsSpeculativeRetriesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsSpeculativeRetriesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsSpeculativeRetriesGet", + Method: "GET", + PathPattern: "/column_family/metrics/speculative_retries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsSpeculativeRetriesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsSpeculativeRetriesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsSpeculativeRetriesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsSstablesPerReadHistogramByNameGet gets sstables per read histogram + +Get sstables per read histogram +*/ +func (a *Client) ColumnFamilyMetricsSstablesPerReadHistogramByNameGet(params *ColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams) (*ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = 
NewColumnFamilyMetricsSstablesPerReadHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsSstablesPerReadHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/sstables_per_read_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsSstablesPerReadHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsSstablesPerReadHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsSstablesPerReadHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsTombstoneScannedHistogramByNameGet gets tombstone scanned histogram + +Get tombstone scanned histogram +*/ +func (a *Client) ColumnFamilyMetricsTombstoneScannedHistogramByNameGet(params *ColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams) (*ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsTombstoneScannedHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsTombstoneScannedHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/tombstone_scanned_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsTombstoneScannedHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsTombstoneScannedHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsTombstoneScannedHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedByNameGet gets total disk space used + +Get total disk space used +*/ +func (a *Client) ColumnFamilyMetricsTotalDiskSpaceUsedByNameGet(params *ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams) (*ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsTotalDiskSpaceUsedByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsTotalDiskSpaceUsedByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/total_disk_space_used/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsTotalDiskSpaceUsedByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsTotalDiskSpaceUsedGet gets all total disk space used + +Get all total disk space used +*/ +func (a *Client) ColumnFamilyMetricsTotalDiskSpaceUsedGet(params *ColumnFamilyMetricsTotalDiskSpaceUsedGetParams) (*ColumnFamilyMetricsTotalDiskSpaceUsedGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsTotalDiskSpaceUsedGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsTotalDiskSpaceUsedGet", + Method: "GET", + PathPattern: "/column_family/metrics/total_disk_space_used", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsTotalDiskSpaceUsedGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsTotalDiskSpaceUsedGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsTotalDiskSpaceUsedGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsTrueSnapshotsSizeGet gets all true snapshots size + +Get all true snapshots size +*/ +func (a *Client) ColumnFamilyMetricsTrueSnapshotsSizeGet(params *ColumnFamilyMetricsTrueSnapshotsSizeGetParams) (*ColumnFamilyMetricsTrueSnapshotsSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsTrueSnapshotsSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsTrueSnapshotsSizeGet", + Method: "GET", + PathPattern: "/column_family/metrics/true_snapshots_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsTrueSnapshotsSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsTrueSnapshotsSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsTrueSnapshotsSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWaitingOnFreeMemtableGet gets waiting on free memtable space + +Get waiting on free memtable space +*/ +func (a *Client) ColumnFamilyMetricsWaitingOnFreeMemtableGet(params *ColumnFamilyMetricsWaitingOnFreeMemtableGetParams) (*ColumnFamilyMetricsWaitingOnFreeMemtableGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWaitingOnFreeMemtableGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: 
"ColumnFamilyMetricsWaitingOnFreeMemtableGet", + Method: "GET", + PathPattern: "/column_family/metrics/waiting_on_free_memtable", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWaitingOnFreeMemtableGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWaitingOnFreeMemtableGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWaitingOnFreeMemtableGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteByNameGet gets write + +Get number of writes +*/ +func (a *Client) ColumnFamilyMetricsWriteByNameGet(params *ColumnFamilyMetricsWriteByNameGetParams) (*ColumnFamilyMetricsWriteByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/write/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteGet gets all write + +Get number of writes from all column family, per shard +*/ +func (a *Client) ColumnFamilyMetricsWriteGet(params *ColumnFamilyMetricsWriteGetParams) (*ColumnFamilyMetricsWriteGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteGet", + Method: "GET", + PathPattern: "/column_family/metrics/write/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyByNameGet gets write latency + +Get write latency +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyByNameGet(params *ColumnFamilyMetricsWriteLatencyByNameGetParams) (*ColumnFamilyMetricsWriteLatencyByNameGetOK, error) { + // TODO: 
Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteLatencyByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGet gets write latency estimated histogram + +Get write latency +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency/estimated_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyEstimatedHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGet gets write latency estimated recent histogram + +Get write latency +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency/estimated_recent_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: 
&ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyEstimatedRecentHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyGet gets all write latency + +Get all write latency +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyGet(params *ColumnFamilyMetricsWriteLatencyGetParams) (*ColumnFamilyMetricsWriteLatencyGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteLatencyGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramByNameGet gets write latency histogram deprecated + +Get write latency histogram +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency/histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteLatencyHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyHistogramGet gets all write latency histogram deprecated + +Get write latency histogram of all column family +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyHistogramGet(params
*ColumnFamilyMetricsWriteLatencyHistogramGetParams) (*ColumnFamilyMetricsWriteLatencyHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyHistogramGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency/histogram/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteLatencyHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGet gets write latency histogram + +Get write latency moving average histogram +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGet(params *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams) (*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency/moving_average_histogram/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGet gets all write latency histogram + +Get write latency moving average histogram of all column family +*/ +func (a *Client) ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGet(params *ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams) (*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGet", + Method: "GET", + PathPattern: "/column_family/metrics/write_latency/moving_average_histogram/", + ProducesMediaTypes: []string{"application/json"}, + 
ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMetricsWriteLatencyMovingAverageHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMinimumCompactionByNameGet gets minimum compaction threshold + +get the minimum number of sstables in queue before compaction kicks off +*/ +func (a *Client) ColumnFamilyMinimumCompactionByNameGet(params *ColumnFamilyMinimumCompactionByNameGetParams) (*ColumnFamilyMinimumCompactionByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMinimumCompactionByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMinimumCompactionByNameGet", + Method: "GET", + PathPattern: "/column_family/minimum_compaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMinimumCompactionByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMinimumCompactionByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMinimumCompactionByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyMinimumCompactionByNamePost sets minimum compaction threshold + +Sets the minimum number of sstables in queue before compaction kicks off +*/ +func (a *Client) ColumnFamilyMinimumCompactionByNamePost(params *ColumnFamilyMinimumCompactionByNamePostParams) (*ColumnFamilyMinimumCompactionByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyMinimumCompactionByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyMinimumCompactionByNamePost", + Method: "POST", + PathPattern: "/column_family/minimum_compaction/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyMinimumCompactionByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyMinimumCompactionByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyMinimumCompactionByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyNameGet gets column family name + +Get a list of all column family names +*/ +func (a *Client) 
ColumnFamilyNameGet(params *ColumnFamilyNameGetParams) (*ColumnFamilyNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyNameGet", + Method: "GET", + PathPattern: "/column_family/name", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilyNameKeyspaceGet gets column family name keyspace + +Get a list of the keyspace names +*/ +func (a *Client) ColumnFamilyNameKeyspaceGet(params *ColumnFamilyNameKeyspaceGetParams) (*ColumnFamilyNameKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilyNameKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilyNameKeyspaceGet", + Method: "GET", + PathPattern: "/column_family/name/keyspace", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilyNameKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilyNameKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilyNameKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilySstablesByKeyByNameGet gets sstables for key + +Returns a list of sstable filenames that contain the given partition key on this node +*/ +func (a *Client) ColumnFamilySstablesByKeyByNameGet(params *ColumnFamilySstablesByKeyByNameGetParams) (*ColumnFamilySstablesByKeyByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilySstablesByKeyByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilySstablesByKeyByNameGet", + Method: "GET", + PathPattern: "/column_family/sstables/by_key/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilySstablesByKeyByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilySstablesByKeyByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilySstablesByKeyByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilySstablesPerLevelByNameGet
gets sstable count per level + +sstable count for each level. empty unless leveled compaction is used +*/ +func (a *Client) ColumnFamilySstablesPerLevelByNameGet(params *ColumnFamilySstablesPerLevelByNameGetParams) (*ColumnFamilySstablesPerLevelByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilySstablesPerLevelByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilySstablesPerLevelByNameGet", + Method: "GET", + PathPattern: "/column_family/sstables/per_level/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilySstablesPerLevelByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilySstablesPerLevelByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilySstablesPerLevelByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +ColumnFamilySstablesUnleveledByNameGet gets unleveled sstables + +the number of SSTables in L0. Always return 0 if Leveled compaction is not enabled. +*/ +func (a *Client) ColumnFamilySstablesUnleveledByNameGet(params *ColumnFamilySstablesUnleveledByNameGetParams) (*ColumnFamilySstablesUnleveledByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewColumnFamilySstablesUnleveledByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "ColumnFamilySstablesUnleveledByNameGet", + Method: "GET", + PathPattern: "/column_family/sstables/unleveled/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ColumnFamilySstablesUnleveledByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*ColumnFamilySstablesUnleveledByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*ColumnFamilySstablesUnleveledByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitLogMetricsWaitingOnCommitGet gets waiting on commit + +Get waiting on commit +*/ +func (a *Client) CommitLogMetricsWaitingOnCommitGet(params *CommitLogMetricsWaitingOnCommitGetParams) (*CommitLogMetricsWaitingOnCommitGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitLogMetricsWaitingOnCommitGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CommitLogMetricsWaitingOnCommitGet", + Method: "GET", + PathPattern: "/commit_log/metrics/waiting_on_commit", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitLogMetricsWaitingOnCommitGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*CommitLogMetricsWaitingOnCommitGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitLogMetricsWaitingOnCommitGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitLogMetricsWaitingOnSegmentAllocationGet gets waiting on segment allocation + +Get waiting on segment allocation +*/ +func (a *Client) CommitLogMetricsWaitingOnSegmentAllocationGet(params *CommitLogMetricsWaitingOnSegmentAllocationGetParams) (*CommitLogMetricsWaitingOnSegmentAllocationGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitLogMetricsWaitingOnSegmentAllocationGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CommitLogMetricsWaitingOnSegmentAllocationGet", + Method: "GET", + PathPattern: "/commit_log/metrics/waiting_on_segment_allocation", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitLogMetricsWaitingOnSegmentAllocationGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CommitLogMetricsWaitingOnSegmentAllocationGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitLogMetricsWaitingOnSegmentAllocationGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitlogMetricsCompletedTasksGet gets completed tasks + +Get completed tasks +*/ +func (a *Client) CommitlogMetricsCompletedTasksGet(params *CommitlogMetricsCompletedTasksGetParams) (*CommitlogMetricsCompletedTasksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitlogMetricsCompletedTasksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CommitlogMetricsCompletedTasksGet", + Method: "GET", + PathPattern: "/commitlog/metrics/completed_tasks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitlogMetricsCompletedTasksGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CommitlogMetricsCompletedTasksGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitlogMetricsCompletedTasksGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitlogMetricsPendingTasksGet gets pending tasks + +Get pending tasks +*/ +func (a *Client) CommitlogMetricsPendingTasksGet(params *CommitlogMetricsPendingTasksGetParams) (*CommitlogMetricsPendingTasksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitlogMetricsPendingTasksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CommitlogMetricsPendingTasksGet", + Method: "GET", + PathPattern: "/commitlog/metrics/pending_tasks", + ProducesMediaTypes: []string{"application/json"}, + 
ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitlogMetricsPendingTasksGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CommitlogMetricsPendingTasksGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitlogMetricsPendingTasksGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitlogMetricsTotalCommitLogSizeGet gets total commit log size + +Get total commit log size +*/ +func (a *Client) CommitlogMetricsTotalCommitLogSizeGet(params *CommitlogMetricsTotalCommitLogSizeGetParams) (*CommitlogMetricsTotalCommitLogSizeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitlogMetricsTotalCommitLogSizeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CommitlogMetricsTotalCommitLogSizeGet", + Method: "GET", + PathPattern: "/commitlog/metrics/total_commit_log_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitlogMetricsTotalCommitLogSizeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CommitlogMetricsTotalCommitLogSizeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitlogMetricsTotalCommitLogSizeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitlogRecoverByPathPost commitlogs recover + +Recover a single file +*/ +func (a *Client) CommitlogRecoverByPathPost(params *CommitlogRecoverByPathPostParams) (*CommitlogRecoverByPathPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitlogRecoverByPathPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CommitlogRecoverByPathPost", + Method: "POST", + PathPattern: "/commitlog/recover/{path}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitlogRecoverByPathPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CommitlogRecoverByPathPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitlogRecoverByPathPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitlogSegmentsActiveGet gets active segment names + +file names (not full paths) of active commit log segments (segments containing unflushed data) +*/ +func (a *Client) CommitlogSegmentsActiveGet(params *CommitlogSegmentsActiveGetParams) (*CommitlogSegmentsActiveGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitlogSegmentsActiveGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ 
+ ID: "CommitlogSegmentsActiveGet", + Method: "GET", + PathPattern: "/commitlog/segments/active", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitlogSegmentsActiveGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CommitlogSegmentsActiveGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitlogSegmentsActiveGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CommitlogSegmentsArchivingGet gets archiving segment names + +Returns files which are pending for archival attempt. Does NOT include failed archive attempts +*/ +func (a *Client) CommitlogSegmentsArchivingGet(params *CommitlogSegmentsArchivingGetParams) (*CommitlogSegmentsArchivingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCommitlogSegmentsArchivingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CommitlogSegmentsArchivingGet", + Method: "GET", + PathPattern: "/commitlog/segments/archiving", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CommitlogSegmentsArchivingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CommitlogSegmentsArchivingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CommitlogSegmentsArchivingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerCompactionHistoryGet gets compaction history + +get List of the compaction history +*/ +func (a *Client) CompactionManagerCompactionHistoryGet(params *CompactionManagerCompactionHistoryGetParams) (*CompactionManagerCompactionHistoryGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerCompactionHistoryGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerCompactionHistoryGet", + Method: "GET", + PathPattern: "/compaction_manager/compaction_history", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerCompactionHistoryGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerCompactionHistoryGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerCompactionHistoryGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerCompactionInfoGet gets compaction info + +get a list of all active compaction info +*/ +func (a *Client) CompactionManagerCompactionInfoGet(params *CompactionManagerCompactionInfoGetParams) 
(*CompactionManagerCompactionInfoGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerCompactionInfoGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerCompactionInfoGet", + Method: "GET", + PathPattern: "/compaction_manager/compaction_info", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerCompactionInfoGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerCompactionInfoGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerCompactionInfoGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerCompactionsGet gets compactions + +get List of running compactions +*/ +func (a *Client) CompactionManagerCompactionsGet(params *CompactionManagerCompactionsGetParams) (*CompactionManagerCompactionsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerCompactionsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerCompactionsGet", + Method: "GET", + PathPattern: "/compaction_manager/compactions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerCompactionsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerCompactionsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerCompactionsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerForceUserDefinedCompactionPost forces user defined compaction + +Triggers the compaction of user specified sstables. You can specify files from various keyspaces and columnfamilies. If you do so, user defined compaction is performed several times to the groups of files in the same keyspace/columnfamily. The specified file names must contain the keyspace and columnfamily name in their path (for 2.1+) or in the file name itself.
+*/ +func (a *Client) CompactionManagerForceUserDefinedCompactionPost(params *CompactionManagerForceUserDefinedCompactionPostParams) (*CompactionManagerForceUserDefinedCompactionPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerForceUserDefinedCompactionPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerForceUserDefinedCompactionPost", + Method: "POST", + PathPattern: "/compaction_manager/force_user_defined_compaction", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerForceUserDefinedCompactionPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerForceUserDefinedCompactionPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerForceUserDefinedCompactionPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerMetricsBytesCompactedGet gets bytes compacted + +Get bytes compacted +*/ +func (a *Client) CompactionManagerMetricsBytesCompactedGet(params *CompactionManagerMetricsBytesCompactedGetParams) (*CompactionManagerMetricsBytesCompactedGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerMetricsBytesCompactedGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerMetricsBytesCompactedGet", + Method: "GET", + PathPattern: "/compaction_manager/metrics/bytes_compacted", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerMetricsBytesCompactedGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerMetricsBytesCompactedGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerMetricsBytesCompactedGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerMetricsCompletedTasksGet gets completed tasks1 + +Get completed tasks +*/ +func (a *Client) CompactionManagerMetricsCompletedTasksGet(params *CompactionManagerMetricsCompletedTasksGetParams) (*CompactionManagerMetricsCompletedTasksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerMetricsCompletedTasksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerMetricsCompletedTasksGet", + Method: "GET", + PathPattern: "/compaction_manager/metrics/completed_tasks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerMetricsCompletedTasksGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*CompactionManagerMetricsCompletedTasksGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerMetricsCompletedTasksGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerMetricsPendingTasksGet gets pending tasks1 + +Get pending tasks +*/ +func (a *Client) CompactionManagerMetricsPendingTasksGet(params *CompactionManagerMetricsPendingTasksGetParams) (*CompactionManagerMetricsPendingTasksGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerMetricsPendingTasksGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerMetricsPendingTasksGet", + Method: "GET", + PathPattern: "/compaction_manager/metrics/pending_tasks", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerMetricsPendingTasksGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerMetricsPendingTasksGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerMetricsPendingTasksGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerMetricsTotalCompactionsCompletedGet gets total compactions completed + +Get total compactions completed +*/ +func (a *Client) CompactionManagerMetricsTotalCompactionsCompletedGet(params *CompactionManagerMetricsTotalCompactionsCompletedGetParams) (*CompactionManagerMetricsTotalCompactionsCompletedGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerMetricsTotalCompactionsCompletedGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerMetricsTotalCompactionsCompletedGet", + Method: "GET", + PathPattern: "/compaction_manager/metrics/total_compactions_completed", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerMetricsTotalCompactionsCompletedGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerMetricsTotalCompactionsCompletedGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerMetricsTotalCompactionsCompletedGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +CompactionManagerStopCompactionPost stops compaction + +Stop all running compaction-like tasks having the provided type +*/ +func (a *Client) CompactionManagerStopCompactionPost(params *CompactionManagerStopCompactionPostParams) (*CompactionManagerStopCompactionPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewCompactionManagerStopCompactionPostParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "CompactionManagerStopCompactionPost", + Method: "POST", + PathPattern: "/compaction_manager/stop_compaction", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &CompactionManagerStopCompactionPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*CompactionManagerStopCompactionPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*CompactionManagerStopCompactionPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FailureDetectorCountEndpointDownGet gets down endpoint count + +Get count down endpoint +*/ +func (a *Client) FailureDetectorCountEndpointDownGet(params *FailureDetectorCountEndpointDownGetParams) (*FailureDetectorCountEndpointDownGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFailureDetectorCountEndpointDownGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "FailureDetectorCountEndpointDownGet", + Method: "GET", + PathPattern: "/failure_detector/count/endpoint/down", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FailureDetectorCountEndpointDownGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FailureDetectorCountEndpointDownGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FailureDetectorCountEndpointDownGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FailureDetectorCountEndpointUpGet gets up endpoint count + +Get count up endpoint +*/ +func (a *Client) FailureDetectorCountEndpointUpGet(params *FailureDetectorCountEndpointUpGetParams) (*FailureDetectorCountEndpointUpGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFailureDetectorCountEndpointUpGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "FailureDetectorCountEndpointUpGet", + Method: "GET", + PathPattern: "/failure_detector/count/endpoint/up", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FailureDetectorCountEndpointUpGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FailureDetectorCountEndpointUpGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FailureDetectorCountEndpointUpGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FailureDetectorEndpointPhiValuesGet gets endpoint phi values + +Get end point phi values +*/ +func (a *Client) FailureDetectorEndpointPhiValuesGet(params 
+
+/*
+FailureDetectorEndpointPhiValuesGet gets endpoint phi values
+
+Get endpoint phi values
+*/
+func (a *Client) FailureDetectorEndpointPhiValuesGet(params *FailureDetectorEndpointPhiValuesGetParams) (*FailureDetectorEndpointPhiValuesGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewFailureDetectorEndpointPhiValuesGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "FailureDetectorEndpointPhiValuesGet",
+		Method:             "GET",
+		PathPattern:        "/failure_detector/endpoint_phi_values",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &FailureDetectorEndpointPhiValuesGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*FailureDetectorEndpointPhiValuesGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*FailureDetectorEndpointPhiValuesGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+FailureDetectorEndpointsGet gets all endpoint states
+
+Get all endpoint states
+*/
+func (a *Client) FailureDetectorEndpointsGet(params *FailureDetectorEndpointsGetParams) (*FailureDetectorEndpointsGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewFailureDetectorEndpointsGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "FailureDetectorEndpointsGet",
+		Method:             "GET",
+		PathPattern:        "/failure_detector/endpoints/",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &FailureDetectorEndpointsGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*FailureDetectorEndpointsGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*FailureDetectorEndpointsGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+FailureDetectorEndpointsStatesByAddrGet gets endpoint state
+
+Get the endpoint state
+*/
+func (a *Client) FailureDetectorEndpointsStatesByAddrGet(params *FailureDetectorEndpointsStatesByAddrGetParams) (*FailureDetectorEndpointsStatesByAddrGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewFailureDetectorEndpointsStatesByAddrGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "FailureDetectorEndpointsStatesByAddrGet",
+		Method:             "GET",
+		PathPattern:        "/failure_detector/endpoints/states/{addr}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &FailureDetectorEndpointsStatesByAddrGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*FailureDetectorEndpointsStatesByAddrGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*FailureDetectorEndpointsStatesByAddrGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+FailureDetectorPhiGet gets phi convict threshold
+
+Get the phi convict threshold
+*/
+func (a *Client) FailureDetectorPhiGet(params *FailureDetectorPhiGetParams) (*FailureDetectorPhiGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewFailureDetectorPhiGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "FailureDetectorPhiGet",
+		Method:             "GET",
+		PathPattern:        "/failure_detector/phi",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &FailureDetectorPhiGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*FailureDetectorPhiGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*FailureDetectorPhiGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+FailureDetectorPhiPost sets phi convict threshold
+
+Set the phi convict threshold
+*/
+func (a *Client) FailureDetectorPhiPost(params *FailureDetectorPhiPostParams) (*FailureDetectorPhiPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewFailureDetectorPhiPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "FailureDetectorPhiPost",
+		Method:             "POST",
+		PathPattern:        "/failure_detector/phi",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &FailureDetectorPhiPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*FailureDetectorPhiPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*FailureDetectorPhiPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+FailureDetectorSimpleStatesGet gets simple states
+
+Get simple_states
+*/
+func (a *Client) FailureDetectorSimpleStatesGet(params *FailureDetectorSimpleStatesGetParams) (*FailureDetectorSimpleStatesGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewFailureDetectorSimpleStatesGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "FailureDetectorSimpleStatesGet",
+		Method:             "GET",
+		PathPattern:        "/failure_detector/simple_states",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &FailureDetectorSimpleStatesGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*FailureDetectorSimpleStatesGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*FailureDetectorSimpleStatesGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
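+
+// Editor's usage sketch (illustrative, not generated code): reading the phi
+// convict threshold via FailureDetectorPhiGet above. The matching POST carries
+// the new value in its params struct; that setter is defined in the params
+// file, not in this diff, so it is not guessed at here.
+//
+//	phi, err := client.FailureDetectorPhiGet(NewFailureDetectorPhiGetParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("phi convict threshold:", phi.Payload)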
+
+/*
+GossiperAssassinateByAddrPost assassinates endpoint
+
+Assassinate an endpoint
+*/
+func (a *Client) GossiperAssassinateByAddrPost(params *GossiperAssassinateByAddrPostParams) (*GossiperAssassinateByAddrPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewGossiperAssassinateByAddrPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "GossiperAssassinateByAddrPost",
+		Method:             "POST",
+		PathPattern:        "/gossiper/assassinate/{addr}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &GossiperAssassinateByAddrPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*GossiperAssassinateByAddrPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*GossiperAssassinateByAddrPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GossiperDowntimeByAddrGet gets endpoint downtime
+
+Get the downtime of an endpoint
+*/
+func (a *Client) GossiperDowntimeByAddrGet(params *GossiperDowntimeByAddrGetParams) (*GossiperDowntimeByAddrGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewGossiperDowntimeByAddrGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "GossiperDowntimeByAddrGet",
+		Method:             "GET",
+		PathPattern:        "/gossiper/downtime/{addr}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &GossiperDowntimeByAddrGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*GossiperDowntimeByAddrGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*GossiperDowntimeByAddrGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GossiperEndpointDownGet gets down endpoint
+
+Get the addresses of the down endpoints
+*/
+func (a *Client) GossiperEndpointDownGet(params *GossiperEndpointDownGetParams) (*GossiperEndpointDownGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewGossiperEndpointDownGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "GossiperEndpointDownGet",
+		Method:             "GET",
+		PathPattern:        "/gossiper/endpoint/down/",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &GossiperEndpointDownGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*GossiperEndpointDownGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*GossiperEndpointDownGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GossiperEndpointLiveGet gets live endpoint
+
+Get the addresses of live endpoints
+*/
+func (a *Client) GossiperEndpointLiveGet(params *GossiperEndpointLiveGetParams) (*GossiperEndpointLiveGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewGossiperEndpointLiveGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "GossiperEndpointLiveGet",
+		Method:             "GET",
+		PathPattern:        "/gossiper/endpoint/live/",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &GossiperEndpointLiveGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*GossiperEndpointLiveGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*GossiperEndpointLiveGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GossiperGenerationNumberByAddrGet gets current generation number
+
+Get the current generation number for a node
+*/
+func (a *Client) GossiperGenerationNumberByAddrGet(params *GossiperGenerationNumberByAddrGetParams) (*GossiperGenerationNumberByAddrGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewGossiperGenerationNumberByAddrGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "GossiperGenerationNumberByAddrGet",
+		Method:             "GET",
+		PathPattern:        "/gossiper/generation_number/{addr}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &GossiperGenerationNumberByAddrGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*GossiperGenerationNumberByAddrGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*GossiperGenerationNumberByAddrGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+GossiperHeartBeatVersionByAddrGet gets current heart beat version
+
+Get heart beat version for a node
+*/
+func (a *Client) GossiperHeartBeatVersionByAddrGet(params *GossiperHeartBeatVersionByAddrGetParams) (*GossiperHeartBeatVersionByAddrGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewGossiperHeartBeatVersionByAddrGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "GossiperHeartBeatVersionByAddrGet",
+		Method:             "GET",
+		PathPattern:        "/gossiper/heart_beat_version/{addr}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &GossiperHeartBeatVersionByAddrGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*GossiperHeartBeatVersionByAddrGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*GossiperHeartBeatVersionByAddrGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
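+
+// Editor's usage sketch (illustrative, not generated code): listing the live
+// endpoints the gossiper currently sees. Assumes the go-swagger Payload
+// convention on the OK response.
+//
+//	live, err := client.GossiperEndpointLiveGet(NewGossiperEndpointLiveGetParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("live endpoints:", live.Payload)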
response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +HintedHandoffHintsDelete truncates all hints + +Truncate all the hints +*/ +func (a *Client) HintedHandoffHintsDelete(params *HintedHandoffHintsDeleteParams) (*HintedHandoffHintsDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewHintedHandoffHintsDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "HintedHandoffHintsDelete", + Method: "DELETE", + PathPattern: "/hinted_handoff/hints", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &HintedHandoffHintsDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*HintedHandoffHintsDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*HintedHandoffHintsDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +HintedHandoffHintsGet lists endpoints pending hints + +List all the endpoints that this node has hints for. +*/ +func (a *Client) HintedHandoffHintsGet(params *HintedHandoffHintsGetParams) (*HintedHandoffHintsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewHintedHandoffHintsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "HintedHandoffHintsGet", + Method: "GET", + PathPattern: "/hinted_handoff/hints", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &HintedHandoffHintsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*HintedHandoffHintsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*HintedHandoffHintsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +HintedHandoffMetricsCreateHintByAddrGet gets create hint count + +Get create hint count +*/ +func (a *Client) HintedHandoffMetricsCreateHintByAddrGet(params *HintedHandoffMetricsCreateHintByAddrGetParams) (*HintedHandoffMetricsCreateHintByAddrGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewHintedHandoffMetricsCreateHintByAddrGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "HintedHandoffMetricsCreateHintByAddrGet", + Method: "GET", + PathPattern: "/hinted_handoff/metrics/create_hint/{addr}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &HintedHandoffMetricsCreateHintByAddrGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*HintedHandoffMetricsCreateHintByAddrGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*HintedHandoffMetricsCreateHintByAddrGetDefault) + return nil, runtime.NewAPIError("unexpected 
+
+/*
+HintedHandoffMetricsNotStoredHintsByAddrGet gets not stored hints count
+
+Get not stored hints count
+*/
+func (a *Client) HintedHandoffMetricsNotStoredHintsByAddrGet(params *HintedHandoffMetricsNotStoredHintsByAddrGetParams) (*HintedHandoffMetricsNotStoredHintsByAddrGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewHintedHandoffMetricsNotStoredHintsByAddrGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "HintedHandoffMetricsNotStoredHintsByAddrGet",
+		Method:             "GET",
+		PathPattern:        "/hinted_handoff/metrics/not_stored_hints/{addr}",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &HintedHandoffMetricsNotStoredHintsByAddrGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*HintedHandoffMetricsNotStoredHintsByAddrGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*HintedHandoffMetricsNotStoredHintsByAddrGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+HintedHandoffPausePost pauses hints delivery
+
+Pause hints delivery
+*/
+func (a *Client) HintedHandoffPausePost(params *HintedHandoffPausePostParams) (*HintedHandoffPausePostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewHintedHandoffPausePostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "HintedHandoffPausePost",
+		Method:             "POST",
+		PathPattern:        "/hinted_handoff/pause",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &HintedHandoffPausePostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*HintedHandoffPausePostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*HintedHandoffPausePostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+HintedHandoffSchedulePost schedules hint delivery
+
+Force hint delivery to an endpoint
+*/
+func (a *Client) HintedHandoffSchedulePost(params *HintedHandoffSchedulePostParams) (*HintedHandoffSchedulePostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewHintedHandoffSchedulePostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "HintedHandoffSchedulePost",
+		Method:             "POST",
+		PathPattern:        "/hinted_handoff/schedule",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &HintedHandoffSchedulePostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*HintedHandoffSchedulePostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*HintedHandoffSchedulePostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+LsaCompactPost forces compaction of all regions
+
+Force compaction of all regions
+*/
+func (a *Client) LsaCompactPost(params *LsaCompactPostParams) (*LsaCompactPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewLsaCompactPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "LsaCompactPost",
+		Method:             "POST",
+		PathPattern:        "/lsa/compact",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &LsaCompactPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*LsaCompactPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*LsaCompactPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesDroppedByVerGet gets dropped messages by ver
+
+Get the number of dropped messages per verb
+*/
+func (a *Client) MessagingServiceMessagesDroppedByVerGet(params *MessagingServiceMessagesDroppedByVerGetParams) (*MessagingServiceMessagesDroppedByVerGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesDroppedByVerGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesDroppedByVerGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/dropped_by_ver",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesDroppedByVerGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesDroppedByVerGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesDroppedByVerGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesDroppedGet gets dropped messages
+
+Get the number of messages that were dropped before sending
+*/
+func (a *Client) MessagingServiceMessagesDroppedGet(params *MessagingServiceMessagesDroppedGetParams) (*MessagingServiceMessagesDroppedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesDroppedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesDroppedGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/dropped",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesDroppedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesDroppedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesDroppedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
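+
+// Editor's usage sketch (illustrative, not generated code): sampling the
+// per-verb dropped-message counters exposed above.
+//
+//	dropped, err := client.MessagingServiceMessagesDroppedByVerGet(
+//		NewMessagingServiceMessagesDroppedByVerGetParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("dropped by verb:", dropped.Payload)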
+
+/*
+MessagingServiceMessagesExceptionGet gets exception messages
+
+Get the number of messages returned with an exception
+*/
+func (a *Client) MessagingServiceMessagesExceptionGet(params *MessagingServiceMessagesExceptionGetParams) (*MessagingServiceMessagesExceptionGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesExceptionGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesExceptionGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/exception",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesExceptionGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesExceptionGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesExceptionGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesPendingGet gets pending messages
+
+Get the number of pending messages
+*/
+func (a *Client) MessagingServiceMessagesPendingGet(params *MessagingServiceMessagesPendingGetParams) (*MessagingServiceMessagesPendingGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesPendingGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesPendingGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/pending",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesPendingGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesPendingGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesPendingGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesRepliedGet gets completed messages
+
+Get the number of replied messages
+*/
+func (a *Client) MessagingServiceMessagesRepliedGet(params *MessagingServiceMessagesRepliedGetParams) (*MessagingServiceMessagesRepliedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesRepliedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesRepliedGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/replied",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesRepliedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesRepliedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesRepliedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesRespondCompletedGet gets respond completed messages
+
+Get the number of completed respond messages
+*/
+func (a *Client) MessagingServiceMessagesRespondCompletedGet(params *MessagingServiceMessagesRespondCompletedGetParams) (*MessagingServiceMessagesRespondCompletedGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesRespondCompletedGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesRespondCompletedGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/respond_completed",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesRespondCompletedGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesRespondCompletedGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesRespondCompletedGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesRespondPendingGet gets respond pending messages
+
+Get the number of messages waiting for a response
+*/
+func (a *Client) MessagingServiceMessagesRespondPendingGet(params *MessagingServiceMessagesRespondPendingGetParams) (*MessagingServiceMessagesRespondPendingGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesRespondPendingGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesRespondPendingGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/respond_pending",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesRespondPendingGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesRespondPendingGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesRespondPendingGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesSentGet gets sent messages
+
+Get the number of sent messages
+*/
+func (a *Client) MessagingServiceMessagesSentGet(params *MessagingServiceMessagesSentGetParams) (*MessagingServiceMessagesSentGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesSentGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesSentGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/sent",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesSentGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesSentGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesSentGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceMessagesTimeoutGet gets timeout messages
+
+Get the number of timeout messages
+*/
+func (a *Client) MessagingServiceMessagesTimeoutGet(params *MessagingServiceMessagesTimeoutGetParams) (*MessagingServiceMessagesTimeoutGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceMessagesTimeoutGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceMessagesTimeoutGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/messages/timeout",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceMessagesTimeoutGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceMessagesTimeoutGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceMessagesTimeoutGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+MessagingServiceVersionGet gets version
+
+Get the version number
+*/
+func (a *Client) MessagingServiceVersionGet(params *MessagingServiceVersionGetParams) (*MessagingServiceVersionGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewMessagingServiceVersionGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "MessagingServiceVersionGet",
+		Method:             "GET",
+		PathPattern:        "/messaging_service/version",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &MessagingServiceVersionGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*MessagingServiceVersionGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*MessagingServiceVersionGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
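+
+// Editor's usage sketch (illustrative, not generated code): reading the
+// messaging service version number.
+//
+//	v, err := client.MessagingServiceVersionGet(NewMessagingServiceVersionGetParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("messaging service version:", v.Payload)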
+
+/*
+SnitchDatacenterGet gets datacenter
+
+Provides the Datacenter name depending on the respective snitch used, given the hostname/ip
+*/
+func (a *Client) SnitchDatacenterGet(params *SnitchDatacenterGetParams) (*SnitchDatacenterGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewSnitchDatacenterGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "SnitchDatacenterGet",
+		Method:             "GET",
+		PathPattern:        "/snitch/datacenter",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &SnitchDatacenterGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*SnitchDatacenterGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*SnitchDatacenterGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+SnitchNameGet gets snitch name
+
+Provides the snitch name of the cluster
+*/
+func (a *Client) SnitchNameGet(params *SnitchNameGetParams) (*SnitchNameGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewSnitchNameGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "SnitchNameGet",
+		Method:             "GET",
+		PathPattern:        "/snitch/name",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &SnitchNameGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*SnitchNameGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*SnitchNameGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+SnitchRackGet gets rack
+
+Provides the Rack name depending on the respective snitch used, given the host name/ip
+*/
+func (a *Client) SnitchRackGet(params *SnitchRackGetParams) (*SnitchRackGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewSnitchRackGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "SnitchRackGet",
+		Method:             "GET",
+		PathPattern:        "/snitch/rack",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &SnitchRackGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*SnitchRackGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*SnitchRackGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
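+
+// Editor's usage sketch (illustrative, not generated code): resolving the
+// datacenter reported by the configured snitch. The endpoint can take a host
+// name/IP through its params; that setter lives in the params file, not in
+// this diff, so the default params are used here.
+//
+//	dc, err := client.SnitchDatacenterGet(NewSnitchDatacenterGetParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("datacenter:", dc.Payload)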
+
+/*
+StorageProxyCasContentionTimeoutGet gets cas contention timeout
+
+Get CAS contention timeout in seconds
+*/
+func (a *Client) StorageProxyCasContentionTimeoutGet(params *StorageProxyCasContentionTimeoutGetParams) (*StorageProxyCasContentionTimeoutGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyCasContentionTimeoutGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyCasContentionTimeoutGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/cas_contention_timeout",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyCasContentionTimeoutGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyCasContentionTimeoutGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyCasContentionTimeoutGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyCasContentionTimeoutPost sets cas contention timeout
+
+Set CAS contention timeout
+*/
+func (a *Client) StorageProxyCasContentionTimeoutPost(params *StorageProxyCasContentionTimeoutPostParams) (*StorageProxyCasContentionTimeoutPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyCasContentionTimeoutPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyCasContentionTimeoutPost",
+		Method:             "POST",
+		PathPattern:        "/storage_proxy/cas_contention_timeout",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyCasContentionTimeoutPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyCasContentionTimeoutPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyCasContentionTimeoutPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyCounterWriteRPCTimeoutGet gets counter write rpc timeout
+
+Get counter write rpc timeout in seconds
+*/
+func (a *Client) StorageProxyCounterWriteRPCTimeoutGet(params *StorageProxyCounterWriteRPCTimeoutGetParams) (*StorageProxyCounterWriteRPCTimeoutGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyCounterWriteRPCTimeoutGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyCounterWriteRpcTimeoutGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/counter_write_rpc_timeout",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyCounterWriteRPCTimeoutGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyCounterWriteRPCTimeoutGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyCounterWriteRPCTimeoutGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyCounterWriteRPCTimeoutPost sets counter write rpc timeout
+
+Set counter write rpc timeout
+*/
+func (a *Client) StorageProxyCounterWriteRPCTimeoutPost(params *StorageProxyCounterWriteRPCTimeoutPostParams) (*StorageProxyCounterWriteRPCTimeoutPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyCounterWriteRPCTimeoutPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyCounterWriteRpcTimeoutPost",
+		Method:             "POST",
+		PathPattern:        "/storage_proxy/counter_write_rpc_timeout",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyCounterWriteRPCTimeoutPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyCounterWriteRPCTimeoutPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyCounterWriteRPCTimeoutPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyHintedHandoffEnabledByDcGet gets hinted handoff enabled by dc
+
+Get the hinted handoff enabled by dc
+*/
+func (a *Client) StorageProxyHintedHandoffEnabledByDcGet(params *StorageProxyHintedHandoffEnabledByDcGetParams) (*StorageProxyHintedHandoffEnabledByDcGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyHintedHandoffEnabledByDcGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyHintedHandoffEnabledByDcGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/hinted_handoff_enabled_by_dc",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyHintedHandoffEnabledByDcGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyHintedHandoffEnabledByDcGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyHintedHandoffEnabledByDcGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyHintedHandoffEnabledByDcPost sets hinted handoff enabled by dc list
+
+Set the hinted handoff enabled by dc
+*/
+func (a *Client) StorageProxyHintedHandoffEnabledByDcPost(params *StorageProxyHintedHandoffEnabledByDcPostParams) (*StorageProxyHintedHandoffEnabledByDcPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyHintedHandoffEnabledByDcPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyHintedHandoffEnabledByDcPost",
+		Method:             "POST",
+		PathPattern:        "/storage_proxy/hinted_handoff_enabled_by_dc",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyHintedHandoffEnabledByDcPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyHintedHandoffEnabledByDcPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyHintedHandoffEnabledByDcPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyHintedHandoffEnabledGet gets hinted handoff enabled
+
+Return true if hinted handoff enabled
+*/
+func (a *Client) StorageProxyHintedHandoffEnabledGet(params *StorageProxyHintedHandoffEnabledGetParams) (*StorageProxyHintedHandoffEnabledGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyHintedHandoffEnabledGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyHintedHandoffEnabledGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/hinted_handoff_enabled",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyHintedHandoffEnabledGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyHintedHandoffEnabledGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyHintedHandoffEnabledGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyHintedHandoffEnabledPost sets hinted handoff enabled
+
+Set hinted handoff status
+*/
+func (a *Client) StorageProxyHintedHandoffEnabledPost(params *StorageProxyHintedHandoffEnabledPostParams) (*StorageProxyHintedHandoffEnabledPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyHintedHandoffEnabledPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyHintedHandoffEnabledPost",
+		Method:             "POST",
+		PathPattern:        "/storage_proxy/hinted_handoff_enabled",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyHintedHandoffEnabledPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyHintedHandoffEnabledPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyHintedHandoffEnabledPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyHintsInProgressGet gets hints in progress
+
+Get hints in progress
+*/
+func (a *Client) StorageProxyHintsInProgressGet(params *StorageProxyHintsInProgressGetParams) (*StorageProxyHintsInProgressGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyHintsInProgressGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyHintsInProgressGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/hints_in_progress",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyHintsInProgressGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyHintsInProgressGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyHintsInProgressGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
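+
+// Editor's usage sketch (illustrative, not generated code): checking whether
+// hinted handoff is enabled. Interpreting the payload as a true/false value is
+// an assumption based on the operation's description.
+//
+//	enabled, err := client.StorageProxyHintedHandoffEnabledGet(
+//		NewStorageProxyHintedHandoffEnabledGetParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("hinted handoff enabled:", enabled.Payload)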
+
+/*
+StorageProxyMaxHintWindowGet gets max hint window
+
+Get the max hint window
+*/
+func (a *Client) StorageProxyMaxHintWindowGet(params *StorageProxyMaxHintWindowGetParams) (*StorageProxyMaxHintWindowGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMaxHintWindowGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMaxHintWindowGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/max_hint_window",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMaxHintWindowGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMaxHintWindowGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMaxHintWindowGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyMaxHintWindowPost sets max hint window
+
+Set the max hint window
+*/
+func (a *Client) StorageProxyMaxHintWindowPost(params *StorageProxyMaxHintWindowPostParams) (*StorageProxyMaxHintWindowPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMaxHintWindowPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMaxHintWindowPost",
+		Method:             "POST",
+		PathPattern:        "/storage_proxy/max_hint_window",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMaxHintWindowPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMaxHintWindowPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMaxHintWindowPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyMaxHintsInProgressGet gets max hints in progress
+
+Get max hints in progress
+*/
+func (a *Client) StorageProxyMaxHintsInProgressGet(params *StorageProxyMaxHintsInProgressGetParams) (*StorageProxyMaxHintsInProgressGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMaxHintsInProgressGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMaxHintsInProgressGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/max_hints_in_progress",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMaxHintsInProgressGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMaxHintsInProgressGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMaxHintsInProgressGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyMaxHintsInProgressPost sets max hints in progress
+
+Set max hints in progress
+*/
+func (a *Client) StorageProxyMaxHintsInProgressPost(params *StorageProxyMaxHintsInProgressPostParams) (*StorageProxyMaxHintsInProgressPostOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMaxHintsInProgressPostParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMaxHintsInProgressPost",
+		Method:             "POST",
+		PathPattern:        "/storage_proxy/max_hints_in_progress",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMaxHintsInProgressPostReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMaxHintsInProgressPostOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMaxHintsInProgressPostDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyMetricsCasReadConditionNotMetGet gets cas read metrics condition not met
+
+Get cas read metrics
+*/
+func (a *Client) StorageProxyMetricsCasReadConditionNotMetGet(params *StorageProxyMetricsCasReadConditionNotMetGetParams) (*StorageProxyMetricsCasReadConditionNotMetGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMetricsCasReadConditionNotMetGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMetricsCasReadConditionNotMetGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/metrics/cas_read/condition_not_met",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMetricsCasReadConditionNotMetGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMetricsCasReadConditionNotMetGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMetricsCasReadConditionNotMetGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyMetricsCasReadContentionGet gets cas read metrics contention
+
+Get cas read metrics
+*/
+func (a *Client) StorageProxyMetricsCasReadContentionGet(params *StorageProxyMetricsCasReadContentionGetParams) (*StorageProxyMetricsCasReadContentionGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMetricsCasReadContentionGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMetricsCasReadContentionGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/metrics/cas_read/contention",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMetricsCasReadContentionGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMetricsCasReadContentionGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMetricsCasReadContentionGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyMetricsCasReadTimeoutsGet gets cas read timeouts
+
+Get CAS read timeout
+*/
+func (a *Client) StorageProxyMetricsCasReadTimeoutsGet(params *StorageProxyMetricsCasReadTimeoutsGetParams) (*StorageProxyMetricsCasReadTimeoutsGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMetricsCasReadTimeoutsGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMetricsCasReadTimeoutsGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/metrics/cas_read/timeouts",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMetricsCasReadTimeoutsGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMetricsCasReadTimeoutsGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMetricsCasReadTimeoutsGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
+/*
+StorageProxyMetricsCasReadUnavailablesGet gets cas read unavailables
+
+Get CAS read unavailables
+*/
+func (a *Client) StorageProxyMetricsCasReadUnavailablesGet(params *StorageProxyMetricsCasReadUnavailablesGetParams) (*StorageProxyMetricsCasReadUnavailablesGetOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewStorageProxyMetricsCasReadUnavailablesGetParams()
+	}
+
+	result, err := a.transport.Submit(&runtime.ClientOperation{
+		ID:                 "StorageProxyMetricsCasReadUnavailablesGet",
+		Method:             "GET",
+		PathPattern:        "/storage_proxy/metrics/cas_read/unavailables",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http"},
+		Params:             params,
+		Reader:             &StorageProxyMetricsCasReadUnavailablesGetReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	})
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*StorageProxyMetricsCasReadUnavailablesGetOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*StorageProxyMetricsCasReadUnavailablesGetDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
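+
+// Editor's usage sketch (illustrative, not generated code): sampling the CAS
+// read timeout counter exposed above.
+//
+//	timeouts, err := client.StorageProxyMetricsCasReadTimeoutsGet(
+//		NewStorageProxyMetricsCasReadTimeoutsGetParams())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("CAS read timeouts:", timeouts.Payload)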
unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsCasReadUnfinishedCommitGet gets cas read metrics unfinished commit + +Get cas read metrics +*/ +func (a *Client) StorageProxyMetricsCasReadUnfinishedCommitGet(params *StorageProxyMetricsCasReadUnfinishedCommitGetParams) (*StorageProxyMetricsCasReadUnfinishedCommitGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsCasReadUnfinishedCommitGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsCasReadUnfinishedCommitGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/cas_read/unfinished_commit", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsCasReadUnfinishedCommitGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsCasReadUnfinishedCommitGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsCasReadUnfinishedCommitGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsCasWriteConditionNotMetGet gets cas write metrics condition not met + +Get cas write metrics +*/ +func (a *Client) StorageProxyMetricsCasWriteConditionNotMetGet(params *StorageProxyMetricsCasWriteConditionNotMetGetParams) (*StorageProxyMetricsCasWriteConditionNotMetGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsCasWriteConditionNotMetGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsCasWriteConditionNotMetGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/cas_write/condition_not_met", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsCasWriteConditionNotMetGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsCasWriteConditionNotMetGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsCasWriteConditionNotMetGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsCasWriteContentionGet gets cas write metrics contention + +Get cas write metrics +*/ +func (a *Client) StorageProxyMetricsCasWriteContentionGet(params *StorageProxyMetricsCasWriteContentionGetParams) (*StorageProxyMetricsCasWriteContentionGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsCasWriteContentionGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsCasWriteContentionGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/cas_write/contention", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: 
params, + Reader: &StorageProxyMetricsCasWriteContentionGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsCasWriteContentionGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsCasWriteContentionGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsCasWriteTimeoutsGet gets cas write timeouts + +Get CAS write timeout +*/ +func (a *Client) StorageProxyMetricsCasWriteTimeoutsGet(params *StorageProxyMetricsCasWriteTimeoutsGetParams) (*StorageProxyMetricsCasWriteTimeoutsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsCasWriteTimeoutsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsCasWriteTimeoutsGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/cas_write/timeouts", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsCasWriteTimeoutsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsCasWriteTimeoutsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsCasWriteTimeoutsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsCasWriteUnavailablesGet gets cas write unavailables + +Get CAS write unavailables +*/ +func (a *Client) StorageProxyMetricsCasWriteUnavailablesGet(params *StorageProxyMetricsCasWriteUnavailablesGetParams) (*StorageProxyMetricsCasWriteUnavailablesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsCasWriteUnavailablesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsCasWriteUnavailablesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/cas_write/unavailables", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsCasWriteUnavailablesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsCasWriteUnavailablesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsCasWriteUnavailablesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsCasWriteUnfinishedCommitGet gets cas write metrics unfinished commit + +Get cas write metrics +*/ +func (a *Client) StorageProxyMetricsCasWriteUnfinishedCommitGet(params *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) (*StorageProxyMetricsCasWriteUnfinishedCommitGetOK, error) { + // TODO: Validate the params before 
sending + if params == nil { + params = NewStorageProxyMetricsCasWriteUnfinishedCommitGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsCasWriteUnfinishedCommitGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/cas_write/unfinished_commit", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsCasWriteUnfinishedCommitGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsCasWriteUnfinishedCommitGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsCasWriteUnfinishedCommitGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeEstimatedHistogramGet gets range estimated histogram + +Get range estimated latency +*/ +func (a *Client) StorageProxyMetricsRangeEstimatedHistogramGet(params *StorageProxyMetricsRangeEstimatedHistogramGetParams) (*StorageProxyMetricsRangeEstimatedHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeEstimatedHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeEstimatedHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range/estimated_histogram/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeEstimatedHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeEstimatedHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeEstimatedHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeGet gets range latency + +Get range latency +*/ +func (a *Client) StorageProxyMetricsRangeGet(params *StorageProxyMetricsRangeGetParams) (*StorageProxyMetricsRangeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, 
unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeHistogramGet gets range metrics latency histogram deprecated + +Get range metrics +*/ +func (a *Client) StorageProxyMetricsRangeHistogramGet(params *StorageProxyMetricsRangeHistogramGetParams) (*StorageProxyMetricsRangeHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range/histogram", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeMovingAverageHistogramGet gets range metrics latency histogram + +Get range metrics rate and histogram +*/ +func (a *Client) StorageProxyMetricsRangeMovingAverageHistogramGet(params *StorageProxyMetricsRangeMovingAverageHistogramGetParams) (*StorageProxyMetricsRangeMovingAverageHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeMovingAverageHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeMovingAverageHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range/moving_average_histogram", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeMovingAverageHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeMovingAverageHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeMovingAverageHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeTimeoutsGet gets range metrics timeouts + +Get range metrics +*/ +func (a *Client) StorageProxyMetricsRangeTimeoutsGet(params *StorageProxyMetricsRangeTimeoutsGetParams) (*StorageProxyMetricsRangeTimeoutsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeTimeoutsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeTimeoutsGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range/timeouts", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeTimeoutsGetReader{formats: a.formats}, + Context: 
params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeTimeoutsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeTimeoutsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeTimeoutsRatesGet gets range metrics timeouts rates + +Get range metrics rates +*/ +func (a *Client) StorageProxyMetricsRangeTimeoutsRatesGet(params *StorageProxyMetricsRangeTimeoutsRatesGetParams) (*StorageProxyMetricsRangeTimeoutsRatesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeTimeoutsRatesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeTimeoutsRatesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range/timeouts_rates", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeTimeoutsRatesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeTimeoutsRatesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeTimeoutsRatesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeUnavailablesGet gets range metrics unavailables + +Get range metrics +*/ +func (a *Client) StorageProxyMetricsRangeUnavailablesGet(params *StorageProxyMetricsRangeUnavailablesGetParams) (*StorageProxyMetricsRangeUnavailablesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeUnavailablesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeUnavailablesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range/unavailables", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeUnavailablesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeUnavailablesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeUnavailablesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsRangeUnavailablesRatesGet gets range metrics unavailables rates + +Get range metrics rates +*/ +func (a *Client) StorageProxyMetricsRangeUnavailablesRatesGet(params *StorageProxyMetricsRangeUnavailablesRatesGetParams) (*StorageProxyMetricsRangeUnavailablesRatesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsRangeUnavailablesRatesGetParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsRangeUnavailablesRatesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/range/unavailables_rates", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsRangeUnavailablesRatesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsRangeUnavailablesRatesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsRangeUnavailablesRatesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadEstimatedHistogramGet gets read estimated histogram + +Get read estimated latency +*/ +func (a *Client) StorageProxyMetricsReadEstimatedHistogramGet(params *StorageProxyMetricsReadEstimatedHistogramGetParams) (*StorageProxyMetricsReadEstimatedHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadEstimatedHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadEstimatedHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read/estimated_histogram/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadEstimatedHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadEstimatedHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsReadEstimatedHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadGet gets read latency + +Get read latency +*/ +func (a *Client) StorageProxyMetricsReadGet(params *StorageProxyMetricsReadGetParams) (*StorageProxyMetricsReadGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsReadGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadHistogramGet gets read metrics latency histogram deprecated + +Get read metrics +*/ +func (a 
*Client) StorageProxyMetricsReadHistogramGet(params *StorageProxyMetricsReadHistogramGetParams) (*StorageProxyMetricsReadHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read/histogram", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsReadHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadMovingAverageHistogramGet gets read metrics latency histogram + +Get read metrics +*/ +func (a *Client) StorageProxyMetricsReadMovingAverageHistogramGet(params *StorageProxyMetricsReadMovingAverageHistogramGetParams) (*StorageProxyMetricsReadMovingAverageHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadMovingAverageHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadMovingAverageHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read/moving_average_histogram", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadMovingAverageHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadMovingAverageHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsReadMovingAverageHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadTimeoutsGet gets read metrics timeouts + +Get read metrics +*/ +func (a *Client) StorageProxyMetricsReadTimeoutsGet(params *StorageProxyMetricsReadTimeoutsGetParams) (*StorageProxyMetricsReadTimeoutsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadTimeoutsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadTimeoutsGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read/timeouts", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadTimeoutsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadTimeoutsGetOK) + if ok { + return success, nil + } + // unexpected success 
response + unexpectedSuccess := result.(*StorageProxyMetricsReadTimeoutsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadTimeoutsRatesGet gets read metrics timeouts rates + +Get read metrics rates +*/ +func (a *Client) StorageProxyMetricsReadTimeoutsRatesGet(params *StorageProxyMetricsReadTimeoutsRatesGetParams) (*StorageProxyMetricsReadTimeoutsRatesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadTimeoutsRatesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadTimeoutsRatesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read/timeouts_rates", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadTimeoutsRatesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadTimeoutsRatesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsReadTimeoutsRatesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadUnavailablesGet gets read metrics unavailables + +Get read metrics +*/ +func (a *Client) StorageProxyMetricsReadUnavailablesGet(params *StorageProxyMetricsReadUnavailablesGetParams) (*StorageProxyMetricsReadUnavailablesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadUnavailablesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadUnavailablesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read/unavailables", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadUnavailablesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadUnavailablesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsReadUnavailablesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsReadUnavailablesRatesGet gets read metrics unavailables rates + +Get read metrics rates +*/ +func (a *Client) StorageProxyMetricsReadUnavailablesRatesGet(params *StorageProxyMetricsReadUnavailablesRatesGetParams) (*StorageProxyMetricsReadUnavailablesRatesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsReadUnavailablesRatesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsReadUnavailablesRatesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/read/unavailables_rates", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: 
[]string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsReadUnavailablesRatesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsReadUnavailablesRatesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsReadUnavailablesRatesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteEstimatedHistogramGet gets write estimated histogram + +Get write estimated latency +*/ +func (a *Client) StorageProxyMetricsWriteEstimatedHistogramGet(params *StorageProxyMetricsWriteEstimatedHistogramGetParams) (*StorageProxyMetricsWriteEstimatedHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsWriteEstimatedHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteEstimatedHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write/estimated_histogram/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteEstimatedHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteEstimatedHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteEstimatedHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteGet gets write latency + +Get write latency +*/ +func (a *Client) StorageProxyMetricsWriteGet(params *StorageProxyMetricsWriteGetParams) (*StorageProxyMetricsWriteGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsWriteGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteHistogramGet gets write metrics latency histogram depricated + +Get write metrics +*/ +func (a *Client) StorageProxyMetricsWriteHistogramGet(params *StorageProxyMetricsWriteHistogramGetParams) (*StorageProxyMetricsWriteHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = 
NewStorageProxyMetricsWriteHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write/histogram", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteMovingAverageHistogramGet gets write metrics latency histogram + +Get write metrics +*/ +func (a *Client) StorageProxyMetricsWriteMovingAverageHistogramGet(params *StorageProxyMetricsWriteMovingAverageHistogramGetParams) (*StorageProxyMetricsWriteMovingAverageHistogramGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsWriteMovingAverageHistogramGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteMovingAverageHistogramGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write/moving_average_histogram", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteMovingAverageHistogramGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteMovingAverageHistogramGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteMovingAverageHistogramGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteTimeoutsGet gets write metrics timeouts + +Get write metrics +*/ +func (a *Client) StorageProxyMetricsWriteTimeoutsGet(params *StorageProxyMetricsWriteTimeoutsGetParams) (*StorageProxyMetricsWriteTimeoutsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsWriteTimeoutsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteTimeoutsGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write/timeouts", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteTimeoutsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteTimeoutsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteTimeoutsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in 
error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteTimeoutsRatesGet gets write metrics timeouts rates + +Get write metrics rates +*/ +func (a *Client) StorageProxyMetricsWriteTimeoutsRatesGet(params *StorageProxyMetricsWriteTimeoutsRatesGetParams) (*StorageProxyMetricsWriteTimeoutsRatesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsWriteTimeoutsRatesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteTimeoutsRatesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write/timeouts_rates", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteTimeoutsRatesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteTimeoutsRatesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteTimeoutsRatesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteUnavailablesGet gets write metrics unavailables + +Get write metrics +*/ +func (a *Client) StorageProxyMetricsWriteUnavailablesGet(params *StorageProxyMetricsWriteUnavailablesGetParams) (*StorageProxyMetricsWriteUnavailablesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsWriteUnavailablesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteUnavailablesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write/unavailables", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteUnavailablesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteUnavailablesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteUnavailablesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyMetricsWriteUnavailablesRatesGet gets write metrics unavailables rates + +Get write metrics rates +*/ +func (a *Client) StorageProxyMetricsWriteUnavailablesRatesGet(params *StorageProxyMetricsWriteUnavailablesRatesGetParams) (*StorageProxyMetricsWriteUnavailablesRatesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyMetricsWriteUnavailablesRatesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyMetricsWriteUnavailablesRatesGet", + Method: "GET", + PathPattern: "/storage_proxy/metrics/write/unavailables_rates", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyMetricsWriteUnavailablesRatesGetReader{formats: 
a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyMetricsWriteUnavailablesRatesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyMetricsWriteUnavailablesRatesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyRangeRPCTimeoutGet gets range rpc timeout + +Get range rpc timeout in seconds +*/ +func (a *Client) StorageProxyRangeRPCTimeoutGet(params *StorageProxyRangeRPCTimeoutGetParams) (*StorageProxyRangeRPCTimeoutGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyRangeRPCTimeoutGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyRangeRpcTimeoutGet", + Method: "GET", + PathPattern: "/storage_proxy/range_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyRangeRPCTimeoutGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyRangeRPCTimeoutGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyRangeRPCTimeoutGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyRangeRPCTimeoutPost sets range rpc timeout + +Set range rpc timeout +*/ +func (a *Client) StorageProxyRangeRPCTimeoutPost(params *StorageProxyRangeRPCTimeoutPostParams) (*StorageProxyRangeRPCTimeoutPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyRangeRPCTimeoutPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyRangeRpcTimeoutPost", + Method: "POST", + PathPattern: "/storage_proxy/range_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyRangeRPCTimeoutPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyRangeRPCTimeoutPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyRangeRPCTimeoutPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyReadRepairAttemptedGet gets read repair attempted + +Get read repair attempted +*/ +func (a *Client) StorageProxyReadRepairAttemptedGet(params *StorageProxyReadRepairAttemptedGetParams) (*StorageProxyReadRepairAttemptedGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyReadRepairAttemptedGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyReadRepairAttemptedGet", + Method: "GET", + PathPattern: "/storage_proxy/read_repair_attempted", + ProducesMediaTypes: []string{"application/json"}, + 
ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyReadRepairAttemptedGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyReadRepairAttemptedGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyReadRepairAttemptedGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyReadRepairRepairedBackgroundGet gets read repair repaired background + +Get read repair repaired background +*/ +func (a *Client) StorageProxyReadRepairRepairedBackgroundGet(params *StorageProxyReadRepairRepairedBackgroundGetParams) (*StorageProxyReadRepairRepairedBackgroundGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyReadRepairRepairedBackgroundGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyReadRepairRepairedBackgroundGet", + Method: "GET", + PathPattern: "/storage_proxy/read_repair_repaired_background", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyReadRepairRepairedBackgroundGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyReadRepairRepairedBackgroundGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyReadRepairRepairedBackgroundGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyReadRepairRepairedBlockingGet gets read repair repaired blocking + +Get read repair repaired blocking +*/ +func (a *Client) StorageProxyReadRepairRepairedBlockingGet(params *StorageProxyReadRepairRepairedBlockingGetParams) (*StorageProxyReadRepairRepairedBlockingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyReadRepairRepairedBlockingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyReadRepairRepairedBlockingGet", + Method: "GET", + PathPattern: "/storage_proxy/read_repair_repaired_blocking", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyReadRepairRepairedBlockingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyReadRepairRepairedBlockingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyReadRepairRepairedBlockingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyReadRPCTimeoutGet gets read rpc timeout + +Get the read RPC timeout in seconds +*/ +func (a *Client) StorageProxyReadRPCTimeoutGet(params *StorageProxyReadRPCTimeoutGetParams) 
(*StorageProxyReadRPCTimeoutGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyReadRPCTimeoutGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyReadRpcTimeoutGet", + Method: "GET", + PathPattern: "/storage_proxy/read_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyReadRPCTimeoutGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyReadRPCTimeoutGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyReadRPCTimeoutGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyReadRPCTimeoutPost sets read rpc timeout + +Set the read RPC timeout +*/ +func (a *Client) StorageProxyReadRPCTimeoutPost(params *StorageProxyReadRPCTimeoutPostParams) (*StorageProxyReadRPCTimeoutPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyReadRPCTimeoutPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyReadRpcTimeoutPost", + Method: "POST", + PathPattern: "/storage_proxy/read_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyReadRPCTimeoutPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyReadRPCTimeoutPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyReadRPCTimeoutPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyReloadTriggerClassesPost reloads trigger classes + +Reload trigger classes +*/ +func (a *Client) StorageProxyReloadTriggerClassesPost(params *StorageProxyReloadTriggerClassesPostParams) (*StorageProxyReloadTriggerClassesPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyReloadTriggerClassesPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyReloadTriggerClassesPost", + Method: "POST", + PathPattern: "/storage_proxy/reload_trigger_classes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyReloadTriggerClassesPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyReloadTriggerClassesPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyReloadTriggerClassesPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyRPCTimeoutGet gets rpc timeout + +Get 
the RPC timeout in seconds +*/ +func (a *Client) StorageProxyRPCTimeoutGet(params *StorageProxyRPCTimeoutGetParams) (*StorageProxyRPCTimeoutGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyRPCTimeoutGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyRpcTimeoutGet", + Method: "GET", + PathPattern: "/storage_proxy/rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyRPCTimeoutGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyRPCTimeoutGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyRPCTimeoutGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyRPCTimeoutPost sets rpc timeout + +Set the RPC timeout +*/ +func (a *Client) StorageProxyRPCTimeoutPost(params *StorageProxyRPCTimeoutPostParams) (*StorageProxyRPCTimeoutPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyRPCTimeoutPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyRpcTimeoutPost", + Method: "POST", + PathPattern: "/storage_proxy/rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyRPCTimeoutPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyRPCTimeoutPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyRPCTimeoutPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxySchemaVersionsGet gets schema versions + +Get a map of the schema versions +*/ +func (a *Client) StorageProxySchemaVersionsGet(params *StorageProxySchemaVersionsGetParams) (*StorageProxySchemaVersionsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxySchemaVersionsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxySchemaVersionsGet", + Method: "GET", + PathPattern: "/storage_proxy/schema_versions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxySchemaVersionsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxySchemaVersionsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxySchemaVersionsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyTotalHintsGet gets total hints + +Get total hints +*/ +func (a 
*Client) StorageProxyTotalHintsGet(params *StorageProxyTotalHintsGetParams) (*StorageProxyTotalHintsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyTotalHintsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyTotalHintsGet", + Method: "GET", + PathPattern: "/storage_proxy/total_hints", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyTotalHintsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyTotalHintsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyTotalHintsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyTruncateRPCTimeoutGet gets truncate rpc timeout + +Get truncate rpc timeout in seconds +*/ +func (a *Client) StorageProxyTruncateRPCTimeoutGet(params *StorageProxyTruncateRPCTimeoutGetParams) (*StorageProxyTruncateRPCTimeoutGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyTruncateRPCTimeoutGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyTruncateRpcTimeoutGet", + Method: "GET", + PathPattern: "/storage_proxy/truncate_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyTruncateRPCTimeoutGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyTruncateRPCTimeoutGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyTruncateRPCTimeoutGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyTruncateRPCTimeoutPost sets truncate rpc timeout + +Set truncate rpc timeout +*/ +func (a *Client) StorageProxyTruncateRPCTimeoutPost(params *StorageProxyTruncateRPCTimeoutPostParams) (*StorageProxyTruncateRPCTimeoutPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyTruncateRPCTimeoutPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyTruncateRpcTimeoutPost", + Method: "POST", + PathPattern: "/storage_proxy/truncate_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyTruncateRPCTimeoutPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyTruncateRPCTimeoutPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyTruncateRPCTimeoutPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, 
unexpectedSuccess.Code()) +} + +/* +StorageProxyWriteRPCTimeoutGet gets write rpc timeout + +Get the write RPC timeout in seconds +*/ +func (a *Client) StorageProxyWriteRPCTimeoutGet(params *StorageProxyWriteRPCTimeoutGetParams) (*StorageProxyWriteRPCTimeoutGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyWriteRPCTimeoutGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyWriteRpcTimeoutGet", + Method: "GET", + PathPattern: "/storage_proxy/write_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyWriteRPCTimeoutGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyWriteRPCTimeoutGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyWriteRPCTimeoutGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageProxyWriteRPCTimeoutPost sets write rpc timeout + +Set the write RPC timeout +*/ +func (a *Client) StorageProxyWriteRPCTimeoutPost(params *StorageProxyWriteRPCTimeoutPostParams) (*StorageProxyWriteRPCTimeoutPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageProxyWriteRPCTimeoutPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageProxyWriteRpcTimeoutPost", + Method: "POST", + PathPattern: "/storage_proxy/write_rpc_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageProxyWriteRPCTimeoutPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageProxyWriteRPCTimeoutPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageProxyWriteRPCTimeoutPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceActiveRepairGet gets active repair async + +Return an array with the ids of the currently active repairs +*/ +func (a *Client) StorageServiceActiveRepairGet(params *StorageServiceActiveRepairGetParams) (*StorageServiceActiveRepairGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceActiveRepairGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceActiveRepairGet", + Method: "GET", + PathPattern: "/storage_service/active_repair/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceActiveRepairGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceActiveRepairGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceActiveRepairGetDefault) + 
return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceAutoCompactionByKeyspaceDelete disables auto compaction + +Disable auto compaction +*/ +func (a *Client) StorageServiceAutoCompactionByKeyspaceDelete(params *StorageServiceAutoCompactionByKeyspaceDeleteParams) (*StorageServiceAutoCompactionByKeyspaceDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceAutoCompactionByKeyspaceDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceAutoCompactionByKeyspaceDelete", + Method: "DELETE", + PathPattern: "/storage_service/auto_compaction/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceAutoCompactionByKeyspaceDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceAutoCompactionByKeyspaceDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceAutoCompactionByKeyspaceDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceAutoCompactionByKeyspacePost enables auto compaction + +Enable auto compaction +*/ +func (a *Client) StorageServiceAutoCompactionByKeyspacePost(params *StorageServiceAutoCompactionByKeyspacePostParams) (*StorageServiceAutoCompactionByKeyspacePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceAutoCompactionByKeyspacePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceAutoCompactionByKeyspacePost", + Method: "POST", + PathPattern: "/storage_service/auto_compaction/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceAutoCompactionByKeyspacePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceAutoCompactionByKeyspacePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceAutoCompactionByKeyspacePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceBatchSizeFailureThresholdGet gets batch size failure threshold + +Returns the threshold for rejecting queries due to a large batch size +*/ +func (a *Client) StorageServiceBatchSizeFailureThresholdGet(params *StorageServiceBatchSizeFailureThresholdGetParams) (*StorageServiceBatchSizeFailureThresholdGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceBatchSizeFailureThresholdGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceBatchSizeFailureThresholdGet", + Method: "GET", + PathPattern: "/storage_service/batch_size_failure_threshold", + ProducesMediaTypes: 
[]string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceBatchSizeFailureThresholdGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceBatchSizeFailureThresholdGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceBatchSizeFailureThresholdGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceBatchSizeFailureThresholdPost sets batch size failure threshold + +Sets the threshold for rejecting queries due to a large batch size +*/ +func (a *Client) StorageServiceBatchSizeFailureThresholdPost(params *StorageServiceBatchSizeFailureThresholdPostParams) (*StorageServiceBatchSizeFailureThresholdPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceBatchSizeFailureThresholdPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceBatchSizeFailureThresholdPost", + Method: "POST", + PathPattern: "/storage_service/batch_size_failure_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceBatchSizeFailureThresholdPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceBatchSizeFailureThresholdPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceBatchSizeFailureThresholdPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceBulkLoadAsyncByPathGet bulks load async + +Starts a bulk load asynchronously and returns the String representation of the planID for the new streaming session. 
+*/ +func (a *Client) StorageServiceBulkLoadAsyncByPathGet(params *StorageServiceBulkLoadAsyncByPathGetParams) (*StorageServiceBulkLoadAsyncByPathGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceBulkLoadAsyncByPathGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceBulkLoadAsyncByPathGet", + Method: "GET", + PathPattern: "/storage_service/bulk_load_async/{path}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceBulkLoadAsyncByPathGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceBulkLoadAsyncByPathGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceBulkLoadAsyncByPathGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceBulkLoadByPathPost bulks load + +Starts a bulk load and blocks until it completes +*/ +func (a *Client) StorageServiceBulkLoadByPathPost(params *StorageServiceBulkLoadByPathPostParams) (*StorageServiceBulkLoadByPathPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceBulkLoadByPathPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceBulkLoadByPathPost", + Method: "POST", + PathPattern: "/storage_service/bulk_load/{path}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceBulkLoadByPathPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceBulkLoadByPathPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceBulkLoadByPathPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceClusterNameGet gets cluster name + +Returns the name of the cluster +*/ +func (a *Client) StorageServiceClusterNameGet(params *StorageServiceClusterNameGetParams) (*StorageServiceClusterNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceClusterNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceClusterNameGet", + Method: "GET", + PathPattern: "/storage_service/cluster_name", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceClusterNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceClusterNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceClusterNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response 
in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceCommitlogGet gets commitlog + +Returns the location of the commit log files +*/ +func (a *Client) StorageServiceCommitlogGet(params *StorageServiceCommitlogGetParams) (*StorageServiceCommitlogGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceCommitlogGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceCommitlogGet", + Method: "GET", + PathPattern: "/storage_service/commitlog", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceCommitlogGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceCommitlogGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceCommitlogGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceCompactionThroughputGet gets compaction throughput mb per sec + +get compaction throughput mb per sec +*/ +func (a *Client) StorageServiceCompactionThroughputGet(params *StorageServiceCompactionThroughputGetParams) (*StorageServiceCompactionThroughputGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceCompactionThroughputGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceCompactionThroughputGet", + Method: "GET", + PathPattern: "/storage_service/compaction_throughput", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceCompactionThroughputGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceCompactionThroughputGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceCompactionThroughputGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceCompactionThroughputPost sets compaction throughput mb per sec + +Set compaction throughput mb per sec +*/ +func (a *Client) StorageServiceCompactionThroughputPost(params *StorageServiceCompactionThroughputPostParams) (*StorageServiceCompactionThroughputPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceCompactionThroughputPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceCompactionThroughputPost", + Method: "POST", + PathPattern: "/storage_service/compaction_throughput", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceCompactionThroughputPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceCompactionThroughputPostOK) 
+ if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceCompactionThroughputPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceDataFileLocationsGet gets all data file locations + +Get the list of all data file locations from conf +*/ +func (a *Client) StorageServiceDataFileLocationsGet(params *StorageServiceDataFileLocationsGetParams) (*StorageServiceDataFileLocationsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceDataFileLocationsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceDataFileLocationsGet", + Method: "GET", + PathPattern: "/storage_service/data_file/locations", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceDataFileLocationsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceDataFileLocationsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceDataFileLocationsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceDecommissionPost decommissions + +transfer this node's data to other machines and remove it from service. +*/ +func (a *Client) StorageServiceDecommissionPost(params *StorageServiceDecommissionPostParams) (*StorageServiceDecommissionPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceDecommissionPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceDecommissionPost", + Method: "POST", + PathPattern: "/storage_service/decommission", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceDecommissionPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceDecommissionPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceDecommissionPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceDeliverHintsPost delivers hints +*/ +func (a *Client) StorageServiceDeliverHintsPost(params *StorageServiceDeliverHintsPostParams) (*StorageServiceDeliverHintsPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceDeliverHintsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceDeliverHintsPost", + Method: "POST", + PathPattern: "/storage_service/deliver_hints", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceDeliverHintsPostReader{formats: a.formats}, + 
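+ // Context and Client are threaded through from the per-call params, so a
+ // caller can attach cancellation and a custom *http.Client per request.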
Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceDeliverHintsPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceDeliverHintsPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceDescribeRingByKeyspaceGet describes ring + +The TokenRange for a given keyspace +*/ +func (a *Client) StorageServiceDescribeRingByKeyspaceGet(params *StorageServiceDescribeRingByKeyspaceGetParams) (*StorageServiceDescribeRingByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceDescribeRingByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceDescribeRingByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/describe_ring/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceDescribeRingByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceDescribeRingByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceDescribeRingByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceDrainGet gets drain progress + +Get the progress of a drain operation +*/ +func (a *Client) StorageServiceDrainGet(params *StorageServiceDrainGetParams) (*StorageServiceDrainGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceDrainGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceDrainGet", + Method: "GET", + PathPattern: "/storage_service/drain", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceDrainGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceDrainGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceDrainGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceDrainPost drains + +makes node unavailable for writes, flushes memtables and replays commitlog +*/ +func (a *Client) StorageServiceDrainPost(params *StorageServiceDrainPostParams) (*StorageServiceDrainPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceDrainPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceDrainPost", + Method: "POST", + PathPattern: "/storage_service/drain", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + 
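+ // Schemes comes straight from the swagger spec, which declares the
+ // Scylla REST API as plain HTTP.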
Params: params, + Reader: &StorageServiceDrainPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceDrainPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceDrainPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceForceRemoveCompletionPost forces remove completion + +Force a remove operation to finish. +*/ +func (a *Client) StorageServiceForceRemoveCompletionPost(params *StorageServiceForceRemoveCompletionPostParams) (*StorageServiceForceRemoveCompletionPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceForceRemoveCompletionPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceForceRemoveCompletionPost", + Method: "POST", + PathPattern: "/storage_service/force_remove_completion", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceForceRemoveCompletionPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceForceRemoveCompletionPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceForceRemoveCompletionPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceForceTerminatePost forces terminate all repair sessions + +Force terminate all repair sessions +*/ +func (a *Client) StorageServiceForceTerminatePost(params *StorageServiceForceTerminatePostParams) (*StorageServiceForceTerminatePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceForceTerminatePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceForceTerminatePost", + Method: "POST", + PathPattern: "/storage_service/force_terminate", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceForceTerminatePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceForceTerminatePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceForceTerminatePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceForceTerminateRepairPost forces terminate all repair sessions new + +Force terminate all repair sessions +*/ +func (a *Client) StorageServiceForceTerminateRepairPost(params *StorageServiceForceTerminateRepairPostParams) (*StorageServiceForceTerminateRepairPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceForceTerminateRepairPostParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceForceTerminateRepairPost", + Method: "POST", + PathPattern: "/storage_service/force_terminate_repair", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceForceTerminateRepairPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceForceTerminateRepairPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceForceTerminateRepairPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceGenerationNumberGet gets current generation number + +Return the generation value for this node. +*/ +func (a *Client) StorageServiceGenerationNumberGet(params *StorageServiceGenerationNumberGetParams) (*StorageServiceGenerationNumberGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceGenerationNumberGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceGenerationNumberGet", + Method: "GET", + PathPattern: "/storage_service/generation_number", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceGenerationNumberGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceGenerationNumberGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceGenerationNumberGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceGossipingDelete stops gossiping + +allows a user to forcibly 'kill' a sick node +*/ +func (a *Client) StorageServiceGossipingDelete(params *StorageServiceGossipingDeleteParams) (*StorageServiceGossipingDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceGossipingDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceGossipingDelete", + Method: "DELETE", + PathPattern: "/storage_service/gossiping", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceGossipingDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceGossipingDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceGossipingDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceGossipingGet is gossip running + +allows a user to see whether gossip is running or not +*/ +func (a *Client) StorageServiceGossipingGet(params *StorageServiceGossipingGetParams) 
(*StorageServiceGossipingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceGossipingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceGossipingGet", + Method: "GET", + PathPattern: "/storage_service/gossiping", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceGossipingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceGossipingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceGossipingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceGossipingPost starts gossiping + +allows a user to recover a forcibly 'killed' node +*/ +func (a *Client) StorageServiceGossipingPost(params *StorageServiceGossipingPostParams) (*StorageServiceGossipingPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceGossipingPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceGossipingPost", + Method: "POST", + PathPattern: "/storage_service/gossiping", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceGossipingPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceGossipingPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceGossipingPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceHintedHandoffPost sets hinted handoff throttle in kb + +Sets the hinted handoff throttle in kb per second, per delivery thread +*/ +func (a *Client) StorageServiceHintedHandoffPost(params *StorageServiceHintedHandoffPostParams) (*StorageServiceHintedHandoffPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceHintedHandoffPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceHintedHandoffPost", + Method: "POST", + PathPattern: "/storage_service/hinted_handoff", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceHintedHandoffPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceHintedHandoffPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceHintedHandoffPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceHostIDGet gets host id map + +Retrieve the mapping of endpoint 
to host ID +*/ +func (a *Client) StorageServiceHostIDGet(params *StorageServiceHostIDGetParams) (*StorageServiceHostIDGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceHostIDGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceHostIdGet", + Method: "GET", + PathPattern: "/storage_service/host_id", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceHostIDGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceHostIDGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceHostIDGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceHostidLocalGet locals hostid + +Returns the local host id +*/ +func (a *Client) StorageServiceHostidLocalGet(params *StorageServiceHostidLocalGetParams) (*StorageServiceHostidLocalGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceHostidLocalGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceHostidLocalGet", + Method: "GET", + PathPattern: "/storage_service/hostid/local", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceHostidLocalGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceHostidLocalGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceHostidLocalGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceIncrementalBackupsGet is incremental backups enabled + +Check if incremental backup is enabled +*/ +func (a *Client) StorageServiceIncrementalBackupsGet(params *StorageServiceIncrementalBackupsGetParams) (*StorageServiceIncrementalBackupsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceIncrementalBackupsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceIncrementalBackupsGet", + Method: "GET", + PathPattern: "/storage_service/incremental_backups", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceIncrementalBackupsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceIncrementalBackupsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceIncrementalBackupsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* 
+StorageServiceIncrementalBackupsPost sets incremental backups enabled +*/ +func (a *Client) StorageServiceIncrementalBackupsPost(params *StorageServiceIncrementalBackupsPostParams) (*StorageServiceIncrementalBackupsPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceIncrementalBackupsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceIncrementalBackupsPost", + Method: "POST", + PathPattern: "/storage_service/incremental_backups", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceIncrementalBackupsPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceIncrementalBackupsPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceIncrementalBackupsPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceIsInitializedGet is initialized + +Determine if gossip is enabled +*/ +func (a *Client) StorageServiceIsInitializedGet(params *StorageServiceIsInitializedGetParams) (*StorageServiceIsInitializedGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceIsInitializedGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceIsInitializedGet", + Method: "GET", + PathPattern: "/storage_service/is_initialized", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceIsInitializedGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceIsInitializedGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceIsInitializedGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceIsStartingGet is starting + +Returns whether the storage service is starting or not +*/ +func (a *Client) StorageServiceIsStartingGet(params *StorageServiceIsStartingGetParams) (*StorageServiceIsStartingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceIsStartingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceIsStartingGet", + Method: "GET", + PathPattern: "/storage_service/is_starting", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceIsStartingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceIsStartingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceIsStartingGetDefault) + return nil, runtime.NewAPIError("unexpected success response:
content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceJoinRingGet is joined +*/ +func (a *Client) StorageServiceJoinRingGet(params *StorageServiceJoinRingGetParams) (*StorageServiceJoinRingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceJoinRingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceJoinRingGet", + Method: "GET", + PathPattern: "/storage_service/join_ring", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceJoinRingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceJoinRingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceJoinRingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceJoinRingPost joins ring + +Allows a node that has been started without joining the ring to join it +*/ +func (a *Client) StorageServiceJoinRingPost(params *StorageServiceJoinRingPostParams) (*StorageServiceJoinRingPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceJoinRingPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceJoinRingPost", + Method: "POST", + PathPattern: "/storage_service/join_ring", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceJoinRingPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceJoinRingPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceJoinRingPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceKeyspaceCleanupByKeyspacePost forces keyspace cleanup + +Trigger a cleanup of keys on a single keyspace +*/ +func (a *Client) StorageServiceKeyspaceCleanupByKeyspacePost(params *StorageServiceKeyspaceCleanupByKeyspacePostParams) (*StorageServiceKeyspaceCleanupByKeyspacePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceKeyspaceCleanupByKeyspacePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceKeyspaceCleanupByKeyspacePost", + Method: "POST", + PathPattern: "/storage_service/keyspace_cleanup/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceKeyspaceCleanupByKeyspacePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceKeyspaceCleanupByKeyspacePostOK) + if ok { + return success, nil + } + // unexpected success response +
unexpectedSuccess := result.(*StorageServiceKeyspaceCleanupByKeyspacePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceKeyspaceCompactionByKeyspacePost forces keyspace compaction + +Forces major compaction of a single keyspace +*/ +func (a *Client) StorageServiceKeyspaceCompactionByKeyspacePost(params *StorageServiceKeyspaceCompactionByKeyspacePostParams) (*StorageServiceKeyspaceCompactionByKeyspacePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceKeyspaceCompactionByKeyspacePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceKeyspaceCompactionByKeyspacePost", + Method: "POST", + PathPattern: "/storage_service/keyspace_compaction/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceKeyspaceCompactionByKeyspacePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceKeyspaceCompactionByKeyspacePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceKeyspaceCompactionByKeyspacePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceKeyspaceFlushByKeyspacePost forces keyspace flush + +Flush all memtables for the given column families, or all columnfamilies for the given keyspace if none are explicitly listed. +*/ +func (a *Client) StorageServiceKeyspaceFlushByKeyspacePost(params *StorageServiceKeyspaceFlushByKeyspacePostParams) (*StorageServiceKeyspaceFlushByKeyspacePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceKeyspaceFlushByKeyspacePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceKeyspaceFlushByKeyspacePost", + Method: "POST", + PathPattern: "/storage_service/keyspace_flush/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceKeyspaceFlushByKeyspacePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceKeyspaceFlushByKeyspacePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceKeyspaceFlushByKeyspacePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceKeyspaceScrubByKeyspaceGet scrubs + +Scrub (deserialize + reserialize at the latest version, skipping bad rows if any) the given keyspace. If columnFamilies array is empty, all CFs are scrubbed. 
Scrubbed CFs will be snapshotted first, if disableSnapshot is false +*/ +func (a *Client) StorageServiceKeyspaceScrubByKeyspaceGet(params *StorageServiceKeyspaceScrubByKeyspaceGetParams) (*StorageServiceKeyspaceScrubByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceKeyspaceScrubByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceKeyspaceScrubByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/keyspace_scrub/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceKeyspaceScrubByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceKeyspaceScrubByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceKeyspaceScrubByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceKeyspaceUpgradeSstablesByKeyspaceGet upgrades sstables + +Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip bad rows and does not snapshot sstables first. +*/ +func (a *Client) StorageServiceKeyspaceUpgradeSstablesByKeyspaceGet(params *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) (*StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceKeyspaceUpgradeSstablesByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/keyspace_upgrade_sstables/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceKeyspacesGet gets keyspaces + +Get the keyspaces +*/ +func (a *Client) StorageServiceKeyspacesGet(params *StorageServiceKeyspacesGetParams) (*StorageServiceKeyspacesGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceKeyspacesGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceKeyspacesGet", + Method: "GET", + PathPattern: "/storage_service/keyspaces", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceKeyspacesGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err !=
nil { + return nil, err + } + success, ok := result.(*StorageServiceKeyspacesGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceKeyspacesGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceLoadGet gets load + +get load value +*/ +func (a *Client) StorageServiceLoadGet(params *StorageServiceLoadGetParams) (*StorageServiceLoadGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceLoadGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceLoadGet", + Method: "GET", + PathPattern: "/storage_service/load", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceLoadGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceLoadGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceLoadGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceLoadMapGet gets load map + +load value. Keys are IP addresses +*/ +func (a *Client) StorageServiceLoadMapGet(params *StorageServiceLoadMapGetParams) (*StorageServiceLoadMapGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceLoadMapGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceLoadMapGet", + Method: "GET", + PathPattern: "/storage_service/load_map", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceLoadMapGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceLoadMapGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceLoadMapGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceLoggingLevelGet gets logging levels + +get the runtime logging levels +*/ +func (a *Client) StorageServiceLoggingLevelGet(params *StorageServiceLoggingLevelGetParams) (*StorageServiceLoggingLevelGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceLoggingLevelGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceLoggingLevelGet", + Method: "GET", + PathPattern: "/storage_service/logging_level", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceLoggingLevelGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceLoggingLevelGetOK) + if ok { + return 
success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceLoggingLevelGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceLoggingLevelPost sets logging level + +set the logging level at runtime

If both classQualifier and level are empty/null, the logging configuration is reloaded to reset it.
If classQualifier is not empty but level is empty/null, the level for that classQualifier is set to null.
If level cannot be parsed, the level defaults to DEBUG.
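For example (illustrative, per the rules above): posting level=trace with a given classQualifier sets that logger to TRACE, while posting the same classQualifier with an empty level clears it again.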

The logback configuration should have < jmxConfigurator /> set +*/ +func (a *Client) StorageServiceLoggingLevelPost(params *StorageServiceLoggingLevelPostParams) (*StorageServiceLoggingLevelPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceLoggingLevelPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceLoggingLevelPost", + Method: "POST", + PathPattern: "/storage_service/logging_level", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceLoggingLevelPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceLoggingLevelPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceLoggingLevelPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceMetricsExceptionsGet gets exceptions + +Get exceptions +*/ +func (a *Client) StorageServiceMetricsExceptionsGet(params *StorageServiceMetricsExceptionsGetParams) (*StorageServiceMetricsExceptionsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceMetricsExceptionsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceMetricsExceptionsGet", + Method: "GET", + PathPattern: "/storage_service/metrics/exceptions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceMetricsExceptionsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceMetricsExceptionsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceMetricsExceptionsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceMetricsHintsInProgressGet gets total hints in progress + +Get total hints in progress +*/ +func (a *Client) StorageServiceMetricsHintsInProgressGet(params *StorageServiceMetricsHintsInProgressGetParams) (*StorageServiceMetricsHintsInProgressGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceMetricsHintsInProgressGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceMetricsHintsInProgressGet", + Method: "GET", + PathPattern: "/storage_service/metrics/hints_in_progress", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceMetricsHintsInProgressGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceMetricsHintsInProgressGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := 
result.(*StorageServiceMetricsHintsInProgressGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceMetricsLoadGet gets metrics load + +Get load +*/ +func (a *Client) StorageServiceMetricsLoadGet(params *StorageServiceMetricsLoadGetParams) (*StorageServiceMetricsLoadGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceMetricsLoadGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceMetricsLoadGet", + Method: "GET", + PathPattern: "/storage_service/metrics/load", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceMetricsLoadGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceMetricsLoadGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceMetricsLoadGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceMetricsTotalHintsGet gets total hints1 + +Get total hints +*/ +func (a *Client) StorageServiceMetricsTotalHintsGet(params *StorageServiceMetricsTotalHintsGetParams) (*StorageServiceMetricsTotalHintsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceMetricsTotalHintsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceMetricsTotalHintsGet", + Method: "GET", + PathPattern: "/storage_service/metrics/total_hints", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceMetricsTotalHintsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceMetricsTotalHintsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceMetricsTotalHintsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceMovePost moves + +This node will unload its data onto its neighbors, and bootstrap to the new token. 
+*/ +func (a *Client) StorageServiceMovePost(params *StorageServiceMovePostParams) (*StorageServiceMovePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceMovePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceMovePost", + Method: "POST", + PathPattern: "/storage_service/move", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceMovePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceMovePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceMovePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceNativeTransportDelete stops native transport + +Stop native transport +*/ +func (a *Client) StorageServiceNativeTransportDelete(params *StorageServiceNativeTransportDeleteParams) (*StorageServiceNativeTransportDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceNativeTransportDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceNativeTransportDelete", + Method: "DELETE", + PathPattern: "/storage_service/native_transport", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceNativeTransportDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceNativeTransportDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceNativeTransportDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceNativeTransportGet is native transport running + +Is native transport running +*/ +func (a *Client) StorageServiceNativeTransportGet(params *StorageServiceNativeTransportGetParams) (*StorageServiceNativeTransportGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceNativeTransportGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceNativeTransportGet", + Method: "GET", + PathPattern: "/storage_service/native_transport", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceNativeTransportGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceNativeTransportGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceNativeTransportGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* 
+StorageServiceNativeTransportPost starts native transport + +Start native transport +*/ +func (a *Client) StorageServiceNativeTransportPost(params *StorageServiceNativeTransportPostParams) (*StorageServiceNativeTransportPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceNativeTransportPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceNativeTransportPost", + Method: "POST", + PathPattern: "/storage_service/native_transport", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceNativeTransportPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceNativeTransportPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceNativeTransportPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceNaturalEndpointsByKeyspaceGet gets natural endpoints + +This method returns the N endpoints that are responsible for storing the specified key, i.e. the endpoints the key is replicated to +*/ +func (a *Client) StorageServiceNaturalEndpointsByKeyspaceGet(params *StorageServiceNaturalEndpointsByKeyspaceGetParams) (*StorageServiceNaturalEndpointsByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceNaturalEndpointsByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceNaturalEndpointsByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/natural_endpoints/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceNaturalEndpointsByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceNaturalEndpointsByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceNaturalEndpointsByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceNodesJoiningGet gets joining nodes + +Retrieve the list of nodes currently bootstrapping into the ring +*/ +func (a *Client) StorageServiceNodesJoiningGet(params *StorageServiceNodesJoiningGetParams) (*StorageServiceNodesJoiningGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceNodesJoiningGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceNodesJoiningGet", + Method: "GET", + PathPattern: "/storage_service/nodes/joining", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceNodesJoiningGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { +
return nil, err + } + success, ok := result.(*StorageServiceNodesJoiningGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceNodesJoiningGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceNodesLeavingGet gets leaving nodes + +Retrieve the list of nodes currently leaving the ring +*/ +func (a *Client) StorageServiceNodesLeavingGet(params *StorageServiceNodesLeavingGetParams) (*StorageServiceNodesLeavingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceNodesLeavingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceNodesLeavingGet", + Method: "GET", + PathPattern: "/storage_service/nodes/leaving", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceNodesLeavingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceNodesLeavingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceNodesLeavingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceNodesMovingGet gets moving nodes + +Retrieve the list of nodes currently moving in the ring +*/ +func (a *Client) StorageServiceNodesMovingGet(params *StorageServiceNodesMovingGetParams) (*StorageServiceNodesMovingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceNodesMovingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceNodesMovingGet", + Method: "GET", + PathPattern: "/storage_service/nodes/moving", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceNodesMovingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceNodesMovingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceNodesMovingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceOperationModeGet gets operation mode + +Get the operational mode (leaving, joining, normal, decommissioned, client) +*/ +func (a *Client) StorageServiceOperationModeGet(params *StorageServiceOperationModeGetParams) (*StorageServiceOperationModeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceOperationModeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceOperationModeGet", + Method: "GET", + PathPattern: "/storage_service/operation_mode", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + 
Reader: &StorageServiceOperationModeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceOperationModeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceOperationModeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceOwnershipByKeyspaceGet gets effective ownership + +Effective ownership is % of the data each node owns given the keyspace +*/ +func (a *Client) StorageServiceOwnershipByKeyspaceGet(params *StorageServiceOwnershipByKeyspaceGetParams) (*StorageServiceOwnershipByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceOwnershipByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceOwnershipByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/ownership/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceOwnershipByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceOwnershipByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceOwnershipByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceOwnershipGet gets ownership + +The mapping from token -> % of cluster owned by that token +*/ +func (a *Client) StorageServiceOwnershipGet(params *StorageServiceOwnershipGetParams) (*StorageServiceOwnershipGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceOwnershipGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceOwnershipGet", + Method: "GET", + PathPattern: "/storage_service/ownership/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceOwnershipGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceOwnershipGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceOwnershipGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServicePartitionerNameGet gets partitioner name + +Returns the cluster partitioner +*/ +func (a *Client) StorageServicePartitionerNameGet(params *StorageServicePartitionerNameGetParams) (*StorageServicePartitionerNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServicePartitionerNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServicePartitionerNameGet", + Method: "GET", + PathPattern: 
"/storage_service/partitioner_name", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServicePartitionerNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServicePartitionerNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServicePartitionerNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServicePendingRangeByKeyspaceGet gets pending range to endpoint map + +Retrieve a map of pending ranges to endpoints that describe the ring topology +*/ +func (a *Client) StorageServicePendingRangeByKeyspaceGet(params *StorageServicePendingRangeByKeyspaceGetParams) (*StorageServicePendingRangeByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServicePendingRangeByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServicePendingRangeByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/pending_range/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServicePendingRangeByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServicePendingRangeByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServicePendingRangeByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRangeToEndpointMapByKeyspaceGet gets range to endpoint map + +Retrieve a map of range to end points that describe the ring topology of a Cassandra cluster. 
+*/ +func (a *Client) StorageServiceRangeToEndpointMapByKeyspaceGet(params *StorageServiceRangeToEndpointMapByKeyspaceGetParams) (*StorageServiceRangeToEndpointMapByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRangeToEndpointMapByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRangeToEndpointMapByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/range_to_endpoint_map/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRangeToEndpointMapByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRangeToEndpointMapByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRangeToEndpointMapByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRebuildPost rebuilds + +Initiate a process of streaming data for which we are responsible from other nodes. It is similar to bootstrap except meant to be used on a node which is already in the cluster (typically containing no data) as an alternative to running repair. +*/ +func (a *Client) StorageServiceRebuildPost(params *StorageServiceRebuildPostParams) (*StorageServiceRebuildPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRebuildPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRebuildPost", + Method: "POST", + PathPattern: "/storage_service/rebuild", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRebuildPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRebuildPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRebuildPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceReleaseVersionGet gets release version + +Fetch a string representation of the Cassandra version. 
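+
+Example (sketch; client is an assumed *Client, and the payload is assumed to be
+the version string):
+
+	resp, err := client.StorageServiceReleaseVersionGet(nil) // nil params are defaulted internally
+	if err != nil {
+		// transport failure or non-2xx API response
+	}
+	fmt.Println(resp.Payload)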
+*/ +func (a *Client) StorageServiceReleaseVersionGet(params *StorageServiceReleaseVersionGetParams) (*StorageServiceReleaseVersionGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceReleaseVersionGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceReleaseVersionGet", + Method: "GET", + PathPattern: "/storage_service/release_version", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceReleaseVersionGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceReleaseVersionGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceReleaseVersionGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRelocalSchemaPost resets local schema + +Reset local schema +*/ +func (a *Client) StorageServiceRelocalSchemaPost(params *StorageServiceRelocalSchemaPostParams) (*StorageServiceRelocalSchemaPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRelocalSchemaPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRelocalSchemaPost", + Method: "POST", + PathPattern: "/storage_service/relocal_schema", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRelocalSchemaPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRelocalSchemaPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRelocalSchemaPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRemovalStatusGet gets removal status + +Get the status of a token removal. 
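+
+Example (a polling sketch; client is an assumed *Client, the payload is assumed
+to be a human-readable status string, and the exit condition is elided):
+
+	for {
+		resp, err := client.StorageServiceRemovalStatusGet(nil)
+		if err != nil {
+			break // transport failure or non-2xx API response
+		}
+		fmt.Println(resp.Payload)
+		time.Sleep(5 * time.Second) // poll until the removal completes
+	}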
+*/ +func (a *Client) StorageServiceRemovalStatusGet(params *StorageServiceRemovalStatusGetParams) (*StorageServiceRemovalStatusGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRemovalStatusGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRemovalStatusGet", + Method: "GET", + PathPattern: "/storage_service/removal_status", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRemovalStatusGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRemovalStatusGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRemovalStatusGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRemoveNodePost removes node + +Removes a token (and all data associated with the endpoint that had it) from the ring +*/ +func (a *Client) StorageServiceRemoveNodePost(params *StorageServiceRemoveNodePostParams) (*StorageServiceRemoveNodePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRemoveNodePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRemoveNodePost", + Method: "POST", + PathPattern: "/storage_service/remove_node", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRemoveNodePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRemoveNodePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRemoveNodePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRepairAsyncByKeyspaceGet repairs async status + +Track the progress of an already running repair +*/ +func (a *Client) StorageServiceRepairAsyncByKeyspaceGet(params *StorageServiceRepairAsyncByKeyspaceGetParams) (*StorageServiceRepairAsyncByKeyspaceGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRepairAsyncByKeyspaceGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRepairAsyncByKeyspaceGet", + Method: "GET", + PathPattern: "/storage_service/repair_async/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRepairAsyncByKeyspaceGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRepairAsyncByKeyspaceGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRepairAsyncByKeyspaceGetDefault) + return nil, runtime.NewAPIError("unexpected success 
response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRepairAsyncByKeyspacePost repairs async + +Invoke repair asynchronously. You can track repair progress by calling the corresponding GET endpoint with the returned id +*/ +func (a *Client) StorageServiceRepairAsyncByKeyspacePost(params *StorageServiceRepairAsyncByKeyspacePostParams) (*StorageServiceRepairAsyncByKeyspacePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRepairAsyncByKeyspacePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRepairAsyncByKeyspacePost", + Method: "POST", + PathPattern: "/storage_service/repair_async/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRepairAsyncByKeyspacePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRepairAsyncByKeyspacePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRepairAsyncByKeyspacePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRepairStatus gets storage service repair status + +Query the repair status and return when the repair is finished or the timeout is reached +*/ +func (a *Client) StorageServiceRepairStatus(params *StorageServiceRepairStatusParams) (*StorageServiceRepairStatusOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRepairStatusParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRepairStatus", + Method: "GET", + PathPattern: "/storage_service/repair_status", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRepairStatusReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRepairStatusOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRepairStatusDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRescheduleFailedDeletionsPost reschedules failed deletions + +Reschedule failed deletions +*/ +func (a *Client) StorageServiceRescheduleFailedDeletionsPost(params *StorageServiceRescheduleFailedDeletionsPostParams) (*StorageServiceRescheduleFailedDeletionsPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRescheduleFailedDeletionsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRescheduleFailedDeletionsPost", + Method: "POST", + PathPattern: "/storage_service/reschedule_failed_deletions", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRescheduleFailedDeletionsPostReader{formats: 
a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRescheduleFailedDeletionsPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRescheduleFailedDeletionsPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRPCServerDelete stops rpc server + +Allows a user to disable thrift +*/ +func (a *Client) StorageServiceRPCServerDelete(params *StorageServiceRPCServerDeleteParams) (*StorageServiceRPCServerDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRPCServerDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRpcServerDelete", + Method: "DELETE", + PathPattern: "/storage_service/rpc_server", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRPCServerDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRPCServerDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRPCServerDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRPCServerGet is rpc server running + +Determine if thrift is running +*/ +func (a *Client) StorageServiceRPCServerGet(params *StorageServiceRPCServerGetParams) (*StorageServiceRPCServerGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRPCServerGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRpcServerGet", + Method: "GET", + PathPattern: "/storage_service/rpc_server", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceRPCServerGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRPCServerGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRPCServerGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceRPCServerPost starts rpc server + +allows a user to reenable thrift +*/ +func (a *Client) StorageServiceRPCServerPost(params *StorageServiceRPCServerPostParams) (*StorageServiceRPCServerPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceRPCServerPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceRpcServerPost", + Method: "POST", + PathPattern: "/storage_service/rpc_server", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: 
&StorageServiceRPCServerPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceRPCServerPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceRPCServerPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSampleKeyRangeGet samples key range + +Return a List of Tokens representing a sample of keys across all ColumnFamilyStores. +*/ +func (a *Client) StorageServiceSampleKeyRangeGet(params *StorageServiceSampleKeyRangeGetParams) (*StorageServiceSampleKeyRangeGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSampleKeyRangeGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSampleKeyRangeGet", + Method: "GET", + PathPattern: "/storage_service/sample_key_range", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSampleKeyRangeGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSampleKeyRangeGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSampleKeyRangeGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSavedCachesLocationGet gets saved caches location + +Get location of the saved caches dir +*/ +func (a *Client) StorageServiceSavedCachesLocationGet(params *StorageServiceSavedCachesLocationGetParams) (*StorageServiceSavedCachesLocationGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSavedCachesLocationGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSavedCachesLocationGet", + Method: "GET", + PathPattern: "/storage_service/saved_caches/location", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSavedCachesLocationGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSavedCachesLocationGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSavedCachesLocationGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSchemaVersionGet gets schema version + +Fetch a string representation of the current Schema version. 
+*/ +func (a *Client) StorageServiceSchemaVersionGet(params *StorageServiceSchemaVersionGetParams) (*StorageServiceSchemaVersionGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSchemaVersionGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSchemaVersionGet", + Method: "GET", + PathPattern: "/storage_service/schema_version", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSchemaVersionGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSchemaVersionGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSchemaVersionGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceScyllaReleaseVersionGet gets scylla release version + +Fetch a string representation of the Scylla version. +*/ +func (a *Client) StorageServiceScyllaReleaseVersionGet(params *StorageServiceScyllaReleaseVersionGetParams) (*StorageServiceScyllaReleaseVersionGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceScyllaReleaseVersionGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceScyllaReleaseVersionGet", + Method: "GET", + PathPattern: "/storage_service/scylla_release_version", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceScyllaReleaseVersionGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceScyllaReleaseVersionGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceScyllaReleaseVersionGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSlowQueryGet gets slow query info + +Returns the slow query record configuration. 
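+
+Example (sketch; client is an assumed *Client, and resp.Payload is assumed to
+carry the decoded configuration object):
+
+	resp, err := client.StorageServiceSlowQueryGet(nil) // nil params are defaulted internally
+	if err != nil {
+		// transport failure or non-2xx API response
+	}
+	_ = resp.Payload // inspect here, then adjust via StorageServiceSlowQueryPost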
+*/ +func (a *Client) StorageServiceSlowQueryGet(params *StorageServiceSlowQueryGetParams) (*StorageServiceSlowQueryGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSlowQueryGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSlowQueryGet", + Method: "GET", + PathPattern: "/storage_service/slow_query", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSlowQueryGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSlowQueryGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSlowQueryGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSlowQueryPost sets slow query + +Set slow query parameter +*/ +func (a *Client) StorageServiceSlowQueryPost(params *StorageServiceSlowQueryPostParams) (*StorageServiceSlowQueryPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSlowQueryPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSlowQueryPost", + Method: "POST", + PathPattern: "/storage_service/slow_query", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSlowQueryPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSlowQueryPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSlowQueryPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSnapshotsDelete dels snapshot + +Remove the snapshot with the given name from the given keyspaces. 
If no tag is specified, all snapshots will be removed +*/ +func (a *Client) StorageServiceSnapshotsDelete(params *StorageServiceSnapshotsDeleteParams) (*StorageServiceSnapshotsDeleteOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSnapshotsDeleteParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSnapshotsDelete", + Method: "DELETE", + PathPattern: "/storage_service/snapshots", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSnapshotsDeleteReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSnapshotsDeleteOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSnapshotsDeleteDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSnapshotsGet gets snapshot details + +Get the details of all the snapshots +*/ +func (a *Client) StorageServiceSnapshotsGet(params *StorageServiceSnapshotsGetParams) (*StorageServiceSnapshotsGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSnapshotsGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSnapshotsGet", + Method: "GET", + PathPattern: "/storage_service/snapshots", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSnapshotsGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSnapshotsGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSnapshotsGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSnapshotsPost takes snapshot + +Takes the snapshot for the given keyspaces. A snapshot name must be specified.
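+
+Example (sketch only; client is an assumed *Client, and the Tag and Kn field
+names are assumptions mirroring the API's tag and kn query parameters):
+
+	params := NewStorageServiceSnapshotsPostParams()
+	tag, keyspaces := "nightly-2024-04-01", "ks1,ks2"
+	params.Tag = &tag      // snapshot name to create
+	params.Kn = &keyspaces // comma-separated keyspaces (pointer: optional param)
+	if _, err := client.StorageServiceSnapshotsPost(params); err != nil {
+		// transport failure or non-2xx API response
+	}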
+*/ +func (a *Client) StorageServiceSnapshotsPost(params *StorageServiceSnapshotsPostParams) (*StorageServiceSnapshotsPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSnapshotsPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSnapshotsPost", + Method: "POST", + PathPattern: "/storage_service/snapshots", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSnapshotsPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSnapshotsPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSnapshotsPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSnapshotsSizeTrueGet trues snapshots size + +Get the true size taken by all snapshots across all keyspaces. +*/ +func (a *Client) StorageServiceSnapshotsSizeTrueGet(params *StorageServiceSnapshotsSizeTrueGetParams) (*StorageServiceSnapshotsSizeTrueGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSnapshotsSizeTrueGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSnapshotsSizeTrueGet", + Method: "GET", + PathPattern: "/storage_service/snapshots/size/true", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSnapshotsSizeTrueGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSnapshotsSizeTrueGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSnapshotsSizeTrueGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceSstablesByKeyspacePost loads new ss tables + +Load new SSTables to the given keyspace/columnFamily +*/ +func (a *Client) StorageServiceSstablesByKeyspacePost(params *StorageServiceSstablesByKeyspacePostParams) (*StorageServiceSstablesByKeyspacePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceSstablesByKeyspacePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceSstablesByKeyspacePost", + Method: "POST", + PathPattern: "/storage_service/sstables/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceSstablesByKeyspacePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceSstablesByKeyspacePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceSstablesByKeyspacePostDefault) + return nil, 
runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceStopDaemonPost stops daemon + +allows a user to forcibly completely stop cassandra +*/ +func (a *Client) StorageServiceStopDaemonPost(params *StorageServiceStopDaemonPostParams) (*StorageServiceStopDaemonPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceStopDaemonPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceStopDaemonPost", + Method: "POST", + PathPattern: "/storage_service/stop_daemon", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceStopDaemonPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceStopDaemonPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceStopDaemonPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceStreamThroughputGet gets stream throughput mb per sec + +Get stream throughput mb per sec +*/ +func (a *Client) StorageServiceStreamThroughputGet(params *StorageServiceStreamThroughputGetParams) (*StorageServiceStreamThroughputGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceStreamThroughputGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceStreamThroughputGet", + Method: "GET", + PathPattern: "/storage_service/stream_throughput", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceStreamThroughputGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceStreamThroughputGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceStreamThroughputGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceStreamThroughputPost sets stream throughput mb per sec + +set stream throughput mb per sec +*/ +func (a *Client) StorageServiceStreamThroughputPost(params *StorageServiceStreamThroughputPostParams) (*StorageServiceStreamThroughputPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceStreamThroughputPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceStreamThroughputPost", + Method: "POST", + PathPattern: "/storage_service/stream_throughput", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceStreamThroughputPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*StorageServiceStreamThroughputPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceStreamThroughputPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTokensByEndpointGet gets node tokens + +Returns a list of the tokens for a specified node +*/ +func (a *Client) StorageServiceTokensByEndpointGet(params *StorageServiceTokensByEndpointGetParams) (*StorageServiceTokensByEndpointGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTokensByEndpointGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTokensByEndpointGet", + Method: "GET", + PathPattern: "/storage_service/tokens/{endpoint}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTokensByEndpointGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTokensByEndpointGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTokensByEndpointGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTokensEndpointGet gets token endpoint + +Returns a list of the token to endpoint mappings +*/ +func (a *Client) StorageServiceTokensEndpointGet(params *StorageServiceTokensEndpointGetParams) (*StorageServiceTokensEndpointGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTokensEndpointGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTokensEndpointGet", + Method: "GET", + PathPattern: "/storage_service/tokens_endpoint", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTokensEndpointGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTokensEndpointGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTokensEndpointGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTokensGet gets tokens + +Returns a list of the tokens for this node +*/ +func (a *Client) StorageServiceTokensGet(params *StorageServiceTokensGetParams) (*StorageServiceTokensGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTokensGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTokensGet", + Method: "GET", + PathPattern: "/storage_service/tokens", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTokensGetReader{formats: a.formats}, + 
Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTokensGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTokensGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTombstoneFailureThresholdGet gets tombstone failure threshold +*/ +func (a *Client) StorageServiceTombstoneFailureThresholdGet(params *StorageServiceTombstoneFailureThresholdGetParams) (*StorageServiceTombstoneFailureThresholdGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTombstoneFailureThresholdGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTombstoneFailureThresholdGet", + Method: "GET", + PathPattern: "/storage_service/tombstone_failure_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTombstoneFailureThresholdGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTombstoneFailureThresholdGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTombstoneFailureThresholdGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTombstoneFailureThresholdPost sets tombstone failure threshold +*/ +func (a *Client) StorageServiceTombstoneFailureThresholdPost(params *StorageServiceTombstoneFailureThresholdPostParams) (*StorageServiceTombstoneFailureThresholdPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTombstoneFailureThresholdPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTombstoneFailureThresholdPost", + Method: "POST", + PathPattern: "/storage_service/tombstone_failure_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTombstoneFailureThresholdPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTombstoneFailureThresholdPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTombstoneFailureThresholdPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTombstoneWarnThresholdGet gets tombstone warn threshold + +Returns the threshold for warning of queries with many tombstones +*/ +func (a *Client) StorageServiceTombstoneWarnThresholdGet(params *StorageServiceTombstoneWarnThresholdGetParams) (*StorageServiceTombstoneWarnThresholdGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTombstoneWarnThresholdGetParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTombstoneWarnThresholdGet", + Method: "GET", + PathPattern: "/storage_service/tombstone_warn_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTombstoneWarnThresholdGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTombstoneWarnThresholdGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTombstoneWarnThresholdGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTombstoneWarnThresholdPost sets tombstone warn threshold + +Sets the threshold for warning queries with many tombstones +*/ +func (a *Client) StorageServiceTombstoneWarnThresholdPost(params *StorageServiceTombstoneWarnThresholdPostParams) (*StorageServiceTombstoneWarnThresholdPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTombstoneWarnThresholdPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTombstoneWarnThresholdPost", + Method: "POST", + PathPattern: "/storage_service/tombstone_warn_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTombstoneWarnThresholdPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTombstoneWarnThresholdPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTombstoneWarnThresholdPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTraceProbabilityGet gets trace probability + +Returns the configured tracing probability. 
+*/ +func (a *Client) StorageServiceTraceProbabilityGet(params *StorageServiceTraceProbabilityGetParams) (*StorageServiceTraceProbabilityGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTraceProbabilityGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTraceProbabilityGet", + Method: "GET", + PathPattern: "/storage_service/trace_probability", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTraceProbabilityGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTraceProbabilityGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTraceProbabilityGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTraceProbabilityPost sets trace probability + +Enables/Disables tracing for the whole system. Only thrift requests can start tracing currently +*/ +func (a *Client) StorageServiceTraceProbabilityPost(params *StorageServiceTraceProbabilityPostParams) (*StorageServiceTraceProbabilityPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTraceProbabilityPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTraceProbabilityPost", + Method: "POST", + PathPattern: "/storage_service/trace_probability", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTraceProbabilityPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTraceProbabilityPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTraceProbabilityPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceTruncateByKeyspacePost truncates + +Truncates (deletes) the given columnFamily from the provided keyspace. Calling truncate results in actual deletion of all data in the cluster under the given columnFamily and it will fail unless all hosts are up. All data in the given column family will be deleted, but its definition will not be affected. 
+*/ +func (a *Client) StorageServiceTruncateByKeyspacePost(params *StorageServiceTruncateByKeyspacePostParams) (*StorageServiceTruncateByKeyspacePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceTruncateByKeyspacePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceTruncateByKeyspacePost", + Method: "POST", + PathPattern: "/storage_service/truncate/{keyspace}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceTruncateByKeyspacePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceTruncateByKeyspacePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceTruncateByKeyspacePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceUpdateSnitchPost updates snitch + +Change endpointsnitch class and dynamic-ness (and dynamic attributes) at runtime +*/ +func (a *Client) StorageServiceUpdateSnitchPost(params *StorageServiceUpdateSnitchPostParams) (*StorageServiceUpdateSnitchPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceUpdateSnitchPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceUpdateSnitchPost", + Method: "POST", + PathPattern: "/storage_service/update_snitch", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceUpdateSnitchPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceUpdateSnitchPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceUpdateSnitchPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StorageServiceViewBuildStatusesByKeyspaceAndViewGet views build statuses + +Gets the progress of a materialized view build +*/ +func (a *Client) StorageServiceViewBuildStatusesByKeyspaceAndViewGet(params *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) (*StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StorageServiceViewBuildStatusesByKeyspaceAndViewGet", + Method: "GET", + PathPattern: "/storage_service/view_build_statuses/{keyspace}/{view}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StorageServiceViewBuildStatusesByKeyspaceAndViewGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK) + 
if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StreamManagerGet gets current streams + +Returns the current state of all ongoing streams. +*/ +func (a *Client) StreamManagerGet(params *StreamManagerGetParams) (*StreamManagerGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStreamManagerGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StreamManagerGet", + Method: "GET", + PathPattern: "/stream_manager/", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StreamManagerGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StreamManagerGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StreamManagerGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StreamManagerMetricsIncomingByPeerGet gets total incoming bytes + +Get total incoming bytes +*/ +func (a *Client) StreamManagerMetricsIncomingByPeerGet(params *StreamManagerMetricsIncomingByPeerGetParams) (*StreamManagerMetricsIncomingByPeerGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStreamManagerMetricsIncomingByPeerGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StreamManagerMetricsIncomingByPeerGet", + Method: "GET", + PathPattern: "/stream_manager/metrics/incoming/{peer}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StreamManagerMetricsIncomingByPeerGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StreamManagerMetricsIncomingByPeerGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StreamManagerMetricsIncomingByPeerGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StreamManagerMetricsIncomingGet gets all total incoming bytes + +Get all total incoming bytes +*/ +func (a *Client) StreamManagerMetricsIncomingGet(params *StreamManagerMetricsIncomingGetParams) (*StreamManagerMetricsIncomingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStreamManagerMetricsIncomingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StreamManagerMetricsIncomingGet", + Method: "GET", + PathPattern: "/stream_manager/metrics/incoming", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StreamManagerMetricsIncomingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return 
nil, err + } + success, ok := result.(*StreamManagerMetricsIncomingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StreamManagerMetricsIncomingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StreamManagerMetricsOutboundGet gets all active streams outbound + +Get number of active outbound streams +*/ +func (a *Client) StreamManagerMetricsOutboundGet(params *StreamManagerMetricsOutboundGetParams) (*StreamManagerMetricsOutboundGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStreamManagerMetricsOutboundGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StreamManagerMetricsOutboundGet", + Method: "GET", + PathPattern: "/stream_manager/metrics/outbound", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StreamManagerMetricsOutboundGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StreamManagerMetricsOutboundGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StreamManagerMetricsOutboundGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StreamManagerMetricsOutgoingByPeerGet gets total outgoing bytes + +Get total outgoing bytes +*/ +func (a *Client) StreamManagerMetricsOutgoingByPeerGet(params *StreamManagerMetricsOutgoingByPeerGetParams) (*StreamManagerMetricsOutgoingByPeerGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStreamManagerMetricsOutgoingByPeerGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StreamManagerMetricsOutgoingByPeerGet", + Method: "GET", + PathPattern: "/stream_manager/metrics/outgoing/{peer}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &StreamManagerMetricsOutgoingByPeerGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StreamManagerMetricsOutgoingByPeerGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StreamManagerMetricsOutgoingByPeerGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +StreamManagerMetricsOutgoingGet gets all total outgoing bytes + +Get all total outgoing bytes +*/ +func (a *Client) StreamManagerMetricsOutgoingGet(params *StreamManagerMetricsOutgoingGetParams) (*StreamManagerMetricsOutgoingGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewStreamManagerMetricsOutgoingGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "StreamManagerMetricsOutgoingGet", + Method: "GET", + PathPattern: "/stream_manager/metrics/outgoing", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + 
Schemes: []string{"http"}, + Params: params, + Reader: &StreamManagerMetricsOutgoingGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*StreamManagerMetricsOutgoingGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*StreamManagerMetricsOutgoingGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SystemLoggerByNameGet gets logger level + +Get logger level +*/ +func (a *Client) SystemLoggerByNameGet(params *SystemLoggerByNameGetParams) (*SystemLoggerByNameGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSystemLoggerByNameGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "SystemLoggerByNameGet", + Method: "GET", + PathPattern: "/system/logger/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SystemLoggerByNameGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*SystemLoggerByNameGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*SystemLoggerByNameGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SystemLoggerByNamePost sets logger level + +Set logger level +*/ +func (a *Client) SystemLoggerByNamePost(params *SystemLoggerByNamePostParams) (*SystemLoggerByNamePostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSystemLoggerByNamePostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "SystemLoggerByNamePost", + Method: "POST", + PathPattern: "/system/logger/{name}", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SystemLoggerByNamePostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*SystemLoggerByNamePostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*SystemLoggerByNamePostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SystemLoggerGet gets all logger names + +Get all logger names +*/ +func (a *Client) SystemLoggerGet(params *SystemLoggerGetParams) (*SystemLoggerGetOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSystemLoggerGetParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "SystemLoggerGet", + Method: "GET", + PathPattern: "/system/logger", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SystemLoggerGetReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*SystemLoggerGetOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*SystemLoggerGetDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +SystemLoggerPost sets all logger level + +Set all logger level +*/ +func (a *Client) SystemLoggerPost(params *SystemLoggerPostParams) (*SystemLoggerPostOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewSystemLoggerPostParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "SystemLoggerPost", + Method: "POST", + PathPattern: "/system/logger", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &SystemLoggerPostReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*SystemLoggerPostOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*SystemLoggerPostDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_datacenter_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_datacenter_get_parameters.go new file mode 100644 index 00000000000..f1f3e1e3be8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_datacenter_get_parameters.go @@ -0,0 +1,147 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSnitchDatacenterGetParams creates a new SnitchDatacenterGetParams object +// with the default values initialized. 
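
For orientation on how these newly vendored files are consumed, the sketch below wires up the generated operations client and calls one of the new snitch endpoints. It is illustrative only: the agent address, scheme, and base path are assumptions, `operations.New` and `httptransport.New` are the standard go-swagger wiring rather than anything specific to this diff, and the `SnitchDatacenterGet` method is assumed to exist in the client file alongside the parameters/responses added here.

```go
package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Assumed agent address and base path; adjust for a real deployment.
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	c := operations.New(transport, strfmt.Default)

	// Query the datacenter of a specific node. Host is optional; when left
	// nil the server falls back to its own broadcast/listen address.
	host := "10.0.0.1" // hypothetical peer address
	resp, err := c.SnitchDatacenterGet(operations.NewSnitchDatacenterGetParams().WithHost(&host))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("datacenter:", resp.GetPayload())
}
```
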
+func NewSnitchDatacenterGetParams() *SnitchDatacenterGetParams { + var () + return &SnitchDatacenterGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewSnitchDatacenterGetParamsWithTimeout creates a new SnitchDatacenterGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSnitchDatacenterGetParamsWithTimeout(timeout time.Duration) *SnitchDatacenterGetParams { + var () + return &SnitchDatacenterGetParams{ + + timeout: timeout, + } +} + +// NewSnitchDatacenterGetParamsWithContext creates a new SnitchDatacenterGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewSnitchDatacenterGetParamsWithContext(ctx context.Context) *SnitchDatacenterGetParams { + var () + return &SnitchDatacenterGetParams{ + + Context: ctx, + } +} + +// NewSnitchDatacenterGetParamsWithHTTPClient creates a new SnitchDatacenterGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSnitchDatacenterGetParamsWithHTTPClient(client *http.Client) *SnitchDatacenterGetParams { + var () + return &SnitchDatacenterGetParams{ + HTTPClient: client, + } +} + +/* +SnitchDatacenterGetParams contains all the parameters to send to the API endpoint +for the snitch datacenter get operation typically these are written to a http.Request +*/ +type SnitchDatacenterGetParams struct { + + /*Host + The host name. If absent, the local server broadcast/listen address is used + + */ + Host *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) WithTimeout(timeout time.Duration) *SnitchDatacenterGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) WithContext(ctx context.Context) *SnitchDatacenterGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) WithHTTPClient(client *http.Client) *SnitchDatacenterGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithHost adds the host to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) WithHost(host *string) *SnitchDatacenterGetParams { + o.SetHost(host) + return o +} + +// SetHost adds the host to the snitch datacenter get params +func (o *SnitchDatacenterGetParams) SetHost(host *string) { + o.Host = host +} + +// WriteToRequest writes these params to a swagger request +func (o *SnitchDatacenterGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Host != nil { + + // query param host + var qrHost string + if o.Host != nil { + qrHost = *o.Host + } + qHost := qrHost + if qHost != "" { + if err := r.SetQueryParam("host", qHost); err 
!= nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_datacenter_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_datacenter_get_responses.go new file mode 100644 index 00000000000..d4026866792 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_datacenter_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// SnitchDatacenterGetReader is a Reader for the SnitchDatacenterGet structure. +type SnitchDatacenterGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SnitchDatacenterGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSnitchDatacenterGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSnitchDatacenterGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSnitchDatacenterGetOK creates a SnitchDatacenterGetOK with default headers values +func NewSnitchDatacenterGetOK() *SnitchDatacenterGetOK { + return &SnitchDatacenterGetOK{} +} + +/* +SnitchDatacenterGetOK handles this case with default header values. + +Success +*/ +type SnitchDatacenterGetOK struct { + Payload string +} + +func (o *SnitchDatacenterGetOK) GetPayload() string { + return o.Payload +} + +func (o *SnitchDatacenterGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSnitchDatacenterGetDefault creates a SnitchDatacenterGetDefault with default headers values +func NewSnitchDatacenterGetDefault(code int) *SnitchDatacenterGetDefault { + return &SnitchDatacenterGetDefault{ + _statusCode: code, + } +} + +/* +SnitchDatacenterGetDefault handles this case with default header values. 
+ +internal server error +*/ +type SnitchDatacenterGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the snitch datacenter get default response +func (o *SnitchDatacenterGetDefault) Code() int { + return o._statusCode +} + +func (o *SnitchDatacenterGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *SnitchDatacenterGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *SnitchDatacenterGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_name_get_parameters.go new file mode 100644 index 00000000000..6a1e52a346c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_name_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSnitchNameGetParams creates a new SnitchNameGetParams object +// with the default values initialized. 
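
A note on the error path shared by every reader in this diff: a non-2xx response is returned as the operation's `*Default` value, which itself implements `error` with the `agent [HTTP %d] %s` format. Callers who want the structured payload rather than the formatted string can unwrap it; a minimal sketch, assuming a client `c` built as in the earlier example and a `SnitchNameGet` method generated from the same spec:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// printSnitchName demonstrates recovering the structured error payload.
func printSnitchName(c *operations.Client) {
	resp, err := c.SnitchNameGet(operations.NewSnitchNameGetParams())
	if err != nil {
		// Non-2xx responses surface as the operation's *Default value,
		// which implements error, so errors.As can recover it directly.
		var def *operations.SnitchNameGetDefault
		if errors.As(err, &def) {
			fmt.Printf("agent returned HTTP %d: %s\n", def.Code(), def.GetPayload().Message)
			return
		}
		fmt.Println("transport-level failure:", err)
		return
	}
	fmt.Println("snitch:", resp.GetPayload())
}
```
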
+func NewSnitchNameGetParams() *SnitchNameGetParams { + + return &SnitchNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewSnitchNameGetParamsWithTimeout creates a new SnitchNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSnitchNameGetParamsWithTimeout(timeout time.Duration) *SnitchNameGetParams { + + return &SnitchNameGetParams{ + + timeout: timeout, + } +} + +// NewSnitchNameGetParamsWithContext creates a new SnitchNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewSnitchNameGetParamsWithContext(ctx context.Context) *SnitchNameGetParams { + + return &SnitchNameGetParams{ + + Context: ctx, + } +} + +// NewSnitchNameGetParamsWithHTTPClient creates a new SnitchNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSnitchNameGetParamsWithHTTPClient(client *http.Client) *SnitchNameGetParams { + + return &SnitchNameGetParams{ + HTTPClient: client, + } +} + +/* +SnitchNameGetParams contains all the parameters to send to the API endpoint +for the snitch name get operation typically these are written to a http.Request +*/ +type SnitchNameGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the snitch name get params +func (o *SnitchNameGetParams) WithTimeout(timeout time.Duration) *SnitchNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the snitch name get params +func (o *SnitchNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the snitch name get params +func (o *SnitchNameGetParams) WithContext(ctx context.Context) *SnitchNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the snitch name get params +func (o *SnitchNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the snitch name get params +func (o *SnitchNameGetParams) WithHTTPClient(client *http.Client) *SnitchNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the snitch name get params +func (o *SnitchNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *SnitchNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_name_get_responses.go new file mode 100644 index 00000000000..335537799dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// SnitchNameGetReader is a Reader for the SnitchNameGet structure. +type SnitchNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SnitchNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSnitchNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSnitchNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSnitchNameGetOK creates a SnitchNameGetOK with default headers values +func NewSnitchNameGetOK() *SnitchNameGetOK { + return &SnitchNameGetOK{} +} + +/* +SnitchNameGetOK handles this case with default header values. + +Success +*/ +type SnitchNameGetOK struct { + Payload string +} + +func (o *SnitchNameGetOK) GetPayload() string { + return o.Payload +} + +func (o *SnitchNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSnitchNameGetDefault creates a SnitchNameGetDefault with default headers values +func NewSnitchNameGetDefault(code int) *SnitchNameGetDefault { + return &SnitchNameGetDefault{ + _statusCode: code, + } +} + +/* +SnitchNameGetDefault handles this case with default header values. + +internal server error +*/ +type SnitchNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the snitch name get default response +func (o *SnitchNameGetDefault) Code() int { + return o._statusCode +} + +func (o *SnitchNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *SnitchNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *SnitchNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_rack_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_rack_get_parameters.go new file mode 100644 index 00000000000..b11cffded1c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_rack_get_parameters.go @@ -0,0 +1,147 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSnitchRackGetParams creates a new SnitchRackGetParams object +// with the default values initialized. +func NewSnitchRackGetParams() *SnitchRackGetParams { + var () + return &SnitchRackGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewSnitchRackGetParamsWithTimeout creates a new SnitchRackGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSnitchRackGetParamsWithTimeout(timeout time.Duration) *SnitchRackGetParams { + var () + return &SnitchRackGetParams{ + + timeout: timeout, + } +} + +// NewSnitchRackGetParamsWithContext creates a new SnitchRackGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewSnitchRackGetParamsWithContext(ctx context.Context) *SnitchRackGetParams { + var () + return &SnitchRackGetParams{ + + Context: ctx, + } +} + +// NewSnitchRackGetParamsWithHTTPClient creates a new SnitchRackGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSnitchRackGetParamsWithHTTPClient(client *http.Client) *SnitchRackGetParams { + var () + return &SnitchRackGetParams{ + HTTPClient: client, + } +} + +/* +SnitchRackGetParams contains all the parameters to send to the API endpoint +for the snitch rack get operation typically these are written to a http.Request +*/ +type SnitchRackGetParams struct { + + /*Host + The host name. If absent, the local server broadcast/listen address is used + + */ + Host *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the snitch rack get params +func (o *SnitchRackGetParams) WithTimeout(timeout time.Duration) *SnitchRackGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the snitch rack get params +func (o *SnitchRackGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the snitch rack get params +func (o *SnitchRackGetParams) WithContext(ctx context.Context) *SnitchRackGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the snitch rack get params +func (o *SnitchRackGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the snitch rack get params +func (o *SnitchRackGetParams) WithHTTPClient(client *http.Client) *SnitchRackGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the snitch rack get params +func (o *SnitchRackGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithHost adds the host to the snitch rack get params +func (o *SnitchRackGetParams) WithHost(host *string) *SnitchRackGetParams { + o.SetHost(host) + return o +} + +// SetHost adds the host to the snitch rack get params +func (o *SnitchRackGetParams) SetHost(host *string) { + o.Host = host +} + +// WriteToRequest writes these params to a swagger request +func (o *SnitchRackGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Host != nil { + + // query param host + var 
qrHost string + if o.Host != nil { + qrHost = *o.Host + } + qHost := qrHost + if qHost != "" { + if err := r.SetQueryParam("host", qHost); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_rack_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_rack_get_responses.go new file mode 100644 index 00000000000..75fe44665a6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/snitch_rack_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// SnitchRackGetReader is a Reader for the SnitchRackGet structure. +type SnitchRackGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SnitchRackGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSnitchRackGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSnitchRackGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSnitchRackGetOK creates a SnitchRackGetOK with default headers values +func NewSnitchRackGetOK() *SnitchRackGetOK { + return &SnitchRackGetOK{} +} + +/* +SnitchRackGetOK handles this case with default header values. + +Success +*/ +type SnitchRackGetOK struct { + Payload string +} + +func (o *SnitchRackGetOK) GetPayload() string { + return o.Payload +} + +func (o *SnitchRackGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSnitchRackGetDefault creates a SnitchRackGetDefault with default headers values +func NewSnitchRackGetDefault(code int) *SnitchRackGetDefault { + return &SnitchRackGetDefault{ + _statusCode: code, + } +} + +/* +SnitchRackGetDefault handles this case with default header values. 
+ +internal server error +*/ +type SnitchRackGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the snitch rack get default response +func (o *SnitchRackGetDefault) Code() int { + return o._statusCode +} + +func (o *SnitchRackGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *SnitchRackGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *SnitchRackGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_get_parameters.go new file mode 100644 index 00000000000..d9a9ad87c1d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyCasContentionTimeoutGetParams creates a new StorageProxyCasContentionTimeoutGetParams object +// with the default values initialized. 
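
The `WithContext` constructors above make each request cancellable independently of the client-side timeout. A brief sketch of a deadline-bounded read of the CAS contention timeout, again assuming the matching `StorageProxyCasContentionTimeoutGet` client method is generated alongside the parameter type below:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// readCasContentionTimeout bounds the call with a context deadline.
func readCasContentionTimeout(c *operations.Client) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	params := operations.NewStorageProxyCasContentionTimeoutGetParamsWithContext(ctx)
	resp, err := c.StorageProxyCasContentionTimeoutGet(params)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	// The OK payload is interface{} in the generated code; the agent
	// reports the timeout value in seconds.
	fmt.Println("cas contention timeout:", resp.GetPayload())
}
```
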
+func NewStorageProxyCasContentionTimeoutGetParams() *StorageProxyCasContentionTimeoutGetParams { + + return &StorageProxyCasContentionTimeoutGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyCasContentionTimeoutGetParamsWithTimeout creates a new StorageProxyCasContentionTimeoutGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyCasContentionTimeoutGetParamsWithTimeout(timeout time.Duration) *StorageProxyCasContentionTimeoutGetParams { + + return &StorageProxyCasContentionTimeoutGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyCasContentionTimeoutGetParamsWithContext creates a new StorageProxyCasContentionTimeoutGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyCasContentionTimeoutGetParamsWithContext(ctx context.Context) *StorageProxyCasContentionTimeoutGetParams { + + return &StorageProxyCasContentionTimeoutGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyCasContentionTimeoutGetParamsWithHTTPClient creates a new StorageProxyCasContentionTimeoutGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyCasContentionTimeoutGetParamsWithHTTPClient(client *http.Client) *StorageProxyCasContentionTimeoutGetParams { + + return &StorageProxyCasContentionTimeoutGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyCasContentionTimeoutGetParams contains all the parameters to send to the API endpoint +for the storage proxy cas contention timeout get operation typically these are written to a http.Request +*/ +type StorageProxyCasContentionTimeoutGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy cas contention timeout get params +func (o *StorageProxyCasContentionTimeoutGetParams) WithTimeout(timeout time.Duration) *StorageProxyCasContentionTimeoutGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy cas contention timeout get params +func (o *StorageProxyCasContentionTimeoutGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy cas contention timeout get params +func (o *StorageProxyCasContentionTimeoutGetParams) WithContext(ctx context.Context) *StorageProxyCasContentionTimeoutGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy cas contention timeout get params +func (o *StorageProxyCasContentionTimeoutGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy cas contention timeout get params +func (o *StorageProxyCasContentionTimeoutGetParams) WithHTTPClient(client *http.Client) *StorageProxyCasContentionTimeoutGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy cas contention timeout get params +func (o *StorageProxyCasContentionTimeoutGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyCasContentionTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_get_responses.go new file mode 100644 index 00000000000..5d95215d279 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyCasContentionTimeoutGetReader is a Reader for the StorageProxyCasContentionTimeoutGet structure. +type StorageProxyCasContentionTimeoutGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyCasContentionTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyCasContentionTimeoutGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyCasContentionTimeoutGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyCasContentionTimeoutGetOK creates a StorageProxyCasContentionTimeoutGetOK with default headers values +func NewStorageProxyCasContentionTimeoutGetOK() *StorageProxyCasContentionTimeoutGetOK { + return &StorageProxyCasContentionTimeoutGetOK{} +} + +/* +StorageProxyCasContentionTimeoutGetOK handles this case with default header values. + +Success +*/ +type StorageProxyCasContentionTimeoutGetOK struct { + Payload interface{} +} + +func (o *StorageProxyCasContentionTimeoutGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyCasContentionTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyCasContentionTimeoutGetDefault creates a StorageProxyCasContentionTimeoutGetDefault with default headers values +func NewStorageProxyCasContentionTimeoutGetDefault(code int) *StorageProxyCasContentionTimeoutGetDefault { + return &StorageProxyCasContentionTimeoutGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyCasContentionTimeoutGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyCasContentionTimeoutGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy cas contention timeout get default response +func (o *StorageProxyCasContentionTimeoutGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyCasContentionTimeoutGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyCasContentionTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyCasContentionTimeoutGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_post_parameters.go new file mode 100644 index 00000000000..516eb67e1c0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyCasContentionTimeoutPostParams creates a new StorageProxyCasContentionTimeoutPostParams object +// with the default values initialized. 
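
One naming wrinkle worth flagging in the POST variant that follows: because the API operation itself takes a `timeout` query parameter, go-swagger renames the HTTP request timeout to `requestTimeout`, so this params type uses `WithRequestTimeout` where every other operation in this diff uses `WithTimeout`. A small sketch, with the client method assumed as before:

```go
package main

import (
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// setCasContentionTimeout is a sketch; the POST client method is assumed.
func setCasContentionTimeout(c *operations.Client, seconds string) error {
	params := operations.NewStorageProxyCasContentionTimeoutPostParams().
		WithRequestTimeout(15 * time.Second). // bounds the HTTP round trip
		WithTimeout(seconds)                  // the API's "timeout" query param, in seconds
	_, err := c.StorageProxyCasContentionTimeoutPost(params)
	return err
}
```

The two setters are unrelated: one controls client-side transport behavior, the other is the value written to the server.
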
+func NewStorageProxyCasContentionTimeoutPostParams() *StorageProxyCasContentionTimeoutPostParams { + var () + return &StorageProxyCasContentionTimeoutPostParams{ + + requestTimeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyCasContentionTimeoutPostParamsWithTimeout creates a new StorageProxyCasContentionTimeoutPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyCasContentionTimeoutPostParamsWithTimeout(timeout time.Duration) *StorageProxyCasContentionTimeoutPostParams { + var () + return &StorageProxyCasContentionTimeoutPostParams{ + + requestTimeout: timeout, + } +} + +// NewStorageProxyCasContentionTimeoutPostParamsWithContext creates a new StorageProxyCasContentionTimeoutPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyCasContentionTimeoutPostParamsWithContext(ctx context.Context) *StorageProxyCasContentionTimeoutPostParams { + var () + return &StorageProxyCasContentionTimeoutPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyCasContentionTimeoutPostParamsWithHTTPClient creates a new StorageProxyCasContentionTimeoutPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyCasContentionTimeoutPostParamsWithHTTPClient(client *http.Client) *StorageProxyCasContentionTimeoutPostParams { + var () + return &StorageProxyCasContentionTimeoutPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyCasContentionTimeoutPostParams contains all the parameters to send to the API endpoint +for the storage proxy cas contention timeout post operation typically these are written to a http.Request +*/ +type StorageProxyCasContentionTimeoutPostParams struct { + + /*Timeout + timeout in second + + */ + Timeout string + + requestTimeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithRequestTimeout adds the timeout to the storage proxy cas contention timeout post params +func (o *StorageProxyCasContentionTimeoutPostParams) WithRequestTimeout(timeout time.Duration) *StorageProxyCasContentionTimeoutPostParams { + o.SetRequestTimeout(timeout) + return o +} + +// SetRequestTimeout adds the timeout to the storage proxy cas contention timeout post params +func (o *StorageProxyCasContentionTimeoutPostParams) SetRequestTimeout(timeout time.Duration) { + o.requestTimeout = timeout +} + +// WithContext adds the context to the storage proxy cas contention timeout post params +func (o *StorageProxyCasContentionTimeoutPostParams) WithContext(ctx context.Context) *StorageProxyCasContentionTimeoutPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy cas contention timeout post params +func (o *StorageProxyCasContentionTimeoutPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy cas contention timeout post params +func (o *StorageProxyCasContentionTimeoutPostParams) WithHTTPClient(client *http.Client) *StorageProxyCasContentionTimeoutPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy cas contention timeout post params +func (o *StorageProxyCasContentionTimeoutPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTimeout adds the timeout to the storage proxy cas contention timeout post params +func (o 
*StorageProxyCasContentionTimeoutPostParams) WithTimeout(timeout string) *StorageProxyCasContentionTimeoutPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy cas contention timeout post params +func (o *StorageProxyCasContentionTimeoutPostParams) SetTimeout(timeout string) { + o.Timeout = timeout +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyCasContentionTimeoutPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.requestTimeout); err != nil { + return err + } + var res []error + + // query param timeout + qrTimeout := o.Timeout + qTimeout := qrTimeout + if qTimeout != "" { + if err := r.SetQueryParam("timeout", qTimeout); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_post_responses.go new file mode 100644 index 00000000000..2f4be7d1c72 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_cas_contention_timeout_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyCasContentionTimeoutPostReader is a Reader for the StorageProxyCasContentionTimeoutPost structure. +type StorageProxyCasContentionTimeoutPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyCasContentionTimeoutPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyCasContentionTimeoutPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyCasContentionTimeoutPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyCasContentionTimeoutPostOK creates a StorageProxyCasContentionTimeoutPostOK with default headers values +func NewStorageProxyCasContentionTimeoutPostOK() *StorageProxyCasContentionTimeoutPostOK { + return &StorageProxyCasContentionTimeoutPostOK{} +} + +/* +StorageProxyCasContentionTimeoutPostOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyCasContentionTimeoutPostOK struct { +} + +func (o *StorageProxyCasContentionTimeoutPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyCasContentionTimeoutPostDefault creates a StorageProxyCasContentionTimeoutPostDefault with default headers values +func NewStorageProxyCasContentionTimeoutPostDefault(code int) *StorageProxyCasContentionTimeoutPostDefault { + return &StorageProxyCasContentionTimeoutPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyCasContentionTimeoutPostDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyCasContentionTimeoutPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy cas contention timeout post default response +func (o *StorageProxyCasContentionTimeoutPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyCasContentionTimeoutPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyCasContentionTimeoutPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyCasContentionTimeoutPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_get_parameters.go new file mode 100644 index 00000000000..f242dba937d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyCounterWriteRPCTimeoutGetParams creates a new StorageProxyCounterWriteRPCTimeoutGetParams object +// with the default values initialized. 
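+//
+// A minimal usage sketch, assuming the usual go-swagger operations facade;
+// only the params builders below are defined in this file. Note that for GET
+// operations WithTimeout takes a time.Duration (the HTTP request deadline),
+// unlike the POST variant, whose WithTimeout sets the string query parameter.
+//
+//	params := NewStorageProxyCounterWriteRPCTimeoutGetParamsWithContext(ctx).
+//		WithTimeout(5 * time.Second)
+//	// ok, err := client.Operations.StorageProxyCounterWriteRPCTimeoutGet(params)
+//	// ok.GetPayload() returns the configured timeout as an untyped JSON value.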
+func NewStorageProxyCounterWriteRPCTimeoutGetParams() *StorageProxyCounterWriteRPCTimeoutGetParams { + + return &StorageProxyCounterWriteRPCTimeoutGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyCounterWriteRPCTimeoutGetParamsWithTimeout creates a new StorageProxyCounterWriteRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyCounterWriteRPCTimeoutGetParamsWithTimeout(timeout time.Duration) *StorageProxyCounterWriteRPCTimeoutGetParams { + + return &StorageProxyCounterWriteRPCTimeoutGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyCounterWriteRPCTimeoutGetParamsWithContext creates a new StorageProxyCounterWriteRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyCounterWriteRPCTimeoutGetParamsWithContext(ctx context.Context) *StorageProxyCounterWriteRPCTimeoutGetParams { + + return &StorageProxyCounterWriteRPCTimeoutGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyCounterWriteRPCTimeoutGetParamsWithHTTPClient creates a new StorageProxyCounterWriteRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyCounterWriteRPCTimeoutGetParamsWithHTTPClient(client *http.Client) *StorageProxyCounterWriteRPCTimeoutGetParams { + + return &StorageProxyCounterWriteRPCTimeoutGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyCounterWriteRPCTimeoutGetParams contains all the parameters to send to the API endpoint +for the storage proxy counter write Rpc timeout get operation typically these are written to a http.Request +*/ +type StorageProxyCounterWriteRPCTimeoutGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy counter write Rpc timeout get params +func (o *StorageProxyCounterWriteRPCTimeoutGetParams) WithTimeout(timeout time.Duration) *StorageProxyCounterWriteRPCTimeoutGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy counter write Rpc timeout get params +func (o *StorageProxyCounterWriteRPCTimeoutGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy counter write Rpc timeout get params +func (o *StorageProxyCounterWriteRPCTimeoutGetParams) WithContext(ctx context.Context) *StorageProxyCounterWriteRPCTimeoutGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy counter write Rpc timeout get params +func (o *StorageProxyCounterWriteRPCTimeoutGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy counter write Rpc timeout get params +func (o *StorageProxyCounterWriteRPCTimeoutGetParams) WithHTTPClient(client *http.Client) *StorageProxyCounterWriteRPCTimeoutGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy counter write Rpc timeout get params +func (o *StorageProxyCounterWriteRPCTimeoutGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyCounterWriteRPCTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return 
err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_get_responses.go new file mode 100644 index 00000000000..eb84a699b85 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyCounterWriteRPCTimeoutGetReader is a Reader for the StorageProxyCounterWriteRPCTimeoutGet structure. +type StorageProxyCounterWriteRPCTimeoutGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyCounterWriteRPCTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyCounterWriteRPCTimeoutGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyCounterWriteRPCTimeoutGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyCounterWriteRPCTimeoutGetOK creates a StorageProxyCounterWriteRPCTimeoutGetOK with default headers values +func NewStorageProxyCounterWriteRPCTimeoutGetOK() *StorageProxyCounterWriteRPCTimeoutGetOK { + return &StorageProxyCounterWriteRPCTimeoutGetOK{} +} + +/* +StorageProxyCounterWriteRPCTimeoutGetOK handles this case with default header values. + +Success +*/ +type StorageProxyCounterWriteRPCTimeoutGetOK struct { + Payload interface{} +} + +func (o *StorageProxyCounterWriteRPCTimeoutGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyCounterWriteRPCTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyCounterWriteRPCTimeoutGetDefault creates a StorageProxyCounterWriteRPCTimeoutGetDefault with default headers values +func NewStorageProxyCounterWriteRPCTimeoutGetDefault(code int) *StorageProxyCounterWriteRPCTimeoutGetDefault { + return &StorageProxyCounterWriteRPCTimeoutGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyCounterWriteRPCTimeoutGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyCounterWriteRPCTimeoutGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy counter write Rpc timeout get default response +func (o *StorageProxyCounterWriteRPCTimeoutGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyCounterWriteRPCTimeoutGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyCounterWriteRPCTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyCounterWriteRPCTimeoutGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_post_parameters.go new file mode 100644 index 00000000000..8d6e89d47c7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyCounterWriteRPCTimeoutPostParams creates a new StorageProxyCounterWriteRPCTimeoutPostParams object +// with the default values initialized. 
+func NewStorageProxyCounterWriteRPCTimeoutPostParams() *StorageProxyCounterWriteRPCTimeoutPostParams { + var () + return &StorageProxyCounterWriteRPCTimeoutPostParams{ + + requestTimeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyCounterWriteRPCTimeoutPostParamsWithTimeout creates a new StorageProxyCounterWriteRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyCounterWriteRPCTimeoutPostParamsWithTimeout(timeout time.Duration) *StorageProxyCounterWriteRPCTimeoutPostParams { + var () + return &StorageProxyCounterWriteRPCTimeoutPostParams{ + + requestTimeout: timeout, + } +} + +// NewStorageProxyCounterWriteRPCTimeoutPostParamsWithContext creates a new StorageProxyCounterWriteRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyCounterWriteRPCTimeoutPostParamsWithContext(ctx context.Context) *StorageProxyCounterWriteRPCTimeoutPostParams { + var () + return &StorageProxyCounterWriteRPCTimeoutPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyCounterWriteRPCTimeoutPostParamsWithHTTPClient creates a new StorageProxyCounterWriteRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyCounterWriteRPCTimeoutPostParamsWithHTTPClient(client *http.Client) *StorageProxyCounterWriteRPCTimeoutPostParams { + var () + return &StorageProxyCounterWriteRPCTimeoutPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyCounterWriteRPCTimeoutPostParams contains all the parameters to send to the API endpoint +for the storage proxy counter write Rpc timeout post operation typically these are written to a http.Request +*/ +type StorageProxyCounterWriteRPCTimeoutPostParams struct { + + /*Timeout + timeout in seconds + + */ + Timeout string + + requestTimeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithRequestTimeout adds the timeout to the storage proxy counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) WithRequestTimeout(timeout time.Duration) *StorageProxyCounterWriteRPCTimeoutPostParams { + o.SetRequestTimeout(timeout) + return o +} + +// SetRequestTimeout adds the timeout to the storage proxy counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) SetRequestTimeout(timeout time.Duration) { + o.requestTimeout = timeout +} + +// WithContext adds the context to the storage proxy counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) WithContext(ctx context.Context) *StorageProxyCounterWriteRPCTimeoutPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) WithHTTPClient(client *http.Client) *StorageProxyCounterWriteRPCTimeoutPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTimeout adds the timeout to the storage proxy 
counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) WithTimeout(timeout string) *StorageProxyCounterWriteRPCTimeoutPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy counter write Rpc timeout post params +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) SetTimeout(timeout string) { + o.Timeout = timeout +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyCounterWriteRPCTimeoutPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.requestTimeout); err != nil { + return err + } + var res []error + + // query param timeout + qrTimeout := o.Timeout + qTimeout := qrTimeout + if qTimeout != "" { + if err := r.SetQueryParam("timeout", qTimeout); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_post_responses.go new file mode 100644 index 00000000000..93c0f49ff20 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_counter_write_rpc_timeout_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyCounterWriteRPCTimeoutPostReader is a Reader for the StorageProxyCounterWriteRPCTimeoutPost structure. +type StorageProxyCounterWriteRPCTimeoutPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyCounterWriteRPCTimeoutPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyCounterWriteRPCTimeoutPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyCounterWriteRPCTimeoutPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyCounterWriteRPCTimeoutPostOK creates a StorageProxyCounterWriteRPCTimeoutPostOK with default headers values +func NewStorageProxyCounterWriteRPCTimeoutPostOK() *StorageProxyCounterWriteRPCTimeoutPostOK { + return &StorageProxyCounterWriteRPCTimeoutPostOK{} +} + +/* +StorageProxyCounterWriteRPCTimeoutPostOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyCounterWriteRPCTimeoutPostOK struct { +} + +func (o *StorageProxyCounterWriteRPCTimeoutPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyCounterWriteRPCTimeoutPostDefault creates a StorageProxyCounterWriteRPCTimeoutPostDefault with default headers values +func NewStorageProxyCounterWriteRPCTimeoutPostDefault(code int) *StorageProxyCounterWriteRPCTimeoutPostDefault { + return &StorageProxyCounterWriteRPCTimeoutPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyCounterWriteRPCTimeoutPostDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyCounterWriteRPCTimeoutPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy counter write Rpc timeout post default response +func (o *StorageProxyCounterWriteRPCTimeoutPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyCounterWriteRPCTimeoutPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyCounterWriteRPCTimeoutPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyCounterWriteRPCTimeoutPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_get_parameters.go new file mode 100644 index 00000000000..2924343364f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyHintedHandoffEnabledByDcGetParams creates a new StorageProxyHintedHandoffEnabledByDcGetParams object +// with the default values initialized. 
+func NewStorageProxyHintedHandoffEnabledByDcGetParams() *StorageProxyHintedHandoffEnabledByDcGetParams { + + return &StorageProxyHintedHandoffEnabledByDcGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyHintedHandoffEnabledByDcGetParamsWithTimeout creates a new StorageProxyHintedHandoffEnabledByDcGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyHintedHandoffEnabledByDcGetParamsWithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledByDcGetParams { + + return &StorageProxyHintedHandoffEnabledByDcGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyHintedHandoffEnabledByDcGetParamsWithContext creates a new StorageProxyHintedHandoffEnabledByDcGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyHintedHandoffEnabledByDcGetParamsWithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledByDcGetParams { + + return &StorageProxyHintedHandoffEnabledByDcGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyHintedHandoffEnabledByDcGetParamsWithHTTPClient creates a new StorageProxyHintedHandoffEnabledByDcGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyHintedHandoffEnabledByDcGetParamsWithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledByDcGetParams { + + return &StorageProxyHintedHandoffEnabledByDcGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyHintedHandoffEnabledByDcGetParams contains all the parameters to send to the API endpoint +for the storage proxy hinted handoff enabled by dc get operation typically these are written to a http.Request +*/ +type StorageProxyHintedHandoffEnabledByDcGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy hinted handoff enabled by dc get params +func (o *StorageProxyHintedHandoffEnabledByDcGetParams) WithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledByDcGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy hinted handoff enabled by dc get params +func (o *StorageProxyHintedHandoffEnabledByDcGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy hinted handoff enabled by dc get params +func (o *StorageProxyHintedHandoffEnabledByDcGetParams) WithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledByDcGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy hinted handoff enabled by dc get params +func (o *StorageProxyHintedHandoffEnabledByDcGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled by dc get params +func (o *StorageProxyHintedHandoffEnabledByDcGetParams) WithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledByDcGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled by dc get params +func (o *StorageProxyHintedHandoffEnabledByDcGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyHintedHandoffEnabledByDcGetParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_get_responses.go new file mode 100644 index 00000000000..11d4512f299 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyHintedHandoffEnabledByDcGetReader is a Reader for the StorageProxyHintedHandoffEnabledByDcGet structure. +type StorageProxyHintedHandoffEnabledByDcGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyHintedHandoffEnabledByDcGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyHintedHandoffEnabledByDcGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyHintedHandoffEnabledByDcGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyHintedHandoffEnabledByDcGetOK creates a StorageProxyHintedHandoffEnabledByDcGetOK with default headers values +func NewStorageProxyHintedHandoffEnabledByDcGetOK() *StorageProxyHintedHandoffEnabledByDcGetOK { + return &StorageProxyHintedHandoffEnabledByDcGetOK{} +} + +/* +StorageProxyHintedHandoffEnabledByDcGetOK handles this case with default header values. + +Success +*/ +type StorageProxyHintedHandoffEnabledByDcGetOK struct { + Payload []*models.MapperList +} + +func (o *StorageProxyHintedHandoffEnabledByDcGetOK) GetPayload() []*models.MapperList { + return o.Payload +} + +func (o *StorageProxyHintedHandoffEnabledByDcGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyHintedHandoffEnabledByDcGetDefault creates a StorageProxyHintedHandoffEnabledByDcGetDefault with default headers values +func NewStorageProxyHintedHandoffEnabledByDcGetDefault(code int) *StorageProxyHintedHandoffEnabledByDcGetDefault { + return &StorageProxyHintedHandoffEnabledByDcGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyHintedHandoffEnabledByDcGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyHintedHandoffEnabledByDcGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy hinted handoff enabled by dc get default response +func (o *StorageProxyHintedHandoffEnabledByDcGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyHintedHandoffEnabledByDcGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyHintedHandoffEnabledByDcGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyHintedHandoffEnabledByDcGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_post_parameters.go new file mode 100644 index 00000000000..6b58dd4757d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyHintedHandoffEnabledByDcPostParams creates a new StorageProxyHintedHandoffEnabledByDcPostParams object +// with the default values initialized. 
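+//
+// Illustrative sketch (the datacenter names are assumptions): Dcs is sent as
+// a single comma-separated "dcs" query parameter, as WriteToRequest below
+// shows.
+//
+//	params := NewStorageProxyHintedHandoffEnabledByDcPostParams().WithDcs("dc1,dc2")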
+func NewStorageProxyHintedHandoffEnabledByDcPostParams() *StorageProxyHintedHandoffEnabledByDcPostParams { + var () + return &StorageProxyHintedHandoffEnabledByDcPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyHintedHandoffEnabledByDcPostParamsWithTimeout creates a new StorageProxyHintedHandoffEnabledByDcPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyHintedHandoffEnabledByDcPostParamsWithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledByDcPostParams { + var () + return &StorageProxyHintedHandoffEnabledByDcPostParams{ + + timeout: timeout, + } +} + +// NewStorageProxyHintedHandoffEnabledByDcPostParamsWithContext creates a new StorageProxyHintedHandoffEnabledByDcPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyHintedHandoffEnabledByDcPostParamsWithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledByDcPostParams { + var () + return &StorageProxyHintedHandoffEnabledByDcPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyHintedHandoffEnabledByDcPostParamsWithHTTPClient creates a new StorageProxyHintedHandoffEnabledByDcPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyHintedHandoffEnabledByDcPostParamsWithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledByDcPostParams { + var () + return &StorageProxyHintedHandoffEnabledByDcPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyHintedHandoffEnabledByDcPostParams contains all the parameters to send to the API endpoint +for the storage proxy hinted handoff enabled by dc post operation typically these are written to a http.Request +*/ +type StorageProxyHintedHandoffEnabledByDcPostParams struct { + + /*Dcs + The dcs to enable in the CSV format + + */ + Dcs string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) WithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledByDcPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) WithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledByDcPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) WithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledByDcPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithDcs adds the dcs to the 
storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) WithDcs(dcs string) *StorageProxyHintedHandoffEnabledByDcPostParams { + o.SetDcs(dcs) + return o +} + +// SetDcs adds the dcs to the storage proxy hinted handoff enabled by dc post params +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) SetDcs(dcs string) { + o.Dcs = dcs +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyHintedHandoffEnabledByDcPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param dcs + qrDcs := o.Dcs + qDcs := qrDcs + if qDcs != "" { + if err := r.SetQueryParam("dcs", qDcs); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_post_responses.go new file mode 100644 index 00000000000..f241dcd83ce --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_by_dc_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyHintedHandoffEnabledByDcPostReader is a Reader for the StorageProxyHintedHandoffEnabledByDcPost structure. +type StorageProxyHintedHandoffEnabledByDcPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyHintedHandoffEnabledByDcPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyHintedHandoffEnabledByDcPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyHintedHandoffEnabledByDcPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyHintedHandoffEnabledByDcPostOK creates a StorageProxyHintedHandoffEnabledByDcPostOK with default headers values +func NewStorageProxyHintedHandoffEnabledByDcPostOK() *StorageProxyHintedHandoffEnabledByDcPostOK { + return &StorageProxyHintedHandoffEnabledByDcPostOK{} +} + +/* +StorageProxyHintedHandoffEnabledByDcPostOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyHintedHandoffEnabledByDcPostOK struct { +} + +func (o *StorageProxyHintedHandoffEnabledByDcPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyHintedHandoffEnabledByDcPostDefault creates a StorageProxyHintedHandoffEnabledByDcPostDefault with default headers values +func NewStorageProxyHintedHandoffEnabledByDcPostDefault(code int) *StorageProxyHintedHandoffEnabledByDcPostDefault { + return &StorageProxyHintedHandoffEnabledByDcPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyHintedHandoffEnabledByDcPostDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyHintedHandoffEnabledByDcPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy hinted handoff enabled by dc post default response +func (o *StorageProxyHintedHandoffEnabledByDcPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyHintedHandoffEnabledByDcPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyHintedHandoffEnabledByDcPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyHintedHandoffEnabledByDcPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_get_parameters.go new file mode 100644 index 00000000000..2956c9ee1af --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyHintedHandoffEnabledGetParams creates a new StorageProxyHintedHandoffEnabledGetParams object +// with the default values initialized. 
+func NewStorageProxyHintedHandoffEnabledGetParams() *StorageProxyHintedHandoffEnabledGetParams { + + return &StorageProxyHintedHandoffEnabledGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyHintedHandoffEnabledGetParamsWithTimeout creates a new StorageProxyHintedHandoffEnabledGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyHintedHandoffEnabledGetParamsWithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledGetParams { + + return &StorageProxyHintedHandoffEnabledGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyHintedHandoffEnabledGetParamsWithContext creates a new StorageProxyHintedHandoffEnabledGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyHintedHandoffEnabledGetParamsWithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledGetParams { + + return &StorageProxyHintedHandoffEnabledGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyHintedHandoffEnabledGetParamsWithHTTPClient creates a new StorageProxyHintedHandoffEnabledGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyHintedHandoffEnabledGetParamsWithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledGetParams { + + return &StorageProxyHintedHandoffEnabledGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyHintedHandoffEnabledGetParams contains all the parameters to send to the API endpoint +for the storage proxy hinted handoff enabled get operation typically these are written to a http.Request +*/ +type StorageProxyHintedHandoffEnabledGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy hinted handoff enabled get params +func (o *StorageProxyHintedHandoffEnabledGetParams) WithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy hinted handoff enabled get params +func (o *StorageProxyHintedHandoffEnabledGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy hinted handoff enabled get params +func (o *StorageProxyHintedHandoffEnabledGetParams) WithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy hinted handoff enabled get params +func (o *StorageProxyHintedHandoffEnabledGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled get params +func (o *StorageProxyHintedHandoffEnabledGetParams) WithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled get params +func (o *StorageProxyHintedHandoffEnabledGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyHintedHandoffEnabledGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_get_responses.go new file mode 100644 index 00000000000..1724d039fcd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyHintedHandoffEnabledGetReader is a Reader for the StorageProxyHintedHandoffEnabledGet structure. +type StorageProxyHintedHandoffEnabledGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyHintedHandoffEnabledGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyHintedHandoffEnabledGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyHintedHandoffEnabledGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyHintedHandoffEnabledGetOK creates a StorageProxyHintedHandoffEnabledGetOK with default headers values +func NewStorageProxyHintedHandoffEnabledGetOK() *StorageProxyHintedHandoffEnabledGetOK { + return &StorageProxyHintedHandoffEnabledGetOK{} +} + +/* +StorageProxyHintedHandoffEnabledGetOK handles this case with default header values. + +Success +*/ +type StorageProxyHintedHandoffEnabledGetOK struct { + Payload bool +} + +func (o *StorageProxyHintedHandoffEnabledGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageProxyHintedHandoffEnabledGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyHintedHandoffEnabledGetDefault creates a StorageProxyHintedHandoffEnabledGetDefault with default headers values +func NewStorageProxyHintedHandoffEnabledGetDefault(code int) *StorageProxyHintedHandoffEnabledGetDefault { + return &StorageProxyHintedHandoffEnabledGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyHintedHandoffEnabledGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyHintedHandoffEnabledGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy hinted handoff enabled get default response +func (o *StorageProxyHintedHandoffEnabledGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyHintedHandoffEnabledGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyHintedHandoffEnabledGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyHintedHandoffEnabledGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_post_parameters.go new file mode 100644 index 00000000000..d0521c4d8f5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageProxyHintedHandoffEnabledPostParams creates a new StorageProxyHintedHandoffEnabledPostParams object +// with the default values initialized. 
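+//
+// Sketch only: the boolean Enable field is formatted with swag.FormatBool
+// into the "enable" query parameter (see WriteToRequest below), so enabling
+// hinted handoff reduces to:
+//
+//	params := NewStorageProxyHintedHandoffEnabledPostParams().WithEnable(true)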
+func NewStorageProxyHintedHandoffEnabledPostParams() *StorageProxyHintedHandoffEnabledPostParams { + var () + return &StorageProxyHintedHandoffEnabledPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyHintedHandoffEnabledPostParamsWithTimeout creates a new StorageProxyHintedHandoffEnabledPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyHintedHandoffEnabledPostParamsWithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledPostParams { + var () + return &StorageProxyHintedHandoffEnabledPostParams{ + + timeout: timeout, + } +} + +// NewStorageProxyHintedHandoffEnabledPostParamsWithContext creates a new StorageProxyHintedHandoffEnabledPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyHintedHandoffEnabledPostParamsWithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledPostParams { + var () + return &StorageProxyHintedHandoffEnabledPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyHintedHandoffEnabledPostParamsWithHTTPClient creates a new StorageProxyHintedHandoffEnabledPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyHintedHandoffEnabledPostParamsWithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledPostParams { + var () + return &StorageProxyHintedHandoffEnabledPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyHintedHandoffEnabledPostParams contains all the parameters to send to the API endpoint +for the storage proxy hinted handoff enabled post operation typically these are written to a http.Request +*/ +type StorageProxyHintedHandoffEnabledPostParams struct { + + /*Enable + Set to true to enable hinted handoff + + */ + Enable bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) WithTimeout(timeout time.Duration) *StorageProxyHintedHandoffEnabledPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) WithContext(ctx context.Context) *StorageProxyHintedHandoffEnabledPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) WithHTTPClient(client *http.Client) *StorageProxyHintedHandoffEnabledPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEnable adds the enable to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) WithEnable(enable bool) 
*StorageProxyHintedHandoffEnabledPostParams { + o.SetEnable(enable) + return o +} + +// SetEnable adds the enable to the storage proxy hinted handoff enabled post params +func (o *StorageProxyHintedHandoffEnabledPostParams) SetEnable(enable bool) { + o.Enable = enable +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyHintedHandoffEnabledPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param enable + qrEnable := o.Enable + qEnable := swag.FormatBool(qrEnable) + if qEnable != "" { + if err := r.SetQueryParam("enable", qEnable); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_post_responses.go new file mode 100644 index 00000000000..deabfc55c71 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hinted_handoff_enabled_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyHintedHandoffEnabledPostReader is a Reader for the StorageProxyHintedHandoffEnabledPost structure. +type StorageProxyHintedHandoffEnabledPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyHintedHandoffEnabledPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyHintedHandoffEnabledPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyHintedHandoffEnabledPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyHintedHandoffEnabledPostOK creates a StorageProxyHintedHandoffEnabledPostOK with default headers values +func NewStorageProxyHintedHandoffEnabledPostOK() *StorageProxyHintedHandoffEnabledPostOK { + return &StorageProxyHintedHandoffEnabledPostOK{} +} + +/* +StorageProxyHintedHandoffEnabledPostOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyHintedHandoffEnabledPostOK struct { +} + +func (o *StorageProxyHintedHandoffEnabledPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyHintedHandoffEnabledPostDefault creates a StorageProxyHintedHandoffEnabledPostDefault with default headers values +func NewStorageProxyHintedHandoffEnabledPostDefault(code int) *StorageProxyHintedHandoffEnabledPostDefault { + return &StorageProxyHintedHandoffEnabledPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyHintedHandoffEnabledPostDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyHintedHandoffEnabledPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy hinted handoff enabled post default response +func (o *StorageProxyHintedHandoffEnabledPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyHintedHandoffEnabledPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyHintedHandoffEnabledPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyHintedHandoffEnabledPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hints_in_progress_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hints_in_progress_get_parameters.go new file mode 100644 index 00000000000..67ca1926138 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hints_in_progress_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyHintsInProgressGetParams creates a new StorageProxyHintsInProgressGetParams object +// with the default values initialized. 
+func NewStorageProxyHintsInProgressGetParams() *StorageProxyHintsInProgressGetParams { + + return &StorageProxyHintsInProgressGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyHintsInProgressGetParamsWithTimeout creates a new StorageProxyHintsInProgressGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyHintsInProgressGetParamsWithTimeout(timeout time.Duration) *StorageProxyHintsInProgressGetParams { + + return &StorageProxyHintsInProgressGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyHintsInProgressGetParamsWithContext creates a new StorageProxyHintsInProgressGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyHintsInProgressGetParamsWithContext(ctx context.Context) *StorageProxyHintsInProgressGetParams { + + return &StorageProxyHintsInProgressGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyHintsInProgressGetParamsWithHTTPClient creates a new StorageProxyHintsInProgressGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyHintsInProgressGetParamsWithHTTPClient(client *http.Client) *StorageProxyHintsInProgressGetParams { + + return &StorageProxyHintsInProgressGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyHintsInProgressGetParams contains all the parameters to send to the API endpoint +for the storage proxy hints in progress get operation typically these are written to a http.Request +*/ +type StorageProxyHintsInProgressGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy hints in progress get params +func (o *StorageProxyHintsInProgressGetParams) WithTimeout(timeout time.Duration) *StorageProxyHintsInProgressGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy hints in progress get params +func (o *StorageProxyHintsInProgressGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy hints in progress get params +func (o *StorageProxyHintsInProgressGetParams) WithContext(ctx context.Context) *StorageProxyHintsInProgressGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy hints in progress get params +func (o *StorageProxyHintsInProgressGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy hints in progress get params +func (o *StorageProxyHintsInProgressGetParams) WithHTTPClient(client *http.Client) *StorageProxyHintsInProgressGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy hints in progress get params +func (o *StorageProxyHintsInProgressGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyHintsInProgressGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hints_in_progress_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hints_in_progress_get_responses.go new file mode 100644 index 00000000000..7919780b840 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_hints_in_progress_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyHintsInProgressGetReader is a Reader for the StorageProxyHintsInProgressGet structure. +type StorageProxyHintsInProgressGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyHintsInProgressGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyHintsInProgressGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyHintsInProgressGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyHintsInProgressGetOK creates a StorageProxyHintsInProgressGetOK with default headers values +func NewStorageProxyHintsInProgressGetOK() *StorageProxyHintsInProgressGetOK { + return &StorageProxyHintsInProgressGetOK{} +} + +/* +StorageProxyHintsInProgressGetOK handles this case with default header values. + +Success +*/ +type StorageProxyHintsInProgressGetOK struct { + Payload int32 +} + +func (o *StorageProxyHintsInProgressGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyHintsInProgressGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyHintsInProgressGetDefault creates a StorageProxyHintsInProgressGetDefault with default headers values +func NewStorageProxyHintsInProgressGetDefault(code int) *StorageProxyHintsInProgressGetDefault { + return &StorageProxyHintsInProgressGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyHintsInProgressGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyHintsInProgressGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy hints in progress get default response +func (o *StorageProxyHintsInProgressGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyHintsInProgressGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyHintsInProgressGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyHintsInProgressGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_get_parameters.go new file mode 100644 index 00000000000..2afac1eb53f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMaxHintWindowGetParams creates a new StorageProxyMaxHintWindowGetParams object +// with the default values initialized. 
+func NewStorageProxyMaxHintWindowGetParams() *StorageProxyMaxHintWindowGetParams { + + return &StorageProxyMaxHintWindowGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMaxHintWindowGetParamsWithTimeout creates a new StorageProxyMaxHintWindowGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMaxHintWindowGetParamsWithTimeout(timeout time.Duration) *StorageProxyMaxHintWindowGetParams { + + return &StorageProxyMaxHintWindowGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMaxHintWindowGetParamsWithContext creates a new StorageProxyMaxHintWindowGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMaxHintWindowGetParamsWithContext(ctx context.Context) *StorageProxyMaxHintWindowGetParams { + + return &StorageProxyMaxHintWindowGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMaxHintWindowGetParamsWithHTTPClient creates a new StorageProxyMaxHintWindowGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMaxHintWindowGetParamsWithHTTPClient(client *http.Client) *StorageProxyMaxHintWindowGetParams { + + return &StorageProxyMaxHintWindowGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMaxHintWindowGetParams contains all the parameters to send to the API endpoint +for the storage proxy max hint window get operation typically these are written to a http.Request +*/ +type StorageProxyMaxHintWindowGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy max hint window get params +func (o *StorageProxyMaxHintWindowGetParams) WithTimeout(timeout time.Duration) *StorageProxyMaxHintWindowGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy max hint window get params +func (o *StorageProxyMaxHintWindowGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy max hint window get params +func (o *StorageProxyMaxHintWindowGetParams) WithContext(ctx context.Context) *StorageProxyMaxHintWindowGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy max hint window get params +func (o *StorageProxyMaxHintWindowGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy max hint window get params +func (o *StorageProxyMaxHintWindowGetParams) WithHTTPClient(client *http.Client) *StorageProxyMaxHintWindowGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy max hint window get params +func (o *StorageProxyMaxHintWindowGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMaxHintWindowGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_get_responses.go new file mode 100644 index 00000000000..d169895cfc9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMaxHintWindowGetReader is a Reader for the StorageProxyMaxHintWindowGet structure. +type StorageProxyMaxHintWindowGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMaxHintWindowGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMaxHintWindowGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMaxHintWindowGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMaxHintWindowGetOK creates a StorageProxyMaxHintWindowGetOK with default headers values +func NewStorageProxyMaxHintWindowGetOK() *StorageProxyMaxHintWindowGetOK { + return &StorageProxyMaxHintWindowGetOK{} +} + +/* +StorageProxyMaxHintWindowGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMaxHintWindowGetOK struct { + Payload int32 +} + +func (o *StorageProxyMaxHintWindowGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMaxHintWindowGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMaxHintWindowGetDefault creates a StorageProxyMaxHintWindowGetDefault with default headers values +func NewStorageProxyMaxHintWindowGetDefault(code int) *StorageProxyMaxHintWindowGetDefault { + return &StorageProxyMaxHintWindowGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMaxHintWindowGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMaxHintWindowGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy max hint window get default response +func (o *StorageProxyMaxHintWindowGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMaxHintWindowGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMaxHintWindowGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMaxHintWindowGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_post_parameters.go new file mode 100644 index 00000000000..1408e025c67 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageProxyMaxHintWindowPostParams creates a new StorageProxyMaxHintWindowPostParams object +// with the default values initialized. 
+func NewStorageProxyMaxHintWindowPostParams() *StorageProxyMaxHintWindowPostParams { + var () + return &StorageProxyMaxHintWindowPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMaxHintWindowPostParamsWithTimeout creates a new StorageProxyMaxHintWindowPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMaxHintWindowPostParamsWithTimeout(timeout time.Duration) *StorageProxyMaxHintWindowPostParams { + var () + return &StorageProxyMaxHintWindowPostParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMaxHintWindowPostParamsWithContext creates a new StorageProxyMaxHintWindowPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMaxHintWindowPostParamsWithContext(ctx context.Context) *StorageProxyMaxHintWindowPostParams { + var () + return &StorageProxyMaxHintWindowPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyMaxHintWindowPostParamsWithHTTPClient creates a new StorageProxyMaxHintWindowPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMaxHintWindowPostParamsWithHTTPClient(client *http.Client) *StorageProxyMaxHintWindowPostParams { + var () + return &StorageProxyMaxHintWindowPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMaxHintWindowPostParams contains all the parameters to send to the API endpoint +for the storage proxy max hint window post operation typically these are written to a http.Request +*/ +type StorageProxyMaxHintWindowPostParams struct { + + /*Ms + max hint window in ms + + */ + Ms int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) WithTimeout(timeout time.Duration) *StorageProxyMaxHintWindowPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) WithContext(ctx context.Context) *StorageProxyMaxHintWindowPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) WithHTTPClient(client *http.Client) *StorageProxyMaxHintWindowPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithMs adds the ms to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) WithMs(ms int32) *StorageProxyMaxHintWindowPostParams { + o.SetMs(ms) + return o +} + +// SetMs adds the ms to the storage proxy max hint window post params +func (o *StorageProxyMaxHintWindowPostParams) SetMs(ms int32) { + o.Ms = ms +} + +// WriteToRequest writes these params to a swagger request +func (o 
*StorageProxyMaxHintWindowPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param ms + qrMs := o.Ms + qMs := swag.FormatInt32(qrMs) + if qMs != "" { + if err := r.SetQueryParam("ms", qMs); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_post_responses.go new file mode 100644 index 00000000000..7d9e899dddc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hint_window_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMaxHintWindowPostReader is a Reader for the StorageProxyMaxHintWindowPost structure. +type StorageProxyMaxHintWindowPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMaxHintWindowPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMaxHintWindowPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMaxHintWindowPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMaxHintWindowPostOK creates a StorageProxyMaxHintWindowPostOK with default headers values +func NewStorageProxyMaxHintWindowPostOK() *StorageProxyMaxHintWindowPostOK { + return &StorageProxyMaxHintWindowPostOK{} +} + +/* +StorageProxyMaxHintWindowPostOK handles this case with default header values. + +Success +*/ +type StorageProxyMaxHintWindowPostOK struct { +} + +func (o *StorageProxyMaxHintWindowPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMaxHintWindowPostDefault creates a StorageProxyMaxHintWindowPostDefault with default headers values +func NewStorageProxyMaxHintWindowPostDefault(code int) *StorageProxyMaxHintWindowPostDefault { + return &StorageProxyMaxHintWindowPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMaxHintWindowPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMaxHintWindowPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy max hint window post default response +func (o *StorageProxyMaxHintWindowPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMaxHintWindowPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMaxHintWindowPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMaxHintWindowPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_get_parameters.go new file mode 100644 index 00000000000..4d8d88177f6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMaxHintsInProgressGetParams creates a new StorageProxyMaxHintsInProgressGetParams object +// with the default values initialized. 
+func NewStorageProxyMaxHintsInProgressGetParams() *StorageProxyMaxHintsInProgressGetParams { + + return &StorageProxyMaxHintsInProgressGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMaxHintsInProgressGetParamsWithTimeout creates a new StorageProxyMaxHintsInProgressGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMaxHintsInProgressGetParamsWithTimeout(timeout time.Duration) *StorageProxyMaxHintsInProgressGetParams { + + return &StorageProxyMaxHintsInProgressGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMaxHintsInProgressGetParamsWithContext creates a new StorageProxyMaxHintsInProgressGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMaxHintsInProgressGetParamsWithContext(ctx context.Context) *StorageProxyMaxHintsInProgressGetParams { + + return &StorageProxyMaxHintsInProgressGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMaxHintsInProgressGetParamsWithHTTPClient creates a new StorageProxyMaxHintsInProgressGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMaxHintsInProgressGetParamsWithHTTPClient(client *http.Client) *StorageProxyMaxHintsInProgressGetParams { + + return &StorageProxyMaxHintsInProgressGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMaxHintsInProgressGetParams contains all the parameters to send to the API endpoint +for the storage proxy max hints in progress get operation typically these are written to a http.Request +*/ +type StorageProxyMaxHintsInProgressGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy max hints in progress get params +func (o *StorageProxyMaxHintsInProgressGetParams) WithTimeout(timeout time.Duration) *StorageProxyMaxHintsInProgressGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy max hints in progress get params +func (o *StorageProxyMaxHintsInProgressGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy max hints in progress get params +func (o *StorageProxyMaxHintsInProgressGetParams) WithContext(ctx context.Context) *StorageProxyMaxHintsInProgressGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy max hints in progress get params +func (o *StorageProxyMaxHintsInProgressGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy max hints in progress get params +func (o *StorageProxyMaxHintsInProgressGetParams) WithHTTPClient(client *http.Client) *StorageProxyMaxHintsInProgressGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy max hints in progress get params +func (o *StorageProxyMaxHintsInProgressGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMaxHintsInProgressGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_get_responses.go new file mode 100644 index 00000000000..4934ed8e4dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMaxHintsInProgressGetReader is a Reader for the StorageProxyMaxHintsInProgressGet structure. +type StorageProxyMaxHintsInProgressGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMaxHintsInProgressGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMaxHintsInProgressGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMaxHintsInProgressGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMaxHintsInProgressGetOK creates a StorageProxyMaxHintsInProgressGetOK with default headers values +func NewStorageProxyMaxHintsInProgressGetOK() *StorageProxyMaxHintsInProgressGetOK { + return &StorageProxyMaxHintsInProgressGetOK{} +} + +/* +StorageProxyMaxHintsInProgressGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMaxHintsInProgressGetOK struct { + Payload int32 +} + +func (o *StorageProxyMaxHintsInProgressGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMaxHintsInProgressGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMaxHintsInProgressGetDefault creates a StorageProxyMaxHintsInProgressGetDefault with default headers values +func NewStorageProxyMaxHintsInProgressGetDefault(code int) *StorageProxyMaxHintsInProgressGetDefault { + return &StorageProxyMaxHintsInProgressGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMaxHintsInProgressGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMaxHintsInProgressGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy max hints in progress get default response +func (o *StorageProxyMaxHintsInProgressGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMaxHintsInProgressGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMaxHintsInProgressGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMaxHintsInProgressGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_post_parameters.go new file mode 100644 index 00000000000..28290d47e23 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageProxyMaxHintsInProgressPostParams creates a new StorageProxyMaxHintsInProgressPostParams object +// with the default values initialized. 
+func NewStorageProxyMaxHintsInProgressPostParams() *StorageProxyMaxHintsInProgressPostParams { + var () + return &StorageProxyMaxHintsInProgressPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMaxHintsInProgressPostParamsWithTimeout creates a new StorageProxyMaxHintsInProgressPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMaxHintsInProgressPostParamsWithTimeout(timeout time.Duration) *StorageProxyMaxHintsInProgressPostParams { + var () + return &StorageProxyMaxHintsInProgressPostParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMaxHintsInProgressPostParamsWithContext creates a new StorageProxyMaxHintsInProgressPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMaxHintsInProgressPostParamsWithContext(ctx context.Context) *StorageProxyMaxHintsInProgressPostParams { + var () + return &StorageProxyMaxHintsInProgressPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyMaxHintsInProgressPostParamsWithHTTPClient creates a new StorageProxyMaxHintsInProgressPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMaxHintsInProgressPostParamsWithHTTPClient(client *http.Client) *StorageProxyMaxHintsInProgressPostParams { + var () + return &StorageProxyMaxHintsInProgressPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMaxHintsInProgressPostParams contains all the parameters to send to the API endpoint +for the storage proxy max hints in progress post operation typically these are written to a http.Request +*/ +type StorageProxyMaxHintsInProgressPostParams struct { + + /*Qs + max hints in progress + + */ + Qs int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy max hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) WithTimeout(timeout time.Duration) *StorageProxyMaxHintsInProgressPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy max hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy max hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) WithContext(ctx context.Context) *StorageProxyMaxHintsInProgressPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy max hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy max hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) WithHTTPClient(client *http.Client) *StorageProxyMaxHintsInProgressPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy max hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithQs adds the qs to the storage proxy max hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) WithQs(qs int32) *StorageProxyMaxHintsInProgressPostParams { + o.SetQs(qs) + return o +} + +// SetQs adds the qs to the storage proxy max 
hints in progress post params +func (o *StorageProxyMaxHintsInProgressPostParams) SetQs(qs int32) { + o.Qs = qs +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMaxHintsInProgressPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param qs + qrQs := o.Qs + qQs := swag.FormatInt32(qrQs) + if qQs != "" { + if err := r.SetQueryParam("qs", qQs); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_post_responses.go new file mode 100644 index 00000000000..48fd75d2e68 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_max_hints_in_progress_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMaxHintsInProgressPostReader is a Reader for the StorageProxyMaxHintsInProgressPost structure. +type StorageProxyMaxHintsInProgressPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMaxHintsInProgressPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMaxHintsInProgressPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMaxHintsInProgressPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMaxHintsInProgressPostOK creates a StorageProxyMaxHintsInProgressPostOK with default headers values +func NewStorageProxyMaxHintsInProgressPostOK() *StorageProxyMaxHintsInProgressPostOK { + return &StorageProxyMaxHintsInProgressPostOK{} +} + +/* +StorageProxyMaxHintsInProgressPostOK handles this case with default header values. + +Success +*/ +type StorageProxyMaxHintsInProgressPostOK struct { +} + +func (o *StorageProxyMaxHintsInProgressPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMaxHintsInProgressPostDefault creates a StorageProxyMaxHintsInProgressPostDefault with default headers values +func NewStorageProxyMaxHintsInProgressPostDefault(code int) *StorageProxyMaxHintsInProgressPostDefault { + return &StorageProxyMaxHintsInProgressPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMaxHintsInProgressPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMaxHintsInProgressPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy max hints in progress post default response +func (o *StorageProxyMaxHintsInProgressPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMaxHintsInProgressPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMaxHintsInProgressPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMaxHintsInProgressPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_condition_not_met_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_condition_not_met_get_parameters.go new file mode 100644 index 00000000000..862c13e9fde --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_condition_not_met_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasReadConditionNotMetGetParams creates a new StorageProxyMetricsCasReadConditionNotMetGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasReadConditionNotMetGetParams() *StorageProxyMetricsCasReadConditionNotMetGetParams { + + return &StorageProxyMetricsCasReadConditionNotMetGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasReadConditionNotMetGetParamsWithTimeout creates a new StorageProxyMetricsCasReadConditionNotMetGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasReadConditionNotMetGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadConditionNotMetGetParams { + + return &StorageProxyMetricsCasReadConditionNotMetGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasReadConditionNotMetGetParamsWithContext creates a new StorageProxyMetricsCasReadConditionNotMetGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasReadConditionNotMetGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasReadConditionNotMetGetParams { + + return &StorageProxyMetricsCasReadConditionNotMetGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasReadConditionNotMetGetParamsWithHTTPClient creates a new StorageProxyMetricsCasReadConditionNotMetGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasReadConditionNotMetGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadConditionNotMetGetParams { + + return &StorageProxyMetricsCasReadConditionNotMetGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasReadConditionNotMetGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas read condition not met get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasReadConditionNotMetGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas read condition not met get params +func (o *StorageProxyMetricsCasReadConditionNotMetGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadConditionNotMetGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas read condition not met get params +func (o *StorageProxyMetricsCasReadConditionNotMetGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas read condition not met get params +func (o *StorageProxyMetricsCasReadConditionNotMetGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasReadConditionNotMetGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas read condition not met get params +func (o *StorageProxyMetricsCasReadConditionNotMetGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas read condition not met get params +func (o *StorageProxyMetricsCasReadConditionNotMetGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadConditionNotMetGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas read condition not met get params +func (o *StorageProxyMetricsCasReadConditionNotMetGetParams) SetHTTPClient(client *http.Client) { + 
o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasReadConditionNotMetGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_condition_not_met_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_condition_not_met_get_responses.go new file mode 100644 index 00000000000..145884503f1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_condition_not_met_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasReadConditionNotMetGetReader is a Reader for the StorageProxyMetricsCasReadConditionNotMetGet structure. +type StorageProxyMetricsCasReadConditionNotMetGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasReadConditionNotMetGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasReadConditionNotMetGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasReadConditionNotMetGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasReadConditionNotMetGetOK creates a StorageProxyMetricsCasReadConditionNotMetGetOK with default headers values +func NewStorageProxyMetricsCasReadConditionNotMetGetOK() *StorageProxyMetricsCasReadConditionNotMetGetOK { + return &StorageProxyMetricsCasReadConditionNotMetGetOK{} +} + +/* +StorageProxyMetricsCasReadConditionNotMetGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsCasReadConditionNotMetGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsCasReadConditionNotMetGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadConditionNotMetGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasReadConditionNotMetGetDefault creates a StorageProxyMetricsCasReadConditionNotMetGetDefault with default headers values +func NewStorageProxyMetricsCasReadConditionNotMetGetDefault(code int) *StorageProxyMetricsCasReadConditionNotMetGetDefault { + return &StorageProxyMetricsCasReadConditionNotMetGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasReadConditionNotMetGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsCasReadConditionNotMetGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas read condition not met get default response +func (o *StorageProxyMetricsCasReadConditionNotMetGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasReadConditionNotMetGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadConditionNotMetGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasReadConditionNotMetGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_contention_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_contention_get_parameters.go new file mode 100644 index 00000000000..11ed956dbd1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_contention_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasReadContentionGetParams creates a new StorageProxyMetricsCasReadContentionGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasReadContentionGetParams() *StorageProxyMetricsCasReadContentionGetParams { + + return &StorageProxyMetricsCasReadContentionGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasReadContentionGetParamsWithTimeout creates a new StorageProxyMetricsCasReadContentionGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasReadContentionGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadContentionGetParams { + + return &StorageProxyMetricsCasReadContentionGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasReadContentionGetParamsWithContext creates a new StorageProxyMetricsCasReadContentionGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasReadContentionGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasReadContentionGetParams { + + return &StorageProxyMetricsCasReadContentionGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasReadContentionGetParamsWithHTTPClient creates a new StorageProxyMetricsCasReadContentionGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasReadContentionGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadContentionGetParams { + + return &StorageProxyMetricsCasReadContentionGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasReadContentionGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas read contention get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasReadContentionGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas read contention get params +func (o *StorageProxyMetricsCasReadContentionGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadContentionGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas read contention get params +func (o *StorageProxyMetricsCasReadContentionGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas read contention get params +func (o *StorageProxyMetricsCasReadContentionGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasReadContentionGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas read contention get params +func (o *StorageProxyMetricsCasReadContentionGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas read contention get params +func (o *StorageProxyMetricsCasReadContentionGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadContentionGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas read contention get params +func (o *StorageProxyMetricsCasReadContentionGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasReadContentionGetParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_contention_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_contention_get_responses.go new file mode 100644 index 00000000000..d1ecf530720 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_contention_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasReadContentionGetReader is a Reader for the StorageProxyMetricsCasReadContentionGet structure. +type StorageProxyMetricsCasReadContentionGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasReadContentionGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasReadContentionGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasReadContentionGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasReadContentionGetOK creates a StorageProxyMetricsCasReadContentionGetOK with default headers values +func NewStorageProxyMetricsCasReadContentionGetOK() *StorageProxyMetricsCasReadContentionGetOK { + return &StorageProxyMetricsCasReadContentionGetOK{} +} + +/* +StorageProxyMetricsCasReadContentionGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsCasReadContentionGetOK struct { + Payload interface{} +} + +func (o *StorageProxyMetricsCasReadContentionGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadContentionGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasReadContentionGetDefault creates a StorageProxyMetricsCasReadContentionGetDefault with default headers values +func NewStorageProxyMetricsCasReadContentionGetDefault(code int) *StorageProxyMetricsCasReadContentionGetDefault { + return &StorageProxyMetricsCasReadContentionGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasReadContentionGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsCasReadContentionGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas read contention get default response +func (o *StorageProxyMetricsCasReadContentionGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasReadContentionGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadContentionGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasReadContentionGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_timeouts_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_timeouts_get_parameters.go new file mode 100644 index 00000000000..f06b69f7806 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_timeouts_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasReadTimeoutsGetParams creates a new StorageProxyMetricsCasReadTimeoutsGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasReadTimeoutsGetParams() *StorageProxyMetricsCasReadTimeoutsGetParams { + + return &StorageProxyMetricsCasReadTimeoutsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasReadTimeoutsGetParamsWithTimeout creates a new StorageProxyMetricsCasReadTimeoutsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasReadTimeoutsGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadTimeoutsGetParams { + + return &StorageProxyMetricsCasReadTimeoutsGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasReadTimeoutsGetParamsWithContext creates a new StorageProxyMetricsCasReadTimeoutsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasReadTimeoutsGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasReadTimeoutsGetParams { + + return &StorageProxyMetricsCasReadTimeoutsGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasReadTimeoutsGetParamsWithHTTPClient creates a new StorageProxyMetricsCasReadTimeoutsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasReadTimeoutsGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadTimeoutsGetParams { + + return &StorageProxyMetricsCasReadTimeoutsGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasReadTimeoutsGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas read timeouts get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasReadTimeoutsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas read timeouts get params +func (o *StorageProxyMetricsCasReadTimeoutsGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadTimeoutsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas read timeouts get params +func (o *StorageProxyMetricsCasReadTimeoutsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas read timeouts get params +func (o *StorageProxyMetricsCasReadTimeoutsGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasReadTimeoutsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas read timeouts get params +func (o *StorageProxyMetricsCasReadTimeoutsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas read timeouts get params +func (o *StorageProxyMetricsCasReadTimeoutsGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadTimeoutsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas read timeouts get params +func (o *StorageProxyMetricsCasReadTimeoutsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasReadTimeoutsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return 
err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_timeouts_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_timeouts_get_responses.go new file mode 100644 index 00000000000..40cd00e458e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_timeouts_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasReadTimeoutsGetReader is a Reader for the StorageProxyMetricsCasReadTimeoutsGet structure. +type StorageProxyMetricsCasReadTimeoutsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasReadTimeoutsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasReadTimeoutsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasReadTimeoutsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasReadTimeoutsGetOK creates a StorageProxyMetricsCasReadTimeoutsGetOK with default headers values +func NewStorageProxyMetricsCasReadTimeoutsGetOK() *StorageProxyMetricsCasReadTimeoutsGetOK { + return &StorageProxyMetricsCasReadTimeoutsGetOK{} +} + +/* +StorageProxyMetricsCasReadTimeoutsGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsCasReadTimeoutsGetOK struct { + Payload interface{} +} + +func (o *StorageProxyMetricsCasReadTimeoutsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadTimeoutsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasReadTimeoutsGetDefault creates a StorageProxyMetricsCasReadTimeoutsGetDefault with default headers values +func NewStorageProxyMetricsCasReadTimeoutsGetDefault(code int) *StorageProxyMetricsCasReadTimeoutsGetDefault { + return &StorageProxyMetricsCasReadTimeoutsGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasReadTimeoutsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsCasReadTimeoutsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas read timeouts get default response +func (o *StorageProxyMetricsCasReadTimeoutsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasReadTimeoutsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadTimeoutsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasReadTimeoutsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unavailables_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unavailables_get_parameters.go new file mode 100644 index 00000000000..b5cd01c56c0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unavailables_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasReadUnavailablesGetParams creates a new StorageProxyMetricsCasReadUnavailablesGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasReadUnavailablesGetParams() *StorageProxyMetricsCasReadUnavailablesGetParams { + + return &StorageProxyMetricsCasReadUnavailablesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasReadUnavailablesGetParamsWithTimeout creates a new StorageProxyMetricsCasReadUnavailablesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasReadUnavailablesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadUnavailablesGetParams { + + return &StorageProxyMetricsCasReadUnavailablesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasReadUnavailablesGetParamsWithContext creates a new StorageProxyMetricsCasReadUnavailablesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasReadUnavailablesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasReadUnavailablesGetParams { + + return &StorageProxyMetricsCasReadUnavailablesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasReadUnavailablesGetParamsWithHTTPClient creates a new StorageProxyMetricsCasReadUnavailablesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasReadUnavailablesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadUnavailablesGetParams { + + return &StorageProxyMetricsCasReadUnavailablesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasReadUnavailablesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas read unavailables get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasReadUnavailablesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas read unavailables get params +func (o *StorageProxyMetricsCasReadUnavailablesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadUnavailablesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas read unavailables get params +func (o *StorageProxyMetricsCasReadUnavailablesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas read unavailables get params +func (o *StorageProxyMetricsCasReadUnavailablesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasReadUnavailablesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas read unavailables get params +func (o *StorageProxyMetricsCasReadUnavailablesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas read unavailables get params +func (o *StorageProxyMetricsCasReadUnavailablesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadUnavailablesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas read unavailables get params +func (o *StorageProxyMetricsCasReadUnavailablesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*StorageProxyMetricsCasReadUnavailablesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unavailables_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unavailables_get_responses.go new file mode 100644 index 00000000000..e92b86d14cd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unavailables_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasReadUnavailablesGetReader is a Reader for the StorageProxyMetricsCasReadUnavailablesGet structure. +type StorageProxyMetricsCasReadUnavailablesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasReadUnavailablesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasReadUnavailablesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasReadUnavailablesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasReadUnavailablesGetOK creates a StorageProxyMetricsCasReadUnavailablesGetOK with default headers values +func NewStorageProxyMetricsCasReadUnavailablesGetOK() *StorageProxyMetricsCasReadUnavailablesGetOK { + return &StorageProxyMetricsCasReadUnavailablesGetOK{} +} + +/* +StorageProxyMetricsCasReadUnavailablesGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsCasReadUnavailablesGetOK struct { + Payload interface{} +} + +func (o *StorageProxyMetricsCasReadUnavailablesGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadUnavailablesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasReadUnavailablesGetDefault creates a StorageProxyMetricsCasReadUnavailablesGetDefault with default headers values +func NewStorageProxyMetricsCasReadUnavailablesGetDefault(code int) *StorageProxyMetricsCasReadUnavailablesGetDefault { + return &StorageProxyMetricsCasReadUnavailablesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasReadUnavailablesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsCasReadUnavailablesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas read unavailables get default response +func (o *StorageProxyMetricsCasReadUnavailablesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasReadUnavailablesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadUnavailablesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasReadUnavailablesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unfinished_commit_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unfinished_commit_get_parameters.go new file mode 100644 index 00000000000..f08ddddbf8e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unfinished_commit_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasReadUnfinishedCommitGetParams creates a new StorageProxyMetricsCasReadUnfinishedCommitGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasReadUnfinishedCommitGetParams() *StorageProxyMetricsCasReadUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasReadUnfinishedCommitGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasReadUnfinishedCommitGetParamsWithTimeout creates a new StorageProxyMetricsCasReadUnfinishedCommitGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasReadUnfinishedCommitGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasReadUnfinishedCommitGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasReadUnfinishedCommitGetParamsWithContext creates a new StorageProxyMetricsCasReadUnfinishedCommitGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasReadUnfinishedCommitGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasReadUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasReadUnfinishedCommitGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasReadUnfinishedCommitGetParamsWithHTTPClient creates a new StorageProxyMetricsCasReadUnfinishedCommitGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasReadUnfinishedCommitGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasReadUnfinishedCommitGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasReadUnfinishedCommitGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas read unfinished commit get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasReadUnfinishedCommitGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas read unfinished commit get params +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasReadUnfinishedCommitGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas read unfinished commit get params +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas read unfinished commit get params +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasReadUnfinishedCommitGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas read unfinished commit get params +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas read unfinished commit get params +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasReadUnfinishedCommitGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas read unfinished commit get params +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unfinished_commit_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unfinished_commit_get_responses.go new file mode 100644 index 00000000000..116c57252f9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_read_unfinished_commit_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasReadUnfinishedCommitGetReader is a Reader for the StorageProxyMetricsCasReadUnfinishedCommitGet structure. +type StorageProxyMetricsCasReadUnfinishedCommitGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasReadUnfinishedCommitGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasReadUnfinishedCommitGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasReadUnfinishedCommitGetOK creates a StorageProxyMetricsCasReadUnfinishedCommitGetOK with default headers values +func NewStorageProxyMetricsCasReadUnfinishedCommitGetOK() *StorageProxyMetricsCasReadUnfinishedCommitGetOK { + return &StorageProxyMetricsCasReadUnfinishedCommitGetOK{} +} + +/* +StorageProxyMetricsCasReadUnfinishedCommitGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsCasReadUnfinishedCommitGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasReadUnfinishedCommitGetDefault creates a StorageProxyMetricsCasReadUnfinishedCommitGetDefault with default headers values +func NewStorageProxyMetricsCasReadUnfinishedCommitGetDefault(code int) *StorageProxyMetricsCasReadUnfinishedCommitGetDefault { + return &StorageProxyMetricsCasReadUnfinishedCommitGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasReadUnfinishedCommitGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsCasReadUnfinishedCommitGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas read unfinished commit get default response +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasReadUnfinishedCommitGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_condition_not_met_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_condition_not_met_get_parameters.go new file mode 100644 index 00000000000..2bf5732f146 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_condition_not_met_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasWriteConditionNotMetGetParams creates a new StorageProxyMetricsCasWriteConditionNotMetGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasWriteConditionNotMetGetParams() *StorageProxyMetricsCasWriteConditionNotMetGetParams { + + return &StorageProxyMetricsCasWriteConditionNotMetGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasWriteConditionNotMetGetParamsWithTimeout creates a new StorageProxyMetricsCasWriteConditionNotMetGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasWriteConditionNotMetGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteConditionNotMetGetParams { + + return &StorageProxyMetricsCasWriteConditionNotMetGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasWriteConditionNotMetGetParamsWithContext creates a new StorageProxyMetricsCasWriteConditionNotMetGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasWriteConditionNotMetGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasWriteConditionNotMetGetParams { + + return &StorageProxyMetricsCasWriteConditionNotMetGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasWriteConditionNotMetGetParamsWithHTTPClient creates a new StorageProxyMetricsCasWriteConditionNotMetGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasWriteConditionNotMetGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteConditionNotMetGetParams { + + return &StorageProxyMetricsCasWriteConditionNotMetGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasWriteConditionNotMetGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas write condition not met get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasWriteConditionNotMetGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas write condition not met get params +func (o *StorageProxyMetricsCasWriteConditionNotMetGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteConditionNotMetGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas write condition not met get params +func (o *StorageProxyMetricsCasWriteConditionNotMetGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas write condition not met get params +func (o *StorageProxyMetricsCasWriteConditionNotMetGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasWriteConditionNotMetGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas write condition not met get params +func (o *StorageProxyMetricsCasWriteConditionNotMetGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas write condition not met get params +func (o *StorageProxyMetricsCasWriteConditionNotMetGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteConditionNotMetGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas write condition not met get params +func (o *StorageProxyMetricsCasWriteConditionNotMetGetParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasWriteConditionNotMetGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_condition_not_met_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_condition_not_met_get_responses.go new file mode 100644 index 00000000000..1cc05df719f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_condition_not_met_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasWriteConditionNotMetGetReader is a Reader for the StorageProxyMetricsCasWriteConditionNotMetGet structure. +type StorageProxyMetricsCasWriteConditionNotMetGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasWriteConditionNotMetGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasWriteConditionNotMetGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasWriteConditionNotMetGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasWriteConditionNotMetGetOK creates a StorageProxyMetricsCasWriteConditionNotMetGetOK with default headers values +func NewStorageProxyMetricsCasWriteConditionNotMetGetOK() *StorageProxyMetricsCasWriteConditionNotMetGetOK { + return &StorageProxyMetricsCasWriteConditionNotMetGetOK{} +} + +/* +StorageProxyMetricsCasWriteConditionNotMetGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsCasWriteConditionNotMetGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsCasWriteConditionNotMetGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteConditionNotMetGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasWriteConditionNotMetGetDefault creates a StorageProxyMetricsCasWriteConditionNotMetGetDefault with default headers values +func NewStorageProxyMetricsCasWriteConditionNotMetGetDefault(code int) *StorageProxyMetricsCasWriteConditionNotMetGetDefault { + return &StorageProxyMetricsCasWriteConditionNotMetGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasWriteConditionNotMetGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsCasWriteConditionNotMetGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas write condition not met get default response +func (o *StorageProxyMetricsCasWriteConditionNotMetGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasWriteConditionNotMetGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteConditionNotMetGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasWriteConditionNotMetGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_contention_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_contention_get_parameters.go new file mode 100644 index 00000000000..9cbd1c97c38 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_contention_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasWriteContentionGetParams creates a new StorageProxyMetricsCasWriteContentionGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasWriteContentionGetParams() *StorageProxyMetricsCasWriteContentionGetParams { + + return &StorageProxyMetricsCasWriteContentionGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasWriteContentionGetParamsWithTimeout creates a new StorageProxyMetricsCasWriteContentionGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasWriteContentionGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteContentionGetParams { + + return &StorageProxyMetricsCasWriteContentionGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasWriteContentionGetParamsWithContext creates a new StorageProxyMetricsCasWriteContentionGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasWriteContentionGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasWriteContentionGetParams { + + return &StorageProxyMetricsCasWriteContentionGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasWriteContentionGetParamsWithHTTPClient creates a new StorageProxyMetricsCasWriteContentionGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasWriteContentionGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteContentionGetParams { + + return &StorageProxyMetricsCasWriteContentionGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasWriteContentionGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas write contention get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasWriteContentionGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas write contention get params +func (o *StorageProxyMetricsCasWriteContentionGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteContentionGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas write contention get params +func (o *StorageProxyMetricsCasWriteContentionGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas write contention get params +func (o *StorageProxyMetricsCasWriteContentionGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasWriteContentionGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas write contention get params +func (o *StorageProxyMetricsCasWriteContentionGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas write contention get params +func (o *StorageProxyMetricsCasWriteContentionGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteContentionGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas write contention get params +func (o *StorageProxyMetricsCasWriteContentionGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasWriteContentionGetParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_contention_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_contention_get_responses.go new file mode 100644 index 00000000000..16474b2f05a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_contention_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasWriteContentionGetReader is a Reader for the StorageProxyMetricsCasWriteContentionGet structure. +type StorageProxyMetricsCasWriteContentionGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasWriteContentionGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasWriteContentionGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasWriteContentionGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasWriteContentionGetOK creates a StorageProxyMetricsCasWriteContentionGetOK with default headers values +func NewStorageProxyMetricsCasWriteContentionGetOK() *StorageProxyMetricsCasWriteContentionGetOK { + return &StorageProxyMetricsCasWriteContentionGetOK{} +} + +/* +StorageProxyMetricsCasWriteContentionGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsCasWriteContentionGetOK struct { + Payload interface{} +} + +func (o *StorageProxyMetricsCasWriteContentionGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteContentionGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasWriteContentionGetDefault creates a StorageProxyMetricsCasWriteContentionGetDefault with default headers values +func NewStorageProxyMetricsCasWriteContentionGetDefault(code int) *StorageProxyMetricsCasWriteContentionGetDefault { + return &StorageProxyMetricsCasWriteContentionGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasWriteContentionGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsCasWriteContentionGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas write contention get default response +func (o *StorageProxyMetricsCasWriteContentionGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasWriteContentionGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteContentionGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasWriteContentionGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_timeouts_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_timeouts_get_parameters.go new file mode 100644 index 00000000000..1f3a7021ea2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_timeouts_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasWriteTimeoutsGetParams creates a new StorageProxyMetricsCasWriteTimeoutsGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasWriteTimeoutsGetParams() *StorageProxyMetricsCasWriteTimeoutsGetParams { + + return &StorageProxyMetricsCasWriteTimeoutsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasWriteTimeoutsGetParamsWithTimeout creates a new StorageProxyMetricsCasWriteTimeoutsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasWriteTimeoutsGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteTimeoutsGetParams { + + return &StorageProxyMetricsCasWriteTimeoutsGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasWriteTimeoutsGetParamsWithContext creates a new StorageProxyMetricsCasWriteTimeoutsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasWriteTimeoutsGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasWriteTimeoutsGetParams { + + return &StorageProxyMetricsCasWriteTimeoutsGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasWriteTimeoutsGetParamsWithHTTPClient creates a new StorageProxyMetricsCasWriteTimeoutsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasWriteTimeoutsGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteTimeoutsGetParams { + + return &StorageProxyMetricsCasWriteTimeoutsGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasWriteTimeoutsGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas write timeouts get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasWriteTimeoutsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas write timeouts get params +func (o *StorageProxyMetricsCasWriteTimeoutsGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteTimeoutsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas write timeouts get params +func (o *StorageProxyMetricsCasWriteTimeoutsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas write timeouts get params +func (o *StorageProxyMetricsCasWriteTimeoutsGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasWriteTimeoutsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas write timeouts get params +func (o *StorageProxyMetricsCasWriteTimeoutsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas write timeouts get params +func (o *StorageProxyMetricsCasWriteTimeoutsGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteTimeoutsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas write timeouts get params +func (o *StorageProxyMetricsCasWriteTimeoutsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasWriteTimeoutsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_timeouts_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_timeouts_get_responses.go new file mode 100644 index 00000000000..caf3ff6b977 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_timeouts_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasWriteTimeoutsGetReader is a Reader for the StorageProxyMetricsCasWriteTimeoutsGet structure. +type StorageProxyMetricsCasWriteTimeoutsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasWriteTimeoutsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasWriteTimeoutsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasWriteTimeoutsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasWriteTimeoutsGetOK creates a StorageProxyMetricsCasWriteTimeoutsGetOK with default headers values +func NewStorageProxyMetricsCasWriteTimeoutsGetOK() *StorageProxyMetricsCasWriteTimeoutsGetOK { + return &StorageProxyMetricsCasWriteTimeoutsGetOK{} +} + +/* +StorageProxyMetricsCasWriteTimeoutsGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsCasWriteTimeoutsGetOK struct { + Payload interface{} +} + +func (o *StorageProxyMetricsCasWriteTimeoutsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteTimeoutsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasWriteTimeoutsGetDefault creates a StorageProxyMetricsCasWriteTimeoutsGetDefault with default headers values +func NewStorageProxyMetricsCasWriteTimeoutsGetDefault(code int) *StorageProxyMetricsCasWriteTimeoutsGetDefault { + return &StorageProxyMetricsCasWriteTimeoutsGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasWriteTimeoutsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsCasWriteTimeoutsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas write timeouts get default response +func (o *StorageProxyMetricsCasWriteTimeoutsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasWriteTimeoutsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteTimeoutsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasWriteTimeoutsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unavailables_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unavailables_get_parameters.go new file mode 100644 index 00000000000..6b20d393552 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unavailables_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasWriteUnavailablesGetParams creates a new StorageProxyMetricsCasWriteUnavailablesGetParams object +// with the default values initialized. 
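For reference, a minimal sketch of how the generated params builder and reader above are typically driven. The operations client constructor (operations.New) and the HTTP transport wiring are assumed standard go-swagger conventions rather than code shown in this diff, and the host, base path, and scheme are placeholders.

package main

import (
    "context"
    "fmt"
    "time"

    httptransport "github.com/go-openapi/runtime/client"
    "github.com/go-openapi/strfmt"

    "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
    // Placeholder agent address; scheme, host, and base path depend on the deployment.
    transport := httptransport.New("localhost:10000", "/", []string{"http"})
    opClient := operations.New(transport, strfmt.Default)

    // Builder-style params; the timeout is the value WriteToRequest passes to r.SetTimeout.
    params := operations.NewStorageProxyMetricsCasWriteTimeoutsGetParams().
        WithTimeout(10 * time.Second).
        WithContext(context.Background())

    resp, err := opClient.StorageProxyMetricsCasWriteTimeoutsGet(params)
    if err != nil {
        // Non-2xx responses surface as *StorageProxyMetricsCasWriteTimeoutsGetDefault,
        // whose Error() renders the "agent [HTTP %d] %s" message defined above.
        fmt.Println("request failed:", err)
        return
    }
    fmt.Println("cas write timeouts:", resp.GetPayload()) // untyped (interface{}) payload
}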
+func NewStorageProxyMetricsCasWriteUnavailablesGetParams() *StorageProxyMetricsCasWriteUnavailablesGetParams { + + return &StorageProxyMetricsCasWriteUnavailablesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasWriteUnavailablesGetParamsWithTimeout creates a new StorageProxyMetricsCasWriteUnavailablesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasWriteUnavailablesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteUnavailablesGetParams { + + return &StorageProxyMetricsCasWriteUnavailablesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasWriteUnavailablesGetParamsWithContext creates a new StorageProxyMetricsCasWriteUnavailablesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasWriteUnavailablesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasWriteUnavailablesGetParams { + + return &StorageProxyMetricsCasWriteUnavailablesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasWriteUnavailablesGetParamsWithHTTPClient creates a new StorageProxyMetricsCasWriteUnavailablesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasWriteUnavailablesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteUnavailablesGetParams { + + return &StorageProxyMetricsCasWriteUnavailablesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasWriteUnavailablesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas write unavailables get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasWriteUnavailablesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas write unavailables get params +func (o *StorageProxyMetricsCasWriteUnavailablesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteUnavailablesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas write unavailables get params +func (o *StorageProxyMetricsCasWriteUnavailablesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas write unavailables get params +func (o *StorageProxyMetricsCasWriteUnavailablesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasWriteUnavailablesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas write unavailables get params +func (o *StorageProxyMetricsCasWriteUnavailablesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas write unavailables get params +func (o *StorageProxyMetricsCasWriteUnavailablesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteUnavailablesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas write unavailables get params +func (o *StorageProxyMetricsCasWriteUnavailablesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request 
+func (o *StorageProxyMetricsCasWriteUnavailablesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unavailables_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unavailables_get_responses.go new file mode 100644 index 00000000000..7469619bf14 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unavailables_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasWriteUnavailablesGetReader is a Reader for the StorageProxyMetricsCasWriteUnavailablesGet structure. +type StorageProxyMetricsCasWriteUnavailablesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasWriteUnavailablesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasWriteUnavailablesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasWriteUnavailablesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasWriteUnavailablesGetOK creates a StorageProxyMetricsCasWriteUnavailablesGetOK with default headers values +func NewStorageProxyMetricsCasWriteUnavailablesGetOK() *StorageProxyMetricsCasWriteUnavailablesGetOK { + return &StorageProxyMetricsCasWriteUnavailablesGetOK{} +} + +/* +StorageProxyMetricsCasWriteUnavailablesGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsCasWriteUnavailablesGetOK struct { + Payload interface{} +} + +func (o *StorageProxyMetricsCasWriteUnavailablesGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteUnavailablesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasWriteUnavailablesGetDefault creates a StorageProxyMetricsCasWriteUnavailablesGetDefault with default headers values +func NewStorageProxyMetricsCasWriteUnavailablesGetDefault(code int) *StorageProxyMetricsCasWriteUnavailablesGetDefault { + return &StorageProxyMetricsCasWriteUnavailablesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasWriteUnavailablesGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsCasWriteUnavailablesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas write unavailables get default response +func (o *StorageProxyMetricsCasWriteUnavailablesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasWriteUnavailablesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteUnavailablesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasWriteUnavailablesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unfinished_commit_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unfinished_commit_get_parameters.go new file mode 100644 index 00000000000..c1924411aed --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unfinished_commit_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsCasWriteUnfinishedCommitGetParams creates a new StorageProxyMetricsCasWriteUnfinishedCommitGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsCasWriteUnfinishedCommitGetParams() *StorageProxyMetricsCasWriteUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasWriteUnfinishedCommitGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsCasWriteUnfinishedCommitGetParamsWithTimeout creates a new StorageProxyMetricsCasWriteUnfinishedCommitGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsCasWriteUnfinishedCommitGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasWriteUnfinishedCommitGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsCasWriteUnfinishedCommitGetParamsWithContext creates a new StorageProxyMetricsCasWriteUnfinishedCommitGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsCasWriteUnfinishedCommitGetParamsWithContext(ctx context.Context) *StorageProxyMetricsCasWriteUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasWriteUnfinishedCommitGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsCasWriteUnfinishedCommitGetParamsWithHTTPClient creates a new StorageProxyMetricsCasWriteUnfinishedCommitGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsCasWriteUnfinishedCommitGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteUnfinishedCommitGetParams { + + return &StorageProxyMetricsCasWriteUnfinishedCommitGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsCasWriteUnfinishedCommitGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics cas write unfinished commit get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsCasWriteUnfinishedCommitGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics cas write unfinished commit get params +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsCasWriteUnfinishedCommitGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics cas write unfinished commit get params +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics cas write unfinished commit get params +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) WithContext(ctx context.Context) *StorageProxyMetricsCasWriteUnfinishedCommitGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics cas write unfinished commit get params +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics cas write unfinished commit get params +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsCasWriteUnfinishedCommitGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics cas write unfinished commit get params +func (o 
*StorageProxyMetricsCasWriteUnfinishedCommitGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unfinished_commit_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unfinished_commit_get_responses.go new file mode 100644 index 00000000000..b06fba84ba2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_cas_write_unfinished_commit_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsCasWriteUnfinishedCommitGetReader is a Reader for the StorageProxyMetricsCasWriteUnfinishedCommitGet structure. +type StorageProxyMetricsCasWriteUnfinishedCommitGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsCasWriteUnfinishedCommitGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsCasWriteUnfinishedCommitGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsCasWriteUnfinishedCommitGetOK creates a StorageProxyMetricsCasWriteUnfinishedCommitGetOK with default headers values +func NewStorageProxyMetricsCasWriteUnfinishedCommitGetOK() *StorageProxyMetricsCasWriteUnfinishedCommitGetOK { + return &StorageProxyMetricsCasWriteUnfinishedCommitGetOK{} +} + +/* +StorageProxyMetricsCasWriteUnfinishedCommitGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsCasWriteUnfinishedCommitGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsCasWriteUnfinishedCommitGetDefault creates a StorageProxyMetricsCasWriteUnfinishedCommitGetDefault with default headers values +func NewStorageProxyMetricsCasWriteUnfinishedCommitGetDefault(code int) *StorageProxyMetricsCasWriteUnfinishedCommitGetDefault { + return &StorageProxyMetricsCasWriteUnfinishedCommitGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsCasWriteUnfinishedCommitGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsCasWriteUnfinishedCommitGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics cas write unfinished commit get default response +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsCasWriteUnfinishedCommitGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_estimated_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_estimated_histogram_get_parameters.go new file mode 100644 index 00000000000..98c99fbb2b8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_estimated_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeEstimatedHistogramGetParams creates a new StorageProxyMetricsRangeEstimatedHistogramGetParams object +// with the default values initialized. 
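Unlike the two operations above, whose 200 payload is untyped, the unfinished-commit endpoint decodes its payload into an int32. A short sketch of reading the typed payload and unwrapping the default (non-200) response, which the reader returns as an error value; opClient is the generated *operations.Client from the earlier sketch, and errors/fmt are the standard-library packages.

func casWriteUnfinishedCommits(opClient *operations.Client) (int32, error) {
    resp, err := opClient.StorageProxyMetricsCasWriteUnfinishedCommitGet(
        operations.NewStorageProxyMetricsCasWriteUnfinishedCommitGetParams())
    if err != nil {
        // The Default case carries a *models.ErrorModel; errors.As reaches it
        // because ReadResponse returns the concrete value directly.
        var apiErr *operations.StorageProxyMetricsCasWriteUnfinishedCommitGetDefault
        if errors.As(err, &apiErr) {
            return 0, fmt.Errorf("agent returned %d: %s", apiErr.Code(), apiErr.GetPayload().Message)
        }
        return 0, err
    }
    return resp.GetPayload(), nil
}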
+func NewStorageProxyMetricsRangeEstimatedHistogramGetParams() *StorageProxyMetricsRangeEstimatedHistogramGetParams { + + return &StorageProxyMetricsRangeEstimatedHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeEstimatedHistogramGetParamsWithTimeout creates a new StorageProxyMetricsRangeEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeEstimatedHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeEstimatedHistogramGetParams { + + return &StorageProxyMetricsRangeEstimatedHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeEstimatedHistogramGetParamsWithContext creates a new StorageProxyMetricsRangeEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeEstimatedHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeEstimatedHistogramGetParams { + + return &StorageProxyMetricsRangeEstimatedHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeEstimatedHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeEstimatedHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeEstimatedHistogramGetParams { + + return &StorageProxyMetricsRangeEstimatedHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeEstimatedHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range estimated histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeEstimatedHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range estimated histogram get params +func (o *StorageProxyMetricsRangeEstimatedHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeEstimatedHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range estimated histogram get params +func (o *StorageProxyMetricsRangeEstimatedHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range estimated histogram get params +func (o *StorageProxyMetricsRangeEstimatedHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeEstimatedHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range estimated histogram get params +func (o *StorageProxyMetricsRangeEstimatedHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range estimated histogram get params +func (o *StorageProxyMetricsRangeEstimatedHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeEstimatedHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics range estimated histogram get params +func (o *StorageProxyMetricsRangeEstimatedHistogramGetParams) SetHTTPClient(client 
*http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeEstimatedHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_estimated_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_estimated_histogram_get_responses.go new file mode 100644 index 00000000000..256b87c94f1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_estimated_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeEstimatedHistogramGetReader is a Reader for the StorageProxyMetricsRangeEstimatedHistogramGet structure. +type StorageProxyMetricsRangeEstimatedHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeEstimatedHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeEstimatedHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeEstimatedHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeEstimatedHistogramGetOK creates a StorageProxyMetricsRangeEstimatedHistogramGetOK with default headers values +func NewStorageProxyMetricsRangeEstimatedHistogramGetOK() *StorageProxyMetricsRangeEstimatedHistogramGetOK { + return &StorageProxyMetricsRangeEstimatedHistogramGetOK{} +} + +/* +StorageProxyMetricsRangeEstimatedHistogramGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsRangeEstimatedHistogramGetOK struct { +} + +func (o *StorageProxyMetricsRangeEstimatedHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsRangeEstimatedHistogramGetDefault creates a StorageProxyMetricsRangeEstimatedHistogramGetDefault with default headers values +func NewStorageProxyMetricsRangeEstimatedHistogramGetDefault(code int) *StorageProxyMetricsRangeEstimatedHistogramGetDefault { + return &StorageProxyMetricsRangeEstimatedHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeEstimatedHistogramGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsRangeEstimatedHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range estimated histogram get default response +func (o *StorageProxyMetricsRangeEstimatedHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeEstimatedHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeEstimatedHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeEstimatedHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_get_parameters.go new file mode 100644 index 00000000000..fb56554f71d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeGetParams creates a new StorageProxyMetricsRangeGetParams object +// with the default values initialized. 
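The estimated-histogram 200 response above carries no body, so the generated OK struct is empty and its readResponse is a no-op; a nil error is the only success signal. A minimal sketch, assuming the opClient and ctx values from the earlier sketches:

if _, err := opClient.StorageProxyMetricsRangeEstimatedHistogramGet(
    operations.NewStorageProxyMetricsRangeEstimatedHistogramGetParamsWithContext(ctx),
); err != nil {
    return err
}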
+func NewStorageProxyMetricsRangeGetParams() *StorageProxyMetricsRangeGetParams { + + return &StorageProxyMetricsRangeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeGetParamsWithTimeout creates a new StorageProxyMetricsRangeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeGetParams { + + return &StorageProxyMetricsRangeGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeGetParamsWithContext creates a new StorageProxyMetricsRangeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeGetParams { + + return &StorageProxyMetricsRangeGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeGetParams { + + return &StorageProxyMetricsRangeGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range get params +func (o *StorageProxyMetricsRangeGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range get params +func (o *StorageProxyMetricsRangeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range get params +func (o *StorageProxyMetricsRangeGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range get params +func (o *StorageProxyMetricsRangeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range get params +func (o *StorageProxyMetricsRangeGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics range get params +func (o *StorageProxyMetricsRangeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_get_responses.go new file mode 100644 index 00000000000..43893d7b438 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeGetReader is a Reader for the StorageProxyMetricsRangeGet structure. +type StorageProxyMetricsRangeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeGetOK creates a StorageProxyMetricsRangeGetOK with default headers values +func NewStorageProxyMetricsRangeGetOK() *StorageProxyMetricsRangeGetOK { + return &StorageProxyMetricsRangeGetOK{} +} + +/* +StorageProxyMetricsRangeGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsRangeGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsRangeGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsRangeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsRangeGetDefault creates a StorageProxyMetricsRangeGetDefault with default headers values +func NewStorageProxyMetricsRangeGetDefault(code int) *StorageProxyMetricsRangeGetDefault { + return &StorageProxyMetricsRangeGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsRangeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range get default response +func (o *StorageProxyMetricsRangeGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_histogram_get_parameters.go new file mode 100644 index 00000000000..3a6778c30c9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeHistogramGetParams creates a new StorageProxyMetricsRangeHistogramGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsRangeHistogramGetParams() *StorageProxyMetricsRangeHistogramGetParams { + + return &StorageProxyMetricsRangeHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeHistogramGetParamsWithTimeout creates a new StorageProxyMetricsRangeHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeHistogramGetParams { + + return &StorageProxyMetricsRangeHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeHistogramGetParamsWithContext creates a new StorageProxyMetricsRangeHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeHistogramGetParams { + + return &StorageProxyMetricsRangeHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeHistogramGetParams { + + return &StorageProxyMetricsRangeHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range histogram get params +func (o *StorageProxyMetricsRangeHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range histogram get params +func (o *StorageProxyMetricsRangeHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range histogram get params +func (o *StorageProxyMetricsRangeHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range histogram get params +func (o *StorageProxyMetricsRangeHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range histogram get params +func (o *StorageProxyMetricsRangeHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics range histogram get params +func (o *StorageProxyMetricsRangeHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 
0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_histogram_get_responses.go new file mode 100644 index 00000000000..d34de410133 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeHistogramGetReader is a Reader for the StorageProxyMetricsRangeHistogramGet structure. +type StorageProxyMetricsRangeHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeHistogramGetOK creates a StorageProxyMetricsRangeHistogramGetOK with default headers values +func NewStorageProxyMetricsRangeHistogramGetOK() *StorageProxyMetricsRangeHistogramGetOK { + return &StorageProxyMetricsRangeHistogramGetOK{} +} + +/* +StorageProxyMetricsRangeHistogramGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsRangeHistogramGetOK struct { +} + +func (o *StorageProxyMetricsRangeHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsRangeHistogramGetDefault creates a StorageProxyMetricsRangeHistogramGetDefault with default headers values +func NewStorageProxyMetricsRangeHistogramGetDefault(code int) *StorageProxyMetricsRangeHistogramGetDefault { + return &StorageProxyMetricsRangeHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeHistogramGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsRangeHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range histogram get default response +func (o *StorageProxyMetricsRangeHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_moving_average_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_moving_average_histogram_get_parameters.go new file mode 100644 index 00000000000..ab5b0513714 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_moving_average_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeMovingAverageHistogramGetParams creates a new StorageProxyMetricsRangeMovingAverageHistogramGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsRangeMovingAverageHistogramGetParams() *StorageProxyMetricsRangeMovingAverageHistogramGetParams { + + return &StorageProxyMetricsRangeMovingAverageHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeMovingAverageHistogramGetParamsWithTimeout creates a new StorageProxyMetricsRangeMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeMovingAverageHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeMovingAverageHistogramGetParams { + + return &StorageProxyMetricsRangeMovingAverageHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeMovingAverageHistogramGetParamsWithContext creates a new StorageProxyMetricsRangeMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeMovingAverageHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeMovingAverageHistogramGetParams { + + return &StorageProxyMetricsRangeMovingAverageHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeMovingAverageHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeMovingAverageHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeMovingAverageHistogramGetParams { + + return &StorageProxyMetricsRangeMovingAverageHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeMovingAverageHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range moving average histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeMovingAverageHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range moving average histogram get params +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeMovingAverageHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range moving average histogram get params +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range moving average histogram get params +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeMovingAverageHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range moving average histogram get params +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range moving average histogram get params +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeMovingAverageHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the 
storage proxy metrics range moving average histogram get params +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_moving_average_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_moving_average_histogram_get_responses.go new file mode 100644 index 00000000000..1d96309af21 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_moving_average_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeMovingAverageHistogramGetReader is a Reader for the StorageProxyMetricsRangeMovingAverageHistogramGet structure. +type StorageProxyMetricsRangeMovingAverageHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeMovingAverageHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeMovingAverageHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeMovingAverageHistogramGetOK creates a StorageProxyMetricsRangeMovingAverageHistogramGetOK with default headers values +func NewStorageProxyMetricsRangeMovingAverageHistogramGetOK() *StorageProxyMetricsRangeMovingAverageHistogramGetOK { + return &StorageProxyMetricsRangeMovingAverageHistogramGetOK{} +} + +/* +StorageProxyMetricsRangeMovingAverageHistogramGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsRangeMovingAverageHistogramGetOK struct { +} + +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsRangeMovingAverageHistogramGetDefault creates a StorageProxyMetricsRangeMovingAverageHistogramGetDefault with default headers values +func NewStorageProxyMetricsRangeMovingAverageHistogramGetDefault(code int) *StorageProxyMetricsRangeMovingAverageHistogramGetDefault { + return &StorageProxyMetricsRangeMovingAverageHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeMovingAverageHistogramGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsRangeMovingAverageHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range moving average histogram get default response +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeMovingAverageHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_get_parameters.go new file mode 100644 index 00000000000..34bca19f878 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeTimeoutsGetParams creates a new StorageProxyMetricsRangeTimeoutsGetParams object +// with the default values initialized. 
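The params objects generated in this batch bound a request in two complementary ways: SetTimeout supplies the value WriteToRequest hands to r.SetTimeout, while a context deadline additionally cancels the in-flight HTTP exchange. A sketch against the moving-average operation above, with opClient assumed as before:

params := operations.NewStorageProxyMetricsRangeMovingAverageHistogramGetParams()
params.SetTimeout(5 * time.Second)

ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
params.SetContext(ctx)

_, err := opClient.StorageProxyMetricsRangeMovingAverageHistogramGet(params)
if err != nil {
    return err // the Default response, formatted as "agent [HTTP %d] %s"
}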
+func NewStorageProxyMetricsRangeTimeoutsGetParams() *StorageProxyMetricsRangeTimeoutsGetParams { + + return &StorageProxyMetricsRangeTimeoutsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeTimeoutsGetParamsWithTimeout creates a new StorageProxyMetricsRangeTimeoutsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeTimeoutsGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeTimeoutsGetParams { + + return &StorageProxyMetricsRangeTimeoutsGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeTimeoutsGetParamsWithContext creates a new StorageProxyMetricsRangeTimeoutsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeTimeoutsGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeTimeoutsGetParams { + + return &StorageProxyMetricsRangeTimeoutsGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeTimeoutsGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeTimeoutsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeTimeoutsGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeTimeoutsGetParams { + + return &StorageProxyMetricsRangeTimeoutsGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeTimeoutsGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range timeouts get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeTimeoutsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range timeouts get params +func (o *StorageProxyMetricsRangeTimeoutsGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeTimeoutsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range timeouts get params +func (o *StorageProxyMetricsRangeTimeoutsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range timeouts get params +func (o *StorageProxyMetricsRangeTimeoutsGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeTimeoutsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range timeouts get params +func (o *StorageProxyMetricsRangeTimeoutsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range timeouts get params +func (o *StorageProxyMetricsRangeTimeoutsGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeTimeoutsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics range timeouts get params +func (o *StorageProxyMetricsRangeTimeoutsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeTimeoutsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_get_responses.go new file mode 100644 index 00000000000..76164743973 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeTimeoutsGetReader is a Reader for the StorageProxyMetricsRangeTimeoutsGet structure. +type StorageProxyMetricsRangeTimeoutsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeTimeoutsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeTimeoutsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeTimeoutsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeTimeoutsGetOK creates a StorageProxyMetricsRangeTimeoutsGetOK with default headers values +func NewStorageProxyMetricsRangeTimeoutsGetOK() *StorageProxyMetricsRangeTimeoutsGetOK { + return &StorageProxyMetricsRangeTimeoutsGetOK{} +} + +/* +StorageProxyMetricsRangeTimeoutsGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsRangeTimeoutsGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsRangeTimeoutsGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsRangeTimeoutsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsRangeTimeoutsGetDefault creates a StorageProxyMetricsRangeTimeoutsGetDefault with default headers values +func NewStorageProxyMetricsRangeTimeoutsGetDefault(code int) *StorageProxyMetricsRangeTimeoutsGetDefault { + return &StorageProxyMetricsRangeTimeoutsGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeTimeoutsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsRangeTimeoutsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range timeouts get default response +func (o *StorageProxyMetricsRangeTimeoutsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeTimeoutsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeTimeoutsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeTimeoutsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_rates_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_rates_get_parameters.go new file mode 100644 index 00000000000..be4160f5255 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_rates_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeTimeoutsRatesGetParams creates a new StorageProxyMetricsRangeTimeoutsRatesGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsRangeTimeoutsRatesGetParams() *StorageProxyMetricsRangeTimeoutsRatesGetParams { + + return &StorageProxyMetricsRangeTimeoutsRatesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeTimeoutsRatesGetParamsWithTimeout creates a new StorageProxyMetricsRangeTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeTimeoutsRatesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeTimeoutsRatesGetParams { + + return &StorageProxyMetricsRangeTimeoutsRatesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeTimeoutsRatesGetParamsWithContext creates a new StorageProxyMetricsRangeTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeTimeoutsRatesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeTimeoutsRatesGetParams { + + return &StorageProxyMetricsRangeTimeoutsRatesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeTimeoutsRatesGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeTimeoutsRatesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeTimeoutsRatesGetParams { + + return &StorageProxyMetricsRangeTimeoutsRatesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeTimeoutsRatesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range timeouts rates get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeTimeoutsRatesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range timeouts rates get params +func (o *StorageProxyMetricsRangeTimeoutsRatesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeTimeoutsRatesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range timeouts rates get params +func (o *StorageProxyMetricsRangeTimeoutsRatesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range timeouts rates get params +func (o *StorageProxyMetricsRangeTimeoutsRatesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeTimeoutsRatesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range timeouts rates get params +func (o *StorageProxyMetricsRangeTimeoutsRatesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range timeouts rates get params +func (o *StorageProxyMetricsRangeTimeoutsRatesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeTimeoutsRatesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics range timeouts rates get params +func (o *StorageProxyMetricsRangeTimeoutsRatesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeTimeoutsRatesGetParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_rates_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_rates_get_responses.go new file mode 100644 index 00000000000..51087b98f37 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_timeouts_rates_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeTimeoutsRatesGetReader is a Reader for the StorageProxyMetricsRangeTimeoutsRatesGet structure. +type StorageProxyMetricsRangeTimeoutsRatesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeTimeoutsRatesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeTimeoutsRatesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeTimeoutsRatesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeTimeoutsRatesGetOK creates a StorageProxyMetricsRangeTimeoutsRatesGetOK with default headers values +func NewStorageProxyMetricsRangeTimeoutsRatesGetOK() *StorageProxyMetricsRangeTimeoutsRatesGetOK { + return &StorageProxyMetricsRangeTimeoutsRatesGetOK{} +} + +/* +StorageProxyMetricsRangeTimeoutsRatesGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsRangeTimeoutsRatesGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *StorageProxyMetricsRangeTimeoutsRatesGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *StorageProxyMetricsRangeTimeoutsRatesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsRangeTimeoutsRatesGetDefault creates a StorageProxyMetricsRangeTimeoutsRatesGetDefault with default headers values +func NewStorageProxyMetricsRangeTimeoutsRatesGetDefault(code int) *StorageProxyMetricsRangeTimeoutsRatesGetDefault { + return &StorageProxyMetricsRangeTimeoutsRatesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeTimeoutsRatesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsRangeTimeoutsRatesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range timeouts rates get default response +func (o *StorageProxyMetricsRangeTimeoutsRatesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeTimeoutsRatesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeTimeoutsRatesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeTimeoutsRatesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_get_parameters.go new file mode 100644 index 00000000000..1386274f47f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeUnavailablesGetParams creates a new StorageProxyMetricsRangeUnavailablesGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsRangeUnavailablesGetParams() *StorageProxyMetricsRangeUnavailablesGetParams { + + return &StorageProxyMetricsRangeUnavailablesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeUnavailablesGetParamsWithTimeout creates a new StorageProxyMetricsRangeUnavailablesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeUnavailablesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeUnavailablesGetParams { + + return &StorageProxyMetricsRangeUnavailablesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeUnavailablesGetParamsWithContext creates a new StorageProxyMetricsRangeUnavailablesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeUnavailablesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeUnavailablesGetParams { + + return &StorageProxyMetricsRangeUnavailablesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeUnavailablesGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeUnavailablesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeUnavailablesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeUnavailablesGetParams { + + return &StorageProxyMetricsRangeUnavailablesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeUnavailablesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range unavailables get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeUnavailablesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range unavailables get params +func (o *StorageProxyMetricsRangeUnavailablesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeUnavailablesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range unavailables get params +func (o *StorageProxyMetricsRangeUnavailablesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range unavailables get params +func (o *StorageProxyMetricsRangeUnavailablesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeUnavailablesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range unavailables get params +func (o *StorageProxyMetricsRangeUnavailablesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range unavailables get params +func (o *StorageProxyMetricsRangeUnavailablesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeUnavailablesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics range unavailables get params +func (o *StorageProxyMetricsRangeUnavailablesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeUnavailablesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) 
error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_get_responses.go new file mode 100644 index 00000000000..5d9572e46fd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeUnavailablesGetReader is a Reader for the StorageProxyMetricsRangeUnavailablesGet structure. +type StorageProxyMetricsRangeUnavailablesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeUnavailablesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeUnavailablesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeUnavailablesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeUnavailablesGetOK creates a StorageProxyMetricsRangeUnavailablesGetOK with default headers values +func NewStorageProxyMetricsRangeUnavailablesGetOK() *StorageProxyMetricsRangeUnavailablesGetOK { + return &StorageProxyMetricsRangeUnavailablesGetOK{} +} + +/* +StorageProxyMetricsRangeUnavailablesGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsRangeUnavailablesGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsRangeUnavailablesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsRangeUnavailablesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsRangeUnavailablesGetDefault creates a StorageProxyMetricsRangeUnavailablesGetDefault with default headers values +func NewStorageProxyMetricsRangeUnavailablesGetDefault(code int) *StorageProxyMetricsRangeUnavailablesGetDefault { + return &StorageProxyMetricsRangeUnavailablesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeUnavailablesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsRangeUnavailablesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range unavailables get default response +func (o *StorageProxyMetricsRangeUnavailablesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeUnavailablesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeUnavailablesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeUnavailablesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_rates_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_rates_get_parameters.go new file mode 100644 index 00000000000..6efdc82cab7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_rates_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsRangeUnavailablesRatesGetParams creates a new StorageProxyMetricsRangeUnavailablesRatesGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsRangeUnavailablesRatesGetParams() *StorageProxyMetricsRangeUnavailablesRatesGetParams { + + return &StorageProxyMetricsRangeUnavailablesRatesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsRangeUnavailablesRatesGetParamsWithTimeout creates a new StorageProxyMetricsRangeUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsRangeUnavailablesRatesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsRangeUnavailablesRatesGetParams { + + return &StorageProxyMetricsRangeUnavailablesRatesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsRangeUnavailablesRatesGetParamsWithContext creates a new StorageProxyMetricsRangeUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsRangeUnavailablesRatesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsRangeUnavailablesRatesGetParams { + + return &StorageProxyMetricsRangeUnavailablesRatesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsRangeUnavailablesRatesGetParamsWithHTTPClient creates a new StorageProxyMetricsRangeUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsRangeUnavailablesRatesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsRangeUnavailablesRatesGetParams { + + return &StorageProxyMetricsRangeUnavailablesRatesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsRangeUnavailablesRatesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics range unavailables rates get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsRangeUnavailablesRatesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics range unavailables rates get params +func (o *StorageProxyMetricsRangeUnavailablesRatesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsRangeUnavailablesRatesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics range unavailables rates get params +func (o *StorageProxyMetricsRangeUnavailablesRatesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics range unavailables rates get params +func (o *StorageProxyMetricsRangeUnavailablesRatesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsRangeUnavailablesRatesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics range unavailables rates get params +func (o *StorageProxyMetricsRangeUnavailablesRatesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics range unavailables rates get params +func (o *StorageProxyMetricsRangeUnavailablesRatesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsRangeUnavailablesRatesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics range unavailables rates get params +func (o *StorageProxyMetricsRangeUnavailablesRatesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = 
client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsRangeUnavailablesRatesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_rates_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_rates_get_responses.go new file mode 100644 index 00000000000..b3111ac8a98 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_range_unavailables_rates_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsRangeUnavailablesRatesGetReader is a Reader for the StorageProxyMetricsRangeUnavailablesRatesGet structure. +type StorageProxyMetricsRangeUnavailablesRatesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsRangeUnavailablesRatesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsRangeUnavailablesRatesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsRangeUnavailablesRatesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsRangeUnavailablesRatesGetOK creates a StorageProxyMetricsRangeUnavailablesRatesGetOK with default headers values +func NewStorageProxyMetricsRangeUnavailablesRatesGetOK() *StorageProxyMetricsRangeUnavailablesRatesGetOK { + return &StorageProxyMetricsRangeUnavailablesRatesGetOK{} +} + +/* +StorageProxyMetricsRangeUnavailablesRatesGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsRangeUnavailablesRatesGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *StorageProxyMetricsRangeUnavailablesRatesGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *StorageProxyMetricsRangeUnavailablesRatesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsRangeUnavailablesRatesGetDefault creates a StorageProxyMetricsRangeUnavailablesRatesGetDefault with default headers values +func NewStorageProxyMetricsRangeUnavailablesRatesGetDefault(code int) *StorageProxyMetricsRangeUnavailablesRatesGetDefault { + return &StorageProxyMetricsRangeUnavailablesRatesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsRangeUnavailablesRatesGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsRangeUnavailablesRatesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics range unavailables rates get default response +func (o *StorageProxyMetricsRangeUnavailablesRatesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsRangeUnavailablesRatesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsRangeUnavailablesRatesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsRangeUnavailablesRatesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_estimated_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_estimated_histogram_get_parameters.go new file mode 100644 index 00000000000..d9ea7ba0d05 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_estimated_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadEstimatedHistogramGetParams creates a new StorageProxyMetricsReadEstimatedHistogramGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadEstimatedHistogramGetParams() *StorageProxyMetricsReadEstimatedHistogramGetParams { + + return &StorageProxyMetricsReadEstimatedHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadEstimatedHistogramGetParamsWithTimeout creates a new StorageProxyMetricsReadEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadEstimatedHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadEstimatedHistogramGetParams { + + return &StorageProxyMetricsReadEstimatedHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadEstimatedHistogramGetParamsWithContext creates a new StorageProxyMetricsReadEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadEstimatedHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadEstimatedHistogramGetParams { + + return &StorageProxyMetricsReadEstimatedHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadEstimatedHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsReadEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadEstimatedHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadEstimatedHistogramGetParams { + + return &StorageProxyMetricsReadEstimatedHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadEstimatedHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read estimated histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadEstimatedHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read estimated histogram get params +func (o *StorageProxyMetricsReadEstimatedHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadEstimatedHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read estimated histogram get params +func (o *StorageProxyMetricsReadEstimatedHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read estimated histogram get params +func (o *StorageProxyMetricsReadEstimatedHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadEstimatedHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read estimated histogram get params +func (o *StorageProxyMetricsReadEstimatedHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read estimated histogram get params +func (o *StorageProxyMetricsReadEstimatedHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadEstimatedHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read estimated histogram get params +func (o *StorageProxyMetricsReadEstimatedHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = 
client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsReadEstimatedHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_estimated_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_estimated_histogram_get_responses.go new file mode 100644 index 00000000000..c004f14ea47 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_estimated_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadEstimatedHistogramGetReader is a Reader for the StorageProxyMetricsReadEstimatedHistogramGet structure. +type StorageProxyMetricsReadEstimatedHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadEstimatedHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadEstimatedHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadEstimatedHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadEstimatedHistogramGetOK creates a StorageProxyMetricsReadEstimatedHistogramGetOK with default headers values +func NewStorageProxyMetricsReadEstimatedHistogramGetOK() *StorageProxyMetricsReadEstimatedHistogramGetOK { + return &StorageProxyMetricsReadEstimatedHistogramGetOK{} +} + +/* +StorageProxyMetricsReadEstimatedHistogramGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsReadEstimatedHistogramGetOK struct { +} + +func (o *StorageProxyMetricsReadEstimatedHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsReadEstimatedHistogramGetDefault creates a StorageProxyMetricsReadEstimatedHistogramGetDefault with default headers values +func NewStorageProxyMetricsReadEstimatedHistogramGetDefault(code int) *StorageProxyMetricsReadEstimatedHistogramGetDefault { + return &StorageProxyMetricsReadEstimatedHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadEstimatedHistogramGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsReadEstimatedHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read estimated histogram get default response +func (o *StorageProxyMetricsReadEstimatedHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadEstimatedHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadEstimatedHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadEstimatedHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_get_parameters.go new file mode 100644 index 00000000000..98e0a9d1da4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadGetParams creates a new StorageProxyMetricsReadGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadGetParams() *StorageProxyMetricsReadGetParams { + + return &StorageProxyMetricsReadGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadGetParamsWithTimeout creates a new StorageProxyMetricsReadGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadGetParams { + + return &StorageProxyMetricsReadGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadGetParamsWithContext creates a new StorageProxyMetricsReadGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadGetParams { + + return &StorageProxyMetricsReadGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadGetParamsWithHTTPClient creates a new StorageProxyMetricsReadGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadGetParams { + + return &StorageProxyMetricsReadGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read get params +func (o *StorageProxyMetricsReadGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read get params +func (o *StorageProxyMetricsReadGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read get params +func (o *StorageProxyMetricsReadGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read get params +func (o *StorageProxyMetricsReadGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read get params +func (o *StorageProxyMetricsReadGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read get params +func (o *StorageProxyMetricsReadGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsReadGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_get_responses.go new file mode 100644 index 00000000000..1838fb0f87f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadGetReader is a Reader for the StorageProxyMetricsReadGet structure. +type StorageProxyMetricsReadGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadGetOK creates a StorageProxyMetricsReadGetOK with default headers values +func NewStorageProxyMetricsReadGetOK() *StorageProxyMetricsReadGetOK { + return &StorageProxyMetricsReadGetOK{} +} + +/* +StorageProxyMetricsReadGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsReadGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsReadGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsReadGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsReadGetDefault creates a StorageProxyMetricsReadGetDefault with default headers values +func NewStorageProxyMetricsReadGetDefault(code int) *StorageProxyMetricsReadGetDefault { + return &StorageProxyMetricsReadGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsReadGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read get default response +func (o *StorageProxyMetricsReadGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_histogram_get_parameters.go new file mode 100644 index 00000000000..e352814b666 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadHistogramGetParams creates a new StorageProxyMetricsReadHistogramGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadHistogramGetParams() *StorageProxyMetricsReadHistogramGetParams { + + return &StorageProxyMetricsReadHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadHistogramGetParamsWithTimeout creates a new StorageProxyMetricsReadHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadHistogramGetParams { + + return &StorageProxyMetricsReadHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadHistogramGetParamsWithContext creates a new StorageProxyMetricsReadHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadHistogramGetParams { + + return &StorageProxyMetricsReadHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsReadHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadHistogramGetParams { + + return &StorageProxyMetricsReadHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read histogram get params +func (o *StorageProxyMetricsReadHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read histogram get params +func (o *StorageProxyMetricsReadHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read histogram get params +func (o *StorageProxyMetricsReadHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read histogram get params +func (o *StorageProxyMetricsReadHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read histogram get params +func (o *StorageProxyMetricsReadHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read histogram get params +func (o *StorageProxyMetricsReadHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsReadHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_histogram_get_responses.go new file mode 100644 index 00000000000..c5681d37053 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadHistogramGetReader is a Reader for the StorageProxyMetricsReadHistogramGet structure. +type StorageProxyMetricsReadHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadHistogramGetOK creates a StorageProxyMetricsReadHistogramGetOK with default headers values +func NewStorageProxyMetricsReadHistogramGetOK() *StorageProxyMetricsReadHistogramGetOK { + return &StorageProxyMetricsReadHistogramGetOK{} +} + +/* +StorageProxyMetricsReadHistogramGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsReadHistogramGetOK struct { +} + +func (o *StorageProxyMetricsReadHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsReadHistogramGetDefault creates a StorageProxyMetricsReadHistogramGetDefault with default headers values +func NewStorageProxyMetricsReadHistogramGetDefault(code int) *StorageProxyMetricsReadHistogramGetDefault { + return &StorageProxyMetricsReadHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadHistogramGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsReadHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read histogram get default response +func (o *StorageProxyMetricsReadHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_moving_average_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_moving_average_histogram_get_parameters.go new file mode 100644 index 00000000000..d52706c709d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_moving_average_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadMovingAverageHistogramGetParams creates a new StorageProxyMetricsReadMovingAverageHistogramGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadMovingAverageHistogramGetParams() *StorageProxyMetricsReadMovingAverageHistogramGetParams { + + return &StorageProxyMetricsReadMovingAverageHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadMovingAverageHistogramGetParamsWithTimeout creates a new StorageProxyMetricsReadMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadMovingAverageHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadMovingAverageHistogramGetParams { + + return &StorageProxyMetricsReadMovingAverageHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadMovingAverageHistogramGetParamsWithContext creates a new StorageProxyMetricsReadMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadMovingAverageHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadMovingAverageHistogramGetParams { + + return &StorageProxyMetricsReadMovingAverageHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadMovingAverageHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsReadMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadMovingAverageHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadMovingAverageHistogramGetParams { + + return &StorageProxyMetricsReadMovingAverageHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadMovingAverageHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read moving average histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadMovingAverageHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read moving average histogram get params +func (o *StorageProxyMetricsReadMovingAverageHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadMovingAverageHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read moving average histogram get params +func (o *StorageProxyMetricsReadMovingAverageHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read moving average histogram get params +func (o *StorageProxyMetricsReadMovingAverageHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadMovingAverageHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read moving average histogram get params +func (o *StorageProxyMetricsReadMovingAverageHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read moving average histogram get params +func (o *StorageProxyMetricsReadMovingAverageHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadMovingAverageHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read moving 
average histogram get params +func (o *StorageProxyMetricsReadMovingAverageHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsReadMovingAverageHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_moving_average_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_moving_average_histogram_get_responses.go new file mode 100644 index 00000000000..50e304b1c19 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_moving_average_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadMovingAverageHistogramGetReader is a Reader for the StorageProxyMetricsReadMovingAverageHistogramGet structure. +type StorageProxyMetricsReadMovingAverageHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadMovingAverageHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadMovingAverageHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadMovingAverageHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadMovingAverageHistogramGetOK creates a StorageProxyMetricsReadMovingAverageHistogramGetOK with default headers values +func NewStorageProxyMetricsReadMovingAverageHistogramGetOK() *StorageProxyMetricsReadMovingAverageHistogramGetOK { + return &StorageProxyMetricsReadMovingAverageHistogramGetOK{} +} + +/* +StorageProxyMetricsReadMovingAverageHistogramGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsReadMovingAverageHistogramGetOK struct { +} + +func (o *StorageProxyMetricsReadMovingAverageHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsReadMovingAverageHistogramGetDefault creates a StorageProxyMetricsReadMovingAverageHistogramGetDefault with default headers values +func NewStorageProxyMetricsReadMovingAverageHistogramGetDefault(code int) *StorageProxyMetricsReadMovingAverageHistogramGetDefault { + return &StorageProxyMetricsReadMovingAverageHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadMovingAverageHistogramGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsReadMovingAverageHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read moving average histogram get default response +func (o *StorageProxyMetricsReadMovingAverageHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadMovingAverageHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadMovingAverageHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadMovingAverageHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_get_parameters.go new file mode 100644 index 00000000000..eeca48375fa --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadTimeoutsGetParams creates a new StorageProxyMetricsReadTimeoutsGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadTimeoutsGetParams() *StorageProxyMetricsReadTimeoutsGetParams { + + return &StorageProxyMetricsReadTimeoutsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadTimeoutsGetParamsWithTimeout creates a new StorageProxyMetricsReadTimeoutsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadTimeoutsGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadTimeoutsGetParams { + + return &StorageProxyMetricsReadTimeoutsGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadTimeoutsGetParamsWithContext creates a new StorageProxyMetricsReadTimeoutsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadTimeoutsGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadTimeoutsGetParams { + + return &StorageProxyMetricsReadTimeoutsGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadTimeoutsGetParamsWithHTTPClient creates a new StorageProxyMetricsReadTimeoutsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadTimeoutsGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadTimeoutsGetParams { + + return &StorageProxyMetricsReadTimeoutsGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadTimeoutsGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read timeouts get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadTimeoutsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read timeouts get params +func (o *StorageProxyMetricsReadTimeoutsGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadTimeoutsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read timeouts get params +func (o *StorageProxyMetricsReadTimeoutsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read timeouts get params +func (o *StorageProxyMetricsReadTimeoutsGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadTimeoutsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read timeouts get params +func (o *StorageProxyMetricsReadTimeoutsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read timeouts get params +func (o *StorageProxyMetricsReadTimeoutsGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadTimeoutsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read timeouts get params +func (o *StorageProxyMetricsReadTimeoutsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsReadTimeoutsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_get_responses.go new file mode 100644 index 00000000000..ca08315778d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadTimeoutsGetReader is a Reader for the StorageProxyMetricsReadTimeoutsGet structure. +type StorageProxyMetricsReadTimeoutsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadTimeoutsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadTimeoutsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadTimeoutsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadTimeoutsGetOK creates a StorageProxyMetricsReadTimeoutsGetOK with default headers values +func NewStorageProxyMetricsReadTimeoutsGetOK() *StorageProxyMetricsReadTimeoutsGetOK { + return &StorageProxyMetricsReadTimeoutsGetOK{} +} + +/* +StorageProxyMetricsReadTimeoutsGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsReadTimeoutsGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsReadTimeoutsGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsReadTimeoutsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsReadTimeoutsGetDefault creates a StorageProxyMetricsReadTimeoutsGetDefault with default headers values +func NewStorageProxyMetricsReadTimeoutsGetDefault(code int) *StorageProxyMetricsReadTimeoutsGetDefault { + return &StorageProxyMetricsReadTimeoutsGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadTimeoutsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsReadTimeoutsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read timeouts get default response +func (o *StorageProxyMetricsReadTimeoutsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadTimeoutsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadTimeoutsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadTimeoutsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_rates_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_rates_get_parameters.go new file mode 100644 index 00000000000..8358addcce2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_rates_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadTimeoutsRatesGetParams creates a new StorageProxyMetricsReadTimeoutsRatesGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadTimeoutsRatesGetParams() *StorageProxyMetricsReadTimeoutsRatesGetParams { + + return &StorageProxyMetricsReadTimeoutsRatesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadTimeoutsRatesGetParamsWithTimeout creates a new StorageProxyMetricsReadTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadTimeoutsRatesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadTimeoutsRatesGetParams { + + return &StorageProxyMetricsReadTimeoutsRatesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadTimeoutsRatesGetParamsWithContext creates a new StorageProxyMetricsReadTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadTimeoutsRatesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadTimeoutsRatesGetParams { + + return &StorageProxyMetricsReadTimeoutsRatesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadTimeoutsRatesGetParamsWithHTTPClient creates a new StorageProxyMetricsReadTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadTimeoutsRatesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadTimeoutsRatesGetParams { + + return &StorageProxyMetricsReadTimeoutsRatesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadTimeoutsRatesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read timeouts rates get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadTimeoutsRatesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read timeouts rates get params +func (o *StorageProxyMetricsReadTimeoutsRatesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadTimeoutsRatesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read timeouts rates get params +func (o *StorageProxyMetricsReadTimeoutsRatesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read timeouts rates get params +func (o *StorageProxyMetricsReadTimeoutsRatesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadTimeoutsRatesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read timeouts rates get params +func (o *StorageProxyMetricsReadTimeoutsRatesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read timeouts rates get params +func (o *StorageProxyMetricsReadTimeoutsRatesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadTimeoutsRatesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read timeouts rates get params +func (o *StorageProxyMetricsReadTimeoutsRatesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsReadTimeoutsRatesGetParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_rates_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_rates_get_responses.go new file mode 100644 index 00000000000..c46776b36ac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_timeouts_rates_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadTimeoutsRatesGetReader is a Reader for the StorageProxyMetricsReadTimeoutsRatesGet structure. +type StorageProxyMetricsReadTimeoutsRatesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadTimeoutsRatesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadTimeoutsRatesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadTimeoutsRatesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadTimeoutsRatesGetOK creates a StorageProxyMetricsReadTimeoutsRatesGetOK with default headers values +func NewStorageProxyMetricsReadTimeoutsRatesGetOK() *StorageProxyMetricsReadTimeoutsRatesGetOK { + return &StorageProxyMetricsReadTimeoutsRatesGetOK{} +} + +/* +StorageProxyMetricsReadTimeoutsRatesGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsReadTimeoutsRatesGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *StorageProxyMetricsReadTimeoutsRatesGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *StorageProxyMetricsReadTimeoutsRatesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsReadTimeoutsRatesGetDefault creates a StorageProxyMetricsReadTimeoutsRatesGetDefault with default headers values +func NewStorageProxyMetricsReadTimeoutsRatesGetDefault(code int) *StorageProxyMetricsReadTimeoutsRatesGetDefault { + return &StorageProxyMetricsReadTimeoutsRatesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadTimeoutsRatesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsReadTimeoutsRatesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read timeouts rates get default response +func (o *StorageProxyMetricsReadTimeoutsRatesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadTimeoutsRatesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadTimeoutsRatesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadTimeoutsRatesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_get_parameters.go new file mode 100644 index 00000000000..e3eda39d4fc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadUnavailablesGetParams creates a new StorageProxyMetricsReadUnavailablesGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadUnavailablesGetParams() *StorageProxyMetricsReadUnavailablesGetParams { + + return &StorageProxyMetricsReadUnavailablesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadUnavailablesGetParamsWithTimeout creates a new StorageProxyMetricsReadUnavailablesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadUnavailablesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadUnavailablesGetParams { + + return &StorageProxyMetricsReadUnavailablesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadUnavailablesGetParamsWithContext creates a new StorageProxyMetricsReadUnavailablesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadUnavailablesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadUnavailablesGetParams { + + return &StorageProxyMetricsReadUnavailablesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadUnavailablesGetParamsWithHTTPClient creates a new StorageProxyMetricsReadUnavailablesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadUnavailablesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadUnavailablesGetParams { + + return &StorageProxyMetricsReadUnavailablesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadUnavailablesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read unavailables get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadUnavailablesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read unavailables get params +func (o *StorageProxyMetricsReadUnavailablesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadUnavailablesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read unavailables get params +func (o *StorageProxyMetricsReadUnavailablesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read unavailables get params +func (o *StorageProxyMetricsReadUnavailablesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadUnavailablesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read unavailables get params +func (o *StorageProxyMetricsReadUnavailablesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read unavailables get params +func (o *StorageProxyMetricsReadUnavailablesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadUnavailablesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read unavailables get params +func (o *StorageProxyMetricsReadUnavailablesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsReadUnavailablesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_get_responses.go new file mode 100644 index 00000000000..c8b9a08eb1b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadUnavailablesGetReader is a Reader for the StorageProxyMetricsReadUnavailablesGet structure. +type StorageProxyMetricsReadUnavailablesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadUnavailablesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadUnavailablesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadUnavailablesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadUnavailablesGetOK creates a StorageProxyMetricsReadUnavailablesGetOK with default headers values +func NewStorageProxyMetricsReadUnavailablesGetOK() *StorageProxyMetricsReadUnavailablesGetOK { + return &StorageProxyMetricsReadUnavailablesGetOK{} +} + +/* +StorageProxyMetricsReadUnavailablesGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsReadUnavailablesGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsReadUnavailablesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsReadUnavailablesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsReadUnavailablesGetDefault creates a StorageProxyMetricsReadUnavailablesGetDefault with default headers values +func NewStorageProxyMetricsReadUnavailablesGetDefault(code int) *StorageProxyMetricsReadUnavailablesGetDefault { + return &StorageProxyMetricsReadUnavailablesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadUnavailablesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsReadUnavailablesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read unavailables get default response +func (o *StorageProxyMetricsReadUnavailablesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadUnavailablesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadUnavailablesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadUnavailablesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_rates_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_rates_get_parameters.go new file mode 100644 index 00000000000..a7fac6a2f2e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_rates_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsReadUnavailablesRatesGetParams creates a new StorageProxyMetricsReadUnavailablesRatesGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsReadUnavailablesRatesGetParams() *StorageProxyMetricsReadUnavailablesRatesGetParams { + + return &StorageProxyMetricsReadUnavailablesRatesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsReadUnavailablesRatesGetParamsWithTimeout creates a new StorageProxyMetricsReadUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsReadUnavailablesRatesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsReadUnavailablesRatesGetParams { + + return &StorageProxyMetricsReadUnavailablesRatesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsReadUnavailablesRatesGetParamsWithContext creates a new StorageProxyMetricsReadUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsReadUnavailablesRatesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsReadUnavailablesRatesGetParams { + + return &StorageProxyMetricsReadUnavailablesRatesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsReadUnavailablesRatesGetParamsWithHTTPClient creates a new StorageProxyMetricsReadUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsReadUnavailablesRatesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsReadUnavailablesRatesGetParams { + + return &StorageProxyMetricsReadUnavailablesRatesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsReadUnavailablesRatesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics read unavailables rates get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsReadUnavailablesRatesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics read unavailables rates get params +func (o *StorageProxyMetricsReadUnavailablesRatesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsReadUnavailablesRatesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics read unavailables rates get params +func (o *StorageProxyMetricsReadUnavailablesRatesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics read unavailables rates get params +func (o *StorageProxyMetricsReadUnavailablesRatesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsReadUnavailablesRatesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics read unavailables rates get params +func (o *StorageProxyMetricsReadUnavailablesRatesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics read unavailables rates get params +func (o *StorageProxyMetricsReadUnavailablesRatesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsReadUnavailablesRatesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics read unavailables rates get params +func (o *StorageProxyMetricsReadUnavailablesRatesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes 
these params to a swagger request +func (o *StorageProxyMetricsReadUnavailablesRatesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_rates_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_rates_get_responses.go new file mode 100644 index 00000000000..886915a5324 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_read_unavailables_rates_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsReadUnavailablesRatesGetReader is a Reader for the StorageProxyMetricsReadUnavailablesRatesGet structure. +type StorageProxyMetricsReadUnavailablesRatesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsReadUnavailablesRatesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsReadUnavailablesRatesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsReadUnavailablesRatesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsReadUnavailablesRatesGetOK creates a StorageProxyMetricsReadUnavailablesRatesGetOK with default headers values +func NewStorageProxyMetricsReadUnavailablesRatesGetOK() *StorageProxyMetricsReadUnavailablesRatesGetOK { + return &StorageProxyMetricsReadUnavailablesRatesGetOK{} +} + +/* +StorageProxyMetricsReadUnavailablesRatesGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsReadUnavailablesRatesGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *StorageProxyMetricsReadUnavailablesRatesGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *StorageProxyMetricsReadUnavailablesRatesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsReadUnavailablesRatesGetDefault creates a StorageProxyMetricsReadUnavailablesRatesGetDefault with default headers values +func NewStorageProxyMetricsReadUnavailablesRatesGetDefault(code int) *StorageProxyMetricsReadUnavailablesRatesGetDefault { + return &StorageProxyMetricsReadUnavailablesRatesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsReadUnavailablesRatesGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsReadUnavailablesRatesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics read unavailables rates get default response +func (o *StorageProxyMetricsReadUnavailablesRatesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsReadUnavailablesRatesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsReadUnavailablesRatesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsReadUnavailablesRatesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_estimated_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_estimated_histogram_get_parameters.go new file mode 100644 index 00000000000..a50b4ed136e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_estimated_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteEstimatedHistogramGetParams creates a new StorageProxyMetricsWriteEstimatedHistogramGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsWriteEstimatedHistogramGetParams() *StorageProxyMetricsWriteEstimatedHistogramGetParams { + + return &StorageProxyMetricsWriteEstimatedHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteEstimatedHistogramGetParamsWithTimeout creates a new StorageProxyMetricsWriteEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteEstimatedHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteEstimatedHistogramGetParams { + + return &StorageProxyMetricsWriteEstimatedHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteEstimatedHistogramGetParamsWithContext creates a new StorageProxyMetricsWriteEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteEstimatedHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteEstimatedHistogramGetParams { + + return &StorageProxyMetricsWriteEstimatedHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteEstimatedHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteEstimatedHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteEstimatedHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteEstimatedHistogramGetParams { + + return &StorageProxyMetricsWriteEstimatedHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteEstimatedHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write estimated histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteEstimatedHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write estimated histogram get params +func (o *StorageProxyMetricsWriteEstimatedHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteEstimatedHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write estimated histogram get params +func (o *StorageProxyMetricsWriteEstimatedHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write estimated histogram get params +func (o *StorageProxyMetricsWriteEstimatedHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteEstimatedHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write estimated histogram get params +func (o *StorageProxyMetricsWriteEstimatedHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write estimated histogram get params +func (o *StorageProxyMetricsWriteEstimatedHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteEstimatedHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics write estimated histogram get params +func (o *StorageProxyMetricsWriteEstimatedHistogramGetParams) SetHTTPClient(client 
*http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteEstimatedHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_estimated_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_estimated_histogram_get_responses.go new file mode 100644 index 00000000000..58e3ea86cd0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_estimated_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteEstimatedHistogramGetReader is a Reader for the StorageProxyMetricsWriteEstimatedHistogramGet structure. +type StorageProxyMetricsWriteEstimatedHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteEstimatedHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteEstimatedHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteEstimatedHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteEstimatedHistogramGetOK creates a StorageProxyMetricsWriteEstimatedHistogramGetOK with default headers values +func NewStorageProxyMetricsWriteEstimatedHistogramGetOK() *StorageProxyMetricsWriteEstimatedHistogramGetOK { + return &StorageProxyMetricsWriteEstimatedHistogramGetOK{} +} + +/* +StorageProxyMetricsWriteEstimatedHistogramGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsWriteEstimatedHistogramGetOK struct { +} + +func (o *StorageProxyMetricsWriteEstimatedHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsWriteEstimatedHistogramGetDefault creates a StorageProxyMetricsWriteEstimatedHistogramGetDefault with default headers values +func NewStorageProxyMetricsWriteEstimatedHistogramGetDefault(code int) *StorageProxyMetricsWriteEstimatedHistogramGetDefault { + return &StorageProxyMetricsWriteEstimatedHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteEstimatedHistogramGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsWriteEstimatedHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write estimated histogram get default response +func (o *StorageProxyMetricsWriteEstimatedHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteEstimatedHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteEstimatedHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteEstimatedHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_get_parameters.go new file mode 100644 index 00000000000..ad43968248b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteGetParams creates a new StorageProxyMetricsWriteGetParams object +// with the default values initialized. 
+func NewStorageProxyMetricsWriteGetParams() *StorageProxyMetricsWriteGetParams { + + return &StorageProxyMetricsWriteGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteGetParamsWithTimeout creates a new StorageProxyMetricsWriteGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteGetParams { + + return &StorageProxyMetricsWriteGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteGetParamsWithContext creates a new StorageProxyMetricsWriteGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteGetParams { + + return &StorageProxyMetricsWriteGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteGetParams { + + return &StorageProxyMetricsWriteGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write get params +func (o *StorageProxyMetricsWriteGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write get params +func (o *StorageProxyMetricsWriteGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write get params +func (o *StorageProxyMetricsWriteGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write get params +func (o *StorageProxyMetricsWriteGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write get params +func (o *StorageProxyMetricsWriteGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics write get params +func (o *StorageProxyMetricsWriteGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_get_responses.go new file mode 100644 index 00000000000..00a0d9c12da --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteGetReader is a Reader for the StorageProxyMetricsWriteGet structure. +type StorageProxyMetricsWriteGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteGetOK creates a StorageProxyMetricsWriteGetOK with default headers values +func NewStorageProxyMetricsWriteGetOK() *StorageProxyMetricsWriteGetOK { + return &StorageProxyMetricsWriteGetOK{} +} + +/* +StorageProxyMetricsWriteGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsWriteGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsWriteGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsWriteGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsWriteGetDefault creates a StorageProxyMetricsWriteGetDefault with default headers values +func NewStorageProxyMetricsWriteGetDefault(code int) *StorageProxyMetricsWriteGetDefault { + return &StorageProxyMetricsWriteGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsWriteGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write get default response +func (o *StorageProxyMetricsWriteGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_histogram_get_parameters.go new file mode 100644 index 00000000000..e4bce2c56bb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteHistogramGetParams creates a new StorageProxyMetricsWriteHistogramGetParams object +// with the default values initialized. 
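+//
+// Hedged sketch of the timeout-bound constructor variant (the operations
+// client and its method name are assumptions, not shown in this file):
+//
+//	params := NewStorageProxyMetricsWriteHistogramGetParamsWithTimeout(5 * time.Second)
+//	_, err := client.StorageProxyMetricsWriteHistogramGet(params) // assumed method; the OK response carries no payload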
+func NewStorageProxyMetricsWriteHistogramGetParams() *StorageProxyMetricsWriteHistogramGetParams { + + return &StorageProxyMetricsWriteHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteHistogramGetParamsWithTimeout creates a new StorageProxyMetricsWriteHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteHistogramGetParams { + + return &StorageProxyMetricsWriteHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteHistogramGetParamsWithContext creates a new StorageProxyMetricsWriteHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteHistogramGetParams { + + return &StorageProxyMetricsWriteHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteHistogramGetParams { + + return &StorageProxyMetricsWriteHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write histogram get params +func (o *StorageProxyMetricsWriteHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write histogram get params +func (o *StorageProxyMetricsWriteHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write histogram get params +func (o *StorageProxyMetricsWriteHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write histogram get params +func (o *StorageProxyMetricsWriteHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write histogram get params +func (o *StorageProxyMetricsWriteHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics write histogram get params +func (o *StorageProxyMetricsWriteHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 
0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_histogram_get_responses.go new file mode 100644 index 00000000000..f5d32e071de --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteHistogramGetReader is a Reader for the StorageProxyMetricsWriteHistogramGet structure. +type StorageProxyMetricsWriteHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteHistogramGetOK creates a StorageProxyMetricsWriteHistogramGetOK with default headers values +func NewStorageProxyMetricsWriteHistogramGetOK() *StorageProxyMetricsWriteHistogramGetOK { + return &StorageProxyMetricsWriteHistogramGetOK{} +} + +/* +StorageProxyMetricsWriteHistogramGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsWriteHistogramGetOK struct { +} + +func (o *StorageProxyMetricsWriteHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsWriteHistogramGetDefault creates a StorageProxyMetricsWriteHistogramGetDefault with default headers values +func NewStorageProxyMetricsWriteHistogramGetDefault(code int) *StorageProxyMetricsWriteHistogramGetDefault { + return &StorageProxyMetricsWriteHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteHistogramGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsWriteHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write histogram get default response +func (o *StorageProxyMetricsWriteHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_moving_average_histogram_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_moving_average_histogram_get_parameters.go new file mode 100644 index 00000000000..33d5dca1c98 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_moving_average_histogram_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteMovingAverageHistogramGetParams creates a new StorageProxyMetricsWriteMovingAverageHistogramGetParams object +// with the default values initialized. 
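+//
+// Hedged sketch: scoping the request with a context (names outside this file
+// are illustrative; the operations client method is assumed):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	params := NewStorageProxyMetricsWriteMovingAverageHistogramGetParamsWithContext(ctx)
+//	_, err := client.StorageProxyMetricsWriteMovingAverageHistogramGet(params)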
+func NewStorageProxyMetricsWriteMovingAverageHistogramGetParams() *StorageProxyMetricsWriteMovingAverageHistogramGetParams { + + return &StorageProxyMetricsWriteMovingAverageHistogramGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteMovingAverageHistogramGetParamsWithTimeout creates a new StorageProxyMetricsWriteMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteMovingAverageHistogramGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteMovingAverageHistogramGetParams { + + return &StorageProxyMetricsWriteMovingAverageHistogramGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteMovingAverageHistogramGetParamsWithContext creates a new StorageProxyMetricsWriteMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteMovingAverageHistogramGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteMovingAverageHistogramGetParams { + + return &StorageProxyMetricsWriteMovingAverageHistogramGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteMovingAverageHistogramGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteMovingAverageHistogramGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteMovingAverageHistogramGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteMovingAverageHistogramGetParams { + + return &StorageProxyMetricsWriteMovingAverageHistogramGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteMovingAverageHistogramGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write moving average histogram get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteMovingAverageHistogramGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write moving average histogram get params +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteMovingAverageHistogramGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write moving average histogram get params +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write moving average histogram get params +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteMovingAverageHistogramGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write moving average histogram get params +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write moving average histogram get params +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteMovingAverageHistogramGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the 
storage proxy metrics write moving average histogram get params +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_moving_average_histogram_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_moving_average_histogram_get_responses.go new file mode 100644 index 00000000000..a81140415d5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_moving_average_histogram_get_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteMovingAverageHistogramGetReader is a Reader for the StorageProxyMetricsWriteMovingAverageHistogramGet structure. +type StorageProxyMetricsWriteMovingAverageHistogramGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteMovingAverageHistogramGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteMovingAverageHistogramGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteMovingAverageHistogramGetOK creates a StorageProxyMetricsWriteMovingAverageHistogramGetOK with default headers values +func NewStorageProxyMetricsWriteMovingAverageHistogramGetOK() *StorageProxyMetricsWriteMovingAverageHistogramGetOK { + return &StorageProxyMetricsWriteMovingAverageHistogramGetOK{} +} + +/* +StorageProxyMetricsWriteMovingAverageHistogramGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsWriteMovingAverageHistogramGetOK struct { +} + +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyMetricsWriteMovingAverageHistogramGetDefault creates a StorageProxyMetricsWriteMovingAverageHistogramGetDefault with default headers values +func NewStorageProxyMetricsWriteMovingAverageHistogramGetDefault(code int) *StorageProxyMetricsWriteMovingAverageHistogramGetDefault { + return &StorageProxyMetricsWriteMovingAverageHistogramGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteMovingAverageHistogramGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsWriteMovingAverageHistogramGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write moving average histogram get default response +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteMovingAverageHistogramGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_get_parameters.go new file mode 100644 index 00000000000..dbd36137367 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteTimeoutsGetParams creates a new StorageProxyMetricsWriteTimeoutsGetParams object +// with the default values initialized. 
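+//
+// Hedged sketch using the zero-config constructor (the "client" value and
+// its method are assumed per the standard go-swagger client layout):
+//
+//	ok, err := client.StorageProxyMetricsWriteTimeoutsGet(NewStorageProxyMetricsWriteTimeoutsGetParams())
+//	if err == nil {
+//		fmt.Println(ok.GetPayload()) // int32 count of write timeouts
+//	}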
+func NewStorageProxyMetricsWriteTimeoutsGetParams() *StorageProxyMetricsWriteTimeoutsGetParams { + + return &StorageProxyMetricsWriteTimeoutsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteTimeoutsGetParamsWithTimeout creates a new StorageProxyMetricsWriteTimeoutsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteTimeoutsGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteTimeoutsGetParams { + + return &StorageProxyMetricsWriteTimeoutsGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteTimeoutsGetParamsWithContext creates a new StorageProxyMetricsWriteTimeoutsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteTimeoutsGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteTimeoutsGetParams { + + return &StorageProxyMetricsWriteTimeoutsGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteTimeoutsGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteTimeoutsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteTimeoutsGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteTimeoutsGetParams { + + return &StorageProxyMetricsWriteTimeoutsGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteTimeoutsGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write timeouts get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteTimeoutsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write timeouts get params +func (o *StorageProxyMetricsWriteTimeoutsGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteTimeoutsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write timeouts get params +func (o *StorageProxyMetricsWriteTimeoutsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write timeouts get params +func (o *StorageProxyMetricsWriteTimeoutsGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteTimeoutsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write timeouts get params +func (o *StorageProxyMetricsWriteTimeoutsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write timeouts get params +func (o *StorageProxyMetricsWriteTimeoutsGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteTimeoutsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics write timeouts get params +func (o *StorageProxyMetricsWriteTimeoutsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteTimeoutsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_get_responses.go new file mode 100644 index 00000000000..2840b508923 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteTimeoutsGetReader is a Reader for the StorageProxyMetricsWriteTimeoutsGet structure. +type StorageProxyMetricsWriteTimeoutsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteTimeoutsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteTimeoutsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteTimeoutsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteTimeoutsGetOK creates a StorageProxyMetricsWriteTimeoutsGetOK with default headers values +func NewStorageProxyMetricsWriteTimeoutsGetOK() *StorageProxyMetricsWriteTimeoutsGetOK { + return &StorageProxyMetricsWriteTimeoutsGetOK{} +} + +/* +StorageProxyMetricsWriteTimeoutsGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsWriteTimeoutsGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsWriteTimeoutsGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsWriteTimeoutsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsWriteTimeoutsGetDefault creates a StorageProxyMetricsWriteTimeoutsGetDefault with default headers values +func NewStorageProxyMetricsWriteTimeoutsGetDefault(code int) *StorageProxyMetricsWriteTimeoutsGetDefault { + return &StorageProxyMetricsWriteTimeoutsGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteTimeoutsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsWriteTimeoutsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write timeouts get default response +func (o *StorageProxyMetricsWriteTimeoutsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteTimeoutsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteTimeoutsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteTimeoutsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_rates_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_rates_get_parameters.go new file mode 100644 index 00000000000..5fd5df10531 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_rates_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteTimeoutsRatesGetParams creates a new StorageProxyMetricsWriteTimeoutsRatesGetParams object +// with the default values initialized. 
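+//
+// Hedged sketch: this operation's OK payload is a *models.RateMovingAverage
+// (see the matching responses file); the client call itself is assumed:
+//
+//	ok, err := client.StorageProxyMetricsWriteTimeoutsRatesGet(NewStorageProxyMetricsWriteTimeoutsRatesGetParams())
+//	if err == nil && ok.GetPayload() != nil {
+//		// inspect the RateMovingAverage fields defined in the models package
+//	}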
+func NewStorageProxyMetricsWriteTimeoutsRatesGetParams() *StorageProxyMetricsWriteTimeoutsRatesGetParams { + + return &StorageProxyMetricsWriteTimeoutsRatesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteTimeoutsRatesGetParamsWithTimeout creates a new StorageProxyMetricsWriteTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteTimeoutsRatesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteTimeoutsRatesGetParams { + + return &StorageProxyMetricsWriteTimeoutsRatesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteTimeoutsRatesGetParamsWithContext creates a new StorageProxyMetricsWriteTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteTimeoutsRatesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteTimeoutsRatesGetParams { + + return &StorageProxyMetricsWriteTimeoutsRatesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteTimeoutsRatesGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteTimeoutsRatesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteTimeoutsRatesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteTimeoutsRatesGetParams { + + return &StorageProxyMetricsWriteTimeoutsRatesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteTimeoutsRatesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write timeouts rates get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteTimeoutsRatesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write timeouts rates get params +func (o *StorageProxyMetricsWriteTimeoutsRatesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteTimeoutsRatesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write timeouts rates get params +func (o *StorageProxyMetricsWriteTimeoutsRatesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write timeouts rates get params +func (o *StorageProxyMetricsWriteTimeoutsRatesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteTimeoutsRatesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write timeouts rates get params +func (o *StorageProxyMetricsWriteTimeoutsRatesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write timeouts rates get params +func (o *StorageProxyMetricsWriteTimeoutsRatesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteTimeoutsRatesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics write timeouts rates get params +func (o *StorageProxyMetricsWriteTimeoutsRatesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteTimeoutsRatesGetParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_rates_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_rates_get_responses.go new file mode 100644 index 00000000000..bb5ec880b1d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_timeouts_rates_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteTimeoutsRatesGetReader is a Reader for the StorageProxyMetricsWriteTimeoutsRatesGet structure. +type StorageProxyMetricsWriteTimeoutsRatesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteTimeoutsRatesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteTimeoutsRatesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteTimeoutsRatesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteTimeoutsRatesGetOK creates a StorageProxyMetricsWriteTimeoutsRatesGetOK with default headers values +func NewStorageProxyMetricsWriteTimeoutsRatesGetOK() *StorageProxyMetricsWriteTimeoutsRatesGetOK { + return &StorageProxyMetricsWriteTimeoutsRatesGetOK{} +} + +/* +StorageProxyMetricsWriteTimeoutsRatesGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsWriteTimeoutsRatesGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *StorageProxyMetricsWriteTimeoutsRatesGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *StorageProxyMetricsWriteTimeoutsRatesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsWriteTimeoutsRatesGetDefault creates a StorageProxyMetricsWriteTimeoutsRatesGetDefault with default headers values +func NewStorageProxyMetricsWriteTimeoutsRatesGetDefault(code int) *StorageProxyMetricsWriteTimeoutsRatesGetDefault { + return &StorageProxyMetricsWriteTimeoutsRatesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteTimeoutsRatesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsWriteTimeoutsRatesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write timeouts rates get default response +func (o *StorageProxyMetricsWriteTimeoutsRatesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteTimeoutsRatesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteTimeoutsRatesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteTimeoutsRatesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_get_parameters.go new file mode 100644 index 00000000000..189ddbac066 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteUnavailablesGetParams creates a new StorageProxyMetricsWriteUnavailablesGetParams object +// with the default values initialized. 
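+//
+// Hedged error-handling sketch: a non-2xx response surfaces as a
+// *StorageProxyMetricsWriteUnavailablesGetDefault, which implements error
+// ("errors" here means the standard library, not go-openapi/errors):
+//
+//	_, err := client.StorageProxyMetricsWriteUnavailablesGet(NewStorageProxyMetricsWriteUnavailablesGetParams())
+//	var def *StorageProxyMetricsWriteUnavailablesGetDefault
+//	if errors.As(err, &def) {
+//		log.Printf("agent returned HTTP %d: %s", def.Code(), def.GetPayload().Message)
+//	}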
+func NewStorageProxyMetricsWriteUnavailablesGetParams() *StorageProxyMetricsWriteUnavailablesGetParams { + + return &StorageProxyMetricsWriteUnavailablesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteUnavailablesGetParamsWithTimeout creates a new StorageProxyMetricsWriteUnavailablesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteUnavailablesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteUnavailablesGetParams { + + return &StorageProxyMetricsWriteUnavailablesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteUnavailablesGetParamsWithContext creates a new StorageProxyMetricsWriteUnavailablesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteUnavailablesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteUnavailablesGetParams { + + return &StorageProxyMetricsWriteUnavailablesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteUnavailablesGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteUnavailablesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteUnavailablesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteUnavailablesGetParams { + + return &StorageProxyMetricsWriteUnavailablesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteUnavailablesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write unavailables get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteUnavailablesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write unavailables get params +func (o *StorageProxyMetricsWriteUnavailablesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteUnavailablesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write unavailables get params +func (o *StorageProxyMetricsWriteUnavailablesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write unavailables get params +func (o *StorageProxyMetricsWriteUnavailablesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteUnavailablesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write unavailables get params +func (o *StorageProxyMetricsWriteUnavailablesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write unavailables get params +func (o *StorageProxyMetricsWriteUnavailablesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteUnavailablesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics write unavailables get params +func (o *StorageProxyMetricsWriteUnavailablesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteUnavailablesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) 
error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_get_responses.go new file mode 100644 index 00000000000..a66d1951308 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteUnavailablesGetReader is a Reader for the StorageProxyMetricsWriteUnavailablesGet structure. +type StorageProxyMetricsWriteUnavailablesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteUnavailablesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteUnavailablesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteUnavailablesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteUnavailablesGetOK creates a StorageProxyMetricsWriteUnavailablesGetOK with default headers values +func NewStorageProxyMetricsWriteUnavailablesGetOK() *StorageProxyMetricsWriteUnavailablesGetOK { + return &StorageProxyMetricsWriteUnavailablesGetOK{} +} + +/* +StorageProxyMetricsWriteUnavailablesGetOK handles this case with default header values. + +Success +*/ +type StorageProxyMetricsWriteUnavailablesGetOK struct { + Payload int32 +} + +func (o *StorageProxyMetricsWriteUnavailablesGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageProxyMetricsWriteUnavailablesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsWriteUnavailablesGetDefault creates a StorageProxyMetricsWriteUnavailablesGetDefault with default headers values +func NewStorageProxyMetricsWriteUnavailablesGetDefault(code int) *StorageProxyMetricsWriteUnavailablesGetDefault { + return &StorageProxyMetricsWriteUnavailablesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteUnavailablesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyMetricsWriteUnavailablesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write unavailables get default response +func (o *StorageProxyMetricsWriteUnavailablesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteUnavailablesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteUnavailablesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteUnavailablesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_rates_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_rates_get_parameters.go new file mode 100644 index 00000000000..5bc601c32dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_rates_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyMetricsWriteUnavailablesRatesGetParams creates a new StorageProxyMetricsWriteUnavailablesRatesGetParams object +// with the default values initialized. 
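+//
+// Hedged sketch with a caller-supplied *http.Client (the operations client
+// and its method name are assumptions):
+//
+//	hc := &http.Client{Timeout: 30 * time.Second}
+//	params := NewStorageProxyMetricsWriteUnavailablesRatesGetParamsWithHTTPClient(hc)
+//	_, err := client.StorageProxyMetricsWriteUnavailablesRatesGet(params)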
+func NewStorageProxyMetricsWriteUnavailablesRatesGetParams() *StorageProxyMetricsWriteUnavailablesRatesGetParams { + + return &StorageProxyMetricsWriteUnavailablesRatesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyMetricsWriteUnavailablesRatesGetParamsWithTimeout creates a new StorageProxyMetricsWriteUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyMetricsWriteUnavailablesRatesGetParamsWithTimeout(timeout time.Duration) *StorageProxyMetricsWriteUnavailablesRatesGetParams { + + return &StorageProxyMetricsWriteUnavailablesRatesGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyMetricsWriteUnavailablesRatesGetParamsWithContext creates a new StorageProxyMetricsWriteUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyMetricsWriteUnavailablesRatesGetParamsWithContext(ctx context.Context) *StorageProxyMetricsWriteUnavailablesRatesGetParams { + + return &StorageProxyMetricsWriteUnavailablesRatesGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyMetricsWriteUnavailablesRatesGetParamsWithHTTPClient creates a new StorageProxyMetricsWriteUnavailablesRatesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyMetricsWriteUnavailablesRatesGetParamsWithHTTPClient(client *http.Client) *StorageProxyMetricsWriteUnavailablesRatesGetParams { + + return &StorageProxyMetricsWriteUnavailablesRatesGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyMetricsWriteUnavailablesRatesGetParams contains all the parameters to send to the API endpoint +for the storage proxy metrics write unavailables rates get operation typically these are written to a http.Request +*/ +type StorageProxyMetricsWriteUnavailablesRatesGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy metrics write unavailables rates get params +func (o *StorageProxyMetricsWriteUnavailablesRatesGetParams) WithTimeout(timeout time.Duration) *StorageProxyMetricsWriteUnavailablesRatesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy metrics write unavailables rates get params +func (o *StorageProxyMetricsWriteUnavailablesRatesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy metrics write unavailables rates get params +func (o *StorageProxyMetricsWriteUnavailablesRatesGetParams) WithContext(ctx context.Context) *StorageProxyMetricsWriteUnavailablesRatesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy metrics write unavailables rates get params +func (o *StorageProxyMetricsWriteUnavailablesRatesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy metrics write unavailables rates get params +func (o *StorageProxyMetricsWriteUnavailablesRatesGetParams) WithHTTPClient(client *http.Client) *StorageProxyMetricsWriteUnavailablesRatesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy metrics write unavailables rates get params +func (o *StorageProxyMetricsWriteUnavailablesRatesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = 
client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyMetricsWriteUnavailablesRatesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_rates_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_rates_get_responses.go new file mode 100644 index 00000000000..2733de277f8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_metrics_write_unavailables_rates_get_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyMetricsWriteUnavailablesRatesGetReader is a Reader for the StorageProxyMetricsWriteUnavailablesRatesGet structure. +type StorageProxyMetricsWriteUnavailablesRatesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyMetricsWriteUnavailablesRatesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyMetricsWriteUnavailablesRatesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyMetricsWriteUnavailablesRatesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyMetricsWriteUnavailablesRatesGetOK creates a StorageProxyMetricsWriteUnavailablesRatesGetOK with default headers values +func NewStorageProxyMetricsWriteUnavailablesRatesGetOK() *StorageProxyMetricsWriteUnavailablesRatesGetOK { + return &StorageProxyMetricsWriteUnavailablesRatesGetOK{} +} + +/* +StorageProxyMetricsWriteUnavailablesRatesGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyMetricsWriteUnavailablesRatesGetOK struct { + Payload *models.RateMovingAverage +} + +func (o *StorageProxyMetricsWriteUnavailablesRatesGetOK) GetPayload() *models.RateMovingAverage { + return o.Payload +} + +func (o *StorageProxyMetricsWriteUnavailablesRatesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.RateMovingAverage) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyMetricsWriteUnavailablesRatesGetDefault creates a StorageProxyMetricsWriteUnavailablesRatesGetDefault with default headers values +func NewStorageProxyMetricsWriteUnavailablesRatesGetDefault(code int) *StorageProxyMetricsWriteUnavailablesRatesGetDefault { + return &StorageProxyMetricsWriteUnavailablesRatesGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyMetricsWriteUnavailablesRatesGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyMetricsWriteUnavailablesRatesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy metrics write unavailables rates get default response +func (o *StorageProxyMetricsWriteUnavailablesRatesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyMetricsWriteUnavailablesRatesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyMetricsWriteUnavailablesRatesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyMetricsWriteUnavailablesRatesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_get_parameters.go new file mode 100644 index 00000000000..8145a6f738c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyRangeRPCTimeoutGetParams creates a new StorageProxyRangeRPCTimeoutGetParams object +// with the default values initialized. 
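+//
+// Hedged sketch: the OK payload for this endpoint is an untyped interface{}
+// (see the matching responses file), so callers type-assert as needed; the
+// operations client method name is assumed:
+//
+//	ok, err := client.StorageProxyRangeRPCTimeoutGet(NewStorageProxyRangeRPCTimeoutGetParams())
+//	if err == nil {
+//		if v, isNum := ok.GetPayload().(float64); isNum { // encoding/json decodes JSON numbers as float64
+//			fmt.Println("range RPC timeout:", v)
+//		}
+//	}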
+func NewStorageProxyRangeRPCTimeoutGetParams() *StorageProxyRangeRPCTimeoutGetParams { + + return &StorageProxyRangeRPCTimeoutGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyRangeRPCTimeoutGetParamsWithTimeout creates a new StorageProxyRangeRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyRangeRPCTimeoutGetParamsWithTimeout(timeout time.Duration) *StorageProxyRangeRPCTimeoutGetParams { + + return &StorageProxyRangeRPCTimeoutGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyRangeRPCTimeoutGetParamsWithContext creates a new StorageProxyRangeRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyRangeRPCTimeoutGetParamsWithContext(ctx context.Context) *StorageProxyRangeRPCTimeoutGetParams { + + return &StorageProxyRangeRPCTimeoutGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyRangeRPCTimeoutGetParamsWithHTTPClient creates a new StorageProxyRangeRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyRangeRPCTimeoutGetParamsWithHTTPClient(client *http.Client) *StorageProxyRangeRPCTimeoutGetParams { + + return &StorageProxyRangeRPCTimeoutGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyRangeRPCTimeoutGetParams contains all the parameters to send to the API endpoint +for the storage proxy range Rpc timeout get operation typically these are written to a http.Request +*/ +type StorageProxyRangeRPCTimeoutGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy range Rpc timeout get params +func (o *StorageProxyRangeRPCTimeoutGetParams) WithTimeout(timeout time.Duration) *StorageProxyRangeRPCTimeoutGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy range Rpc timeout get params +func (o *StorageProxyRangeRPCTimeoutGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy range Rpc timeout get params +func (o *StorageProxyRangeRPCTimeoutGetParams) WithContext(ctx context.Context) *StorageProxyRangeRPCTimeoutGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy range Rpc timeout get params +func (o *StorageProxyRangeRPCTimeoutGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy range Rpc timeout get params +func (o *StorageProxyRangeRPCTimeoutGetParams) WithHTTPClient(client *http.Client) *StorageProxyRangeRPCTimeoutGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy range Rpc timeout get params +func (o *StorageProxyRangeRPCTimeoutGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyRangeRPCTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_get_responses.go new file mode 100644 index 00000000000..305eea738ff --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyRangeRPCTimeoutGetReader is a Reader for the StorageProxyRangeRPCTimeoutGet structure. +type StorageProxyRangeRPCTimeoutGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyRangeRPCTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyRangeRPCTimeoutGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyRangeRPCTimeoutGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyRangeRPCTimeoutGetOK creates a StorageProxyRangeRPCTimeoutGetOK with default headers values +func NewStorageProxyRangeRPCTimeoutGetOK() *StorageProxyRangeRPCTimeoutGetOK { + return &StorageProxyRangeRPCTimeoutGetOK{} +} + +/* +StorageProxyRangeRPCTimeoutGetOK handles this case with default header values. + +Success +*/ +type StorageProxyRangeRPCTimeoutGetOK struct { + Payload interface{} +} + +func (o *StorageProxyRangeRPCTimeoutGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyRangeRPCTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyRangeRPCTimeoutGetDefault creates a StorageProxyRangeRPCTimeoutGetDefault with default headers values +func NewStorageProxyRangeRPCTimeoutGetDefault(code int) *StorageProxyRangeRPCTimeoutGetDefault { + return &StorageProxyRangeRPCTimeoutGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyRangeRPCTimeoutGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyRangeRPCTimeoutGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy range Rpc timeout get default response +func (o *StorageProxyRangeRPCTimeoutGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyRangeRPCTimeoutGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyRangeRPCTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyRangeRPCTimeoutGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_post_parameters.go new file mode 100644 index 00000000000..ed5b5e94ee7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyRangeRPCTimeoutPostParams creates a new StorageProxyRangeRPCTimeoutPostParams object +// with the default values initialized. 
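+//
+// Because this endpoint takes a "timeout" query parameter (seconds, sent as
+// a string), the generator renames the usual HTTP request timeout field to
+// requestTimeout. A hedged sketch (client wiring assumed, as above):
+//
+//	params := operations.NewStorageProxyRangeRPCTimeoutPostParams().
+//		WithRequestTimeout(30 * time.Second). // HTTP deadline
+//		WithTimeout("10")                     // becomes ?timeout=10
+//	_, err := client.Operations.StorageProxyRangeRPCTimeoutPost(params)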
+func NewStorageProxyRangeRPCTimeoutPostParams() *StorageProxyRangeRPCTimeoutPostParams { + var () + return &StorageProxyRangeRPCTimeoutPostParams{ + + requestTimeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyRangeRPCTimeoutPostParamsWithTimeout creates a new StorageProxyRangeRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyRangeRPCTimeoutPostParamsWithTimeout(timeout time.Duration) *StorageProxyRangeRPCTimeoutPostParams { + var () + return &StorageProxyRangeRPCTimeoutPostParams{ + + requestTimeout: timeout, + } +} + +// NewStorageProxyRangeRPCTimeoutPostParamsWithContext creates a new StorageProxyRangeRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyRangeRPCTimeoutPostParamsWithContext(ctx context.Context) *StorageProxyRangeRPCTimeoutPostParams { + var () + return &StorageProxyRangeRPCTimeoutPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyRangeRPCTimeoutPostParamsWithHTTPClient creates a new StorageProxyRangeRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyRangeRPCTimeoutPostParamsWithHTTPClient(client *http.Client) *StorageProxyRangeRPCTimeoutPostParams { + var () + return &StorageProxyRangeRPCTimeoutPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyRangeRPCTimeoutPostParams contains all the parameters to send to the API endpoint +for the storage proxy range Rpc timeout post operation typically these are written to a http.Request +*/ +type StorageProxyRangeRPCTimeoutPostParams struct { + + /*Timeout + timeout in second + + */ + Timeout string + + requestTimeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithRequestTimeout adds the timeout to the storage proxy range Rpc timeout post params +func (o *StorageProxyRangeRPCTimeoutPostParams) WithRequestTimeout(timeout time.Duration) *StorageProxyRangeRPCTimeoutPostParams { + o.SetRequestTimeout(timeout) + return o +} + +// SetRequestTimeout adds the timeout to the storage proxy range Rpc timeout post params +func (o *StorageProxyRangeRPCTimeoutPostParams) SetRequestTimeout(timeout time.Duration) { + o.requestTimeout = timeout +} + +// WithContext adds the context to the storage proxy range Rpc timeout post params +func (o *StorageProxyRangeRPCTimeoutPostParams) WithContext(ctx context.Context) *StorageProxyRangeRPCTimeoutPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy range Rpc timeout post params +func (o *StorageProxyRangeRPCTimeoutPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy range Rpc timeout post params +func (o *StorageProxyRangeRPCTimeoutPostParams) WithHTTPClient(client *http.Client) *StorageProxyRangeRPCTimeoutPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy range Rpc timeout post params +func (o *StorageProxyRangeRPCTimeoutPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTimeout adds the timeout to the storage proxy range Rpc timeout post params +func (o *StorageProxyRangeRPCTimeoutPostParams) WithTimeout(timeout string) *StorageProxyRangeRPCTimeoutPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy range Rpc timeout 
post params +func (o *StorageProxyRangeRPCTimeoutPostParams) SetTimeout(timeout string) { + o.Timeout = timeout +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyRangeRPCTimeoutPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.requestTimeout); err != nil { + return err + } + var res []error + + // query param timeout + qrTimeout := o.Timeout + qTimeout := qrTimeout + if qTimeout != "" { + if err := r.SetQueryParam("timeout", qTimeout); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_post_responses.go new file mode 100644 index 00000000000..c3484daf286 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_range_rpc_timeout_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyRangeRPCTimeoutPostReader is a Reader for the StorageProxyRangeRPCTimeoutPost structure. +type StorageProxyRangeRPCTimeoutPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyRangeRPCTimeoutPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyRangeRPCTimeoutPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyRangeRPCTimeoutPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyRangeRPCTimeoutPostOK creates a StorageProxyRangeRPCTimeoutPostOK with default headers values +func NewStorageProxyRangeRPCTimeoutPostOK() *StorageProxyRangeRPCTimeoutPostOK { + return &StorageProxyRangeRPCTimeoutPostOK{} +} + +/* +StorageProxyRangeRPCTimeoutPostOK handles this case with default header values. + +Success +*/ +type StorageProxyRangeRPCTimeoutPostOK struct { +} + +func (o *StorageProxyRangeRPCTimeoutPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyRangeRPCTimeoutPostDefault creates a StorageProxyRangeRPCTimeoutPostDefault with default headers values +func NewStorageProxyRangeRPCTimeoutPostDefault(code int) *StorageProxyRangeRPCTimeoutPostDefault { + return &StorageProxyRangeRPCTimeoutPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyRangeRPCTimeoutPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyRangeRPCTimeoutPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy range Rpc timeout post default response +func (o *StorageProxyRangeRPCTimeoutPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyRangeRPCTimeoutPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyRangeRPCTimeoutPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyRangeRPCTimeoutPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_attempted_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_attempted_get_parameters.go new file mode 100644 index 00000000000..2cf834f9c7d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_attempted_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyReadRepairAttemptedGetParams creates a new StorageProxyReadRepairAttemptedGetParams object +// with the default values initialized. 
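+//
+// For cancellation and deadlines an explicit context can be attached instead
+// of a plain timeout (sketch; the Operations client is assumed as above):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	params := operations.NewStorageProxyReadRepairAttemptedGetParamsWithContext(ctx)
+//	resp, err := client.Operations.StorageProxyReadRepairAttemptedGet(params)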
+func NewStorageProxyReadRepairAttemptedGetParams() *StorageProxyReadRepairAttemptedGetParams { + + return &StorageProxyReadRepairAttemptedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyReadRepairAttemptedGetParamsWithTimeout creates a new StorageProxyReadRepairAttemptedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyReadRepairAttemptedGetParamsWithTimeout(timeout time.Duration) *StorageProxyReadRepairAttemptedGetParams { + + return &StorageProxyReadRepairAttemptedGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyReadRepairAttemptedGetParamsWithContext creates a new StorageProxyReadRepairAttemptedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyReadRepairAttemptedGetParamsWithContext(ctx context.Context) *StorageProxyReadRepairAttemptedGetParams { + + return &StorageProxyReadRepairAttemptedGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyReadRepairAttemptedGetParamsWithHTTPClient creates a new StorageProxyReadRepairAttemptedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyReadRepairAttemptedGetParamsWithHTTPClient(client *http.Client) *StorageProxyReadRepairAttemptedGetParams { + + return &StorageProxyReadRepairAttemptedGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyReadRepairAttemptedGetParams contains all the parameters to send to the API endpoint +for the storage proxy read repair attempted get operation typically these are written to a http.Request +*/ +type StorageProxyReadRepairAttemptedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy read repair attempted get params +func (o *StorageProxyReadRepairAttemptedGetParams) WithTimeout(timeout time.Duration) *StorageProxyReadRepairAttemptedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy read repair attempted get params +func (o *StorageProxyReadRepairAttemptedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy read repair attempted get params +func (o *StorageProxyReadRepairAttemptedGetParams) WithContext(ctx context.Context) *StorageProxyReadRepairAttemptedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy read repair attempted get params +func (o *StorageProxyReadRepairAttemptedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy read repair attempted get params +func (o *StorageProxyReadRepairAttemptedGetParams) WithHTTPClient(client *http.Client) *StorageProxyReadRepairAttemptedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy read repair attempted get params +func (o *StorageProxyReadRepairAttemptedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyReadRepairAttemptedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_attempted_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_attempted_get_responses.go new file mode 100644 index 00000000000..11af928e00a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_attempted_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyReadRepairAttemptedGetReader is a Reader for the StorageProxyReadRepairAttemptedGet structure. +type StorageProxyReadRepairAttemptedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyReadRepairAttemptedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyReadRepairAttemptedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyReadRepairAttemptedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyReadRepairAttemptedGetOK creates a StorageProxyReadRepairAttemptedGetOK with default headers values +func NewStorageProxyReadRepairAttemptedGetOK() *StorageProxyReadRepairAttemptedGetOK { + return &StorageProxyReadRepairAttemptedGetOK{} +} + +/* +StorageProxyReadRepairAttemptedGetOK handles this case with default header values. + +Success +*/ +type StorageProxyReadRepairAttemptedGetOK struct { + Payload interface{} +} + +func (o *StorageProxyReadRepairAttemptedGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyReadRepairAttemptedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyReadRepairAttemptedGetDefault creates a StorageProxyReadRepairAttemptedGetDefault with default headers values +func NewStorageProxyReadRepairAttemptedGetDefault(code int) *StorageProxyReadRepairAttemptedGetDefault { + return &StorageProxyReadRepairAttemptedGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyReadRepairAttemptedGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyReadRepairAttemptedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy read repair attempted get default response +func (o *StorageProxyReadRepairAttemptedGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyReadRepairAttemptedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyReadRepairAttemptedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyReadRepairAttemptedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_background_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_background_get_parameters.go new file mode 100644 index 00000000000..00aa5841c68 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_background_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyReadRepairRepairedBackgroundGetParams creates a new StorageProxyReadRepairRepairedBackgroundGetParams object +// with the default values initialized. 
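+//
+// A caller-supplied *http.Client (custom TLS, proxies, etc.) can be injected
+// per request; sketch only:
+//
+//	hc := &http.Client{Timeout: 10 * time.Second}
+//	params := operations.NewStorageProxyReadRepairRepairedBackgroundGetParamsWithHTTPClient(hc)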
+func NewStorageProxyReadRepairRepairedBackgroundGetParams() *StorageProxyReadRepairRepairedBackgroundGetParams { + + return &StorageProxyReadRepairRepairedBackgroundGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyReadRepairRepairedBackgroundGetParamsWithTimeout creates a new StorageProxyReadRepairRepairedBackgroundGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyReadRepairRepairedBackgroundGetParamsWithTimeout(timeout time.Duration) *StorageProxyReadRepairRepairedBackgroundGetParams { + + return &StorageProxyReadRepairRepairedBackgroundGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyReadRepairRepairedBackgroundGetParamsWithContext creates a new StorageProxyReadRepairRepairedBackgroundGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyReadRepairRepairedBackgroundGetParamsWithContext(ctx context.Context) *StorageProxyReadRepairRepairedBackgroundGetParams { + + return &StorageProxyReadRepairRepairedBackgroundGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyReadRepairRepairedBackgroundGetParamsWithHTTPClient creates a new StorageProxyReadRepairRepairedBackgroundGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyReadRepairRepairedBackgroundGetParamsWithHTTPClient(client *http.Client) *StorageProxyReadRepairRepairedBackgroundGetParams { + + return &StorageProxyReadRepairRepairedBackgroundGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyReadRepairRepairedBackgroundGetParams contains all the parameters to send to the API endpoint +for the storage proxy read repair repaired background get operation typically these are written to a http.Request +*/ +type StorageProxyReadRepairRepairedBackgroundGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy read repair repaired background get params +func (o *StorageProxyReadRepairRepairedBackgroundGetParams) WithTimeout(timeout time.Duration) *StorageProxyReadRepairRepairedBackgroundGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy read repair repaired background get params +func (o *StorageProxyReadRepairRepairedBackgroundGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy read repair repaired background get params +func (o *StorageProxyReadRepairRepairedBackgroundGetParams) WithContext(ctx context.Context) *StorageProxyReadRepairRepairedBackgroundGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy read repair repaired background get params +func (o *StorageProxyReadRepairRepairedBackgroundGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy read repair repaired background get params +func (o *StorageProxyReadRepairRepairedBackgroundGetParams) WithHTTPClient(client *http.Client) *StorageProxyReadRepairRepairedBackgroundGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy read repair repaired background get params +func (o *StorageProxyReadRepairRepairedBackgroundGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes 
these params to a swagger request +func (o *StorageProxyReadRepairRepairedBackgroundGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_background_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_background_get_responses.go new file mode 100644 index 00000000000..d765b560b36 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_background_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyReadRepairRepairedBackgroundGetReader is a Reader for the StorageProxyReadRepairRepairedBackgroundGet structure. +type StorageProxyReadRepairRepairedBackgroundGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyReadRepairRepairedBackgroundGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyReadRepairRepairedBackgroundGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyReadRepairRepairedBackgroundGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyReadRepairRepairedBackgroundGetOK creates a StorageProxyReadRepairRepairedBackgroundGetOK with default headers values +func NewStorageProxyReadRepairRepairedBackgroundGetOK() *StorageProxyReadRepairRepairedBackgroundGetOK { + return &StorageProxyReadRepairRepairedBackgroundGetOK{} +} + +/* +StorageProxyReadRepairRepairedBackgroundGetOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyReadRepairRepairedBackgroundGetOK struct { + Payload interface{} +} + +func (o *StorageProxyReadRepairRepairedBackgroundGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyReadRepairRepairedBackgroundGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyReadRepairRepairedBackgroundGetDefault creates a StorageProxyReadRepairRepairedBackgroundGetDefault with default headers values +func NewStorageProxyReadRepairRepairedBackgroundGetDefault(code int) *StorageProxyReadRepairRepairedBackgroundGetDefault { + return &StorageProxyReadRepairRepairedBackgroundGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyReadRepairRepairedBackgroundGetDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyReadRepairRepairedBackgroundGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy read repair repaired background get default response +func (o *StorageProxyReadRepairRepairedBackgroundGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyReadRepairRepairedBackgroundGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyReadRepairRepairedBackgroundGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyReadRepairRepairedBackgroundGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_blocking_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_blocking_get_parameters.go new file mode 100644 index 00000000000..7807056b190 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_blocking_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyReadRepairRepairedBlockingGetParams creates a new StorageProxyReadRepairRepairedBlockingGetParams object +// with the default values initialized. 
+func NewStorageProxyReadRepairRepairedBlockingGetParams() *StorageProxyReadRepairRepairedBlockingGetParams { + + return &StorageProxyReadRepairRepairedBlockingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyReadRepairRepairedBlockingGetParamsWithTimeout creates a new StorageProxyReadRepairRepairedBlockingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyReadRepairRepairedBlockingGetParamsWithTimeout(timeout time.Duration) *StorageProxyReadRepairRepairedBlockingGetParams { + + return &StorageProxyReadRepairRepairedBlockingGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyReadRepairRepairedBlockingGetParamsWithContext creates a new StorageProxyReadRepairRepairedBlockingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyReadRepairRepairedBlockingGetParamsWithContext(ctx context.Context) *StorageProxyReadRepairRepairedBlockingGetParams { + + return &StorageProxyReadRepairRepairedBlockingGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyReadRepairRepairedBlockingGetParamsWithHTTPClient creates a new StorageProxyReadRepairRepairedBlockingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyReadRepairRepairedBlockingGetParamsWithHTTPClient(client *http.Client) *StorageProxyReadRepairRepairedBlockingGetParams { + + return &StorageProxyReadRepairRepairedBlockingGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyReadRepairRepairedBlockingGetParams contains all the parameters to send to the API endpoint +for the storage proxy read repair repaired blocking get operation typically these are written to a http.Request +*/ +type StorageProxyReadRepairRepairedBlockingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy read repair repaired blocking get params +func (o *StorageProxyReadRepairRepairedBlockingGetParams) WithTimeout(timeout time.Duration) *StorageProxyReadRepairRepairedBlockingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy read repair repaired blocking get params +func (o *StorageProxyReadRepairRepairedBlockingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy read repair repaired blocking get params +func (o *StorageProxyReadRepairRepairedBlockingGetParams) WithContext(ctx context.Context) *StorageProxyReadRepairRepairedBlockingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy read repair repaired blocking get params +func (o *StorageProxyReadRepairRepairedBlockingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy read repair repaired blocking get params +func (o *StorageProxyReadRepairRepairedBlockingGetParams) WithHTTPClient(client *http.Client) *StorageProxyReadRepairRepairedBlockingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy read repair repaired blocking get params +func (o *StorageProxyReadRepairRepairedBlockingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*StorageProxyReadRepairRepairedBlockingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_blocking_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_blocking_get_responses.go new file mode 100644 index 00000000000..a5abab95450 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_repair_repaired_blocking_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyReadRepairRepairedBlockingGetReader is a Reader for the StorageProxyReadRepairRepairedBlockingGet structure. +type StorageProxyReadRepairRepairedBlockingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyReadRepairRepairedBlockingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyReadRepairRepairedBlockingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyReadRepairRepairedBlockingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyReadRepairRepairedBlockingGetOK creates a StorageProxyReadRepairRepairedBlockingGetOK with default headers values +func NewStorageProxyReadRepairRepairedBlockingGetOK() *StorageProxyReadRepairRepairedBlockingGetOK { + return &StorageProxyReadRepairRepairedBlockingGetOK{} +} + +/* +StorageProxyReadRepairRepairedBlockingGetOK handles this case with default header values. + +Success +*/ +type StorageProxyReadRepairRepairedBlockingGetOK struct { + Payload interface{} +} + +func (o *StorageProxyReadRepairRepairedBlockingGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyReadRepairRepairedBlockingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyReadRepairRepairedBlockingGetDefault creates a StorageProxyReadRepairRepairedBlockingGetDefault with default headers values +func NewStorageProxyReadRepairRepairedBlockingGetDefault(code int) *StorageProxyReadRepairRepairedBlockingGetDefault { + return &StorageProxyReadRepairRepairedBlockingGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyReadRepairRepairedBlockingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyReadRepairRepairedBlockingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy read repair repaired blocking get default response +func (o *StorageProxyReadRepairRepairedBlockingGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyReadRepairRepairedBlockingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyReadRepairRepairedBlockingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyReadRepairRepairedBlockingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_get_parameters.go new file mode 100644 index 00000000000..9076ce5db2e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyReadRPCTimeoutGetParams creates a new StorageProxyReadRPCTimeoutGetParams object +// with the default values initialized. 
+func NewStorageProxyReadRPCTimeoutGetParams() *StorageProxyReadRPCTimeoutGetParams { + + return &StorageProxyReadRPCTimeoutGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyReadRPCTimeoutGetParamsWithTimeout creates a new StorageProxyReadRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyReadRPCTimeoutGetParamsWithTimeout(timeout time.Duration) *StorageProxyReadRPCTimeoutGetParams { + + return &StorageProxyReadRPCTimeoutGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyReadRPCTimeoutGetParamsWithContext creates a new StorageProxyReadRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyReadRPCTimeoutGetParamsWithContext(ctx context.Context) *StorageProxyReadRPCTimeoutGetParams { + + return &StorageProxyReadRPCTimeoutGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyReadRPCTimeoutGetParamsWithHTTPClient creates a new StorageProxyReadRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyReadRPCTimeoutGetParamsWithHTTPClient(client *http.Client) *StorageProxyReadRPCTimeoutGetParams { + + return &StorageProxyReadRPCTimeoutGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyReadRPCTimeoutGetParams contains all the parameters to send to the API endpoint +for the storage proxy read Rpc timeout get operation typically these are written to a http.Request +*/ +type StorageProxyReadRPCTimeoutGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy read Rpc timeout get params +func (o *StorageProxyReadRPCTimeoutGetParams) WithTimeout(timeout time.Duration) *StorageProxyReadRPCTimeoutGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy read Rpc timeout get params +func (o *StorageProxyReadRPCTimeoutGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy read Rpc timeout get params +func (o *StorageProxyReadRPCTimeoutGetParams) WithContext(ctx context.Context) *StorageProxyReadRPCTimeoutGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy read Rpc timeout get params +func (o *StorageProxyReadRPCTimeoutGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy read Rpc timeout get params +func (o *StorageProxyReadRPCTimeoutGetParams) WithHTTPClient(client *http.Client) *StorageProxyReadRPCTimeoutGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy read Rpc timeout get params +func (o *StorageProxyReadRPCTimeoutGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyReadRPCTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_get_responses.go new file mode 100644 index 00000000000..62a00581441 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyReadRPCTimeoutGetReader is a Reader for the StorageProxyReadRPCTimeoutGet structure. +type StorageProxyReadRPCTimeoutGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyReadRPCTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyReadRPCTimeoutGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyReadRPCTimeoutGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyReadRPCTimeoutGetOK creates a StorageProxyReadRPCTimeoutGetOK with default headers values +func NewStorageProxyReadRPCTimeoutGetOK() *StorageProxyReadRPCTimeoutGetOK { + return &StorageProxyReadRPCTimeoutGetOK{} +} + +/* +StorageProxyReadRPCTimeoutGetOK handles this case with default header values. + +Success +*/ +type StorageProxyReadRPCTimeoutGetOK struct { + Payload interface{} +} + +func (o *StorageProxyReadRPCTimeoutGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyReadRPCTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyReadRPCTimeoutGetDefault creates a StorageProxyReadRPCTimeoutGetDefault with default headers values +func NewStorageProxyReadRPCTimeoutGetDefault(code int) *StorageProxyReadRPCTimeoutGetDefault { + return &StorageProxyReadRPCTimeoutGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyReadRPCTimeoutGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyReadRPCTimeoutGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy read Rpc timeout get default response +func (o *StorageProxyReadRPCTimeoutGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyReadRPCTimeoutGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyReadRPCTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyReadRPCTimeoutGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_post_parameters.go new file mode 100644 index 00000000000..d48fbaf544f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyReadRPCTimeoutPostParams creates a new StorageProxyReadRPCTimeoutPostParams object +// with the default values initialized. 
+func NewStorageProxyReadRPCTimeoutPostParams() *StorageProxyReadRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyReadRPCTimeoutPostParams{
+
+		requestTimeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageProxyReadRPCTimeoutPostParamsWithTimeout creates a new StorageProxyReadRPCTimeoutPostParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageProxyReadRPCTimeoutPostParamsWithTimeout(timeout time.Duration) *StorageProxyReadRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyReadRPCTimeoutPostParams{
+
+		requestTimeout: timeout,
+	}
+}
+
+// NewStorageProxyReadRPCTimeoutPostParamsWithContext creates a new StorageProxyReadRPCTimeoutPostParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageProxyReadRPCTimeoutPostParamsWithContext(ctx context.Context) *StorageProxyReadRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyReadRPCTimeoutPostParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageProxyReadRPCTimeoutPostParamsWithHTTPClient creates a new StorageProxyReadRPCTimeoutPostParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageProxyReadRPCTimeoutPostParamsWithHTTPClient(client *http.Client) *StorageProxyReadRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyReadRPCTimeoutPostParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageProxyReadRPCTimeoutPostParams contains all the parameters to send to the API endpoint
+for the storage proxy read Rpc timeout post operation typically these are written to a http.Request
+*/
+type StorageProxyReadRPCTimeoutPostParams struct {
+
+	/*Timeout
+	  The timeout in second
+
+	*/
+	Timeout string
+
+	requestTimeout time.Duration
+	Context        context.Context
+	HTTPClient     *http.Client
+}
+
+// WithRequestTimeout adds the timeout to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) WithRequestTimeout(timeout time.Duration) *StorageProxyReadRPCTimeoutPostParams {
+	o.SetRequestTimeout(timeout)
+	return o
+}
+
+// SetRequestTimeout adds the timeout to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) SetRequestTimeout(timeout time.Duration) {
+	o.requestTimeout = timeout
+}
+
+// WithContext adds the context to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) WithContext(ctx context.Context) *StorageProxyReadRPCTimeoutPostParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) WithHTTPClient(client *http.Client) *StorageProxyReadRPCTimeoutPostParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithTimeout adds the timeout to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) WithTimeout(timeout string) *StorageProxyReadRPCTimeoutPostParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage proxy read Rpc timeout post params
+func (o *StorageProxyReadRPCTimeoutPostParams) SetTimeout(timeout string) {
+	o.Timeout = timeout
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageProxyReadRPCTimeoutPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.requestTimeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// query param timeout
+	qrTimeout := o.Timeout
+	qTimeout := qrTimeout
+	if qTimeout != "" {
+		if err := r.SetQueryParam("timeout", qTimeout); err != nil {
+			return err
+		}
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_post_responses.go
new file mode 100644
index 00000000000..31995bfc234
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_read_rpc_timeout_post_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageProxyReadRPCTimeoutPostReader is a Reader for the StorageProxyReadRPCTimeoutPost structure.
+type StorageProxyReadRPCTimeoutPostReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageProxyReadRPCTimeoutPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageProxyReadRPCTimeoutPostOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageProxyReadRPCTimeoutPostDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageProxyReadRPCTimeoutPostOK creates a StorageProxyReadRPCTimeoutPostOK with default headers values
+func NewStorageProxyReadRPCTimeoutPostOK() *StorageProxyReadRPCTimeoutPostOK {
+	return &StorageProxyReadRPCTimeoutPostOK{}
+}
+
+/*
+StorageProxyReadRPCTimeoutPostOK handles this case with default header values.
+
+Success
+*/
+type StorageProxyReadRPCTimeoutPostOK struct {
+}
+
+func (o *StorageProxyReadRPCTimeoutPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewStorageProxyReadRPCTimeoutPostDefault creates a StorageProxyReadRPCTimeoutPostDefault with default headers values
+func NewStorageProxyReadRPCTimeoutPostDefault(code int) *StorageProxyReadRPCTimeoutPostDefault {
+	return &StorageProxyReadRPCTimeoutPostDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageProxyReadRPCTimeoutPostDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageProxyReadRPCTimeoutPostDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage proxy read Rpc timeout post default response
+func (o *StorageProxyReadRPCTimeoutPostDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageProxyReadRPCTimeoutPostDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageProxyReadRPCTimeoutPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageProxyReadRPCTimeoutPostDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
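The pair of generated files above shows the usual go-swagger split: a *Params type whose fluent With* setters return the receiver, and a *Reader that maps status codes onto typed results. Note the two distinct timeouts on the params type: the unexported requestTimeout is the client-side HTTP deadline (WithRequestTimeout), while the exported Timeout string is the operation's "timeout" query parameter, in seconds (WithTimeout). The sketch below is illustrative only and is not part of this diff; it assumes a hypothetical client value whose Operations field is the generated operations.Client, with a StorageProxyReadRPCTimeoutPost method that this diff does not show.

	// Illustrative sketch (hypothetical client; not vendored code).
	params := operations.NewStorageProxyReadRPCTimeoutPostParams().
		WithContext(ctx).                     // request-scoped context
		WithRequestTimeout(30 * time.Second). // client-side HTTP deadline
		WithTimeout("10")                     // "timeout" query param, in seconds
	if _, err := client.Operations.StorageProxyReadRPCTimeoutPost(params); err != nil {
		// non-2xx responses surface as *StorageProxyReadRPCTimeoutPostDefault
		log.Printf("setting read RPC timeout failed: %v", err)
	}
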
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_reload_trigger_classes_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_reload_trigger_classes_post_parameters.go
new file mode 100644
index 00000000000..eb3a1ac5b92
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_reload_trigger_classes_post_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageProxyReloadTriggerClassesPostParams creates a new StorageProxyReloadTriggerClassesPostParams object
+// with the default values initialized.
+func NewStorageProxyReloadTriggerClassesPostParams() *StorageProxyReloadTriggerClassesPostParams {
+
+	return &StorageProxyReloadTriggerClassesPostParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageProxyReloadTriggerClassesPostParamsWithTimeout creates a new StorageProxyReloadTriggerClassesPostParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageProxyReloadTriggerClassesPostParamsWithTimeout(timeout time.Duration) *StorageProxyReloadTriggerClassesPostParams {
+
+	return &StorageProxyReloadTriggerClassesPostParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageProxyReloadTriggerClassesPostParamsWithContext creates a new StorageProxyReloadTriggerClassesPostParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageProxyReloadTriggerClassesPostParamsWithContext(ctx context.Context) *StorageProxyReloadTriggerClassesPostParams {
+
+	return &StorageProxyReloadTriggerClassesPostParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageProxyReloadTriggerClassesPostParamsWithHTTPClient creates a new StorageProxyReloadTriggerClassesPostParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageProxyReloadTriggerClassesPostParamsWithHTTPClient(client *http.Client) *StorageProxyReloadTriggerClassesPostParams {
+
+	return &StorageProxyReloadTriggerClassesPostParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageProxyReloadTriggerClassesPostParams contains all the parameters to send to the API endpoint
+for the storage proxy reload trigger classes post operation typically these are written to a http.Request
+*/
+type StorageProxyReloadTriggerClassesPostParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage proxy reload trigger classes post params
+func (o *StorageProxyReloadTriggerClassesPostParams) WithTimeout(timeout time.Duration) *StorageProxyReloadTriggerClassesPostParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage proxy reload trigger classes post params
+func (o *StorageProxyReloadTriggerClassesPostParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage proxy reload trigger classes post params
+func (o *StorageProxyReloadTriggerClassesPostParams) WithContext(ctx context.Context) *StorageProxyReloadTriggerClassesPostParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage proxy reload trigger classes post params
+func (o *StorageProxyReloadTriggerClassesPostParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage proxy reload trigger classes post params
+func (o *StorageProxyReloadTriggerClassesPostParams) WithHTTPClient(client *http.Client) *StorageProxyReloadTriggerClassesPostParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage proxy reload trigger classes post params
+func (o *StorageProxyReloadTriggerClassesPostParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageProxyReloadTriggerClassesPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_reload_trigger_classes_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_reload_trigger_classes_post_responses.go
new file mode 100644
index 00000000000..3b979ad9fa2
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_reload_trigger_classes_post_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageProxyReloadTriggerClassesPostReader is a Reader for the StorageProxyReloadTriggerClassesPost structure.
+type StorageProxyReloadTriggerClassesPostReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageProxyReloadTriggerClassesPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageProxyReloadTriggerClassesPostOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageProxyReloadTriggerClassesPostDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageProxyReloadTriggerClassesPostOK creates a StorageProxyReloadTriggerClassesPostOK with default headers values
+func NewStorageProxyReloadTriggerClassesPostOK() *StorageProxyReloadTriggerClassesPostOK {
+	return &StorageProxyReloadTriggerClassesPostOK{}
+}
+
+/*
+StorageProxyReloadTriggerClassesPostOK handles this case with default header values.
+
+Success
+*/
+type StorageProxyReloadTriggerClassesPostOK struct {
+}
+
+func (o *StorageProxyReloadTriggerClassesPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewStorageProxyReloadTriggerClassesPostDefault creates a StorageProxyReloadTriggerClassesPostDefault with default headers values
+func NewStorageProxyReloadTriggerClassesPostDefault(code int) *StorageProxyReloadTriggerClassesPostDefault {
+	return &StorageProxyReloadTriggerClassesPostDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageProxyReloadTriggerClassesPostDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageProxyReloadTriggerClassesPostDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage proxy reload trigger classes post default response
+func (o *StorageProxyReloadTriggerClassesPostDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageProxyReloadTriggerClassesPostDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageProxyReloadTriggerClassesPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageProxyReloadTriggerClassesPostDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_get_parameters.go
new file mode 100644
index 00000000000..19762041168
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageProxyRPCTimeoutGetParams creates a new StorageProxyRPCTimeoutGetParams object
+// with the default values initialized.
+func NewStorageProxyRPCTimeoutGetParams() *StorageProxyRPCTimeoutGetParams {
+
+	return &StorageProxyRPCTimeoutGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageProxyRPCTimeoutGetParamsWithTimeout creates a new StorageProxyRPCTimeoutGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageProxyRPCTimeoutGetParamsWithTimeout(timeout time.Duration) *StorageProxyRPCTimeoutGetParams {
+
+	return &StorageProxyRPCTimeoutGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageProxyRPCTimeoutGetParamsWithContext creates a new StorageProxyRPCTimeoutGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageProxyRPCTimeoutGetParamsWithContext(ctx context.Context) *StorageProxyRPCTimeoutGetParams {
+
+	return &StorageProxyRPCTimeoutGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageProxyRPCTimeoutGetParamsWithHTTPClient creates a new StorageProxyRPCTimeoutGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageProxyRPCTimeoutGetParamsWithHTTPClient(client *http.Client) *StorageProxyRPCTimeoutGetParams {
+
+	return &StorageProxyRPCTimeoutGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageProxyRPCTimeoutGetParams contains all the parameters to send to the API endpoint
+for the storage proxy Rpc timeout get operation typically these are written to a http.Request
+*/
+type StorageProxyRPCTimeoutGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage proxy Rpc timeout get params
+func (o *StorageProxyRPCTimeoutGetParams) WithTimeout(timeout time.Duration) *StorageProxyRPCTimeoutGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage proxy Rpc timeout get params
+func (o *StorageProxyRPCTimeoutGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage proxy Rpc timeout get params
+func (o *StorageProxyRPCTimeoutGetParams) WithContext(ctx context.Context) *StorageProxyRPCTimeoutGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage proxy Rpc timeout get params
+func (o *StorageProxyRPCTimeoutGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage proxy Rpc timeout get params
+func (o *StorageProxyRPCTimeoutGetParams) WithHTTPClient(client *http.Client) *StorageProxyRPCTimeoutGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage proxy Rpc timeout get params
+func (o *StorageProxyRPCTimeoutGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageProxyRPCTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_get_responses.go
new file mode 100644
index 00000000000..0c48c10af58
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageProxyRPCTimeoutGetReader is a Reader for the StorageProxyRPCTimeoutGet structure.
+type StorageProxyRPCTimeoutGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageProxyRPCTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageProxyRPCTimeoutGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageProxyRPCTimeoutGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageProxyRPCTimeoutGetOK creates a StorageProxyRPCTimeoutGetOK with default headers values
+func NewStorageProxyRPCTimeoutGetOK() *StorageProxyRPCTimeoutGetOK {
+	return &StorageProxyRPCTimeoutGetOK{}
+}
+
+/*
+StorageProxyRPCTimeoutGetOK handles this case with default header values.
+
+Success
+*/
+type StorageProxyRPCTimeoutGetOK struct {
+	Payload interface{}
+}
+
+func (o *StorageProxyRPCTimeoutGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *StorageProxyRPCTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewStorageProxyRPCTimeoutGetDefault creates a StorageProxyRPCTimeoutGetDefault with default headers values
+func NewStorageProxyRPCTimeoutGetDefault(code int) *StorageProxyRPCTimeoutGetDefault {
+	return &StorageProxyRPCTimeoutGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageProxyRPCTimeoutGetDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageProxyRPCTimeoutGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage proxy Rpc timeout get default response
+func (o *StorageProxyRPCTimeoutGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageProxyRPCTimeoutGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageProxyRPCTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageProxyRPCTimeoutGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_post_parameters.go
new file mode 100644
index 00000000000..0f0556a7bae
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_post_parameters.go
@@ -0,0 +1,140 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageProxyRPCTimeoutPostParams creates a new StorageProxyRPCTimeoutPostParams object
+// with the default values initialized.
+func NewStorageProxyRPCTimeoutPostParams() *StorageProxyRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyRPCTimeoutPostParams{
+
+		requestTimeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageProxyRPCTimeoutPostParamsWithTimeout creates a new StorageProxyRPCTimeoutPostParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageProxyRPCTimeoutPostParamsWithTimeout(timeout time.Duration) *StorageProxyRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyRPCTimeoutPostParams{
+
+		requestTimeout: timeout,
+	}
+}
+
+// NewStorageProxyRPCTimeoutPostParamsWithContext creates a new StorageProxyRPCTimeoutPostParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageProxyRPCTimeoutPostParamsWithContext(ctx context.Context) *StorageProxyRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyRPCTimeoutPostParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageProxyRPCTimeoutPostParamsWithHTTPClient creates a new StorageProxyRPCTimeoutPostParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageProxyRPCTimeoutPostParamsWithHTTPClient(client *http.Client) *StorageProxyRPCTimeoutPostParams {
+	var ()
+	return &StorageProxyRPCTimeoutPostParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageProxyRPCTimeoutPostParams contains all the parameters to send to the API endpoint
+for the storage proxy Rpc timeout post operation typically these are written to a http.Request
+*/
+type StorageProxyRPCTimeoutPostParams struct {
+
+	/*Timeout
+	  Timeout in seconds
+
+	*/
+	Timeout string
+
+	requestTimeout time.Duration
+	Context        context.Context
+	HTTPClient     *http.Client
+}
+
+// WithRequestTimeout adds the timeout to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) WithRequestTimeout(timeout time.Duration) *StorageProxyRPCTimeoutPostParams {
+	o.SetRequestTimeout(timeout)
+	return o
+}
+
+// SetRequestTimeout adds the timeout to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) SetRequestTimeout(timeout time.Duration) {
+	o.requestTimeout = timeout
+}
+
+// WithContext adds the context to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) WithContext(ctx context.Context) *StorageProxyRPCTimeoutPostParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) WithHTTPClient(client *http.Client) *StorageProxyRPCTimeoutPostParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithTimeout adds the timeout to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) WithTimeout(timeout string) *StorageProxyRPCTimeoutPostParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage proxy Rpc timeout post params
+func (o *StorageProxyRPCTimeoutPostParams) SetTimeout(timeout string) {
+	o.Timeout = timeout
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageProxyRPCTimeoutPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.requestTimeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// query param timeout
+	qrTimeout := o.Timeout
+	qTimeout := qrTimeout
+	if qTimeout != "" {
+		if err := r.SetQueryParam("timeout", qTimeout); err != nil {
+			return err
+		}
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_post_responses.go
new file mode 100644
index 00000000000..9c3a6b7416f
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_rpc_timeout_post_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageProxyRPCTimeoutPostReader is a Reader for the StorageProxyRPCTimeoutPost structure.
+type StorageProxyRPCTimeoutPostReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageProxyRPCTimeoutPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageProxyRPCTimeoutPostOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageProxyRPCTimeoutPostDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageProxyRPCTimeoutPostOK creates a StorageProxyRPCTimeoutPostOK with default headers values
+func NewStorageProxyRPCTimeoutPostOK() *StorageProxyRPCTimeoutPostOK {
+	return &StorageProxyRPCTimeoutPostOK{}
+}
+
+/*
+StorageProxyRPCTimeoutPostOK handles this case with default header values.
+
+Success
+*/
+type StorageProxyRPCTimeoutPostOK struct {
+}
+
+func (o *StorageProxyRPCTimeoutPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewStorageProxyRPCTimeoutPostDefault creates a StorageProxyRPCTimeoutPostDefault with default headers values
+func NewStorageProxyRPCTimeoutPostDefault(code int) *StorageProxyRPCTimeoutPostDefault {
+	return &StorageProxyRPCTimeoutPostDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageProxyRPCTimeoutPostDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageProxyRPCTimeoutPostDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage proxy Rpc timeout post default response
+func (o *StorageProxyRPCTimeoutPostDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageProxyRPCTimeoutPostDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageProxyRPCTimeoutPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageProxyRPCTimeoutPostDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
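Each generated *Default type above implements the error interface through its Error() method, so the typed error payload can be recovered from a failed call with the standard library's errors.As. A sketch under the same hypothetical-client assumption as the earlier example:

	// Illustrative error handling (hypothetical client; not vendored code).
	if _, err := client.Operations.StorageProxyRPCTimeoutPost(params); err != nil {
		var apiErr *operations.StorageProxyRPCTimeoutPostDefault
		if errors.As(err, &apiErr) { // stdlib errors, not go-openapi/errors
			log.Printf("agent returned HTTP %d: %s", apiErr.Code(), apiErr.GetPayload().Message)
		}
	}
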
+func NewStorageProxySchemaVersionsGetParams() *StorageProxySchemaVersionsGetParams { + + return &StorageProxySchemaVersionsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxySchemaVersionsGetParamsWithTimeout creates a new StorageProxySchemaVersionsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxySchemaVersionsGetParamsWithTimeout(timeout time.Duration) *StorageProxySchemaVersionsGetParams { + + return &StorageProxySchemaVersionsGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxySchemaVersionsGetParamsWithContext creates a new StorageProxySchemaVersionsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxySchemaVersionsGetParamsWithContext(ctx context.Context) *StorageProxySchemaVersionsGetParams { + + return &StorageProxySchemaVersionsGetParams{ + + Context: ctx, + } +} + +// NewStorageProxySchemaVersionsGetParamsWithHTTPClient creates a new StorageProxySchemaVersionsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxySchemaVersionsGetParamsWithHTTPClient(client *http.Client) *StorageProxySchemaVersionsGetParams { + + return &StorageProxySchemaVersionsGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxySchemaVersionsGetParams contains all the parameters to send to the API endpoint +for the storage proxy schema versions get operation typically these are written to a http.Request +*/ +type StorageProxySchemaVersionsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy schema versions get params +func (o *StorageProxySchemaVersionsGetParams) WithTimeout(timeout time.Duration) *StorageProxySchemaVersionsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy schema versions get params +func (o *StorageProxySchemaVersionsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy schema versions get params +func (o *StorageProxySchemaVersionsGetParams) WithContext(ctx context.Context) *StorageProxySchemaVersionsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy schema versions get params +func (o *StorageProxySchemaVersionsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy schema versions get params +func (o *StorageProxySchemaVersionsGetParams) WithHTTPClient(client *http.Client) *StorageProxySchemaVersionsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy schema versions get params +func (o *StorageProxySchemaVersionsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxySchemaVersionsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_schema_versions_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_schema_versions_get_responses.go new file mode 100644 index 00000000000..640f4d7d8d5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_schema_versions_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxySchemaVersionsGetReader is a Reader for the StorageProxySchemaVersionsGet structure. +type StorageProxySchemaVersionsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxySchemaVersionsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxySchemaVersionsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxySchemaVersionsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxySchemaVersionsGetOK creates a StorageProxySchemaVersionsGetOK with default headers values +func NewStorageProxySchemaVersionsGetOK() *StorageProxySchemaVersionsGetOK { + return &StorageProxySchemaVersionsGetOK{} +} + +/* +StorageProxySchemaVersionsGetOK handles this case with default header values. + +Success +*/ +type StorageProxySchemaVersionsGetOK struct { + Payload []*models.MapperList +} + +func (o *StorageProxySchemaVersionsGetOK) GetPayload() []*models.MapperList { + return o.Payload +} + +func (o *StorageProxySchemaVersionsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxySchemaVersionsGetDefault creates a StorageProxySchemaVersionsGetDefault with default headers values +func NewStorageProxySchemaVersionsGetDefault(code int) *StorageProxySchemaVersionsGetDefault { + return &StorageProxySchemaVersionsGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxySchemaVersionsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxySchemaVersionsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy schema versions get default response +func (o *StorageProxySchemaVersionsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxySchemaVersionsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxySchemaVersionsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxySchemaVersionsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_total_hints_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_total_hints_get_parameters.go new file mode 100644 index 00000000000..9a5ffc17de4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_total_hints_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyTotalHintsGetParams creates a new StorageProxyTotalHintsGetParams object +// with the default values initialized. 
+func NewStorageProxyTotalHintsGetParams() *StorageProxyTotalHintsGetParams { + + return &StorageProxyTotalHintsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyTotalHintsGetParamsWithTimeout creates a new StorageProxyTotalHintsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyTotalHintsGetParamsWithTimeout(timeout time.Duration) *StorageProxyTotalHintsGetParams { + + return &StorageProxyTotalHintsGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyTotalHintsGetParamsWithContext creates a new StorageProxyTotalHintsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyTotalHintsGetParamsWithContext(ctx context.Context) *StorageProxyTotalHintsGetParams { + + return &StorageProxyTotalHintsGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyTotalHintsGetParamsWithHTTPClient creates a new StorageProxyTotalHintsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyTotalHintsGetParamsWithHTTPClient(client *http.Client) *StorageProxyTotalHintsGetParams { + + return &StorageProxyTotalHintsGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyTotalHintsGetParams contains all the parameters to send to the API endpoint +for the storage proxy total hints get operation typically these are written to a http.Request +*/ +type StorageProxyTotalHintsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy total hints get params +func (o *StorageProxyTotalHintsGetParams) WithTimeout(timeout time.Duration) *StorageProxyTotalHintsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy total hints get params +func (o *StorageProxyTotalHintsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy total hints get params +func (o *StorageProxyTotalHintsGetParams) WithContext(ctx context.Context) *StorageProxyTotalHintsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy total hints get params +func (o *StorageProxyTotalHintsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy total hints get params +func (o *StorageProxyTotalHintsGetParams) WithHTTPClient(client *http.Client) *StorageProxyTotalHintsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy total hints get params +func (o *StorageProxyTotalHintsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyTotalHintsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_total_hints_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_total_hints_get_responses.go new file mode 100644 index 00000000000..364221ed5d9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_total_hints_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyTotalHintsGetReader is a Reader for the StorageProxyTotalHintsGet structure. +type StorageProxyTotalHintsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyTotalHintsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyTotalHintsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyTotalHintsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyTotalHintsGetOK creates a StorageProxyTotalHintsGetOK with default headers values +func NewStorageProxyTotalHintsGetOK() *StorageProxyTotalHintsGetOK { + return &StorageProxyTotalHintsGetOK{} +} + +/* +StorageProxyTotalHintsGetOK handles this case with default header values. + +Success +*/ +type StorageProxyTotalHintsGetOK struct { + Payload interface{} +} + +func (o *StorageProxyTotalHintsGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyTotalHintsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyTotalHintsGetDefault creates a StorageProxyTotalHintsGetDefault with default headers values +func NewStorageProxyTotalHintsGetDefault(code int) *StorageProxyTotalHintsGetDefault { + return &StorageProxyTotalHintsGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyTotalHintsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyTotalHintsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy total hints get default response +func (o *StorageProxyTotalHintsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyTotalHintsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyTotalHintsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyTotalHintsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_get_parameters.go new file mode 100644 index 00000000000..002feefc9d6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyTruncateRPCTimeoutGetParams creates a new StorageProxyTruncateRPCTimeoutGetParams object +// with the default values initialized. 
+func NewStorageProxyTruncateRPCTimeoutGetParams() *StorageProxyTruncateRPCTimeoutGetParams { + + return &StorageProxyTruncateRPCTimeoutGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyTruncateRPCTimeoutGetParamsWithTimeout creates a new StorageProxyTruncateRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyTruncateRPCTimeoutGetParamsWithTimeout(timeout time.Duration) *StorageProxyTruncateRPCTimeoutGetParams { + + return &StorageProxyTruncateRPCTimeoutGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyTruncateRPCTimeoutGetParamsWithContext creates a new StorageProxyTruncateRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyTruncateRPCTimeoutGetParamsWithContext(ctx context.Context) *StorageProxyTruncateRPCTimeoutGetParams { + + return &StorageProxyTruncateRPCTimeoutGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyTruncateRPCTimeoutGetParamsWithHTTPClient creates a new StorageProxyTruncateRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyTruncateRPCTimeoutGetParamsWithHTTPClient(client *http.Client) *StorageProxyTruncateRPCTimeoutGetParams { + + return &StorageProxyTruncateRPCTimeoutGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyTruncateRPCTimeoutGetParams contains all the parameters to send to the API endpoint +for the storage proxy truncate Rpc timeout get operation typically these are written to a http.Request +*/ +type StorageProxyTruncateRPCTimeoutGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy truncate Rpc timeout get params +func (o *StorageProxyTruncateRPCTimeoutGetParams) WithTimeout(timeout time.Duration) *StorageProxyTruncateRPCTimeoutGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy truncate Rpc timeout get params +func (o *StorageProxyTruncateRPCTimeoutGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy truncate Rpc timeout get params +func (o *StorageProxyTruncateRPCTimeoutGetParams) WithContext(ctx context.Context) *StorageProxyTruncateRPCTimeoutGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy truncate Rpc timeout get params +func (o *StorageProxyTruncateRPCTimeoutGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy truncate Rpc timeout get params +func (o *StorageProxyTruncateRPCTimeoutGetParams) WithHTTPClient(client *http.Client) *StorageProxyTruncateRPCTimeoutGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy truncate Rpc timeout get params +func (o *StorageProxyTruncateRPCTimeoutGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyTruncateRPCTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_get_responses.go new file mode 100644 index 00000000000..4a039c99782 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyTruncateRPCTimeoutGetReader is a Reader for the StorageProxyTruncateRPCTimeoutGet structure. +type StorageProxyTruncateRPCTimeoutGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyTruncateRPCTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyTruncateRPCTimeoutGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyTruncateRPCTimeoutGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyTruncateRPCTimeoutGetOK creates a StorageProxyTruncateRPCTimeoutGetOK with default headers values +func NewStorageProxyTruncateRPCTimeoutGetOK() *StorageProxyTruncateRPCTimeoutGetOK { + return &StorageProxyTruncateRPCTimeoutGetOK{} +} + +/* +StorageProxyTruncateRPCTimeoutGetOK handles this case with default header values. + +Success +*/ +type StorageProxyTruncateRPCTimeoutGetOK struct { + Payload interface{} +} + +func (o *StorageProxyTruncateRPCTimeoutGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyTruncateRPCTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyTruncateRPCTimeoutGetDefault creates a StorageProxyTruncateRPCTimeoutGetDefault with default headers values +func NewStorageProxyTruncateRPCTimeoutGetDefault(code int) *StorageProxyTruncateRPCTimeoutGetDefault { + return &StorageProxyTruncateRPCTimeoutGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyTruncateRPCTimeoutGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyTruncateRPCTimeoutGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy truncate Rpc timeout get default response +func (o *StorageProxyTruncateRPCTimeoutGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyTruncateRPCTimeoutGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyTruncateRPCTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyTruncateRPCTimeoutGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_post_parameters.go new file mode 100644 index 00000000000..87171067f85 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyTruncateRPCTimeoutPostParams creates a new StorageProxyTruncateRPCTimeoutPostParams object +// with the default values initialized. 
+func NewStorageProxyTruncateRPCTimeoutPostParams() *StorageProxyTruncateRPCTimeoutPostParams { + var () + return &StorageProxyTruncateRPCTimeoutPostParams{ + + requestTimeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyTruncateRPCTimeoutPostParamsWithTimeout creates a new StorageProxyTruncateRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyTruncateRPCTimeoutPostParamsWithTimeout(timeout time.Duration) *StorageProxyTruncateRPCTimeoutPostParams { + var () + return &StorageProxyTruncateRPCTimeoutPostParams{ + + requestTimeout: timeout, + } +} + +// NewStorageProxyTruncateRPCTimeoutPostParamsWithContext creates a new StorageProxyTruncateRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyTruncateRPCTimeoutPostParamsWithContext(ctx context.Context) *StorageProxyTruncateRPCTimeoutPostParams { + var () + return &StorageProxyTruncateRPCTimeoutPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyTruncateRPCTimeoutPostParamsWithHTTPClient creates a new StorageProxyTruncateRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyTruncateRPCTimeoutPostParamsWithHTTPClient(client *http.Client) *StorageProxyTruncateRPCTimeoutPostParams { + var () + return &StorageProxyTruncateRPCTimeoutPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyTruncateRPCTimeoutPostParams contains all the parameters to send to the API endpoint +for the storage proxy truncate Rpc timeout post operation typically these are written to a http.Request +*/ +type StorageProxyTruncateRPCTimeoutPostParams struct { + + /*Timeout + timeout in second + + */ + Timeout string + + requestTimeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithRequestTimeout adds the timeout to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) WithRequestTimeout(timeout time.Duration) *StorageProxyTruncateRPCTimeoutPostParams { + o.SetRequestTimeout(timeout) + return o +} + +// SetRequestTimeout adds the timeout to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) SetRequestTimeout(timeout time.Duration) { + o.requestTimeout = timeout +} + +// WithContext adds the context to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) WithContext(ctx context.Context) *StorageProxyTruncateRPCTimeoutPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) WithHTTPClient(client *http.Client) *StorageProxyTruncateRPCTimeoutPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTimeout adds the timeout to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) WithTimeout(timeout string) *StorageProxyTruncateRPCTimeoutPostParams 
{ + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy truncate Rpc timeout post params +func (o *StorageProxyTruncateRPCTimeoutPostParams) SetTimeout(timeout string) { + o.Timeout = timeout +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyTruncateRPCTimeoutPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.requestTimeout); err != nil { + return err + } + var res []error + + // query param timeout + qrTimeout := o.Timeout + qTimeout := qrTimeout + if qTimeout != "" { + if err := r.SetQueryParam("timeout", qTimeout); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_post_responses.go new file mode 100644 index 00000000000..484abfa0bff --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_truncate_rpc_timeout_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyTruncateRPCTimeoutPostReader is a Reader for the StorageProxyTruncateRPCTimeoutPost structure. +type StorageProxyTruncateRPCTimeoutPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyTruncateRPCTimeoutPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyTruncateRPCTimeoutPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyTruncateRPCTimeoutPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyTruncateRPCTimeoutPostOK creates a StorageProxyTruncateRPCTimeoutPostOK with default headers values +func NewStorageProxyTruncateRPCTimeoutPostOK() *StorageProxyTruncateRPCTimeoutPostOK { + return &StorageProxyTruncateRPCTimeoutPostOK{} +} + +/* +StorageProxyTruncateRPCTimeoutPostOK handles this case with default header values. 
+ +Success +*/ +type StorageProxyTruncateRPCTimeoutPostOK struct { +} + +func (o *StorageProxyTruncateRPCTimeoutPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyTruncateRPCTimeoutPostDefault creates a StorageProxyTruncateRPCTimeoutPostDefault with default headers values +func NewStorageProxyTruncateRPCTimeoutPostDefault(code int) *StorageProxyTruncateRPCTimeoutPostDefault { + return &StorageProxyTruncateRPCTimeoutPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyTruncateRPCTimeoutPostDefault handles this case with default header values. + +internal server error +*/ +type StorageProxyTruncateRPCTimeoutPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy truncate Rpc timeout post default response +func (o *StorageProxyTruncateRPCTimeoutPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyTruncateRPCTimeoutPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyTruncateRPCTimeoutPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyTruncateRPCTimeoutPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_get_parameters.go new file mode 100644 index 00000000000..9a8fa5c2dd3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyWriteRPCTimeoutGetParams creates a new StorageProxyWriteRPCTimeoutGetParams object +// with the default values initialized. 
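One quirk worth noting in StorageProxyTruncateRPCTimeoutPostParams above: because the operation has its own "timeout" query parameter, go-swagger renames the usual HTTP-deadline field to requestTimeout. The string Timeout is the value sent to Scylla (in seconds), while WithRequestTimeout bounds the HTTP call itself. A minimal usage sketch, assuming the conventional go-swagger operations client (operations.Client and its StorageProxyTruncateRPCTimeoutPost method are generated elsewhere and are not part of this diff):

package scyllaops_sketch

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// setTruncateRPCTimeout sets the truncate RPC timeout to 60s while capping
// the HTTP round trip at 30s. opsClient is the assumed generated client.
func setTruncateRPCTimeout(ctx context.Context, opsClient *operations.Client) error {
	params := operations.NewStorageProxyTruncateRPCTimeoutPostParams().
		WithTimeout("60").                    // "timeout" query param: seconds
		WithRequestTimeout(30 * time.Second). // client-side HTTP deadline
		WithContext(ctx)
	_, err := opsClient.StorageProxyTruncateRPCTimeoutPost(params) // assumed method
	return err
}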
+func NewStorageProxyWriteRPCTimeoutGetParams() *StorageProxyWriteRPCTimeoutGetParams { + + return &StorageProxyWriteRPCTimeoutGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyWriteRPCTimeoutGetParamsWithTimeout creates a new StorageProxyWriteRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyWriteRPCTimeoutGetParamsWithTimeout(timeout time.Duration) *StorageProxyWriteRPCTimeoutGetParams { + + return &StorageProxyWriteRPCTimeoutGetParams{ + + timeout: timeout, + } +} + +// NewStorageProxyWriteRPCTimeoutGetParamsWithContext creates a new StorageProxyWriteRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyWriteRPCTimeoutGetParamsWithContext(ctx context.Context) *StorageProxyWriteRPCTimeoutGetParams { + + return &StorageProxyWriteRPCTimeoutGetParams{ + + Context: ctx, + } +} + +// NewStorageProxyWriteRPCTimeoutGetParamsWithHTTPClient creates a new StorageProxyWriteRPCTimeoutGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyWriteRPCTimeoutGetParamsWithHTTPClient(client *http.Client) *StorageProxyWriteRPCTimeoutGetParams { + + return &StorageProxyWriteRPCTimeoutGetParams{ + HTTPClient: client, + } +} + +/* +StorageProxyWriteRPCTimeoutGetParams contains all the parameters to send to the API endpoint +for the storage proxy write Rpc timeout get operation typically these are written to a http.Request +*/ +type StorageProxyWriteRPCTimeoutGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage proxy write Rpc timeout get params +func (o *StorageProxyWriteRPCTimeoutGetParams) WithTimeout(timeout time.Duration) *StorageProxyWriteRPCTimeoutGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy write Rpc timeout get params +func (o *StorageProxyWriteRPCTimeoutGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage proxy write Rpc timeout get params +func (o *StorageProxyWriteRPCTimeoutGetParams) WithContext(ctx context.Context) *StorageProxyWriteRPCTimeoutGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy write Rpc timeout get params +func (o *StorageProxyWriteRPCTimeoutGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy write Rpc timeout get params +func (o *StorageProxyWriteRPCTimeoutGetParams) WithHTTPClient(client *http.Client) *StorageProxyWriteRPCTimeoutGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy write Rpc timeout get params +func (o *StorageProxyWriteRPCTimeoutGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyWriteRPCTimeoutGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_get_responses.go new file mode 100644 index 00000000000..b70bb9b1b86 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyWriteRPCTimeoutGetReader is a Reader for the StorageProxyWriteRPCTimeoutGet structure. +type StorageProxyWriteRPCTimeoutGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyWriteRPCTimeoutGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyWriteRPCTimeoutGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyWriteRPCTimeoutGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyWriteRPCTimeoutGetOK creates a StorageProxyWriteRPCTimeoutGetOK with default headers values +func NewStorageProxyWriteRPCTimeoutGetOK() *StorageProxyWriteRPCTimeoutGetOK { + return &StorageProxyWriteRPCTimeoutGetOK{} +} + +/* +StorageProxyWriteRPCTimeoutGetOK handles this case with default header values. + +Success +*/ +type StorageProxyWriteRPCTimeoutGetOK struct { + Payload interface{} +} + +func (o *StorageProxyWriteRPCTimeoutGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageProxyWriteRPCTimeoutGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageProxyWriteRPCTimeoutGetDefault creates a StorageProxyWriteRPCTimeoutGetDefault with default headers values +func NewStorageProxyWriteRPCTimeoutGetDefault(code int) *StorageProxyWriteRPCTimeoutGetDefault { + return &StorageProxyWriteRPCTimeoutGetDefault{ + _statusCode: code, + } +} + +/* +StorageProxyWriteRPCTimeoutGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyWriteRPCTimeoutGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy write Rpc timeout get default response +func (o *StorageProxyWriteRPCTimeoutGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyWriteRPCTimeoutGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyWriteRPCTimeoutGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyWriteRPCTimeoutGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_post_parameters.go new file mode 100644 index 00000000000..444c4355c65 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageProxyWriteRPCTimeoutPostParams creates a new StorageProxyWriteRPCTimeoutPostParams object +// with the default values initialized. 
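The 200 response in the GET reader just above is untyped in the Swagger spec, so StorageProxyWriteRPCTimeoutGetOK carries a bare interface{} payload and callers must normalize whatever the server actually returned. A hedged sketch in the same package as the earlier one (adds "fmt" and "strconv" to its imports):

// readWriteRPCTimeout fetches the current write RPC timeout and normalizes
// the common JSON decodings of the untyped payload.
func readWriteRPCTimeout(ctx context.Context, opsClient *operations.Client) (string, error) {
	resp, err := opsClient.StorageProxyWriteRPCTimeoutGet( // assumed generated client method
		operations.NewStorageProxyWriteRPCTimeoutGetParams().WithContext(ctx))
	if err != nil {
		return "", err
	}
	switch v := resp.GetPayload().(type) {
	case string:
		return v, nil
	case float64: // encoding/json decodes bare JSON numbers as float64
		return strconv.FormatFloat(v, 'f', -1, 64), nil
	default:
		return fmt.Sprintf("%v", v), nil
	}
}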
+func NewStorageProxyWriteRPCTimeoutPostParams() *StorageProxyWriteRPCTimeoutPostParams { + var () + return &StorageProxyWriteRPCTimeoutPostParams{ + + requestTimeout: cr.DefaultTimeout, + } +} + +// NewStorageProxyWriteRPCTimeoutPostParamsWithTimeout creates a new StorageProxyWriteRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageProxyWriteRPCTimeoutPostParamsWithTimeout(timeout time.Duration) *StorageProxyWriteRPCTimeoutPostParams { + var () + return &StorageProxyWriteRPCTimeoutPostParams{ + + requestTimeout: timeout, + } +} + +// NewStorageProxyWriteRPCTimeoutPostParamsWithContext creates a new StorageProxyWriteRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageProxyWriteRPCTimeoutPostParamsWithContext(ctx context.Context) *StorageProxyWriteRPCTimeoutPostParams { + var () + return &StorageProxyWriteRPCTimeoutPostParams{ + + Context: ctx, + } +} + +// NewStorageProxyWriteRPCTimeoutPostParamsWithHTTPClient creates a new StorageProxyWriteRPCTimeoutPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageProxyWriteRPCTimeoutPostParamsWithHTTPClient(client *http.Client) *StorageProxyWriteRPCTimeoutPostParams { + var () + return &StorageProxyWriteRPCTimeoutPostParams{ + HTTPClient: client, + } +} + +/* +StorageProxyWriteRPCTimeoutPostParams contains all the parameters to send to the API endpoint +for the storage proxy write Rpc timeout post operation typically these are written to a http.Request +*/ +type StorageProxyWriteRPCTimeoutPostParams struct { + + /*Timeout + timeout in seconds + + */ + Timeout string + + requestTimeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithRequestTimeout adds the timeout to the storage proxy write Rpc timeout post params +func (o *StorageProxyWriteRPCTimeoutPostParams) WithRequestTimeout(timeout time.Duration) *StorageProxyWriteRPCTimeoutPostParams { + o.SetRequestTimeout(timeout) + return o +} + +// SetRequestTimeout adds the timeout to the storage proxy write Rpc timeout post params +func (o *StorageProxyWriteRPCTimeoutPostParams) SetRequestTimeout(timeout time.Duration) { + o.requestTimeout = timeout +} + +// WithContext adds the context to the storage proxy write Rpc timeout post params +func (o *StorageProxyWriteRPCTimeoutPostParams) WithContext(ctx context.Context) *StorageProxyWriteRPCTimeoutPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage proxy write Rpc timeout post params +func (o *StorageProxyWriteRPCTimeoutPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage proxy write Rpc timeout post params +func (o *StorageProxyWriteRPCTimeoutPostParams) WithHTTPClient(client *http.Client) *StorageProxyWriteRPCTimeoutPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage proxy write Rpc timeout post params +func (o *StorageProxyWriteRPCTimeoutPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTimeout adds the timeout to the storage proxy write Rpc timeout post params +func (o *StorageProxyWriteRPCTimeoutPostParams) WithTimeout(timeout string) *StorageProxyWriteRPCTimeoutPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage proxy write Rpc timeout 
post params +func (o *StorageProxyWriteRPCTimeoutPostParams) SetTimeout(timeout string) { + o.Timeout = timeout +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageProxyWriteRPCTimeoutPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.requestTimeout); err != nil { + return err + } + var res []error + + // query param timeout + qrTimeout := o.Timeout + qTimeout := qrTimeout + if qTimeout != "" { + if err := r.SetQueryParam("timeout", qTimeout); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_post_responses.go new file mode 100644 index 00000000000..aaacd551a38 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_proxy_write_rpc_timeout_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageProxyWriteRPCTimeoutPostReader is a Reader for the StorageProxyWriteRPCTimeoutPost structure. +type StorageProxyWriteRPCTimeoutPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageProxyWriteRPCTimeoutPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageProxyWriteRPCTimeoutPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageProxyWriteRPCTimeoutPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageProxyWriteRPCTimeoutPostOK creates a StorageProxyWriteRPCTimeoutPostOK with default headers values +func NewStorageProxyWriteRPCTimeoutPostOK() *StorageProxyWriteRPCTimeoutPostOK { + return &StorageProxyWriteRPCTimeoutPostOK{} +} + +/* +StorageProxyWriteRPCTimeoutPostOK handles this case with default header values. + +Success +*/ +type StorageProxyWriteRPCTimeoutPostOK struct { +} + +func (o *StorageProxyWriteRPCTimeoutPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageProxyWriteRPCTimeoutPostDefault creates a StorageProxyWriteRPCTimeoutPostDefault with default headers values +func NewStorageProxyWriteRPCTimeoutPostDefault(code int) *StorageProxyWriteRPCTimeoutPostDefault { + return &StorageProxyWriteRPCTimeoutPostDefault{ + _statusCode: code, + } +} + +/* +StorageProxyWriteRPCTimeoutPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageProxyWriteRPCTimeoutPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage proxy write Rpc timeout post default response +func (o *StorageProxyWriteRPCTimeoutPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageProxyWriteRPCTimeoutPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageProxyWriteRPCTimeoutPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageProxyWriteRPCTimeoutPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_active_repair_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_active_repair_get_parameters.go new file mode 100644 index 00000000000..d90dddca9b6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_active_repair_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceActiveRepairGetParams creates a new StorageServiceActiveRepairGetParams object +// with the default values initialized. 
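As the readers in these files show, every non-2xx reply is funneled into the *Default response type, which both implements error and exposes the server's models.ErrorModel payload, so structured error handling is a type assertion away. A sketch under the same assumptions as above (adds "errors" and "fmt"):

// setWriteRPCTimeout sets the write RPC timeout and unpacks the structured
// error that the generated reader returns for non-2xx responses.
func setWriteRPCTimeout(ctx context.Context, opsClient *operations.Client, seconds string) error {
	params := operations.NewStorageProxyWriteRPCTimeoutPostParams().
		WithTimeout(seconds). // "timeout" query param, in seconds
		WithContext(ctx)
	_, err := opsClient.StorageProxyWriteRPCTimeoutPost(params) // assumed generated client method
	if err != nil {
		var def *operations.StorageProxyWriteRPCTimeoutPostDefault
		if errors.As(err, &def) {
			return fmt.Errorf("scylla returned HTTP %d: %s", def.Code(), def.GetPayload().Message)
		}
		return err
	}
	return nil
}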
+func NewStorageServiceActiveRepairGetParams() *StorageServiceActiveRepairGetParams { + + return &StorageServiceActiveRepairGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceActiveRepairGetParamsWithTimeout creates a new StorageServiceActiveRepairGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceActiveRepairGetParamsWithTimeout(timeout time.Duration) *StorageServiceActiveRepairGetParams { + + return &StorageServiceActiveRepairGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceActiveRepairGetParamsWithContext creates a new StorageServiceActiveRepairGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceActiveRepairGetParamsWithContext(ctx context.Context) *StorageServiceActiveRepairGetParams { + + return &StorageServiceActiveRepairGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceActiveRepairGetParamsWithHTTPClient creates a new StorageServiceActiveRepairGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceActiveRepairGetParamsWithHTTPClient(client *http.Client) *StorageServiceActiveRepairGetParams { + + return &StorageServiceActiveRepairGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceActiveRepairGetParams contains all the parameters to send to the API endpoint +for the storage service active repair get operation typically these are written to a http.Request +*/ +type StorageServiceActiveRepairGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service active repair get params +func (o *StorageServiceActiveRepairGetParams) WithTimeout(timeout time.Duration) *StorageServiceActiveRepairGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service active repair get params +func (o *StorageServiceActiveRepairGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service active repair get params +func (o *StorageServiceActiveRepairGetParams) WithContext(ctx context.Context) *StorageServiceActiveRepairGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service active repair get params +func (o *StorageServiceActiveRepairGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service active repair get params +func (o *StorageServiceActiveRepairGetParams) WithHTTPClient(client *http.Client) *StorageServiceActiveRepairGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service active repair get params +func (o *StorageServiceActiveRepairGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceActiveRepairGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_active_repair_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_active_repair_get_responses.go new file mode 100644 index 00000000000..200a7a06da9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_active_repair_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceActiveRepairGetReader is a Reader for the StorageServiceActiveRepairGet structure. +type StorageServiceActiveRepairGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceActiveRepairGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceActiveRepairGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceActiveRepairGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceActiveRepairGetOK creates a StorageServiceActiveRepairGetOK with default headers values +func NewStorageServiceActiveRepairGetOK() *StorageServiceActiveRepairGetOK { + return &StorageServiceActiveRepairGetOK{} +} + +/* +StorageServiceActiveRepairGetOK handles this case with default header values. + +Success +*/ +type StorageServiceActiveRepairGetOK struct { + Payload []int32 +} + +func (o *StorageServiceActiveRepairGetOK) GetPayload() []int32 { + return o.Payload +} + +func (o *StorageServiceActiveRepairGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceActiveRepairGetDefault creates a StorageServiceActiveRepairGetDefault with default headers values +func NewStorageServiceActiveRepairGetDefault(code int) *StorageServiceActiveRepairGetDefault { + return &StorageServiceActiveRepairGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceActiveRepairGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceActiveRepairGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service active repair get default response +func (o *StorageServiceActiveRepairGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceActiveRepairGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceActiveRepairGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceActiveRepairGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_delete_parameters.go new file mode 100644 index 00000000000..55c477ad410 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_delete_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceAutoCompactionByKeyspaceDeleteParams creates a new StorageServiceAutoCompactionByKeyspaceDeleteParams object +// with the default values initialized. 
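StorageServiceActiveRepairGetOK above is one of the few typed responses in this batch: its payload is []int32, presumably the IDs of repair commands currently running on the node (the spec text says no more than that). A short sketch, same assumptions as above:

// activeRepairs returns the node's in-flight repair command IDs.
func activeRepairs(ctx context.Context, opsClient *operations.Client) ([]int32, error) {
	resp, err := opsClient.StorageServiceActiveRepairGet( // assumed generated client method
		operations.NewStorageServiceActiveRepairGetParams().WithContext(ctx))
	if err != nil {
		return nil, err
	}
	return resp.GetPayload(), nil
}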
+func NewStorageServiceAutoCompactionByKeyspaceDeleteParams() *StorageServiceAutoCompactionByKeyspaceDeleteParams { + var () + return &StorageServiceAutoCompactionByKeyspaceDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceAutoCompactionByKeyspaceDeleteParamsWithTimeout creates a new StorageServiceAutoCompactionByKeyspaceDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceAutoCompactionByKeyspaceDeleteParamsWithTimeout(timeout time.Duration) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + var () + return &StorageServiceAutoCompactionByKeyspaceDeleteParams{ + + timeout: timeout, + } +} + +// NewStorageServiceAutoCompactionByKeyspaceDeleteParamsWithContext creates a new StorageServiceAutoCompactionByKeyspaceDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceAutoCompactionByKeyspaceDeleteParamsWithContext(ctx context.Context) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + var () + return &StorageServiceAutoCompactionByKeyspaceDeleteParams{ + + Context: ctx, + } +} + +// NewStorageServiceAutoCompactionByKeyspaceDeleteParamsWithHTTPClient creates a new StorageServiceAutoCompactionByKeyspaceDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceAutoCompactionByKeyspaceDeleteParamsWithHTTPClient(client *http.Client) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + var () + return &StorageServiceAutoCompactionByKeyspaceDeleteParams{ + HTTPClient: client, + } +} + +/* +StorageServiceAutoCompactionByKeyspaceDeleteParams contains all the parameters to send to the API endpoint +for the storage service auto compaction by keyspace delete operation typically these are written to a http.Request +*/ +type StorageServiceAutoCompactionByKeyspaceDeleteParams struct { + + /*Cf + Comma separated column family names + + */ + Cf *string + /*Keyspace + The keyspace + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) WithTimeout(timeout time.Duration) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) WithContext(ctx context.Context) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) WithHTTPClient(client *http.Client) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service auto
compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) WithCf(cf *string) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithKeyspace adds the keyspace to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) WithKeyspace(keyspace string) *StorageServiceAutoCompactionByKeyspaceDeleteParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service auto compaction by keyspace delete params +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceAutoCompactionByKeyspaceDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_delete_responses.go new file mode 100644 index 00000000000..28ac8590b5b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_delete_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceAutoCompactionByKeyspaceDeleteReader is a Reader for the StorageServiceAutoCompactionByKeyspaceDelete structure. +type StorageServiceAutoCompactionByKeyspaceDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
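Note the parameter shapes in the DELETE params above: Keyspace is a required path parameter, while Cf is an optional comma-separated list passed as a *string, so a nil Cf means "all tables in the keyspace". A sketch, same assumptions as the earlier ones (adds "strings"; the keyspace and table names are whatever the caller supplies):

// disableAutoCompaction turns autocompaction off for a keyspace, optionally
// narrowed to specific tables via the comma-separated "cf" query parameter.
func disableAutoCompaction(ctx context.Context, opsClient *operations.Client, keyspace string, tables ...string) error {
	params := operations.NewStorageServiceAutoCompactionByKeyspaceDeleteParams().
		WithKeyspace(keyspace). // required path parameter
		WithContext(ctx)
	if len(tables) > 0 {
		cf := strings.Join(tables, ",")
		params.SetCf(&cf) // nil Cf (the default) targets every table
	}
	_, err := opsClient.StorageServiceAutoCompactionByKeyspaceDelete(params) // assumed method
	return err
}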
+func (o *StorageServiceAutoCompactionByKeyspaceDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceAutoCompactionByKeyspaceDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceAutoCompactionByKeyspaceDeleteDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceAutoCompactionByKeyspaceDeleteOK creates a StorageServiceAutoCompactionByKeyspaceDeleteOK with default headers values +func NewStorageServiceAutoCompactionByKeyspaceDeleteOK() *StorageServiceAutoCompactionByKeyspaceDeleteOK { + return &StorageServiceAutoCompactionByKeyspaceDeleteOK{} +} + +/* +StorageServiceAutoCompactionByKeyspaceDeleteOK handles this case with default header values. + +Success +*/ +type StorageServiceAutoCompactionByKeyspaceDeleteOK struct { +} + +func (o *StorageServiceAutoCompactionByKeyspaceDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceAutoCompactionByKeyspaceDeleteDefault creates a StorageServiceAutoCompactionByKeyspaceDeleteDefault with default headers values +func NewStorageServiceAutoCompactionByKeyspaceDeleteDefault(code int) *StorageServiceAutoCompactionByKeyspaceDeleteDefault { + return &StorageServiceAutoCompactionByKeyspaceDeleteDefault{ + _statusCode: code, + } +} + +/* +StorageServiceAutoCompactionByKeyspaceDeleteDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceAutoCompactionByKeyspaceDeleteDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service auto compaction by keyspace delete default response +func (o *StorageServiceAutoCompactionByKeyspaceDeleteDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceAutoCompactionByKeyspaceDeleteDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceAutoCompactionByKeyspaceDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceAutoCompactionByKeyspaceDeleteDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_post_parameters.go new file mode 100644 index 00000000000..297316e1071 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_post_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceAutoCompactionByKeyspacePostParams creates a new StorageServiceAutoCompactionByKeyspacePostParams object +// with the default values initialized. +func NewStorageServiceAutoCompactionByKeyspacePostParams() *StorageServiceAutoCompactionByKeyspacePostParams { + var () + return &StorageServiceAutoCompactionByKeyspacePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceAutoCompactionByKeyspacePostParamsWithTimeout creates a new StorageServiceAutoCompactionByKeyspacePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceAutoCompactionByKeyspacePostParamsWithTimeout(timeout time.Duration) *StorageServiceAutoCompactionByKeyspacePostParams { + var () + return &StorageServiceAutoCompactionByKeyspacePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceAutoCompactionByKeyspacePostParamsWithContext creates a new StorageServiceAutoCompactionByKeyspacePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceAutoCompactionByKeyspacePostParamsWithContext(ctx context.Context) *StorageServiceAutoCompactionByKeyspacePostParams { + var () + return &StorageServiceAutoCompactionByKeyspacePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceAutoCompactionByKeyspacePostParamsWithHTTPClient creates a new StorageServiceAutoCompactionByKeyspacePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceAutoCompactionByKeyspacePostParamsWithHTTPClient(client *http.Client) *StorageServiceAutoCompactionByKeyspacePostParams { + var () + return &StorageServiceAutoCompactionByKeyspacePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceAutoCompactionByKeyspacePostParams contains all the parameters to send to the API endpoint +for the storage service auto compaction by keyspace post operation typically these are written to a http.Request +*/ +type StorageServiceAutoCompactionByKeyspacePostParams struct { + + /*Cf + Comma separated column family names + + */ + Cf *string + /*Keyspace + The keyspace + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) WithTimeout(timeout time.Duration) *StorageServiceAutoCompactionByKeyspacePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) WithContext(ctx context.Context) *StorageServiceAutoCompactionByKeyspacePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) SetContext(ctx context.Context) {
o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) WithHTTPClient(client *http.Client) *StorageServiceAutoCompactionByKeyspacePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) WithCf(cf *string) *StorageServiceAutoCompactionByKeyspacePostParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithKeyspace adds the keyspace to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) WithKeyspace(keyspace string) *StorageServiceAutoCompactionByKeyspacePostParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service auto compaction by keyspace post params +func (o *StorageServiceAutoCompactionByKeyspacePostParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceAutoCompactionByKeyspacePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_post_responses.go new file mode 100644 index 00000000000..1b39fb64f74 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_auto_compaction_by_keyspace_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceAutoCompactionByKeyspacePostReader is a Reader for the StorageServiceAutoCompactionByKeyspacePost structure. +type StorageServiceAutoCompactionByKeyspacePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
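The POST params above mirror the DELETE variant field for field, so enabling and disabling autocompaction wrap naturally into one toggle. Sketch, same assumptions as above:

// setAutoCompaction enables (POST) or disables (DELETE) autocompaction for
// a keyspace; both generated endpoints take the same parameters.
func setAutoCompaction(ctx context.Context, opsClient *operations.Client, keyspace string, enabled bool) error {
	if enabled {
		_, err := opsClient.StorageServiceAutoCompactionByKeyspacePost( // assumed method
			operations.NewStorageServiceAutoCompactionByKeyspacePostParams().
				WithKeyspace(keyspace).WithContext(ctx))
		return err
	}
	_, err := opsClient.StorageServiceAutoCompactionByKeyspaceDelete( // assumed method
		operations.NewStorageServiceAutoCompactionByKeyspaceDeleteParams().
			WithKeyspace(keyspace).WithContext(ctx))
	return err
}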
+func (o *StorageServiceAutoCompactionByKeyspacePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceAutoCompactionByKeyspacePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceAutoCompactionByKeyspacePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceAutoCompactionByKeyspacePostOK creates a StorageServiceAutoCompactionByKeyspacePostOK with default headers values +func NewStorageServiceAutoCompactionByKeyspacePostOK() *StorageServiceAutoCompactionByKeyspacePostOK { + return &StorageServiceAutoCompactionByKeyspacePostOK{} +} + +/* +StorageServiceAutoCompactionByKeyspacePostOK handles this case with default header values. + +Success +*/ +type StorageServiceAutoCompactionByKeyspacePostOK struct { +} + +func (o *StorageServiceAutoCompactionByKeyspacePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceAutoCompactionByKeyspacePostDefault creates a StorageServiceAutoCompactionByKeyspacePostDefault with default headers values +func NewStorageServiceAutoCompactionByKeyspacePostDefault(code int) *StorageServiceAutoCompactionByKeyspacePostDefault { + return &StorageServiceAutoCompactionByKeyspacePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceAutoCompactionByKeyspacePostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceAutoCompactionByKeyspacePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service auto compaction by keyspace post default response +func (o *StorageServiceAutoCompactionByKeyspacePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceAutoCompactionByKeyspacePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceAutoCompactionByKeyspacePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceAutoCompactionByKeyspacePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_get_parameters.go new file mode 100644 index 00000000000..c8055645e98 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceBatchSizeFailureThresholdGetParams creates a new StorageServiceBatchSizeFailureThresholdGetParams object +// with the default values initialized. +func NewStorageServiceBatchSizeFailureThresholdGetParams() *StorageServiceBatchSizeFailureThresholdGetParams { + + return &StorageServiceBatchSizeFailureThresholdGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceBatchSizeFailureThresholdGetParamsWithTimeout creates a new StorageServiceBatchSizeFailureThresholdGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceBatchSizeFailureThresholdGetParamsWithTimeout(timeout time.Duration) *StorageServiceBatchSizeFailureThresholdGetParams { + + return &StorageServiceBatchSizeFailureThresholdGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceBatchSizeFailureThresholdGetParamsWithContext creates a new StorageServiceBatchSizeFailureThresholdGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceBatchSizeFailureThresholdGetParamsWithContext(ctx context.Context) *StorageServiceBatchSizeFailureThresholdGetParams { + + return &StorageServiceBatchSizeFailureThresholdGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceBatchSizeFailureThresholdGetParamsWithHTTPClient creates a new StorageServiceBatchSizeFailureThresholdGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceBatchSizeFailureThresholdGetParamsWithHTTPClient(client *http.Client) *StorageServiceBatchSizeFailureThresholdGetParams { + + return &StorageServiceBatchSizeFailureThresholdGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceBatchSizeFailureThresholdGetParams contains all the parameters to send to the API endpoint +for the storage service batch size failure threshold get operation typically these are written to a http.Request +*/ +type StorageServiceBatchSizeFailureThresholdGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service batch size failure threshold get params +func (o *StorageServiceBatchSizeFailureThresholdGetParams) WithTimeout(timeout time.Duration) *StorageServiceBatchSizeFailureThresholdGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service batch size failure threshold get params +func (o *StorageServiceBatchSizeFailureThresholdGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service batch size failure threshold get params +func (o *StorageServiceBatchSizeFailureThresholdGetParams) WithContext(ctx context.Context) *StorageServiceBatchSizeFailureThresholdGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service batch size failure threshold get params +func (o *StorageServiceBatchSizeFailureThresholdGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service batch size failure threshold get params +func (o 
*StorageServiceBatchSizeFailureThresholdGetParams) WithHTTPClient(client *http.Client) *StorageServiceBatchSizeFailureThresholdGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service batch size failure threshold get params +func (o *StorageServiceBatchSizeFailureThresholdGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceBatchSizeFailureThresholdGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_get_responses.go new file mode 100644 index 00000000000..5c3595be0e6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceBatchSizeFailureThresholdGetReader is a Reader for the StorageServiceBatchSizeFailureThresholdGet structure. +type StorageServiceBatchSizeFailureThresholdGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceBatchSizeFailureThresholdGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceBatchSizeFailureThresholdGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceBatchSizeFailureThresholdGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceBatchSizeFailureThresholdGetOK creates a StorageServiceBatchSizeFailureThresholdGetOK with default headers values +func NewStorageServiceBatchSizeFailureThresholdGetOK() *StorageServiceBatchSizeFailureThresholdGetOK { + return &StorageServiceBatchSizeFailureThresholdGetOK{} +} + +/* +StorageServiceBatchSizeFailureThresholdGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceBatchSizeFailureThresholdGetOK struct { + Payload int32 +} + +func (o *StorageServiceBatchSizeFailureThresholdGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceBatchSizeFailureThresholdGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceBatchSizeFailureThresholdGetDefault creates a StorageServiceBatchSizeFailureThresholdGetDefault with default headers values +func NewStorageServiceBatchSizeFailureThresholdGetDefault(code int) *StorageServiceBatchSizeFailureThresholdGetDefault { + return &StorageServiceBatchSizeFailureThresholdGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceBatchSizeFailureThresholdGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceBatchSizeFailureThresholdGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service batch size failure threshold get default response +func (o *StorageServiceBatchSizeFailureThresholdGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceBatchSizeFailureThresholdGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceBatchSizeFailureThresholdGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceBatchSizeFailureThresholdGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_post_parameters.go new file mode 100644 index 00000000000..39e45e6cebe --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceBatchSizeFailureThresholdPostParams creates a new StorageServiceBatchSizeFailureThresholdPostParams object +// with the default values initialized. 
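Unlike the untyped write-RPC-timeout GET earlier, the batch size failure threshold GET above is fully typed: the OK payload is a plain int32, so no type switching is needed. Sketch, same assumptions:

// batchSizeFailureThreshold reads the node's current batch size failure
// threshold from the typed int32 payload.
func batchSizeFailureThreshold(ctx context.Context, opsClient *operations.Client) (int32, error) {
	resp, err := opsClient.StorageServiceBatchSizeFailureThresholdGet( // assumed generated client method
		operations.NewStorageServiceBatchSizeFailureThresholdGetParams().WithContext(ctx))
	if err != nil {
		return 0, err
	}
	return resp.GetPayload(), nil
}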
+func NewStorageServiceBatchSizeFailureThresholdPostParams() *StorageServiceBatchSizeFailureThresholdPostParams { + var () + return &StorageServiceBatchSizeFailureThresholdPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceBatchSizeFailureThresholdPostParamsWithTimeout creates a new StorageServiceBatchSizeFailureThresholdPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceBatchSizeFailureThresholdPostParamsWithTimeout(timeout time.Duration) *StorageServiceBatchSizeFailureThresholdPostParams { + var () + return &StorageServiceBatchSizeFailureThresholdPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceBatchSizeFailureThresholdPostParamsWithContext creates a new StorageServiceBatchSizeFailureThresholdPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceBatchSizeFailureThresholdPostParamsWithContext(ctx context.Context) *StorageServiceBatchSizeFailureThresholdPostParams { + var () + return &StorageServiceBatchSizeFailureThresholdPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceBatchSizeFailureThresholdPostParamsWithHTTPClient creates a new StorageServiceBatchSizeFailureThresholdPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceBatchSizeFailureThresholdPostParamsWithHTTPClient(client *http.Client) *StorageServiceBatchSizeFailureThresholdPostParams { + var () + return &StorageServiceBatchSizeFailureThresholdPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceBatchSizeFailureThresholdPostParams contains all the parameters to send to the API endpoint +for the storage service batch size failure threshold post operation typically these are written to a http.Request +*/ +type StorageServiceBatchSizeFailureThresholdPostParams struct { + + /*Threshold + batch size debug threshold + + */ + Threshold int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service batch size failure threshold post params +func (o *StorageServiceBatchSizeFailureThresholdPostParams) WithTimeout(timeout time.Duration) *StorageServiceBatchSizeFailureThresholdPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service batch size failure threshold post params +func (o *StorageServiceBatchSizeFailureThresholdPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service batch size failure threshold post params +func (o *StorageServiceBatchSizeFailureThresholdPostParams) WithContext(ctx context.Context) *StorageServiceBatchSizeFailureThresholdPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service batch size failure threshold post params +func (o *StorageServiceBatchSizeFailureThresholdPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service batch size failure threshold post params +func (o *StorageServiceBatchSizeFailureThresholdPostParams) WithHTTPClient(client *http.Client) *StorageServiceBatchSizeFailureThresholdPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service batch size failure threshold post params +func (o 
*StorageServiceBatchSizeFailureThresholdPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithThreshold adds the threshold to the storage service batch size failure threshold post params +func (o *StorageServiceBatchSizeFailureThresholdPostParams) WithThreshold(threshold int32) *StorageServiceBatchSizeFailureThresholdPostParams { + o.SetThreshold(threshold) + return o +} + +// SetThreshold adds the threshold to the storage service batch size failure threshold post params +func (o *StorageServiceBatchSizeFailureThresholdPostParams) SetThreshold(threshold int32) { + o.Threshold = threshold +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceBatchSizeFailureThresholdPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param threshold + qrThreshold := o.Threshold + qThreshold := swag.FormatInt32(qrThreshold) + if qThreshold != "" { + if err := r.SetQueryParam("threshold", qThreshold); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_post_responses.go new file mode 100644 index 00000000000..0fb9d8b447f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_batch_size_failure_threshold_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceBatchSizeFailureThresholdPostReader is a Reader for the StorageServiceBatchSizeFailureThresholdPost structure. +type StorageServiceBatchSizeFailureThresholdPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceBatchSizeFailureThresholdPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceBatchSizeFailureThresholdPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceBatchSizeFailureThresholdPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceBatchSizeFailureThresholdPostOK creates a StorageServiceBatchSizeFailureThresholdPostOK with default headers values +func NewStorageServiceBatchSizeFailureThresholdPostOK() *StorageServiceBatchSizeFailureThresholdPostOK { + return &StorageServiceBatchSizeFailureThresholdPostOK{} +} + +/* +StorageServiceBatchSizeFailureThresholdPostOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceBatchSizeFailureThresholdPostOK struct { +} + +func (o *StorageServiceBatchSizeFailureThresholdPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceBatchSizeFailureThresholdPostDefault creates a StorageServiceBatchSizeFailureThresholdPostDefault with default headers values +func NewStorageServiceBatchSizeFailureThresholdPostDefault(code int) *StorageServiceBatchSizeFailureThresholdPostDefault { + return &StorageServiceBatchSizeFailureThresholdPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceBatchSizeFailureThresholdPostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceBatchSizeFailureThresholdPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service batch size failure threshold post default response +func (o *StorageServiceBatchSizeFailureThresholdPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceBatchSizeFailureThresholdPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceBatchSizeFailureThresholdPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceBatchSizeFailureThresholdPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_async_by_path_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_async_by_path_get_parameters.go new file mode 100644 index 00000000000..b59a49f3ae6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_async_by_path_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceBulkLoadAsyncByPathGetParams creates a new StorageServiceBulkLoadAsyncByPathGetParams object +// with the default values initialized. 
+func NewStorageServiceBulkLoadAsyncByPathGetParams() *StorageServiceBulkLoadAsyncByPathGetParams { + var () + return &StorageServiceBulkLoadAsyncByPathGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceBulkLoadAsyncByPathGetParamsWithTimeout creates a new StorageServiceBulkLoadAsyncByPathGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceBulkLoadAsyncByPathGetParamsWithTimeout(timeout time.Duration) *StorageServiceBulkLoadAsyncByPathGetParams { + var () + return &StorageServiceBulkLoadAsyncByPathGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceBulkLoadAsyncByPathGetParamsWithContext creates a new StorageServiceBulkLoadAsyncByPathGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceBulkLoadAsyncByPathGetParamsWithContext(ctx context.Context) *StorageServiceBulkLoadAsyncByPathGetParams { + var () + return &StorageServiceBulkLoadAsyncByPathGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceBulkLoadAsyncByPathGetParamsWithHTTPClient creates a new StorageServiceBulkLoadAsyncByPathGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceBulkLoadAsyncByPathGetParamsWithHTTPClient(client *http.Client) *StorageServiceBulkLoadAsyncByPathGetParams { + var () + return &StorageServiceBulkLoadAsyncByPathGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceBulkLoadAsyncByPathGetParams contains all the parameters to send to the API endpoint +for the storage service bulk load async by path get operation typically these are written to a http.Request +*/ +type StorageServiceBulkLoadAsyncByPathGetParams struct { + + /*Path + Path to directory to load from + + */ + Path string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) WithTimeout(timeout time.Duration) *StorageServiceBulkLoadAsyncByPathGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) WithContext(ctx context.Context) *StorageServiceBulkLoadAsyncByPathGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) WithHTTPClient(client *http.Client) *StorageServiceBulkLoadAsyncByPathGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPath adds the path to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) WithPath(path string) 
*StorageServiceBulkLoadAsyncByPathGetParams { + o.SetPath(path) + return o +} + +// SetPath adds the path to the storage service bulk load async by path get params +func (o *StorageServiceBulkLoadAsyncByPathGetParams) SetPath(path string) { + o.Path = path +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceBulkLoadAsyncByPathGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param path + if err := r.SetPathParam("path", o.Path); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_async_by_path_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_async_by_path_get_responses.go new file mode 100644 index 00000000000..1951a32a3c1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_async_by_path_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceBulkLoadAsyncByPathGetReader is a Reader for the StorageServiceBulkLoadAsyncByPathGet structure. +type StorageServiceBulkLoadAsyncByPathGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceBulkLoadAsyncByPathGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceBulkLoadAsyncByPathGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceBulkLoadAsyncByPathGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceBulkLoadAsyncByPathGetOK creates a StorageServiceBulkLoadAsyncByPathGetOK with default headers values +func NewStorageServiceBulkLoadAsyncByPathGetOK() *StorageServiceBulkLoadAsyncByPathGetOK { + return &StorageServiceBulkLoadAsyncByPathGetOK{} +} + +/* +StorageServiceBulkLoadAsyncByPathGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceBulkLoadAsyncByPathGetOK struct { + Payload string +} + +func (o *StorageServiceBulkLoadAsyncByPathGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceBulkLoadAsyncByPathGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceBulkLoadAsyncByPathGetDefault creates a StorageServiceBulkLoadAsyncByPathGetDefault with default headers values +func NewStorageServiceBulkLoadAsyncByPathGetDefault(code int) *StorageServiceBulkLoadAsyncByPathGetDefault { + return &StorageServiceBulkLoadAsyncByPathGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceBulkLoadAsyncByPathGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceBulkLoadAsyncByPathGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service bulk load async by path get default response +func (o *StorageServiceBulkLoadAsyncByPathGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceBulkLoadAsyncByPathGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceBulkLoadAsyncByPathGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceBulkLoadAsyncByPathGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_by_path_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_by_path_post_parameters.go new file mode 100644 index 00000000000..80eb4eff24f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_by_path_post_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceBulkLoadByPathPostParams creates a new StorageServiceBulkLoadByPathPostParams object +// with the default values initialized. 
+func NewStorageServiceBulkLoadByPathPostParams() *StorageServiceBulkLoadByPathPostParams { + var () + return &StorageServiceBulkLoadByPathPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceBulkLoadByPathPostParamsWithTimeout creates a new StorageServiceBulkLoadByPathPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceBulkLoadByPathPostParamsWithTimeout(timeout time.Duration) *StorageServiceBulkLoadByPathPostParams { + var () + return &StorageServiceBulkLoadByPathPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceBulkLoadByPathPostParamsWithContext creates a new StorageServiceBulkLoadByPathPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceBulkLoadByPathPostParamsWithContext(ctx context.Context) *StorageServiceBulkLoadByPathPostParams { + var () + return &StorageServiceBulkLoadByPathPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceBulkLoadByPathPostParamsWithHTTPClient creates a new StorageServiceBulkLoadByPathPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceBulkLoadByPathPostParamsWithHTTPClient(client *http.Client) *StorageServiceBulkLoadByPathPostParams { + var () + return &StorageServiceBulkLoadByPathPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceBulkLoadByPathPostParams contains all the parameters to send to the API endpoint +for the storage service bulk load by path post operation typically these are written to a http.Request +*/ +type StorageServiceBulkLoadByPathPostParams struct { + + /*Path + Path to directory to load from + + */ + Path string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service bulk load by path post params +func (o *StorageServiceBulkLoadByPathPostParams) WithTimeout(timeout time.Duration) *StorageServiceBulkLoadByPathPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service bulk load by path post params +func (o *StorageServiceBulkLoadByPathPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service bulk load by path post params +func (o *StorageServiceBulkLoadByPathPostParams) WithContext(ctx context.Context) *StorageServiceBulkLoadByPathPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service bulk load by path post params +func (o *StorageServiceBulkLoadByPathPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service bulk load by path post params +func (o *StorageServiceBulkLoadByPathPostParams) WithHTTPClient(client *http.Client) *StorageServiceBulkLoadByPathPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service bulk load by path post params +func (o *StorageServiceBulkLoadByPathPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPath adds the path to the storage service bulk load by path post params +func (o *StorageServiceBulkLoadByPathPostParams) WithPath(path string) *StorageServiceBulkLoadByPathPostParams { + o.SetPath(path) + return o +} + +// SetPath adds the path to the storage service bulk load by path post params +func (o 
*StorageServiceBulkLoadByPathPostParams) SetPath(path string) { + o.Path = path +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceBulkLoadByPathPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param path + if err := r.SetPathParam("path", o.Path); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_by_path_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_by_path_post_responses.go new file mode 100644 index 00000000000..f21a602b6f6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_bulk_load_by_path_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceBulkLoadByPathPostReader is a Reader for the StorageServiceBulkLoadByPathPost structure. +type StorageServiceBulkLoadByPathPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceBulkLoadByPathPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceBulkLoadByPathPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceBulkLoadByPathPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceBulkLoadByPathPostOK creates a StorageServiceBulkLoadByPathPostOK with default headers values +func NewStorageServiceBulkLoadByPathPostOK() *StorageServiceBulkLoadByPathPostOK { + return &StorageServiceBulkLoadByPathPostOK{} +} + +/* +StorageServiceBulkLoadByPathPostOK handles this case with default header values. + +Success +*/ +type StorageServiceBulkLoadByPathPostOK struct { +} + +func (o *StorageServiceBulkLoadByPathPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceBulkLoadByPathPostDefault creates a StorageServiceBulkLoadByPathPostDefault with default headers values +func NewStorageServiceBulkLoadByPathPostDefault(code int) *StorageServiceBulkLoadByPathPostDefault { + return &StorageServiceBulkLoadByPathPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceBulkLoadByPathPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceBulkLoadByPathPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service bulk load by path post default response +func (o *StorageServiceBulkLoadByPathPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceBulkLoadByPathPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceBulkLoadByPathPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceBulkLoadByPathPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_cluster_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_cluster_name_get_parameters.go new file mode 100644 index 00000000000..b51a1695d99 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_cluster_name_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceClusterNameGetParams creates a new StorageServiceClusterNameGetParams object +// with the default values initialized. 
+func NewStorageServiceClusterNameGetParams() *StorageServiceClusterNameGetParams { + + return &StorageServiceClusterNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceClusterNameGetParamsWithTimeout creates a new StorageServiceClusterNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceClusterNameGetParamsWithTimeout(timeout time.Duration) *StorageServiceClusterNameGetParams { + + return &StorageServiceClusterNameGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceClusterNameGetParamsWithContext creates a new StorageServiceClusterNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceClusterNameGetParamsWithContext(ctx context.Context) *StorageServiceClusterNameGetParams { + + return &StorageServiceClusterNameGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceClusterNameGetParamsWithHTTPClient creates a new StorageServiceClusterNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceClusterNameGetParamsWithHTTPClient(client *http.Client) *StorageServiceClusterNameGetParams { + + return &StorageServiceClusterNameGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceClusterNameGetParams contains all the parameters to send to the API endpoint +for the storage service cluster name get operation typically these are written to a http.Request +*/ +type StorageServiceClusterNameGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service cluster name get params +func (o *StorageServiceClusterNameGetParams) WithTimeout(timeout time.Duration) *StorageServiceClusterNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service cluster name get params +func (o *StorageServiceClusterNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service cluster name get params +func (o *StorageServiceClusterNameGetParams) WithContext(ctx context.Context) *StorageServiceClusterNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service cluster name get params +func (o *StorageServiceClusterNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service cluster name get params +func (o *StorageServiceClusterNameGetParams) WithHTTPClient(client *http.Client) *StorageServiceClusterNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service cluster name get params +func (o *StorageServiceClusterNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceClusterNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_cluster_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_cluster_name_get_responses.go new file mode 100644 index 00000000000..120f63bdbdf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_cluster_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceClusterNameGetReader is a Reader for the StorageServiceClusterNameGet structure. +type StorageServiceClusterNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceClusterNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceClusterNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceClusterNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceClusterNameGetOK creates a StorageServiceClusterNameGetOK with default headers values +func NewStorageServiceClusterNameGetOK() *StorageServiceClusterNameGetOK { + return &StorageServiceClusterNameGetOK{} +} + +/* +StorageServiceClusterNameGetOK handles this case with default header values. + +Success +*/ +type StorageServiceClusterNameGetOK struct { + Payload string +} + +func (o *StorageServiceClusterNameGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceClusterNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceClusterNameGetDefault creates a StorageServiceClusterNameGetDefault with default headers values +func NewStorageServiceClusterNameGetDefault(code int) *StorageServiceClusterNameGetDefault { + return &StorageServiceClusterNameGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceClusterNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceClusterNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service cluster name get default response +func (o *StorageServiceClusterNameGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceClusterNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceClusterNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceClusterNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_commitlog_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_commitlog_get_parameters.go new file mode 100644 index 00000000000..ad32804c422 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_commitlog_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceCommitlogGetParams creates a new StorageServiceCommitlogGetParams object +// with the default values initialized. 
+func NewStorageServiceCommitlogGetParams() *StorageServiceCommitlogGetParams { + + return &StorageServiceCommitlogGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceCommitlogGetParamsWithTimeout creates a new StorageServiceCommitlogGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceCommitlogGetParamsWithTimeout(timeout time.Duration) *StorageServiceCommitlogGetParams { + + return &StorageServiceCommitlogGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceCommitlogGetParamsWithContext creates a new StorageServiceCommitlogGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceCommitlogGetParamsWithContext(ctx context.Context) *StorageServiceCommitlogGetParams { + + return &StorageServiceCommitlogGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceCommitlogGetParamsWithHTTPClient creates a new StorageServiceCommitlogGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceCommitlogGetParamsWithHTTPClient(client *http.Client) *StorageServiceCommitlogGetParams { + + return &StorageServiceCommitlogGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceCommitlogGetParams contains all the parameters to send to the API endpoint +for the storage service commitlog get operation typically these are written to a http.Request +*/ +type StorageServiceCommitlogGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service commitlog get params +func (o *StorageServiceCommitlogGetParams) WithTimeout(timeout time.Duration) *StorageServiceCommitlogGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service commitlog get params +func (o *StorageServiceCommitlogGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service commitlog get params +func (o *StorageServiceCommitlogGetParams) WithContext(ctx context.Context) *StorageServiceCommitlogGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service commitlog get params +func (o *StorageServiceCommitlogGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service commitlog get params +func (o *StorageServiceCommitlogGetParams) WithHTTPClient(client *http.Client) *StorageServiceCommitlogGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service commitlog get params +func (o *StorageServiceCommitlogGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceCommitlogGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_commitlog_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_commitlog_get_responses.go new file mode 100644 index 00000000000..3cd340cf42b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_commitlog_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceCommitlogGetReader is a Reader for the StorageServiceCommitlogGet structure. +type StorageServiceCommitlogGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceCommitlogGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceCommitlogGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceCommitlogGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceCommitlogGetOK creates a StorageServiceCommitlogGetOK with default headers values +func NewStorageServiceCommitlogGetOK() *StorageServiceCommitlogGetOK { + return &StorageServiceCommitlogGetOK{} +} + +/* +StorageServiceCommitlogGetOK handles this case with default header values. + +Success +*/ +type StorageServiceCommitlogGetOK struct { + Payload string +} + +func (o *StorageServiceCommitlogGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceCommitlogGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceCommitlogGetDefault creates a StorageServiceCommitlogGetDefault with default headers values +func NewStorageServiceCommitlogGetDefault(code int) *StorageServiceCommitlogGetDefault { + return &StorageServiceCommitlogGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceCommitlogGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceCommitlogGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service commitlog get default response +func (o *StorageServiceCommitlogGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceCommitlogGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceCommitlogGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceCommitlogGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_get_parameters.go new file mode 100644 index 00000000000..5f43f01acfd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceCompactionThroughputGetParams creates a new StorageServiceCompactionThroughputGetParams object +// with the default values initialized. 
+func NewStorageServiceCompactionThroughputGetParams() *StorageServiceCompactionThroughputGetParams { + + return &StorageServiceCompactionThroughputGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceCompactionThroughputGetParamsWithTimeout creates a new StorageServiceCompactionThroughputGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceCompactionThroughputGetParamsWithTimeout(timeout time.Duration) *StorageServiceCompactionThroughputGetParams { + + return &StorageServiceCompactionThroughputGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceCompactionThroughputGetParamsWithContext creates a new StorageServiceCompactionThroughputGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceCompactionThroughputGetParamsWithContext(ctx context.Context) *StorageServiceCompactionThroughputGetParams { + + return &StorageServiceCompactionThroughputGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceCompactionThroughputGetParamsWithHTTPClient creates a new StorageServiceCompactionThroughputGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceCompactionThroughputGetParamsWithHTTPClient(client *http.Client) *StorageServiceCompactionThroughputGetParams { + + return &StorageServiceCompactionThroughputGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceCompactionThroughputGetParams contains all the parameters to send to the API endpoint +for the storage service compaction throughput get operation typically these are written to a http.Request +*/ +type StorageServiceCompactionThroughputGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service compaction throughput get params +func (o *StorageServiceCompactionThroughputGetParams) WithTimeout(timeout time.Duration) *StorageServiceCompactionThroughputGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service compaction throughput get params +func (o *StorageServiceCompactionThroughputGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service compaction throughput get params +func (o *StorageServiceCompactionThroughputGetParams) WithContext(ctx context.Context) *StorageServiceCompactionThroughputGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service compaction throughput get params +func (o *StorageServiceCompactionThroughputGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service compaction throughput get params +func (o *StorageServiceCompactionThroughputGetParams) WithHTTPClient(client *http.Client) *StorageServiceCompactionThroughputGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service compaction throughput get params +func (o *StorageServiceCompactionThroughputGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceCompactionThroughputGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var 
res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_get_responses.go new file mode 100644 index 00000000000..31536bf45fc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceCompactionThroughputGetReader is a Reader for the StorageServiceCompactionThroughputGet structure. +type StorageServiceCompactionThroughputGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceCompactionThroughputGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceCompactionThroughputGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceCompactionThroughputGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceCompactionThroughputGetOK creates a StorageServiceCompactionThroughputGetOK with default headers values +func NewStorageServiceCompactionThroughputGetOK() *StorageServiceCompactionThroughputGetOK { + return &StorageServiceCompactionThroughputGetOK{} +} + +/* +StorageServiceCompactionThroughputGetOK handles this case with default header values. + +Success +*/ +type StorageServiceCompactionThroughputGetOK struct { + Payload int32 +} + +func (o *StorageServiceCompactionThroughputGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceCompactionThroughputGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceCompactionThroughputGetDefault creates a StorageServiceCompactionThroughputGetDefault with default headers values +func NewStorageServiceCompactionThroughputGetDefault(code int) *StorageServiceCompactionThroughputGetDefault { + return &StorageServiceCompactionThroughputGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceCompactionThroughputGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceCompactionThroughputGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service compaction throughput get default response +func (o *StorageServiceCompactionThroughputGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceCompactionThroughputGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceCompactionThroughputGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceCompactionThroughputGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_post_parameters.go new file mode 100644 index 00000000000..d41f18c9c89 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceCompactionThroughputPostParams creates a new StorageServiceCompactionThroughputPostParams object +// with the default values initialized. 
+func NewStorageServiceCompactionThroughputPostParams() *StorageServiceCompactionThroughputPostParams { + var () + return &StorageServiceCompactionThroughputPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceCompactionThroughputPostParamsWithTimeout creates a new StorageServiceCompactionThroughputPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceCompactionThroughputPostParamsWithTimeout(timeout time.Duration) *StorageServiceCompactionThroughputPostParams { + var () + return &StorageServiceCompactionThroughputPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceCompactionThroughputPostParamsWithContext creates a new StorageServiceCompactionThroughputPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceCompactionThroughputPostParamsWithContext(ctx context.Context) *StorageServiceCompactionThroughputPostParams { + var () + return &StorageServiceCompactionThroughputPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceCompactionThroughputPostParamsWithHTTPClient creates a new StorageServiceCompactionThroughputPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceCompactionThroughputPostParamsWithHTTPClient(client *http.Client) *StorageServiceCompactionThroughputPostParams { + var () + return &StorageServiceCompactionThroughputPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceCompactionThroughputPostParams contains all the parameters to send to the API endpoint +for the storage service compaction throughput post operation typically these are written to a http.Request +*/ +type StorageServiceCompactionThroughputPostParams struct { + + /*Value + compaction throughput + + */ + Value int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service compaction throughput post params +func (o *StorageServiceCompactionThroughputPostParams) WithTimeout(timeout time.Duration) *StorageServiceCompactionThroughputPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service compaction throughput post params +func (o *StorageServiceCompactionThroughputPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service compaction throughput post params +func (o *StorageServiceCompactionThroughputPostParams) WithContext(ctx context.Context) *StorageServiceCompactionThroughputPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service compaction throughput post params +func (o *StorageServiceCompactionThroughputPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service compaction throughput post params +func (o *StorageServiceCompactionThroughputPostParams) WithHTTPClient(client *http.Client) *StorageServiceCompactionThroughputPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service compaction throughput post params +func (o *StorageServiceCompactionThroughputPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithValue adds the value to the storage service compaction throughput post params +func (o 
*StorageServiceCompactionThroughputPostParams) WithValue(value int32) *StorageServiceCompactionThroughputPostParams { + o.SetValue(value) + return o +} + +// SetValue adds the value to the storage service compaction throughput post params +func (o *StorageServiceCompactionThroughputPostParams) SetValue(value int32) { + o.Value = value +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceCompactionThroughputPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param value + qrValue := o.Value + qValue := swag.FormatInt32(qrValue) + if qValue != "" { + if err := r.SetQueryParam("value", qValue); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_post_responses.go new file mode 100644 index 00000000000..cbaee26e86b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_compaction_throughput_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceCompactionThroughputPostReader is a Reader for the StorageServiceCompactionThroughputPost structure. +type StorageServiceCompactionThroughputPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceCompactionThroughputPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceCompactionThroughputPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceCompactionThroughputPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceCompactionThroughputPostOK creates a StorageServiceCompactionThroughputPostOK with default headers values +func NewStorageServiceCompactionThroughputPostOK() *StorageServiceCompactionThroughputPostOK { + return &StorageServiceCompactionThroughputPostOK{} +} + +/* +StorageServiceCompactionThroughputPostOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceCompactionThroughputPostOK struct { +} + +func (o *StorageServiceCompactionThroughputPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceCompactionThroughputPostDefault creates a StorageServiceCompactionThroughputPostDefault with default headers values +func NewStorageServiceCompactionThroughputPostDefault(code int) *StorageServiceCompactionThroughputPostDefault { + return &StorageServiceCompactionThroughputPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceCompactionThroughputPostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceCompactionThroughputPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service compaction throughput post default response +func (o *StorageServiceCompactionThroughputPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceCompactionThroughputPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceCompactionThroughputPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceCompactionThroughputPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_data_file_locations_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_data_file_locations_get_parameters.go new file mode 100644 index 00000000000..696b3f8e50d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_data_file_locations_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceDataFileLocationsGetParams creates a new StorageServiceDataFileLocationsGetParams object +// with the default values initialized. 
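The compaction-throughput pair above follows the standard go-swagger shape: a fluent params builder plus a reader that maps 200 to a typed OK result. Note that `WriteToRequest` formats the required int32 with `swag.FormatInt32`, whose output is never the empty string, so the `value` query parameter is always sent, including for 0. A minimal usage sketch; the `ops` variable and the `StorageServiceCompactionThroughputPost` method assume the conventional `operations.Client` that go-swagger emits alongside these files, which this hunk does not show:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// setCompactionThroughput caps compaction bandwidth via the generated params
// builder. ops stands in for the generated operations.Client (assumed here).
func setCompactionThroughput(ctx context.Context, ops *operations.Client, mbPerSec int32) error {
	params := operations.NewStorageServiceCompactionThroughputPostParams().
		WithContext(ctx).
		WithTimeout(30 * time.Second).
		WithValue(mbPerSec) // serialized into the "value" query parameter
	if _, err := ops.StorageServiceCompactionThroughputPost(params); err != nil {
		return fmt.Errorf("set compaction throughput: %w", err)
	}
	return nil
}
```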
+func NewStorageServiceDataFileLocationsGetParams() *StorageServiceDataFileLocationsGetParams { + + return &StorageServiceDataFileLocationsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceDataFileLocationsGetParamsWithTimeout creates a new StorageServiceDataFileLocationsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceDataFileLocationsGetParamsWithTimeout(timeout time.Duration) *StorageServiceDataFileLocationsGetParams { + + return &StorageServiceDataFileLocationsGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceDataFileLocationsGetParamsWithContext creates a new StorageServiceDataFileLocationsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceDataFileLocationsGetParamsWithContext(ctx context.Context) *StorageServiceDataFileLocationsGetParams { + + return &StorageServiceDataFileLocationsGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceDataFileLocationsGetParamsWithHTTPClient creates a new StorageServiceDataFileLocationsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceDataFileLocationsGetParamsWithHTTPClient(client *http.Client) *StorageServiceDataFileLocationsGetParams { + + return &StorageServiceDataFileLocationsGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceDataFileLocationsGetParams contains all the parameters to send to the API endpoint +for the storage service data file locations get operation typically these are written to a http.Request +*/ +type StorageServiceDataFileLocationsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service data file locations get params +func (o *StorageServiceDataFileLocationsGetParams) WithTimeout(timeout time.Duration) *StorageServiceDataFileLocationsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service data file locations get params +func (o *StorageServiceDataFileLocationsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service data file locations get params +func (o *StorageServiceDataFileLocationsGetParams) WithContext(ctx context.Context) *StorageServiceDataFileLocationsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service data file locations get params +func (o *StorageServiceDataFileLocationsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service data file locations get params +func (o *StorageServiceDataFileLocationsGetParams) WithHTTPClient(client *http.Client) *StorageServiceDataFileLocationsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service data file locations get params +func (o *StorageServiceDataFileLocationsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceDataFileLocationsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_data_file_locations_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_data_file_locations_get_responses.go new file mode 100644 index 00000000000..73a3f955ea1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_data_file_locations_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceDataFileLocationsGetReader is a Reader for the StorageServiceDataFileLocationsGet structure. +type StorageServiceDataFileLocationsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceDataFileLocationsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceDataFileLocationsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceDataFileLocationsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceDataFileLocationsGetOK creates a StorageServiceDataFileLocationsGetOK with default headers values +func NewStorageServiceDataFileLocationsGetOK() *StorageServiceDataFileLocationsGetOK { + return &StorageServiceDataFileLocationsGetOK{} +} + +/* +StorageServiceDataFileLocationsGetOK handles this case with default header values. + +Success +*/ +type StorageServiceDataFileLocationsGetOK struct { + Payload []string +} + +func (o *StorageServiceDataFileLocationsGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceDataFileLocationsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceDataFileLocationsGetDefault creates a StorageServiceDataFileLocationsGetDefault with default headers values +func NewStorageServiceDataFileLocationsGetDefault(code int) *StorageServiceDataFileLocationsGetDefault { + return &StorageServiceDataFileLocationsGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceDataFileLocationsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceDataFileLocationsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service data file locations get default response +func (o *StorageServiceDataFileLocationsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceDataFileLocationsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceDataFileLocationsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceDataFileLocationsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_decommission_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_decommission_post_parameters.go new file mode 100644 index 00000000000..16981ed389a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_decommission_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceDecommissionPostParams creates a new StorageServiceDecommissionPostParams object +// with the default values initialized. 
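The data-file-locations operation above takes no parameters and returns the node's data directories as a `[]string` payload. A short sketch under the same assumption as before (a conventional generated `operations.Client`):

```go
package main

import (
	"context"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// printDataDirs lists the node's data file directories. ops is a placeholder
// for the generated operations.Client, as in the earlier sketch.
func printDataDirs(ctx context.Context, ops *operations.Client) error {
	resp, err := ops.StorageServiceDataFileLocationsGet(
		operations.NewStorageServiceDataFileLocationsGetParams().WithContext(ctx))
	if err != nil {
		return err
	}
	for _, dir := range resp.GetPayload() { // Payload is []string
		fmt.Println(dir)
	}
	return nil
}
```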
+func NewStorageServiceDecommissionPostParams() *StorageServiceDecommissionPostParams { + + return &StorageServiceDecommissionPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceDecommissionPostParamsWithTimeout creates a new StorageServiceDecommissionPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceDecommissionPostParamsWithTimeout(timeout time.Duration) *StorageServiceDecommissionPostParams { + + return &StorageServiceDecommissionPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceDecommissionPostParamsWithContext creates a new StorageServiceDecommissionPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceDecommissionPostParamsWithContext(ctx context.Context) *StorageServiceDecommissionPostParams { + + return &StorageServiceDecommissionPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceDecommissionPostParamsWithHTTPClient creates a new StorageServiceDecommissionPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceDecommissionPostParamsWithHTTPClient(client *http.Client) *StorageServiceDecommissionPostParams { + + return &StorageServiceDecommissionPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceDecommissionPostParams contains all the parameters to send to the API endpoint +for the storage service decommission post operation typically these are written to a http.Request +*/ +type StorageServiceDecommissionPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service decommission post params +func (o *StorageServiceDecommissionPostParams) WithTimeout(timeout time.Duration) *StorageServiceDecommissionPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service decommission post params +func (o *StorageServiceDecommissionPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service decommission post params +func (o *StorageServiceDecommissionPostParams) WithContext(ctx context.Context) *StorageServiceDecommissionPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service decommission post params +func (o *StorageServiceDecommissionPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service decommission post params +func (o *StorageServiceDecommissionPostParams) WithHTTPClient(client *http.Client) *StorageServiceDecommissionPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service decommission post params +func (o *StorageServiceDecommissionPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceDecommissionPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_decommission_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_decommission_post_responses.go new file mode 100644 index 00000000000..9548b168b41 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_decommission_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceDecommissionPostReader is a Reader for the StorageServiceDecommissionPost structure. +type StorageServiceDecommissionPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceDecommissionPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceDecommissionPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceDecommissionPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceDecommissionPostOK creates a StorageServiceDecommissionPostOK with default headers values +func NewStorageServiceDecommissionPostOK() *StorageServiceDecommissionPostOK { + return &StorageServiceDecommissionPostOK{} +} + +/* +StorageServiceDecommissionPostOK handles this case with default header values. + +Success +*/ +type StorageServiceDecommissionPostOK struct { +} + +func (o *StorageServiceDecommissionPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceDecommissionPostDefault creates a StorageServiceDecommissionPostDefault with default headers values +func NewStorageServiceDecommissionPostDefault(code int) *StorageServiceDecommissionPostDefault { + return &StorageServiceDecommissionPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceDecommissionPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceDecommissionPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service decommission post default response +func (o *StorageServiceDecommissionPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceDecommissionPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceDecommissionPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceDecommissionPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_deliver_hints_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_deliver_hints_post_parameters.go new file mode 100644 index 00000000000..5cdb7174ad6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_deliver_hints_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceDeliverHintsPostParams creates a new StorageServiceDeliverHintsPostParams object +// with the default values initialized. 
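Decommission, added above, is a long-running call, so the default client timeout is usually too tight. On any non-200 status the reader returns the `*Default` value as the error, which still carries the decoded `models.ErrorModel`. A hedged sketch showing both points, again assuming the conventional generated client method:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// decommissionNode triggers decommission and surfaces the agent's error
// payload on failure. ops again stands in for the generated client.
func decommissionNode(ctx context.Context, ops *operations.Client) error {
	params := operations.NewStorageServiceDecommissionPostParams().
		WithContext(ctx).
		WithTimeout(time.Hour) // decommission streams data and can run long
	if _, err := ops.StorageServiceDecommissionPost(params); err != nil {
		var d *operations.StorageServiceDecommissionPostDefault
		if errors.As(err, &d) {
			return fmt.Errorf("decommission rejected (HTTP %d): %s",
				d.Code(), d.GetPayload().Message)
		}
		return err
	}
	return nil
}
```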
+func NewStorageServiceDeliverHintsPostParams() *StorageServiceDeliverHintsPostParams { + var () + return &StorageServiceDeliverHintsPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceDeliverHintsPostParamsWithTimeout creates a new StorageServiceDeliverHintsPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceDeliverHintsPostParamsWithTimeout(timeout time.Duration) *StorageServiceDeliverHintsPostParams { + var () + return &StorageServiceDeliverHintsPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceDeliverHintsPostParamsWithContext creates a new StorageServiceDeliverHintsPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceDeliverHintsPostParamsWithContext(ctx context.Context) *StorageServiceDeliverHintsPostParams { + var () + return &StorageServiceDeliverHintsPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceDeliverHintsPostParamsWithHTTPClient creates a new StorageServiceDeliverHintsPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceDeliverHintsPostParamsWithHTTPClient(client *http.Client) *StorageServiceDeliverHintsPostParams { + var () + return &StorageServiceDeliverHintsPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceDeliverHintsPostParams contains all the parameters to send to the API endpoint +for the storage service deliver hints post operation typically these are written to a http.Request +*/ +type StorageServiceDeliverHintsPostParams struct { + + /*Host + The host name + + */ + Host string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) WithTimeout(timeout time.Duration) *StorageServiceDeliverHintsPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) WithContext(ctx context.Context) *StorageServiceDeliverHintsPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) WithHTTPClient(client *http.Client) *StorageServiceDeliverHintsPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithHost adds the host to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) WithHost(host string) *StorageServiceDeliverHintsPostParams { + o.SetHost(host) + return o +} + +// SetHost adds the host to the storage service deliver hints post params +func (o *StorageServiceDeliverHintsPostParams) SetHost(host string) { + o.Host = host +} + +// WriteToRequest writes these params to a 
swagger request +func (o *StorageServiceDeliverHintsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param host + qrHost := o.Host + qHost := qrHost + if qHost != "" { + if err := r.SetQueryParam("host", qHost); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_deliver_hints_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_deliver_hints_post_responses.go new file mode 100644 index 00000000000..532f57b2692 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_deliver_hints_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceDeliverHintsPostReader is a Reader for the StorageServiceDeliverHintsPost structure. +type StorageServiceDeliverHintsPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceDeliverHintsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceDeliverHintsPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceDeliverHintsPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceDeliverHintsPostOK creates a StorageServiceDeliverHintsPostOK with default headers values +func NewStorageServiceDeliverHintsPostOK() *StorageServiceDeliverHintsPostOK { + return &StorageServiceDeliverHintsPostOK{} +} + +/* +StorageServiceDeliverHintsPostOK handles this case with default header values. + +Success +*/ +type StorageServiceDeliverHintsPostOK struct { +} + +func (o *StorageServiceDeliverHintsPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceDeliverHintsPostDefault creates a StorageServiceDeliverHintsPostDefault with default headers values +func NewStorageServiceDeliverHintsPostDefault(code int) *StorageServiceDeliverHintsPostDefault { + return &StorageServiceDeliverHintsPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceDeliverHintsPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceDeliverHintsPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service deliver hints post default response +func (o *StorageServiceDeliverHintsPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceDeliverHintsPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceDeliverHintsPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceDeliverHintsPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_describe_ring_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_describe_ring_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..3daff0b7cdb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_describe_ring_by_keyspace_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceDescribeRingByKeyspaceGetParams creates a new StorageServiceDescribeRingByKeyspaceGetParams object +// with the default values initialized. 
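For the deliver-hints operation above, note that `WriteToRequest` only sets the `host` query parameter when the string is non-empty, so passing an empty host silently omits the parameter rather than sending `host=`. A minimal sketch under the same generated-client assumption:

```go
package main

import (
	"context"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// deliverHints asks the node to replay stored hints for one endpoint. An
// empty host is never sent on the wire (see the qHost != "" check above).
// ops is the assumed generated client, as in the earlier sketches.
func deliverHints(ctx context.Context, ops *operations.Client, host string) error {
	params := operations.NewStorageServiceDeliverHintsPostParams().
		WithContext(ctx).
		WithHost(host)
	_, err := ops.StorageServiceDeliverHintsPost(params)
	return err
}
```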
+func NewStorageServiceDescribeRingByKeyspaceGetParams() *StorageServiceDescribeRingByKeyspaceGetParams { + var () + return &StorageServiceDescribeRingByKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceDescribeRingByKeyspaceGetParamsWithTimeout creates a new StorageServiceDescribeRingByKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceDescribeRingByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServiceDescribeRingByKeyspaceGetParams { + var () + return &StorageServiceDescribeRingByKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceDescribeRingByKeyspaceGetParamsWithContext creates a new StorageServiceDescribeRingByKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceDescribeRingByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServiceDescribeRingByKeyspaceGetParams { + var () + return &StorageServiceDescribeRingByKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceDescribeRingByKeyspaceGetParamsWithHTTPClient creates a new StorageServiceDescribeRingByKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceDescribeRingByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServiceDescribeRingByKeyspaceGetParams { + var () + return &StorageServiceDescribeRingByKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceDescribeRingByKeyspaceGetParams contains all the parameters to send to the API endpoint +for the storage service describe ring by keyspace get operation typically these are written to a http.Request +*/ +type StorageServiceDescribeRingByKeyspaceGetParams struct { + + /*Keyspace + The keyspace to fetch information about + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServiceDescribeRingByKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServiceDescribeRingByKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServiceDescribeRingByKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithKeyspace adds the keyspace to the storage service 
describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) WithKeyspace(keyspace string) *StorageServiceDescribeRingByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service describe ring by keyspace get params +func (o *StorageServiceDescribeRingByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceDescribeRingByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_describe_ring_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_describe_ring_by_keyspace_get_responses.go new file mode 100644 index 00000000000..a414363a07d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_describe_ring_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceDescribeRingByKeyspaceGetReader is a Reader for the StorageServiceDescribeRingByKeyspaceGet structure. +type StorageServiceDescribeRingByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceDescribeRingByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceDescribeRingByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceDescribeRingByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceDescribeRingByKeyspaceGetOK creates a StorageServiceDescribeRingByKeyspaceGetOK with default headers values +func NewStorageServiceDescribeRingByKeyspaceGetOK() *StorageServiceDescribeRingByKeyspaceGetOK { + return &StorageServiceDescribeRingByKeyspaceGetOK{} +} + +/* +StorageServiceDescribeRingByKeyspaceGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceDescribeRingByKeyspaceGetOK struct { + Payload []*models.TokenRange +} + +func (o *StorageServiceDescribeRingByKeyspaceGetOK) GetPayload() []*models.TokenRange { + return o.Payload +} + +func (o *StorageServiceDescribeRingByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceDescribeRingByKeyspaceGetDefault creates a StorageServiceDescribeRingByKeyspaceGetDefault with default headers values +func NewStorageServiceDescribeRingByKeyspaceGetDefault(code int) *StorageServiceDescribeRingByKeyspaceGetDefault { + return &StorageServiceDescribeRingByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceDescribeRingByKeyspaceGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceDescribeRingByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service describe ring by keyspace get default response +func (o *StorageServiceDescribeRingByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceDescribeRingByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceDescribeRingByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceDescribeRingByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_get_parameters.go new file mode 100644 index 00000000000..ad399d765ab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceDrainGetParams creates a new StorageServiceDrainGetParams object +// with the default values initialized. 
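Unlike the query-parameter operations earlier in this hunk, describe-ring binds its keyspace as a path parameter (`SetPathParam("keyspace", ...)`), and the 200 payload is a slice of `*models.TokenRange`. A usage sketch with the same assumed client:

```go
package main

import (
	"context"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// ringSize fetches the token ring for one keyspace; the keyspace travels in
// the URL path, not the query string. ops is the assumed generated client.
func ringSize(ctx context.Context, ops *operations.Client, keyspace string) (int, error) {
	resp, err := ops.StorageServiceDescribeRingByKeyspaceGet(
		operations.NewStorageServiceDescribeRingByKeyspaceGetParams().
			WithContext(ctx).
			WithKeyspace(keyspace))
	if err != nil {
		return 0, err
	}
	ranges := resp.GetPayload() // []*models.TokenRange
	fmt.Printf("%s: %d token ranges\n", keyspace, len(ranges))
	return len(ranges), nil
}
```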
+func NewStorageServiceDrainGetParams() *StorageServiceDrainGetParams { + + return &StorageServiceDrainGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceDrainGetParamsWithTimeout creates a new StorageServiceDrainGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceDrainGetParamsWithTimeout(timeout time.Duration) *StorageServiceDrainGetParams { + + return &StorageServiceDrainGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceDrainGetParamsWithContext creates a new StorageServiceDrainGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceDrainGetParamsWithContext(ctx context.Context) *StorageServiceDrainGetParams { + + return &StorageServiceDrainGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceDrainGetParamsWithHTTPClient creates a new StorageServiceDrainGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceDrainGetParamsWithHTTPClient(client *http.Client) *StorageServiceDrainGetParams { + + return &StorageServiceDrainGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceDrainGetParams contains all the parameters to send to the API endpoint +for the storage service drain get operation typically these are written to a http.Request +*/ +type StorageServiceDrainGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service drain get params +func (o *StorageServiceDrainGetParams) WithTimeout(timeout time.Duration) *StorageServiceDrainGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service drain get params +func (o *StorageServiceDrainGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service drain get params +func (o *StorageServiceDrainGetParams) WithContext(ctx context.Context) *StorageServiceDrainGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service drain get params +func (o *StorageServiceDrainGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service drain get params +func (o *StorageServiceDrainGetParams) WithHTTPClient(client *http.Client) *StorageServiceDrainGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service drain get params +func (o *StorageServiceDrainGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceDrainGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_get_responses.go new file mode 100644 index 00000000000..92f5ba8c9a5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceDrainGetReader is a Reader for the StorageServiceDrainGet structure. +type StorageServiceDrainGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceDrainGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceDrainGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceDrainGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceDrainGetOK creates a StorageServiceDrainGetOK with default headers values +func NewStorageServiceDrainGetOK() *StorageServiceDrainGetOK { + return &StorageServiceDrainGetOK{} +} + +/* +StorageServiceDrainGetOK handles this case with default header values. + +Success +*/ +type StorageServiceDrainGetOK struct { + Payload string +} + +func (o *StorageServiceDrainGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceDrainGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceDrainGetDefault creates a StorageServiceDrainGetDefault with default headers values +func NewStorageServiceDrainGetDefault(code int) *StorageServiceDrainGetDefault { + return &StorageServiceDrainGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceDrainGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceDrainGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service drain get default response +func (o *StorageServiceDrainGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceDrainGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceDrainGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceDrainGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_post_parameters.go new file mode 100644 index 00000000000..b9530a13adb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceDrainPostParams creates a new StorageServiceDrainPostParams object +// with the default values initialized. 
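The drain GET above is the read side of the drain pair: it takes no parameters and its 200 payload is a plain progress string. A small sketch, same assumptions as before:

```go
package main

import (
	"context"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// drainStatus reads the node's current drain progress as a string.
// ops is the assumed generated client.
func drainStatus(ctx context.Context, ops *operations.Client) (string, error) {
	resp, err := ops.StorageServiceDrainGet(
		operations.NewStorageServiceDrainGetParams().WithContext(ctx))
	if err != nil {
		return "", err
	}
	return resp.GetPayload(), nil
}
```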
+func NewStorageServiceDrainPostParams() *StorageServiceDrainPostParams { + + return &StorageServiceDrainPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceDrainPostParamsWithTimeout creates a new StorageServiceDrainPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceDrainPostParamsWithTimeout(timeout time.Duration) *StorageServiceDrainPostParams { + + return &StorageServiceDrainPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceDrainPostParamsWithContext creates a new StorageServiceDrainPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceDrainPostParamsWithContext(ctx context.Context) *StorageServiceDrainPostParams { + + return &StorageServiceDrainPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceDrainPostParamsWithHTTPClient creates a new StorageServiceDrainPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceDrainPostParamsWithHTTPClient(client *http.Client) *StorageServiceDrainPostParams { + + return &StorageServiceDrainPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceDrainPostParams contains all the parameters to send to the API endpoint +for the storage service drain post operation typically these are written to a http.Request +*/ +type StorageServiceDrainPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service drain post params +func (o *StorageServiceDrainPostParams) WithTimeout(timeout time.Duration) *StorageServiceDrainPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service drain post params +func (o *StorageServiceDrainPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service drain post params +func (o *StorageServiceDrainPostParams) WithContext(ctx context.Context) *StorageServiceDrainPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service drain post params +func (o *StorageServiceDrainPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service drain post params +func (o *StorageServiceDrainPostParams) WithHTTPClient(client *http.Client) *StorageServiceDrainPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service drain post params +func (o *StorageServiceDrainPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceDrainPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_post_responses.go new file mode 100644 index 00000000000..7110764f8e1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_drain_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceDrainPostReader is a Reader for the StorageServiceDrainPost structure. +type StorageServiceDrainPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceDrainPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceDrainPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceDrainPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceDrainPostOK creates a StorageServiceDrainPostOK with default headers values +func NewStorageServiceDrainPostOK() *StorageServiceDrainPostOK { + return &StorageServiceDrainPostOK{} +} + +/* +StorageServiceDrainPostOK handles this case with default header values. + +Success +*/ +type StorageServiceDrainPostOK struct { +} + +func (o *StorageServiceDrainPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceDrainPostDefault creates a StorageServiceDrainPostDefault with default headers values +func NewStorageServiceDrainPostDefault(code int) *StorageServiceDrainPostDefault { + return &StorageServiceDrainPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceDrainPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceDrainPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service drain post default response +func (o *StorageServiceDrainPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceDrainPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceDrainPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceDrainPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_remove_completion_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_remove_completion_post_parameters.go new file mode 100644 index 00000000000..3f884d29d5d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_remove_completion_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceForceRemoveCompletionPostParams creates a new StorageServiceForceRemoveCompletionPostParams object +// with the default values initialized. 
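Together with the GET above, the drain POST added here supports a trigger-then-poll pattern. The sketch below is speculative beyond what the diff shows: it assumes the generated client methods exist as usual, and the progress strings returned by the GET are server-defined, so a real caller would break on whatever terminal status its deployment reports.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

// drainAndWait starts a drain and polls the companion GET endpoint until the
// context expires. ops is the assumed generated client.
func drainAndWait(ctx context.Context, ops *operations.Client) error {
	if _, err := ops.StorageServiceDrainPost(
		operations.NewStorageServiceDrainPostParams().WithContext(ctx)); err != nil {
		return err
	}
	for {
		resp, err := ops.StorageServiceDrainGet(
			operations.NewStorageServiceDrainGetParams().WithContext(ctx))
		if err != nil {
			return err
		}
		fmt.Println("drain:", resp.GetPayload()) // break on a terminal status here
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(2 * time.Second):
		}
	}
}
```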
+func NewStorageServiceForceRemoveCompletionPostParams() *StorageServiceForceRemoveCompletionPostParams { + + return &StorageServiceForceRemoveCompletionPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceForceRemoveCompletionPostParamsWithTimeout creates a new StorageServiceForceRemoveCompletionPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceForceRemoveCompletionPostParamsWithTimeout(timeout time.Duration) *StorageServiceForceRemoveCompletionPostParams { + + return &StorageServiceForceRemoveCompletionPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceForceRemoveCompletionPostParamsWithContext creates a new StorageServiceForceRemoveCompletionPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceForceRemoveCompletionPostParamsWithContext(ctx context.Context) *StorageServiceForceRemoveCompletionPostParams { + + return &StorageServiceForceRemoveCompletionPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceForceRemoveCompletionPostParamsWithHTTPClient creates a new StorageServiceForceRemoveCompletionPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceForceRemoveCompletionPostParamsWithHTTPClient(client *http.Client) *StorageServiceForceRemoveCompletionPostParams { + + return &StorageServiceForceRemoveCompletionPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceForceRemoveCompletionPostParams contains all the parameters to send to the API endpoint +for the storage service force remove completion post operation typically these are written to a http.Request +*/ +type StorageServiceForceRemoveCompletionPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service force remove completion post params +func (o *StorageServiceForceRemoveCompletionPostParams) WithTimeout(timeout time.Duration) *StorageServiceForceRemoveCompletionPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service force remove completion post params +func (o *StorageServiceForceRemoveCompletionPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service force remove completion post params +func (o *StorageServiceForceRemoveCompletionPostParams) WithContext(ctx context.Context) *StorageServiceForceRemoveCompletionPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service force remove completion post params +func (o *StorageServiceForceRemoveCompletionPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service force remove completion post params +func (o *StorageServiceForceRemoveCompletionPostParams) WithHTTPClient(client *http.Client) *StorageServiceForceRemoveCompletionPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service force remove completion post params +func (o *StorageServiceForceRemoveCompletionPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceForceRemoveCompletionPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) 
error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_remove_completion_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_remove_completion_post_responses.go new file mode 100644 index 00000000000..c544ee2e60b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_remove_completion_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceForceRemoveCompletionPostReader is a Reader for the StorageServiceForceRemoveCompletionPost structure. +type StorageServiceForceRemoveCompletionPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceForceRemoveCompletionPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceForceRemoveCompletionPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceForceRemoveCompletionPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceForceRemoveCompletionPostOK creates a StorageServiceForceRemoveCompletionPostOK with default headers values +func NewStorageServiceForceRemoveCompletionPostOK() *StorageServiceForceRemoveCompletionPostOK { + return &StorageServiceForceRemoveCompletionPostOK{} +} + +/* +StorageServiceForceRemoveCompletionPostOK handles this case with default header values. + +Success +*/ +type StorageServiceForceRemoveCompletionPostOK struct { +} + +func (o *StorageServiceForceRemoveCompletionPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceForceRemoveCompletionPostDefault creates a StorageServiceForceRemoveCompletionPostDefault with default headers values +func NewStorageServiceForceRemoveCompletionPostDefault(code int) *StorageServiceForceRemoveCompletionPostDefault { + return &StorageServiceForceRemoveCompletionPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceForceRemoveCompletionPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceForceRemoveCompletionPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service force remove completion post default response +func (o *StorageServiceForceRemoveCompletionPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceForceRemoveCompletionPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceForceRemoveCompletionPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceForceRemoveCompletionPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_post_parameters.go new file mode 100644 index 00000000000..6cf8db11c75 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceForceTerminatePostParams creates a new StorageServiceForceTerminatePostParams object +// with the default values initialized. 
+func NewStorageServiceForceTerminatePostParams() *StorageServiceForceTerminatePostParams { + + return &StorageServiceForceTerminatePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceForceTerminatePostParamsWithTimeout creates a new StorageServiceForceTerminatePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceForceTerminatePostParamsWithTimeout(timeout time.Duration) *StorageServiceForceTerminatePostParams { + + return &StorageServiceForceTerminatePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceForceTerminatePostParamsWithContext creates a new StorageServiceForceTerminatePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceForceTerminatePostParamsWithContext(ctx context.Context) *StorageServiceForceTerminatePostParams { + + return &StorageServiceForceTerminatePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceForceTerminatePostParamsWithHTTPClient creates a new StorageServiceForceTerminatePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceForceTerminatePostParamsWithHTTPClient(client *http.Client) *StorageServiceForceTerminatePostParams { + + return &StorageServiceForceTerminatePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceForceTerminatePostParams contains all the parameters to send to the API endpoint +for the storage service force terminate post operation typically these are written to a http.Request +*/ +type StorageServiceForceTerminatePostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service force terminate post params +func (o *StorageServiceForceTerminatePostParams) WithTimeout(timeout time.Duration) *StorageServiceForceTerminatePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service force terminate post params +func (o *StorageServiceForceTerminatePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service force terminate post params +func (o *StorageServiceForceTerminatePostParams) WithContext(ctx context.Context) *StorageServiceForceTerminatePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service force terminate post params +func (o *StorageServiceForceTerminatePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service force terminate post params +func (o *StorageServiceForceTerminatePostParams) WithHTTPClient(client *http.Client) *StorageServiceForceTerminatePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service force terminate post params +func (o *StorageServiceForceTerminatePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceForceTerminatePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_post_responses.go new file mode 100644 index 00000000000..69ec350b603 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceForceTerminatePostReader is a Reader for the StorageServiceForceTerminatePost structure. +type StorageServiceForceTerminatePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceForceTerminatePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceForceTerminatePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceForceTerminatePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceForceTerminatePostOK creates a StorageServiceForceTerminatePostOK with default headers values +func NewStorageServiceForceTerminatePostOK() *StorageServiceForceTerminatePostOK { + return &StorageServiceForceTerminatePostOK{} +} + +/* +StorageServiceForceTerminatePostOK handles this case with default header values. + +Success +*/ +type StorageServiceForceTerminatePostOK struct { +} + +func (o *StorageServiceForceTerminatePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceForceTerminatePostDefault creates a StorageServiceForceTerminatePostDefault with default headers values +func NewStorageServiceForceTerminatePostDefault(code int) *StorageServiceForceTerminatePostDefault { + return &StorageServiceForceTerminatePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceForceTerminatePostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceForceTerminatePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service force terminate post default response +func (o *StorageServiceForceTerminatePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceForceTerminatePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceForceTerminatePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceForceTerminatePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_repair_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_repair_post_parameters.go new file mode 100644 index 00000000000..73a0fe37bd8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_repair_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceForceTerminateRepairPostParams creates a new StorageServiceForceTerminateRepairPostParams object +// with the default values initialized. 
+func NewStorageServiceForceTerminateRepairPostParams() *StorageServiceForceTerminateRepairPostParams { + + return &StorageServiceForceTerminateRepairPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceForceTerminateRepairPostParamsWithTimeout creates a new StorageServiceForceTerminateRepairPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceForceTerminateRepairPostParamsWithTimeout(timeout time.Duration) *StorageServiceForceTerminateRepairPostParams { + + return &StorageServiceForceTerminateRepairPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceForceTerminateRepairPostParamsWithContext creates a new StorageServiceForceTerminateRepairPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceForceTerminateRepairPostParamsWithContext(ctx context.Context) *StorageServiceForceTerminateRepairPostParams { + + return &StorageServiceForceTerminateRepairPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceForceTerminateRepairPostParamsWithHTTPClient creates a new StorageServiceForceTerminateRepairPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceForceTerminateRepairPostParamsWithHTTPClient(client *http.Client) *StorageServiceForceTerminateRepairPostParams { + + return &StorageServiceForceTerminateRepairPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceForceTerminateRepairPostParams contains all the parameters to send to the API endpoint +for the storage service force terminate repair post operation typically these are written to a http.Request +*/ +type StorageServiceForceTerminateRepairPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service force terminate repair post params +func (o *StorageServiceForceTerminateRepairPostParams) WithTimeout(timeout time.Duration) *StorageServiceForceTerminateRepairPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service force terminate repair post params +func (o *StorageServiceForceTerminateRepairPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service force terminate repair post params +func (o *StorageServiceForceTerminateRepairPostParams) WithContext(ctx context.Context) *StorageServiceForceTerminateRepairPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service force terminate repair post params +func (o *StorageServiceForceTerminateRepairPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service force terminate repair post params +func (o *StorageServiceForceTerminateRepairPostParams) WithHTTPClient(client *http.Client) *StorageServiceForceTerminateRepairPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service force terminate repair post params +func (o *StorageServiceForceTerminateRepairPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceForceTerminateRepairPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_repair_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_repair_post_responses.go new file mode 100644 index 00000000000..b16ed6a3771 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_force_terminate_repair_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceForceTerminateRepairPostReader is a Reader for the StorageServiceForceTerminateRepairPost structure. +type StorageServiceForceTerminateRepairPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceForceTerminateRepairPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceForceTerminateRepairPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceForceTerminateRepairPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceForceTerminateRepairPostOK creates a StorageServiceForceTerminateRepairPostOK with default headers values +func NewStorageServiceForceTerminateRepairPostOK() *StorageServiceForceTerminateRepairPostOK { + return &StorageServiceForceTerminateRepairPostOK{} +} + +/* +StorageServiceForceTerminateRepairPostOK handles this case with default header values. + +Success +*/ +type StorageServiceForceTerminateRepairPostOK struct { +} + +func (o *StorageServiceForceTerminateRepairPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceForceTerminateRepairPostDefault creates a StorageServiceForceTerminateRepairPostDefault with default headers values +func NewStorageServiceForceTerminateRepairPostDefault(code int) *StorageServiceForceTerminateRepairPostDefault { + return &StorageServiceForceTerminateRepairPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceForceTerminateRepairPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceForceTerminateRepairPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service force terminate repair post default response +func (o *StorageServiceForceTerminateRepairPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceForceTerminateRepairPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceForceTerminateRepairPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceForceTerminateRepairPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_generation_number_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_generation_number_get_parameters.go new file mode 100644 index 00000000000..60248d2e0ad --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_generation_number_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceGenerationNumberGetParams creates a new StorageServiceGenerationNumberGetParams object +// with the default values initialized. 
+func NewStorageServiceGenerationNumberGetParams() *StorageServiceGenerationNumberGetParams { + + return &StorageServiceGenerationNumberGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceGenerationNumberGetParamsWithTimeout creates a new StorageServiceGenerationNumberGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceGenerationNumberGetParamsWithTimeout(timeout time.Duration) *StorageServiceGenerationNumberGetParams { + + return &StorageServiceGenerationNumberGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceGenerationNumberGetParamsWithContext creates a new StorageServiceGenerationNumberGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceGenerationNumberGetParamsWithContext(ctx context.Context) *StorageServiceGenerationNumberGetParams { + + return &StorageServiceGenerationNumberGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceGenerationNumberGetParamsWithHTTPClient creates a new StorageServiceGenerationNumberGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceGenerationNumberGetParamsWithHTTPClient(client *http.Client) *StorageServiceGenerationNumberGetParams { + + return &StorageServiceGenerationNumberGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceGenerationNumberGetParams contains all the parameters to send to the API endpoint +for the storage service generation number get operation typically these are written to a http.Request +*/ +type StorageServiceGenerationNumberGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service generation number get params +func (o *StorageServiceGenerationNumberGetParams) WithTimeout(timeout time.Duration) *StorageServiceGenerationNumberGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service generation number get params +func (o *StorageServiceGenerationNumberGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service generation number get params +func (o *StorageServiceGenerationNumberGetParams) WithContext(ctx context.Context) *StorageServiceGenerationNumberGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service generation number get params +func (o *StorageServiceGenerationNumberGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service generation number get params +func (o *StorageServiceGenerationNumberGetParams) WithHTTPClient(client *http.Client) *StorageServiceGenerationNumberGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service generation number get params +func (o *StorageServiceGenerationNumberGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceGenerationNumberGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_generation_number_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_generation_number_get_responses.go new file mode 100644 index 00000000000..048da44a179 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_generation_number_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceGenerationNumberGetReader is a Reader for the StorageServiceGenerationNumberGet structure. +type StorageServiceGenerationNumberGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceGenerationNumberGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceGenerationNumberGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceGenerationNumberGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceGenerationNumberGetOK creates a StorageServiceGenerationNumberGetOK with default headers values +func NewStorageServiceGenerationNumberGetOK() *StorageServiceGenerationNumberGetOK { + return &StorageServiceGenerationNumberGetOK{} +} + +/* +StorageServiceGenerationNumberGetOK handles this case with default header values. + +Success +*/ +type StorageServiceGenerationNumberGetOK struct { + Payload int32 +} + +func (o *StorageServiceGenerationNumberGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceGenerationNumberGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceGenerationNumberGetDefault creates a StorageServiceGenerationNumberGetDefault with default headers values +func NewStorageServiceGenerationNumberGetDefault(code int) *StorageServiceGenerationNumberGetDefault { + return &StorageServiceGenerationNumberGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceGenerationNumberGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceGenerationNumberGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service generation number get default response +func (o *StorageServiceGenerationNumberGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceGenerationNumberGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceGenerationNumberGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceGenerationNumberGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_delete_parameters.go new file mode 100644 index 00000000000..d78ce43d52b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_delete_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceGossipingDeleteParams creates a new StorageServiceGossipingDeleteParams object +// with the default values initialized. 
+func NewStorageServiceGossipingDeleteParams() *StorageServiceGossipingDeleteParams { + + return &StorageServiceGossipingDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceGossipingDeleteParamsWithTimeout creates a new StorageServiceGossipingDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceGossipingDeleteParamsWithTimeout(timeout time.Duration) *StorageServiceGossipingDeleteParams { + + return &StorageServiceGossipingDeleteParams{ + + timeout: timeout, + } +} + +// NewStorageServiceGossipingDeleteParamsWithContext creates a new StorageServiceGossipingDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceGossipingDeleteParamsWithContext(ctx context.Context) *StorageServiceGossipingDeleteParams { + + return &StorageServiceGossipingDeleteParams{ + + Context: ctx, + } +} + +// NewStorageServiceGossipingDeleteParamsWithHTTPClient creates a new StorageServiceGossipingDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceGossipingDeleteParamsWithHTTPClient(client *http.Client) *StorageServiceGossipingDeleteParams { + + return &StorageServiceGossipingDeleteParams{ + HTTPClient: client, + } +} + +/* +StorageServiceGossipingDeleteParams contains all the parameters to send to the API endpoint +for the storage service gossiping delete operation typically these are written to a http.Request +*/ +type StorageServiceGossipingDeleteParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service gossiping delete params +func (o *StorageServiceGossipingDeleteParams) WithTimeout(timeout time.Duration) *StorageServiceGossipingDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service gossiping delete params +func (o *StorageServiceGossipingDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service gossiping delete params +func (o *StorageServiceGossipingDeleteParams) WithContext(ctx context.Context) *StorageServiceGossipingDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service gossiping delete params +func (o *StorageServiceGossipingDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service gossiping delete params +func (o *StorageServiceGossipingDeleteParams) WithHTTPClient(client *http.Client) *StorageServiceGossipingDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service gossiping delete params +func (o *StorageServiceGossipingDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceGossipingDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_delete_responses.go new file mode 100644 index 00000000000..739ab09329c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_delete_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceGossipingDeleteReader is a Reader for the StorageServiceGossipingDelete structure. +type StorageServiceGossipingDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceGossipingDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceGossipingDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceGossipingDeleteDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceGossipingDeleteOK creates a StorageServiceGossipingDeleteOK with default headers values +func NewStorageServiceGossipingDeleteOK() *StorageServiceGossipingDeleteOK { + return &StorageServiceGossipingDeleteOK{} +} + +/* +StorageServiceGossipingDeleteOK handles this case with default header values. + +Success +*/ +type StorageServiceGossipingDeleteOK struct { +} + +func (o *StorageServiceGossipingDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceGossipingDeleteDefault creates a StorageServiceGossipingDeleteDefault with default headers values +func NewStorageServiceGossipingDeleteDefault(code int) *StorageServiceGossipingDeleteDefault { + return &StorageServiceGossipingDeleteDefault{ + _statusCode: code, + } +} + +/* +StorageServiceGossipingDeleteDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceGossipingDeleteDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service gossiping delete default response +func (o *StorageServiceGossipingDeleteDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceGossipingDeleteDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceGossipingDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceGossipingDeleteDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_get_parameters.go new file mode 100644 index 00000000000..b1c6b4ab3a8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceGossipingGetParams creates a new StorageServiceGossipingGetParams object +// with the default values initialized. 
+func NewStorageServiceGossipingGetParams() *StorageServiceGossipingGetParams { + + return &StorageServiceGossipingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceGossipingGetParamsWithTimeout creates a new StorageServiceGossipingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceGossipingGetParamsWithTimeout(timeout time.Duration) *StorageServiceGossipingGetParams { + + return &StorageServiceGossipingGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceGossipingGetParamsWithContext creates a new StorageServiceGossipingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceGossipingGetParamsWithContext(ctx context.Context) *StorageServiceGossipingGetParams { + + return &StorageServiceGossipingGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceGossipingGetParamsWithHTTPClient creates a new StorageServiceGossipingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceGossipingGetParamsWithHTTPClient(client *http.Client) *StorageServiceGossipingGetParams { + + return &StorageServiceGossipingGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceGossipingGetParams contains all the parameters to send to the API endpoint +for the storage service gossiping get operation typically these are written to a http.Request +*/ +type StorageServiceGossipingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service gossiping get params +func (o *StorageServiceGossipingGetParams) WithTimeout(timeout time.Duration) *StorageServiceGossipingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service gossiping get params +func (o *StorageServiceGossipingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service gossiping get params +func (o *StorageServiceGossipingGetParams) WithContext(ctx context.Context) *StorageServiceGossipingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service gossiping get params +func (o *StorageServiceGossipingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service gossiping get params +func (o *StorageServiceGossipingGetParams) WithHTTPClient(client *http.Client) *StorageServiceGossipingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service gossiping get params +func (o *StorageServiceGossipingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceGossipingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_get_responses.go new file mode 100644 index 00000000000..a2174275697 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceGossipingGetReader is a Reader for the StorageServiceGossipingGet structure. +type StorageServiceGossipingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceGossipingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceGossipingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceGossipingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceGossipingGetOK creates a StorageServiceGossipingGetOK with default headers values +func NewStorageServiceGossipingGetOK() *StorageServiceGossipingGetOK { + return &StorageServiceGossipingGetOK{} +} + +/* +StorageServiceGossipingGetOK handles this case with default header values. + +Success +*/ +type StorageServiceGossipingGetOK struct { + Payload bool +} + +func (o *StorageServiceGossipingGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageServiceGossipingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceGossipingGetDefault creates a StorageServiceGossipingGetDefault with default headers values +func NewStorageServiceGossipingGetDefault(code int) *StorageServiceGossipingGetDefault { + return &StorageServiceGossipingGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceGossipingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceGossipingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service gossiping get default response +func (o *StorageServiceGossipingGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceGossipingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceGossipingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceGossipingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_post_parameters.go new file mode 100644 index 00000000000..e03284b05ee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceGossipingPostParams creates a new StorageServiceGossipingPostParams object +// with the default values initialized. 
+func NewStorageServiceGossipingPostParams() *StorageServiceGossipingPostParams { + + return &StorageServiceGossipingPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceGossipingPostParamsWithTimeout creates a new StorageServiceGossipingPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceGossipingPostParamsWithTimeout(timeout time.Duration) *StorageServiceGossipingPostParams { + + return &StorageServiceGossipingPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceGossipingPostParamsWithContext creates a new StorageServiceGossipingPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceGossipingPostParamsWithContext(ctx context.Context) *StorageServiceGossipingPostParams { + + return &StorageServiceGossipingPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceGossipingPostParamsWithHTTPClient creates a new StorageServiceGossipingPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceGossipingPostParamsWithHTTPClient(client *http.Client) *StorageServiceGossipingPostParams { + + return &StorageServiceGossipingPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceGossipingPostParams contains all the parameters to send to the API endpoint +for the storage service gossiping post operation typically these are written to a http.Request +*/ +type StorageServiceGossipingPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service gossiping post params +func (o *StorageServiceGossipingPostParams) WithTimeout(timeout time.Duration) *StorageServiceGossipingPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service gossiping post params +func (o *StorageServiceGossipingPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service gossiping post params +func (o *StorageServiceGossipingPostParams) WithContext(ctx context.Context) *StorageServiceGossipingPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service gossiping post params +func (o *StorageServiceGossipingPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service gossiping post params +func (o *StorageServiceGossipingPostParams) WithHTTPClient(client *http.Client) *StorageServiceGossipingPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service gossiping post params +func (o *StorageServiceGossipingPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceGossipingPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_post_responses.go new file mode 100644 index 00000000000..83b134204f0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_gossiping_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceGossipingPostReader is a Reader for the StorageServiceGossipingPost structure. +type StorageServiceGossipingPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceGossipingPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceGossipingPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceGossipingPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceGossipingPostOK creates a StorageServiceGossipingPostOK with default headers values +func NewStorageServiceGossipingPostOK() *StorageServiceGossipingPostOK { + return &StorageServiceGossipingPostOK{} +} + +/* +StorageServiceGossipingPostOK handles this case with default header values. + +Success +*/ +type StorageServiceGossipingPostOK struct { +} + +func (o *StorageServiceGossipingPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceGossipingPostDefault creates a StorageServiceGossipingPostDefault with default headers values +func NewStorageServiceGossipingPostDefault(code int) *StorageServiceGossipingPostDefault { + return &StorageServiceGossipingPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceGossipingPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceGossipingPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service gossiping post default response +func (o *StorageServiceGossipingPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceGossipingPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceGossipingPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceGossipingPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hinted_handoff_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hinted_handoff_post_parameters.go new file mode 100644 index 00000000000..ee72e071c33 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hinted_handoff_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceHintedHandoffPostParams creates a new StorageServiceHintedHandoffPostParams object +// with the default values initialized. 
+func NewStorageServiceHintedHandoffPostParams() *StorageServiceHintedHandoffPostParams { + var () + return &StorageServiceHintedHandoffPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceHintedHandoffPostParamsWithTimeout creates a new StorageServiceHintedHandoffPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceHintedHandoffPostParamsWithTimeout(timeout time.Duration) *StorageServiceHintedHandoffPostParams { + var () + return &StorageServiceHintedHandoffPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceHintedHandoffPostParamsWithContext creates a new StorageServiceHintedHandoffPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceHintedHandoffPostParamsWithContext(ctx context.Context) *StorageServiceHintedHandoffPostParams { + var () + return &StorageServiceHintedHandoffPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceHintedHandoffPostParamsWithHTTPClient creates a new StorageServiceHintedHandoffPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceHintedHandoffPostParamsWithHTTPClient(client *http.Client) *StorageServiceHintedHandoffPostParams { + var () + return &StorageServiceHintedHandoffPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceHintedHandoffPostParams contains all the parameters to send to the API endpoint +for the storage service hinted handoff post operation typically these are written to a http.Request +*/ +type StorageServiceHintedHandoffPostParams struct { + + /*Throttle + throttle in kb + + */ + Throttle int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) WithTimeout(timeout time.Duration) *StorageServiceHintedHandoffPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) WithContext(ctx context.Context) *StorageServiceHintedHandoffPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) WithHTTPClient(client *http.Client) *StorageServiceHintedHandoffPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithThrottle adds the throttle to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) WithThrottle(throttle int32) *StorageServiceHintedHandoffPostParams { + o.SetThrottle(throttle) + return o +} + +// SetThrottle adds the throttle to the storage service hinted handoff post params +func (o *StorageServiceHintedHandoffPostParams) 
SetThrottle(throttle int32) { + o.Throttle = throttle +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceHintedHandoffPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param throttle + qrThrottle := o.Throttle + qThrottle := swag.FormatInt32(qrThrottle) + if qThrottle != "" { + if err := r.SetQueryParam("throttle", qThrottle); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hinted_handoff_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hinted_handoff_post_responses.go new file mode 100644 index 00000000000..8da55314bf4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hinted_handoff_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceHintedHandoffPostReader is a Reader for the StorageServiceHintedHandoffPost structure. +type StorageServiceHintedHandoffPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceHintedHandoffPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceHintedHandoffPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceHintedHandoffPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceHintedHandoffPostOK creates a StorageServiceHintedHandoffPostOK with default headers values +func NewStorageServiceHintedHandoffPostOK() *StorageServiceHintedHandoffPostOK { + return &StorageServiceHintedHandoffPostOK{} +} + +/* +StorageServiceHintedHandoffPostOK handles this case with default header values. + +Success +*/ +type StorageServiceHintedHandoffPostOK struct { +} + +func (o *StorageServiceHintedHandoffPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceHintedHandoffPostDefault creates a StorageServiceHintedHandoffPostDefault with default headers values +func NewStorageServiceHintedHandoffPostDefault(code int) *StorageServiceHintedHandoffPostDefault { + return &StorageServiceHintedHandoffPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceHintedHandoffPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceHintedHandoffPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service hinted handoff post default response +func (o *StorageServiceHintedHandoffPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceHintedHandoffPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceHintedHandoffPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceHintedHandoffPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_host_id_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_host_id_get_parameters.go new file mode 100644 index 00000000000..58c81ef765a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_host_id_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceHostIDGetParams creates a new StorageServiceHostIDGetParams object +// with the default values initialized. 
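+//
+// Editorial usage sketch (not generated output; the method name follows the
+// usual go-swagger convention and is assumed here):
+//
+//	res, err := client.Operations.StorageServiceHostIDGet(
+//		operations.NewStorageServiceHostIDGetParams().WithContext(ctx))
+//	// On success, res.GetPayload() is a []*models.Mapper of host-ID mappings.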
+func NewStorageServiceHostIDGetParams() *StorageServiceHostIDGetParams { + + return &StorageServiceHostIDGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceHostIDGetParamsWithTimeout creates a new StorageServiceHostIDGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceHostIDGetParamsWithTimeout(timeout time.Duration) *StorageServiceHostIDGetParams { + + return &StorageServiceHostIDGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceHostIDGetParamsWithContext creates a new StorageServiceHostIDGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceHostIDGetParamsWithContext(ctx context.Context) *StorageServiceHostIDGetParams { + + return &StorageServiceHostIDGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceHostIDGetParamsWithHTTPClient creates a new StorageServiceHostIDGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceHostIDGetParamsWithHTTPClient(client *http.Client) *StorageServiceHostIDGetParams { + + return &StorageServiceHostIDGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceHostIDGetParams contains all the parameters to send to the API endpoint +for the storage service host Id get operation typically these are written to a http.Request +*/ +type StorageServiceHostIDGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service host Id get params +func (o *StorageServiceHostIDGetParams) WithTimeout(timeout time.Duration) *StorageServiceHostIDGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service host Id get params +func (o *StorageServiceHostIDGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service host Id get params +func (o *StorageServiceHostIDGetParams) WithContext(ctx context.Context) *StorageServiceHostIDGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service host Id get params +func (o *StorageServiceHostIDGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service host Id get params +func (o *StorageServiceHostIDGetParams) WithHTTPClient(client *http.Client) *StorageServiceHostIDGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service host Id get params +func (o *StorageServiceHostIDGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceHostIDGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_host_id_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_host_id_get_responses.go new file mode 100644 index 00000000000..a5daece41f9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_host_id_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceHostIDGetReader is a Reader for the StorageServiceHostIDGet structure. +type StorageServiceHostIDGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceHostIDGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceHostIDGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceHostIDGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceHostIDGetOK creates a StorageServiceHostIDGetOK with default headers values +func NewStorageServiceHostIDGetOK() *StorageServiceHostIDGetOK { + return &StorageServiceHostIDGetOK{} +} + +/* +StorageServiceHostIDGetOK handles this case with default header values. + +Success +*/ +type StorageServiceHostIDGetOK struct { + Payload []*models.Mapper +} + +func (o *StorageServiceHostIDGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *StorageServiceHostIDGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceHostIDGetDefault creates a StorageServiceHostIDGetDefault with default headers values +func NewStorageServiceHostIDGetDefault(code int) *StorageServiceHostIDGetDefault { + return &StorageServiceHostIDGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceHostIDGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceHostIDGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service host Id get default response +func (o *StorageServiceHostIDGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceHostIDGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceHostIDGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceHostIDGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hostid_local_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hostid_local_get_parameters.go new file mode 100644 index 00000000000..e607fe51eb3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hostid_local_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceHostidLocalGetParams creates a new StorageServiceHostidLocalGetParams object +// with the default values initialized. 
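+//
+// Editorial usage sketch (not generated output; method name assumed per
+// go-swagger convention):
+//
+//	res, err := client.Operations.StorageServiceHostidLocalGet(
+//		operations.NewStorageServiceHostidLocalGetParams().WithTimeout(5 * time.Second))
+//	// On success, res.GetPayload() holds the local host ID as a string.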
+func NewStorageServiceHostidLocalGetParams() *StorageServiceHostidLocalGetParams { + + return &StorageServiceHostidLocalGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceHostidLocalGetParamsWithTimeout creates a new StorageServiceHostidLocalGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceHostidLocalGetParamsWithTimeout(timeout time.Duration) *StorageServiceHostidLocalGetParams { + + return &StorageServiceHostidLocalGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceHostidLocalGetParamsWithContext creates a new StorageServiceHostidLocalGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceHostidLocalGetParamsWithContext(ctx context.Context) *StorageServiceHostidLocalGetParams { + + return &StorageServiceHostidLocalGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceHostidLocalGetParamsWithHTTPClient creates a new StorageServiceHostidLocalGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceHostidLocalGetParamsWithHTTPClient(client *http.Client) *StorageServiceHostidLocalGetParams { + + return &StorageServiceHostidLocalGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceHostidLocalGetParams contains all the parameters to send to the API endpoint +for the storage service hostid local get operation typically these are written to a http.Request +*/ +type StorageServiceHostidLocalGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service hostid local get params +func (o *StorageServiceHostidLocalGetParams) WithTimeout(timeout time.Duration) *StorageServiceHostidLocalGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service hostid local get params +func (o *StorageServiceHostidLocalGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service hostid local get params +func (o *StorageServiceHostidLocalGetParams) WithContext(ctx context.Context) *StorageServiceHostidLocalGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service hostid local get params +func (o *StorageServiceHostidLocalGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service hostid local get params +func (o *StorageServiceHostidLocalGetParams) WithHTTPClient(client *http.Client) *StorageServiceHostidLocalGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service hostid local get params +func (o *StorageServiceHostidLocalGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceHostidLocalGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hostid_local_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hostid_local_get_responses.go new file mode 100644 index 00000000000..81ed25e2b93 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_hostid_local_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceHostidLocalGetReader is a Reader for the StorageServiceHostidLocalGet structure. +type StorageServiceHostidLocalGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceHostidLocalGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceHostidLocalGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceHostidLocalGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceHostidLocalGetOK creates a StorageServiceHostidLocalGetOK with default headers values +func NewStorageServiceHostidLocalGetOK() *StorageServiceHostidLocalGetOK { + return &StorageServiceHostidLocalGetOK{} +} + +/* +StorageServiceHostidLocalGetOK handles this case with default header values. + +Success +*/ +type StorageServiceHostidLocalGetOK struct { + Payload string +} + +func (o *StorageServiceHostidLocalGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceHostidLocalGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceHostidLocalGetDefault creates a StorageServiceHostidLocalGetDefault with default headers values +func NewStorageServiceHostidLocalGetDefault(code int) *StorageServiceHostidLocalGetDefault { + return &StorageServiceHostidLocalGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceHostidLocalGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceHostidLocalGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service hostid local get default response +func (o *StorageServiceHostidLocalGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceHostidLocalGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceHostidLocalGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceHostidLocalGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_get_parameters.go new file mode 100644 index 00000000000..6fd1f51873c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceIncrementalBackupsGetParams creates a new StorageServiceIncrementalBackupsGetParams object +// with the default values initialized. 
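+//
+// Editorial usage sketch (not generated output; method name assumed per
+// go-swagger convention):
+//
+//	res, err := client.Operations.StorageServiceIncrementalBackupsGet(
+//		operations.NewStorageServiceIncrementalBackupsGetParams().WithContext(ctx))
+//	// On success, res.GetPayload() reports whether incremental backups are enabled.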
+func NewStorageServiceIncrementalBackupsGetParams() *StorageServiceIncrementalBackupsGetParams { + + return &StorageServiceIncrementalBackupsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceIncrementalBackupsGetParamsWithTimeout creates a new StorageServiceIncrementalBackupsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceIncrementalBackupsGetParamsWithTimeout(timeout time.Duration) *StorageServiceIncrementalBackupsGetParams { + + return &StorageServiceIncrementalBackupsGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceIncrementalBackupsGetParamsWithContext creates a new StorageServiceIncrementalBackupsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceIncrementalBackupsGetParamsWithContext(ctx context.Context) *StorageServiceIncrementalBackupsGetParams { + + return &StorageServiceIncrementalBackupsGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceIncrementalBackupsGetParamsWithHTTPClient creates a new StorageServiceIncrementalBackupsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceIncrementalBackupsGetParamsWithHTTPClient(client *http.Client) *StorageServiceIncrementalBackupsGetParams { + + return &StorageServiceIncrementalBackupsGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceIncrementalBackupsGetParams contains all the parameters to send to the API endpoint +for the storage service incremental backups get operation typically these are written to a http.Request +*/ +type StorageServiceIncrementalBackupsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service incremental backups get params +func (o *StorageServiceIncrementalBackupsGetParams) WithTimeout(timeout time.Duration) *StorageServiceIncrementalBackupsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service incremental backups get params +func (o *StorageServiceIncrementalBackupsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service incremental backups get params +func (o *StorageServiceIncrementalBackupsGetParams) WithContext(ctx context.Context) *StorageServiceIncrementalBackupsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service incremental backups get params +func (o *StorageServiceIncrementalBackupsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service incremental backups get params +func (o *StorageServiceIncrementalBackupsGetParams) WithHTTPClient(client *http.Client) *StorageServiceIncrementalBackupsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service incremental backups get params +func (o *StorageServiceIncrementalBackupsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceIncrementalBackupsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_get_responses.go new file mode 100644 index 00000000000..e93d95e78c8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceIncrementalBackupsGetReader is a Reader for the StorageServiceIncrementalBackupsGet structure. +type StorageServiceIncrementalBackupsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceIncrementalBackupsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceIncrementalBackupsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceIncrementalBackupsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceIncrementalBackupsGetOK creates a StorageServiceIncrementalBackupsGetOK with default headers values +func NewStorageServiceIncrementalBackupsGetOK() *StorageServiceIncrementalBackupsGetOK { + return &StorageServiceIncrementalBackupsGetOK{} +} + +/* +StorageServiceIncrementalBackupsGetOK handles this case with default header values. + +Success +*/ +type StorageServiceIncrementalBackupsGetOK struct { + Payload bool +} + +func (o *StorageServiceIncrementalBackupsGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageServiceIncrementalBackupsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceIncrementalBackupsGetDefault creates a StorageServiceIncrementalBackupsGetDefault with default headers values +func NewStorageServiceIncrementalBackupsGetDefault(code int) *StorageServiceIncrementalBackupsGetDefault { + return &StorageServiceIncrementalBackupsGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceIncrementalBackupsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceIncrementalBackupsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service incremental backups get default response +func (o *StorageServiceIncrementalBackupsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceIncrementalBackupsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceIncrementalBackupsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceIncrementalBackupsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_post_parameters.go new file mode 100644 index 00000000000..cba4087b108 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceIncrementalBackupsPostParams creates a new StorageServiceIncrementalBackupsPostParams object +// with the default values initialized. 
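+//
+// Editorial usage sketch, not generated output: it assumes the conventional
+// go-swagger method StorageServiceIncrementalBackupsPost on the operations
+// client; the Value query param is documented below:
+//
+//	params := operations.NewStorageServiceIncrementalBackupsPostParams().
+//		WithContext(ctx).
+//		WithValue(true) // enable incremental backups
+//	_, err := client.Operations.StorageServiceIncrementalBackupsPost(params)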
+func NewStorageServiceIncrementalBackupsPostParams() *StorageServiceIncrementalBackupsPostParams { + var () + return &StorageServiceIncrementalBackupsPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceIncrementalBackupsPostParamsWithTimeout creates a new StorageServiceIncrementalBackupsPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceIncrementalBackupsPostParamsWithTimeout(timeout time.Duration) *StorageServiceIncrementalBackupsPostParams { + var () + return &StorageServiceIncrementalBackupsPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceIncrementalBackupsPostParamsWithContext creates a new StorageServiceIncrementalBackupsPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceIncrementalBackupsPostParamsWithContext(ctx context.Context) *StorageServiceIncrementalBackupsPostParams { + var () + return &StorageServiceIncrementalBackupsPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceIncrementalBackupsPostParamsWithHTTPClient creates a new StorageServiceIncrementalBackupsPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceIncrementalBackupsPostParamsWithHTTPClient(client *http.Client) *StorageServiceIncrementalBackupsPostParams { + var () + return &StorageServiceIncrementalBackupsPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceIncrementalBackupsPostParams contains all the parameters to send to the API endpoint +for the storage service incremental backups post operation typically these are written to a http.Request +*/ +type StorageServiceIncrementalBackupsPostParams struct { + + /*Value + Set to true for incremental backup enabled + + */ + Value bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) WithTimeout(timeout time.Duration) *StorageServiceIncrementalBackupsPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) WithContext(ctx context.Context) *StorageServiceIncrementalBackupsPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) WithHTTPClient(client *http.Client) *StorageServiceIncrementalBackupsPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithValue adds the value to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) WithValue(value bool) 
*StorageServiceIncrementalBackupsPostParams { + o.SetValue(value) + return o +} + +// SetValue adds the value to the storage service incremental backups post params +func (o *StorageServiceIncrementalBackupsPostParams) SetValue(value bool) { + o.Value = value +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceIncrementalBackupsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param value + qrValue := o.Value + qValue := swag.FormatBool(qrValue) + if qValue != "" { + if err := r.SetQueryParam("value", qValue); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_post_responses.go new file mode 100644 index 00000000000..5524d7efd51 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_incremental_backups_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceIncrementalBackupsPostReader is a Reader for the StorageServiceIncrementalBackupsPost structure. +type StorageServiceIncrementalBackupsPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceIncrementalBackupsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceIncrementalBackupsPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceIncrementalBackupsPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceIncrementalBackupsPostOK creates a StorageServiceIncrementalBackupsPostOK with default headers values +func NewStorageServiceIncrementalBackupsPostOK() *StorageServiceIncrementalBackupsPostOK { + return &StorageServiceIncrementalBackupsPostOK{} +} + +/* +StorageServiceIncrementalBackupsPostOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceIncrementalBackupsPostOK struct { +} + +func (o *StorageServiceIncrementalBackupsPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceIncrementalBackupsPostDefault creates a StorageServiceIncrementalBackupsPostDefault with default headers values +func NewStorageServiceIncrementalBackupsPostDefault(code int) *StorageServiceIncrementalBackupsPostDefault { + return &StorageServiceIncrementalBackupsPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceIncrementalBackupsPostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceIncrementalBackupsPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service incremental backups post default response +func (o *StorageServiceIncrementalBackupsPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceIncrementalBackupsPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceIncrementalBackupsPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceIncrementalBackupsPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_initialized_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_initialized_get_parameters.go new file mode 100644 index 00000000000..3c26148ea8e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_initialized_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceIsInitializedGetParams creates a new StorageServiceIsInitializedGetParams object +// with the default values initialized. 
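+//
+// Editorial usage sketch (not generated output; method name assumed per
+// go-swagger convention):
+//
+//	res, err := client.Operations.StorageServiceIsInitializedGet(
+//		operations.NewStorageServiceIsInitializedGetParams().WithTimeout(10 * time.Second))
+//	// On success, res.GetPayload() is true once the storage service is initialized.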
+func NewStorageServiceIsInitializedGetParams() *StorageServiceIsInitializedGetParams { + + return &StorageServiceIsInitializedGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceIsInitializedGetParamsWithTimeout creates a new StorageServiceIsInitializedGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceIsInitializedGetParamsWithTimeout(timeout time.Duration) *StorageServiceIsInitializedGetParams { + + return &StorageServiceIsInitializedGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceIsInitializedGetParamsWithContext creates a new StorageServiceIsInitializedGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceIsInitializedGetParamsWithContext(ctx context.Context) *StorageServiceIsInitializedGetParams { + + return &StorageServiceIsInitializedGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceIsInitializedGetParamsWithHTTPClient creates a new StorageServiceIsInitializedGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceIsInitializedGetParamsWithHTTPClient(client *http.Client) *StorageServiceIsInitializedGetParams { + + return &StorageServiceIsInitializedGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceIsInitializedGetParams contains all the parameters to send to the API endpoint +for the storage service is initialized get operation typically these are written to a http.Request +*/ +type StorageServiceIsInitializedGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service is initialized get params +func (o *StorageServiceIsInitializedGetParams) WithTimeout(timeout time.Duration) *StorageServiceIsInitializedGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service is initialized get params +func (o *StorageServiceIsInitializedGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service is initialized get params +func (o *StorageServiceIsInitializedGetParams) WithContext(ctx context.Context) *StorageServiceIsInitializedGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service is initialized get params +func (o *StorageServiceIsInitializedGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service is initialized get params +func (o *StorageServiceIsInitializedGetParams) WithHTTPClient(client *http.Client) *StorageServiceIsInitializedGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service is initialized get params +func (o *StorageServiceIsInitializedGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceIsInitializedGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_initialized_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_initialized_get_responses.go new file mode 100644 index 00000000000..8fa4c15cc50 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_initialized_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceIsInitializedGetReader is a Reader for the StorageServiceIsInitializedGet structure. +type StorageServiceIsInitializedGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceIsInitializedGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceIsInitializedGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceIsInitializedGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceIsInitializedGetOK creates a StorageServiceIsInitializedGetOK with default headers values +func NewStorageServiceIsInitializedGetOK() *StorageServiceIsInitializedGetOK { + return &StorageServiceIsInitializedGetOK{} +} + +/* +StorageServiceIsInitializedGetOK handles this case with default header values. + +Success +*/ +type StorageServiceIsInitializedGetOK struct { + Payload bool +} + +func (o *StorageServiceIsInitializedGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageServiceIsInitializedGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceIsInitializedGetDefault creates a StorageServiceIsInitializedGetDefault with default headers values +func NewStorageServiceIsInitializedGetDefault(code int) *StorageServiceIsInitializedGetDefault { + return &StorageServiceIsInitializedGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceIsInitializedGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceIsInitializedGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service is initialized get default response +func (o *StorageServiceIsInitializedGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceIsInitializedGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceIsInitializedGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceIsInitializedGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_starting_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_starting_get_parameters.go new file mode 100644 index 00000000000..8f19e3456e9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_starting_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceIsStartingGetParams creates a new StorageServiceIsStartingGetParams object +// with the default values initialized. 
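+//
+// Editorial usage sketch (not generated output; method name assumed per
+// go-swagger convention):
+//
+//	res, err := client.Operations.StorageServiceIsStartingGet(
+//		operations.NewStorageServiceIsStartingGetParams().WithContext(ctx))
+//	// On success, res.GetPayload() is true while the node is still starting up.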
+func NewStorageServiceIsStartingGetParams() *StorageServiceIsStartingGetParams { + + return &StorageServiceIsStartingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceIsStartingGetParamsWithTimeout creates a new StorageServiceIsStartingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceIsStartingGetParamsWithTimeout(timeout time.Duration) *StorageServiceIsStartingGetParams { + + return &StorageServiceIsStartingGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceIsStartingGetParamsWithContext creates a new StorageServiceIsStartingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceIsStartingGetParamsWithContext(ctx context.Context) *StorageServiceIsStartingGetParams { + + return &StorageServiceIsStartingGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceIsStartingGetParamsWithHTTPClient creates a new StorageServiceIsStartingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceIsStartingGetParamsWithHTTPClient(client *http.Client) *StorageServiceIsStartingGetParams { + + return &StorageServiceIsStartingGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceIsStartingGetParams contains all the parameters to send to the API endpoint +for the storage service is starting get operation typically these are written to a http.Request +*/ +type StorageServiceIsStartingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service is starting get params +func (o *StorageServiceIsStartingGetParams) WithTimeout(timeout time.Duration) *StorageServiceIsStartingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service is starting get params +func (o *StorageServiceIsStartingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service is starting get params +func (o *StorageServiceIsStartingGetParams) WithContext(ctx context.Context) *StorageServiceIsStartingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service is starting get params +func (o *StorageServiceIsStartingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service is starting get params +func (o *StorageServiceIsStartingGetParams) WithHTTPClient(client *http.Client) *StorageServiceIsStartingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service is starting get params +func (o *StorageServiceIsStartingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceIsStartingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_starting_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_starting_get_responses.go new file mode 100644 index 00000000000..cceabf028f0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_is_starting_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceIsStartingGetReader is a Reader for the StorageServiceIsStartingGet structure. +type StorageServiceIsStartingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceIsStartingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceIsStartingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceIsStartingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceIsStartingGetOK creates a StorageServiceIsStartingGetOK with default headers values +func NewStorageServiceIsStartingGetOK() *StorageServiceIsStartingGetOK { + return &StorageServiceIsStartingGetOK{} +} + +/* +StorageServiceIsStartingGetOK handles this case with default header values. + +Success +*/ +type StorageServiceIsStartingGetOK struct { + Payload bool +} + +func (o *StorageServiceIsStartingGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageServiceIsStartingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceIsStartingGetDefault creates a StorageServiceIsStartingGetDefault with default headers values +func NewStorageServiceIsStartingGetDefault(code int) *StorageServiceIsStartingGetDefault { + return &StorageServiceIsStartingGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceIsStartingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceIsStartingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service is starting get default response +func (o *StorageServiceIsStartingGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceIsStartingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceIsStartingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceIsStartingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_get_parameters.go new file mode 100644 index 00000000000..857c38eb388 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceJoinRingGetParams creates a new StorageServiceJoinRingGetParams object +// with the default values initialized. 
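+//
+// Editorial usage sketch (not generated output; method name assumed per
+// go-swagger convention):
+//
+//	res, err := client.Operations.StorageServiceJoinRingGet(
+//		operations.NewStorageServiceJoinRingGetParams().WithContext(ctx))
+//	// On success, res.GetPayload() reports whether the node has joined the token ring.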
+func NewStorageServiceJoinRingGetParams() *StorageServiceJoinRingGetParams { + + return &StorageServiceJoinRingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceJoinRingGetParamsWithTimeout creates a new StorageServiceJoinRingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceJoinRingGetParamsWithTimeout(timeout time.Duration) *StorageServiceJoinRingGetParams { + + return &StorageServiceJoinRingGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceJoinRingGetParamsWithContext creates a new StorageServiceJoinRingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceJoinRingGetParamsWithContext(ctx context.Context) *StorageServiceJoinRingGetParams { + + return &StorageServiceJoinRingGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceJoinRingGetParamsWithHTTPClient creates a new StorageServiceJoinRingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceJoinRingGetParamsWithHTTPClient(client *http.Client) *StorageServiceJoinRingGetParams { + + return &StorageServiceJoinRingGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceJoinRingGetParams contains all the parameters to send to the API endpoint +for the storage service join ring get operation typically these are written to a http.Request +*/ +type StorageServiceJoinRingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service join ring get params +func (o *StorageServiceJoinRingGetParams) WithTimeout(timeout time.Duration) *StorageServiceJoinRingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service join ring get params +func (o *StorageServiceJoinRingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service join ring get params +func (o *StorageServiceJoinRingGetParams) WithContext(ctx context.Context) *StorageServiceJoinRingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service join ring get params +func (o *StorageServiceJoinRingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service join ring get params +func (o *StorageServiceJoinRingGetParams) WithHTTPClient(client *http.Client) *StorageServiceJoinRingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service join ring get params +func (o *StorageServiceJoinRingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceJoinRingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_get_responses.go new file mode 100644 index 00000000000..9dd853238b4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceJoinRingGetReader is a Reader for the StorageServiceJoinRingGet structure. +type StorageServiceJoinRingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceJoinRingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceJoinRingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceJoinRingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceJoinRingGetOK creates a StorageServiceJoinRingGetOK with default headers values +func NewStorageServiceJoinRingGetOK() *StorageServiceJoinRingGetOK { + return &StorageServiceJoinRingGetOK{} +} + +/* +StorageServiceJoinRingGetOK handles this case with default header values. + +Success +*/ +type StorageServiceJoinRingGetOK struct { + Payload bool +} + +func (o *StorageServiceJoinRingGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageServiceJoinRingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceJoinRingGetDefault creates a StorageServiceJoinRingGetDefault with default headers values +func NewStorageServiceJoinRingGetDefault(code int) *StorageServiceJoinRingGetDefault { + return &StorageServiceJoinRingGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceJoinRingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceJoinRingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service join ring get default response +func (o *StorageServiceJoinRingGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceJoinRingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceJoinRingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceJoinRingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_post_parameters.go new file mode 100644 index 00000000000..2b6b20c32a9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceJoinRingPostParams creates a new StorageServiceJoinRingPostParams object +// with the default values initialized. 
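+// How a caller typically consumes the join-ring GET operation defined above. The
+// client facade and its StorageServiceJoinRingGet method are assumptions here
+// (that file is not part of this diff), following the usual go-swagger shape of
+// returning the OK type on 2xx and the Default type as an error otherwise:
+//
+//	func isJoinedRing(ctx context.Context, c *Client) (bool, error) {
+//		resp, err := c.StorageServiceJoinRingGet(
+//			NewStorageServiceJoinRingGetParamsWithContext(ctx))
+//		if err != nil {
+//			// err is *StorageServiceJoinRingGetDefault on non-2xx; its Error()
+//			// carries the HTTP code and the models.ErrorModel message.
+//			return false, err
+//		}
+//		return resp.GetPayload(), nil
+//	}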
+func NewStorageServiceJoinRingPostParams() *StorageServiceJoinRingPostParams { + + return &StorageServiceJoinRingPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceJoinRingPostParamsWithTimeout creates a new StorageServiceJoinRingPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceJoinRingPostParamsWithTimeout(timeout time.Duration) *StorageServiceJoinRingPostParams { + + return &StorageServiceJoinRingPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceJoinRingPostParamsWithContext creates a new StorageServiceJoinRingPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceJoinRingPostParamsWithContext(ctx context.Context) *StorageServiceJoinRingPostParams { + + return &StorageServiceJoinRingPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceJoinRingPostParamsWithHTTPClient creates a new StorageServiceJoinRingPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceJoinRingPostParamsWithHTTPClient(client *http.Client) *StorageServiceJoinRingPostParams { + + return &StorageServiceJoinRingPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceJoinRingPostParams contains all the parameters to send to the API endpoint +for the storage service join ring post operation typically these are written to a http.Request +*/ +type StorageServiceJoinRingPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service join ring post params +func (o *StorageServiceJoinRingPostParams) WithTimeout(timeout time.Duration) *StorageServiceJoinRingPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service join ring post params +func (o *StorageServiceJoinRingPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service join ring post params +func (o *StorageServiceJoinRingPostParams) WithContext(ctx context.Context) *StorageServiceJoinRingPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service join ring post params +func (o *StorageServiceJoinRingPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service join ring post params +func (o *StorageServiceJoinRingPostParams) WithHTTPClient(client *http.Client) *StorageServiceJoinRingPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service join ring post params +func (o *StorageServiceJoinRingPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceJoinRingPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_post_responses.go new file mode 100644 index 00000000000..82bd31781a4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_join_ring_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceJoinRingPostReader is a Reader for the StorageServiceJoinRingPost structure. +type StorageServiceJoinRingPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceJoinRingPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceJoinRingPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceJoinRingPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceJoinRingPostOK creates a StorageServiceJoinRingPostOK with default headers values +func NewStorageServiceJoinRingPostOK() *StorageServiceJoinRingPostOK { + return &StorageServiceJoinRingPostOK{} +} + +/* +StorageServiceJoinRingPostOK handles this case with default header values. + +Success +*/ +type StorageServiceJoinRingPostOK struct { +} + +func (o *StorageServiceJoinRingPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceJoinRingPostDefault creates a StorageServiceJoinRingPostDefault with default headers values +func NewStorageServiceJoinRingPostDefault(code int) *StorageServiceJoinRingPostDefault { + return &StorageServiceJoinRingPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceJoinRingPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceJoinRingPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service join ring post default response +func (o *StorageServiceJoinRingPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceJoinRingPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceJoinRingPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceJoinRingPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_cleanup_by_keyspace_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_cleanup_by_keyspace_post_parameters.go new file mode 100644 index 00000000000..158b0e1a3c8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_cleanup_by_keyspace_post_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceKeyspaceCleanupByKeyspacePostParams creates a new StorageServiceKeyspaceCleanupByKeyspacePostParams object +// with the default values initialized. 
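+// Driving the join-ring POST defined above is even simpler: its OK result carries
+// no payload, so callers only inspect the error. The facade method name is again
+// an assumed go-swagger convention, not something introduced by this diff:
+//
+//	if _, err := c.StorageServiceJoinRingPost(
+//		NewStorageServiceJoinRingPostParamsWithTimeout(30 * time.Second)); err != nil {
+//		return fmt.Errorf("join ring: %w", err)
+//	}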
+func NewStorageServiceKeyspaceCleanupByKeyspacePostParams() *StorageServiceKeyspaceCleanupByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCleanupByKeyspacePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceKeyspaceCleanupByKeyspacePostParamsWithTimeout creates a new StorageServiceKeyspaceCleanupByKeyspacePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceKeyspaceCleanupByKeyspacePostParamsWithTimeout(timeout time.Duration) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCleanupByKeyspacePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceKeyspaceCleanupByKeyspacePostParamsWithContext creates a new StorageServiceKeyspaceCleanupByKeyspacePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceKeyspaceCleanupByKeyspacePostParamsWithContext(ctx context.Context) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCleanupByKeyspacePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceKeyspaceCleanupByKeyspacePostParamsWithHTTPClient creates a new StorageServiceKeyspaceCleanupByKeyspacePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceKeyspaceCleanupByKeyspacePostParamsWithHTTPClient(client *http.Client) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCleanupByKeyspacePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceKeyspaceCleanupByKeyspacePostParams contains all the parameters to send to the API endpoint +for the storage service keyspace cleanup by keyspace post operation typically these are written to a http.Request +*/ +type StorageServiceKeyspaceCleanupByKeyspacePostParams struct { + + /*Cf + Comma seperated column family names + + */ + Cf *string + /*Keyspace + The keyspace to query about + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) WithTimeout(timeout time.Duration) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) WithContext(ctx context.Context) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) WithHTTPClient(client *http.Client) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service keyspace cleanup by keyspace 
post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) WithCf(cf *string) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithKeyspace adds the keyspace to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) WithKeyspace(keyspace string) *StorageServiceKeyspaceCleanupByKeyspacePostParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service keyspace cleanup by keyspace post params +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceKeyspaceCleanupByKeyspacePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_cleanup_by_keyspace_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_cleanup_by_keyspace_post_responses.go new file mode 100644 index 00000000000..fedf0c771c2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_cleanup_by_keyspace_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceKeyspaceCleanupByKeyspacePostReader is a Reader for the StorageServiceKeyspaceCleanupByKeyspacePost structure. +type StorageServiceKeyspaceCleanupByKeyspacePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *StorageServiceKeyspaceCleanupByKeyspacePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceKeyspaceCleanupByKeyspacePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceKeyspaceCleanupByKeyspacePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceKeyspaceCleanupByKeyspacePostOK creates a StorageServiceKeyspaceCleanupByKeyspacePostOK with default headers values +func NewStorageServiceKeyspaceCleanupByKeyspacePostOK() *StorageServiceKeyspaceCleanupByKeyspacePostOK { + return &StorageServiceKeyspaceCleanupByKeyspacePostOK{} +} + +/* +StorageServiceKeyspaceCleanupByKeyspacePostOK handles this case with default header values. + +Success +*/ +type StorageServiceKeyspaceCleanupByKeyspacePostOK struct { + Payload int32 +} + +func (o *StorageServiceKeyspaceCleanupByKeyspacePostOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceKeyspaceCleanupByKeyspacePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceKeyspaceCleanupByKeyspacePostDefault creates a StorageServiceKeyspaceCleanupByKeyspacePostDefault with default headers values +func NewStorageServiceKeyspaceCleanupByKeyspacePostDefault(code int) *StorageServiceKeyspaceCleanupByKeyspacePostDefault { + return &StorageServiceKeyspaceCleanupByKeyspacePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceKeyspaceCleanupByKeyspacePostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceKeyspaceCleanupByKeyspacePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service keyspace cleanup by keyspace post default response +func (o *StorageServiceKeyspaceCleanupByKeyspacePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceKeyspaceCleanupByKeyspacePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceKeyspaceCleanupByKeyspacePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceKeyspaceCleanupByKeyspacePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_compaction_by_keyspace_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_compaction_by_keyspace_post_parameters.go new file mode 100644 index 00000000000..7ebb83df5d4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_compaction_by_keyspace_post_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceKeyspaceCompactionByKeyspacePostParams creates a new StorageServiceKeyspaceCompactionByKeyspacePostParams object +// with the default values initialized. 
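+// Putting the cleanup params and responses together: Keyspace is a required path
+// parameter, Cf an optional comma-separated table list sent only when non-empty,
+// and the OK payload is an int32 status. A hedged sketch (the facade method and
+// all names are assumptions; swag.String from github.com/go-openapi/swag builds
+// the optional *string):
+//
+//	params := NewStorageServiceKeyspaceCleanupByKeyspacePostParams().
+//		WithKeyspace("my_keyspace").           // hypothetical keyspace
+//		WithCf(swag.String("table_a,table_b")) // hypothetical tables
+//	status, err := c.StorageServiceKeyspaceCleanupByKeyspacePost(params)
+//	if err != nil {
+//		return err
+//	}
+//	_ = status.GetPayload() // int32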
+func NewStorageServiceKeyspaceCompactionByKeyspacePostParams() *StorageServiceKeyspaceCompactionByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCompactionByKeyspacePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceKeyspaceCompactionByKeyspacePostParamsWithTimeout creates a new StorageServiceKeyspaceCompactionByKeyspacePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceKeyspaceCompactionByKeyspacePostParamsWithTimeout(timeout time.Duration) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCompactionByKeyspacePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceKeyspaceCompactionByKeyspacePostParamsWithContext creates a new StorageServiceKeyspaceCompactionByKeyspacePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceKeyspaceCompactionByKeyspacePostParamsWithContext(ctx context.Context) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCompactionByKeyspacePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceKeyspaceCompactionByKeyspacePostParamsWithHTTPClient creates a new StorageServiceKeyspaceCompactionByKeyspacePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceKeyspaceCompactionByKeyspacePostParamsWithHTTPClient(client *http.Client) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + var () + return &StorageServiceKeyspaceCompactionByKeyspacePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceKeyspaceCompactionByKeyspacePostParams contains all the parameters to send to the API endpoint +for the storage service keyspace compaction by keyspace post operation typically these are written to a http.Request +*/ +type StorageServiceKeyspaceCompactionByKeyspacePostParams struct { + + /*Cf + Comma seperated column family names + + */ + Cf *string + /*Keyspace + The keyspace to query about + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) WithTimeout(timeout time.Duration) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) WithContext(ctx context.Context) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) WithHTTPClient(client *http.Client) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + o.SetHTTPClient(client) + 
return o +} + +// SetHTTPClient adds the HTTPClient to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) WithCf(cf *string) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithKeyspace adds the keyspace to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) WithKeyspace(keyspace string) *StorageServiceKeyspaceCompactionByKeyspacePostParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service keyspace compaction by keyspace post params +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceKeyspaceCompactionByKeyspacePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_compaction_by_keyspace_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_compaction_by_keyspace_post_responses.go new file mode 100644 index 00000000000..d9f3d092ad8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_compaction_by_keyspace_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceKeyspaceCompactionByKeyspacePostReader is a Reader for the StorageServiceKeyspaceCompactionByKeyspacePost structure. +type StorageServiceKeyspaceCompactionByKeyspacePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *StorageServiceKeyspaceCompactionByKeyspacePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceKeyspaceCompactionByKeyspacePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceKeyspaceCompactionByKeyspacePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceKeyspaceCompactionByKeyspacePostOK creates a StorageServiceKeyspaceCompactionByKeyspacePostOK with default headers values +func NewStorageServiceKeyspaceCompactionByKeyspacePostOK() *StorageServiceKeyspaceCompactionByKeyspacePostOK { + return &StorageServiceKeyspaceCompactionByKeyspacePostOK{} +} + +/* +StorageServiceKeyspaceCompactionByKeyspacePostOK handles this case with default header values. + +Success +*/ +type StorageServiceKeyspaceCompactionByKeyspacePostOK struct { +} + +func (o *StorageServiceKeyspaceCompactionByKeyspacePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceKeyspaceCompactionByKeyspacePostDefault creates a StorageServiceKeyspaceCompactionByKeyspacePostDefault with default headers values +func NewStorageServiceKeyspaceCompactionByKeyspacePostDefault(code int) *StorageServiceKeyspaceCompactionByKeyspacePostDefault { + return &StorageServiceKeyspaceCompactionByKeyspacePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceKeyspaceCompactionByKeyspacePostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceKeyspaceCompactionByKeyspacePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service keyspace compaction by keyspace post default response +func (o *StorageServiceKeyspaceCompactionByKeyspacePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceKeyspaceCompactionByKeyspacePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceKeyspaceCompactionByKeyspacePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceKeyspaceCompactionByKeyspacePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_flush_by_keyspace_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_flush_by_keyspace_post_parameters.go new file mode 100644 index 00000000000..d29e140298c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_flush_by_keyspace_post_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
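+// The compaction operation above mirrors cleanup but its OK response is empty,
+// so the returned value only signals success (facade method assumed as before):
+//
+//	if _, err := c.StorageServiceKeyspaceCompactionByKeyspacePost(
+//		NewStorageServiceKeyspaceCompactionByKeyspacePostParams().
+//			WithKeyspace("my_keyspace")); err != nil { // hypothetical keyspace
+//		return err
+//	}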
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceKeyspaceFlushByKeyspacePostParams creates a new StorageServiceKeyspaceFlushByKeyspacePostParams object +// with the default values initialized. +func NewStorageServiceKeyspaceFlushByKeyspacePostParams() *StorageServiceKeyspaceFlushByKeyspacePostParams { + var () + return &StorageServiceKeyspaceFlushByKeyspacePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceKeyspaceFlushByKeyspacePostParamsWithTimeout creates a new StorageServiceKeyspaceFlushByKeyspacePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceKeyspaceFlushByKeyspacePostParamsWithTimeout(timeout time.Duration) *StorageServiceKeyspaceFlushByKeyspacePostParams { + var () + return &StorageServiceKeyspaceFlushByKeyspacePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceKeyspaceFlushByKeyspacePostParamsWithContext creates a new StorageServiceKeyspaceFlushByKeyspacePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceKeyspaceFlushByKeyspacePostParamsWithContext(ctx context.Context) *StorageServiceKeyspaceFlushByKeyspacePostParams { + var () + return &StorageServiceKeyspaceFlushByKeyspacePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceKeyspaceFlushByKeyspacePostParamsWithHTTPClient creates a new StorageServiceKeyspaceFlushByKeyspacePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceKeyspaceFlushByKeyspacePostParamsWithHTTPClient(client *http.Client) *StorageServiceKeyspaceFlushByKeyspacePostParams { + var () + return &StorageServiceKeyspaceFlushByKeyspacePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceKeyspaceFlushByKeyspacePostParams contains all the parameters to send to the API endpoint +for the storage service keyspace flush by keyspace post operation typically these are written to a http.Request +*/ +type StorageServiceKeyspaceFlushByKeyspacePostParams struct { + + /*Cf + Comma seperated column family names + + */ + Cf *string + /*Keyspace + The keyspace to flush + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) WithTimeout(timeout time.Duration) *StorageServiceKeyspaceFlushByKeyspacePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) WithContext(ctx context.Context) *StorageServiceKeyspaceFlushByKeyspacePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// 
WithHTTPClient adds the HTTPClient to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) WithHTTPClient(client *http.Client) *StorageServiceKeyspaceFlushByKeyspacePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) WithCf(cf *string) *StorageServiceKeyspaceFlushByKeyspacePostParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithKeyspace adds the keyspace to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) WithKeyspace(keyspace string) *StorageServiceKeyspaceFlushByKeyspacePostParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service keyspace flush by keyspace post params +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceKeyspaceFlushByKeyspacePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_flush_by_keyspace_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_flush_by_keyspace_post_responses.go new file mode 100644 index 00000000000..61843ee1f5c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_flush_by_keyspace_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceKeyspaceFlushByKeyspacePostReader is a Reader for the StorageServiceKeyspaceFlushByKeyspacePost structure. +type StorageServiceKeyspaceFlushByKeyspacePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *StorageServiceKeyspaceFlushByKeyspacePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceKeyspaceFlushByKeyspacePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceKeyspaceFlushByKeyspacePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceKeyspaceFlushByKeyspacePostOK creates a StorageServiceKeyspaceFlushByKeyspacePostOK with default headers values +func NewStorageServiceKeyspaceFlushByKeyspacePostOK() *StorageServiceKeyspaceFlushByKeyspacePostOK { + return &StorageServiceKeyspaceFlushByKeyspacePostOK{} +} + +/* +StorageServiceKeyspaceFlushByKeyspacePostOK handles this case with default header values. + +Success +*/ +type StorageServiceKeyspaceFlushByKeyspacePostOK struct { +} + +func (o *StorageServiceKeyspaceFlushByKeyspacePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceKeyspaceFlushByKeyspacePostDefault creates a StorageServiceKeyspaceFlushByKeyspacePostDefault with default headers values +func NewStorageServiceKeyspaceFlushByKeyspacePostDefault(code int) *StorageServiceKeyspaceFlushByKeyspacePostDefault { + return &StorageServiceKeyspaceFlushByKeyspacePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceKeyspaceFlushByKeyspacePostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceKeyspaceFlushByKeyspacePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service keyspace flush by keyspace post default response +func (o *StorageServiceKeyspaceFlushByKeyspacePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceKeyspaceFlushByKeyspacePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceKeyspaceFlushByKeyspacePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceKeyspaceFlushByKeyspacePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_scrub_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_scrub_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..184094d80d8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_scrub_by_keyspace_get_parameters.go @@ -0,0 +1,233 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
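+// Flush shares the empty-OK shape; on failure the error is the
+// *StorageServiceKeyspaceFlushByKeyspacePostDefault defined above, so the HTTP
+// status can be recovered with a stdlib errors.As assertion (sketch only; the
+// facade call remains an assumption):
+//
+//	_, err := c.StorageServiceKeyspaceFlushByKeyspacePost(
+//		NewStorageServiceKeyspaceFlushByKeyspacePostParams().WithKeyspace("ks"))
+//	var d *StorageServiceKeyspaceFlushByKeyspacePostDefault
+//	if errors.As(err, &d) { // stdlib errors, not go-openapi/errors
+//		log.Printf("flush failed with HTTP %d", d.Code())
+//	}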
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceKeyspaceScrubByKeyspaceGetParams creates a new StorageServiceKeyspaceScrubByKeyspaceGetParams object +// with the default values initialized. +func NewStorageServiceKeyspaceScrubByKeyspaceGetParams() *StorageServiceKeyspaceScrubByKeyspaceGetParams { + var () + return &StorageServiceKeyspaceScrubByKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceKeyspaceScrubByKeyspaceGetParamsWithTimeout creates a new StorageServiceKeyspaceScrubByKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceKeyspaceScrubByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + var () + return &StorageServiceKeyspaceScrubByKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceKeyspaceScrubByKeyspaceGetParamsWithContext creates a new StorageServiceKeyspaceScrubByKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceKeyspaceScrubByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + var () + return &StorageServiceKeyspaceScrubByKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceKeyspaceScrubByKeyspaceGetParamsWithHTTPClient creates a new StorageServiceKeyspaceScrubByKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceKeyspaceScrubByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + var () + return &StorageServiceKeyspaceScrubByKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceKeyspaceScrubByKeyspaceGetParams contains all the parameters to send to the API endpoint +for the storage service keyspace scrub by keyspace get operation typically these are written to a http.Request +*/ +type StorageServiceKeyspaceScrubByKeyspaceGetParams struct { + + /*Cf + Comma seperated column family names + + */ + Cf *string + /*DisableSnapshot + When set to true, disable snapshot + + */ + DisableSnapshot *bool + /*Keyspace + The keyspace to query about + + */ + Keyspace string + /*SkipCorrupted + When set to true, skip corrupted + + */ + SkipCorrupted *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the 
storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WithCf(cf *string) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithDisableSnapshot adds the disableSnapshot to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WithDisableSnapshot(disableSnapshot *bool) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + o.SetDisableSnapshot(disableSnapshot) + return o +} + +// SetDisableSnapshot adds the disableSnapshot to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) SetDisableSnapshot(disableSnapshot *bool) { + o.DisableSnapshot = disableSnapshot +} + +// WithKeyspace adds the keyspace to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WithKeyspace(keyspace string) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WithSkipCorrupted adds the skipCorrupted to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WithSkipCorrupted(skipCorrupted *bool) *StorageServiceKeyspaceScrubByKeyspaceGetParams { + o.SetSkipCorrupted(skipCorrupted) + return o +} + +// SetSkipCorrupted adds the skipCorrupted to the storage service keyspace scrub by keyspace get params +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) SetSkipCorrupted(skipCorrupted *bool) { + o.SkipCorrupted = skipCorrupted +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceKeyspaceScrubByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + if o.DisableSnapshot != nil { + + // query param disable_snapshot + var qrDisableSnapshot bool + if o.DisableSnapshot != nil { + qrDisableSnapshot = *o.DisableSnapshot + } + qDisableSnapshot := swag.FormatBool(qrDisableSnapshot) + if qDisableSnapshot != "" { + if err := r.SetQueryParam("disable_snapshot", qDisableSnapshot); err != nil { + return err + } + } + 
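+	// The qrX/qX indirection used for cf and disable_snapshot above is
+	// go-swagger's standard optional-parameter pattern: dereference the pointer
+	// only when it is set, format the value (swag.FormatBool here), and skip
+	// empty strings so unset options never reach the query string.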
+ } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if o.SkipCorrupted != nil { + + // query param skip_corrupted + var qrSkipCorrupted bool + if o.SkipCorrupted != nil { + qrSkipCorrupted = *o.SkipCorrupted + } + qSkipCorrupted := swag.FormatBool(qrSkipCorrupted) + if qSkipCorrupted != "" { + if err := r.SetQueryParam("skip_corrupted", qSkipCorrupted); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_scrub_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_scrub_by_keyspace_get_responses.go new file mode 100644 index 00000000000..63f79e5e9ce --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_scrub_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceKeyspaceScrubByKeyspaceGetReader is a Reader for the StorageServiceKeyspaceScrubByKeyspaceGet structure. +type StorageServiceKeyspaceScrubByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceKeyspaceScrubByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceKeyspaceScrubByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceKeyspaceScrubByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceKeyspaceScrubByKeyspaceGetOK creates a StorageServiceKeyspaceScrubByKeyspaceGetOK with default headers values +func NewStorageServiceKeyspaceScrubByKeyspaceGetOK() *StorageServiceKeyspaceScrubByKeyspaceGetOK { + return &StorageServiceKeyspaceScrubByKeyspaceGetOK{} +} + +/* +StorageServiceKeyspaceScrubByKeyspaceGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceKeyspaceScrubByKeyspaceGetOK struct { + Payload int32 +} + +func (o *StorageServiceKeyspaceScrubByKeyspaceGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceKeyspaceScrubByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceKeyspaceScrubByKeyspaceGetDefault creates a StorageServiceKeyspaceScrubByKeyspaceGetDefault with default headers values +func NewStorageServiceKeyspaceScrubByKeyspaceGetDefault(code int) *StorageServiceKeyspaceScrubByKeyspaceGetDefault { + return &StorageServiceKeyspaceScrubByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceKeyspaceScrubByKeyspaceGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceKeyspaceScrubByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service keyspace scrub by keyspace get default response +func (o *StorageServiceKeyspaceScrubByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceKeyspaceScrubByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceKeyspaceScrubByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceKeyspaceScrubByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_upgrade_sstables_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_upgrade_sstables_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..e97ae032088 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_upgrade_sstables_by_keyspace_get_parameters.go @@ -0,0 +1,201 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams creates a new StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams object +// with the default values initialized. 
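+// An end-to-end sketch for the scrub operation above; swag.Bool
+// (github.com/go-openapi/swag) builds the optional *bool values, and the facade
+// method is once more an assumed go-swagger-style call, not part of this diff:
+//
+//	params := NewStorageServiceKeyspaceScrubByKeyspaceGetParams().
+//		WithKeyspace("my_keyspace"). // hypothetical keyspace
+//		WithDisableSnapshot(swag.Bool(true)).
+//		WithSkipCorrupted(swag.Bool(false))
+//	res, err := c.StorageServiceKeyspaceScrubByKeyspaceGet(params)
+//	if err != nil {
+//		return err
+//	}
+//	_ = res.GetPayload() // int32 scrub status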
+func NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams() *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams {
+	var ()
+	return &StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParamsWithTimeout creates a new StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams {
+	var ()
+	return &StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParamsWithContext creates a new StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams {
+	var ()
+	return &StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParamsWithHTTPClient creates a new StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams {
+	var ()
+	return &StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams contains all the parameters to send to the API endpoint
+for the storage service keyspace upgrade sstables by keyspace get operation typically these are written to a http.Request
+*/
+type StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams struct {
+
+	/*Cf
+	  Comma separated column family names
+
+	*/
+	Cf *string
+	/*ExcludeCurrentVersion
+	  When set to true exclude current version
+
+	*/
+	ExcludeCurrentVersion *bool
+	/*Keyspace
+	  The keyspace
+
+	*/
+	Keyspace string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service keyspace upgrade sstables by keyspace get params
+func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service keyspace upgrade sstables by keyspace get params
+func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service keyspace upgrade sstables by keyspace get params
+func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service keyspace upgrade sstables by keyspace get params
+func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service keyspace
upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service keyspace upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service keyspace upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) WithCf(cf *string) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service keyspace upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithExcludeCurrentVersion adds the excludeCurrentVersion to the storage service keyspace upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) WithExcludeCurrentVersion(excludeCurrentVersion *bool) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams { + o.SetExcludeCurrentVersion(excludeCurrentVersion) + return o +} + +// SetExcludeCurrentVersion adds the excludeCurrentVersion to the storage service keyspace upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) SetExcludeCurrentVersion(excludeCurrentVersion *bool) { + o.ExcludeCurrentVersion = excludeCurrentVersion +} + +// WithKeyspace adds the keyspace to the storage service keyspace upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) WithKeyspace(keyspace string) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service keyspace upgrade sstables by keyspace get params +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + if o.ExcludeCurrentVersion != nil { + + // query param exclude_current_version + var qrExcludeCurrentVersion bool + if o.ExcludeCurrentVersion != nil { + qrExcludeCurrentVersion = *o.ExcludeCurrentVersion + } + qExcludeCurrentVersion := swag.FormatBool(qrExcludeCurrentVersion) + if qExcludeCurrentVersion != "" { + if err := r.SetQueryParam("exclude_current_version", qExcludeCurrentVersion); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_upgrade_sstables_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_upgrade_sstables_by_keyspace_get_responses.go new file mode 100644 index 00000000000..4d3a905ec5c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspace_upgrade_sstables_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetReader is a Reader for the StorageServiceKeyspaceUpgradeSstablesByKeyspaceGet structure. +type StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK creates a StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK with default headers values +func NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK() *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK { + return &StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK{} +} + +/* +StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK handles this case with default header values. + +Success +*/ +type StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK struct { + Payload int32 +} + +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault creates a StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault with default headers values +func NewStorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault(code int) *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault { + return &StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service keyspace upgrade sstables by keyspace get default response +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceKeyspaceUpgradeSstablesByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspaces_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspaces_get_parameters.go new file mode 100644 index 00000000000..c5b949204b2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspaces_get_parameters.go @@ -0,0 +1,147 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceKeyspacesGetParams creates a new StorageServiceKeyspacesGetParams object +// with the default values initialized. 
+func NewStorageServiceKeyspacesGetParams() *StorageServiceKeyspacesGetParams { + var () + return &StorageServiceKeyspacesGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceKeyspacesGetParamsWithTimeout creates a new StorageServiceKeyspacesGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceKeyspacesGetParamsWithTimeout(timeout time.Duration) *StorageServiceKeyspacesGetParams { + var () + return &StorageServiceKeyspacesGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceKeyspacesGetParamsWithContext creates a new StorageServiceKeyspacesGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceKeyspacesGetParamsWithContext(ctx context.Context) *StorageServiceKeyspacesGetParams { + var () + return &StorageServiceKeyspacesGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceKeyspacesGetParamsWithHTTPClient creates a new StorageServiceKeyspacesGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceKeyspacesGetParamsWithHTTPClient(client *http.Client) *StorageServiceKeyspacesGetParams { + var () + return &StorageServiceKeyspacesGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceKeyspacesGetParams contains all the parameters to send to the API endpoint +for the storage service keyspaces get operation typically these are written to a http.Request +*/ +type StorageServiceKeyspacesGetParams struct { + + /*Type + Which keyspaces to return + + */ + Type *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) WithTimeout(timeout time.Duration) *StorageServiceKeyspacesGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) WithContext(ctx context.Context) *StorageServiceKeyspacesGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) WithHTTPClient(client *http.Client) *StorageServiceKeyspacesGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithType adds the typeVar to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) WithType(typeVar *string) *StorageServiceKeyspacesGetParams { + o.SetType(typeVar) + return o +} + +// SetType adds the type to the storage service keyspaces get params +func (o *StorageServiceKeyspacesGetParams) SetType(typeVar *string) { + o.Type = typeVar +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceKeyspacesGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Type != nil { + + // query param type + var qrType string + if o.Type != nil { + qrType = *o.Type + } + qType := qrType + if qType != "" { + if err := r.SetQueryParam("type", qType); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspaces_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspaces_get_responses.go new file mode 100644 index 00000000000..5aae381982f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_keyspaces_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceKeyspacesGetReader is a Reader for the StorageServiceKeyspacesGet structure. +type StorageServiceKeyspacesGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceKeyspacesGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceKeyspacesGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceKeyspacesGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceKeyspacesGetOK creates a StorageServiceKeyspacesGetOK with default headers values +func NewStorageServiceKeyspacesGetOK() *StorageServiceKeyspacesGetOK { + return &StorageServiceKeyspacesGetOK{} +} + +/* +StorageServiceKeyspacesGetOK handles this case with default header values. + +Success +*/ +type StorageServiceKeyspacesGetOK struct { + Payload []string +} + +func (o *StorageServiceKeyspacesGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceKeyspacesGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceKeyspacesGetDefault creates a StorageServiceKeyspacesGetDefault with default headers values +func NewStorageServiceKeyspacesGetDefault(code int) *StorageServiceKeyspacesGetDefault { + return &StorageServiceKeyspacesGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceKeyspacesGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceKeyspacesGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service keyspaces get default response +func (o *StorageServiceKeyspacesGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceKeyspacesGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceKeyspacesGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceKeyspacesGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_get_parameters.go new file mode 100644 index 00000000000..5b755932de7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceLoadGetParams creates a new StorageServiceLoadGetParams object +// with the default values initialized. 
+func NewStorageServiceLoadGetParams() *StorageServiceLoadGetParams { + + return &StorageServiceLoadGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceLoadGetParamsWithTimeout creates a new StorageServiceLoadGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceLoadGetParamsWithTimeout(timeout time.Duration) *StorageServiceLoadGetParams { + + return &StorageServiceLoadGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceLoadGetParamsWithContext creates a new StorageServiceLoadGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceLoadGetParamsWithContext(ctx context.Context) *StorageServiceLoadGetParams { + + return &StorageServiceLoadGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceLoadGetParamsWithHTTPClient creates a new StorageServiceLoadGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceLoadGetParamsWithHTTPClient(client *http.Client) *StorageServiceLoadGetParams { + + return &StorageServiceLoadGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceLoadGetParams contains all the parameters to send to the API endpoint +for the storage service load get operation typically these are written to a http.Request +*/ +type StorageServiceLoadGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service load get params +func (o *StorageServiceLoadGetParams) WithTimeout(timeout time.Duration) *StorageServiceLoadGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service load get params +func (o *StorageServiceLoadGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service load get params +func (o *StorageServiceLoadGetParams) WithContext(ctx context.Context) *StorageServiceLoadGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service load get params +func (o *StorageServiceLoadGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service load get params +func (o *StorageServiceLoadGetParams) WithHTTPClient(client *http.Client) *StorageServiceLoadGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service load get params +func (o *StorageServiceLoadGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceLoadGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_get_responses.go new file mode 100644 index 00000000000..62ed6e3d6aa --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceLoadGetReader is a Reader for the StorageServiceLoadGet structure. +type StorageServiceLoadGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceLoadGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceLoadGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceLoadGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceLoadGetOK creates a StorageServiceLoadGetOK with default headers values +func NewStorageServiceLoadGetOK() *StorageServiceLoadGetOK { + return &StorageServiceLoadGetOK{} +} + +/* +StorageServiceLoadGetOK handles this case with default header values. + +Success +*/ +type StorageServiceLoadGetOK struct { + Payload interface{} +} + +func (o *StorageServiceLoadGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageServiceLoadGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceLoadGetDefault creates a StorageServiceLoadGetDefault with default headers values +func NewStorageServiceLoadGetDefault(code int) *StorageServiceLoadGetDefault { + return &StorageServiceLoadGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceLoadGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceLoadGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service load get default response +func (o *StorageServiceLoadGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceLoadGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceLoadGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceLoadGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_map_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_map_get_parameters.go new file mode 100644 index 00000000000..8dfa59af00d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_map_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceLoadMapGetParams creates a new StorageServiceLoadMapGetParams object +// with the default values initialized. 
+func NewStorageServiceLoadMapGetParams() *StorageServiceLoadMapGetParams { + + return &StorageServiceLoadMapGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceLoadMapGetParamsWithTimeout creates a new StorageServiceLoadMapGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceLoadMapGetParamsWithTimeout(timeout time.Duration) *StorageServiceLoadMapGetParams { + + return &StorageServiceLoadMapGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceLoadMapGetParamsWithContext creates a new StorageServiceLoadMapGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceLoadMapGetParamsWithContext(ctx context.Context) *StorageServiceLoadMapGetParams { + + return &StorageServiceLoadMapGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceLoadMapGetParamsWithHTTPClient creates a new StorageServiceLoadMapGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceLoadMapGetParamsWithHTTPClient(client *http.Client) *StorageServiceLoadMapGetParams { + + return &StorageServiceLoadMapGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceLoadMapGetParams contains all the parameters to send to the API endpoint +for the storage service load map get operation typically these are written to a http.Request +*/ +type StorageServiceLoadMapGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service load map get params +func (o *StorageServiceLoadMapGetParams) WithTimeout(timeout time.Duration) *StorageServiceLoadMapGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service load map get params +func (o *StorageServiceLoadMapGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service load map get params +func (o *StorageServiceLoadMapGetParams) WithContext(ctx context.Context) *StorageServiceLoadMapGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service load map get params +func (o *StorageServiceLoadMapGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service load map get params +func (o *StorageServiceLoadMapGetParams) WithHTTPClient(client *http.Client) *StorageServiceLoadMapGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service load map get params +func (o *StorageServiceLoadMapGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceLoadMapGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_map_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_map_get_responses.go new file mode 100644 index 00000000000..45ece05225e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_load_map_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceLoadMapGetReader is a Reader for the StorageServiceLoadMapGet structure. +type StorageServiceLoadMapGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceLoadMapGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceLoadMapGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceLoadMapGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceLoadMapGetOK creates a StorageServiceLoadMapGetOK with default headers values +func NewStorageServiceLoadMapGetOK() *StorageServiceLoadMapGetOK { + return &StorageServiceLoadMapGetOK{} +} + +/* +StorageServiceLoadMapGetOK handles this case with default header values. + +Success +*/ +type StorageServiceLoadMapGetOK struct { + Payload []*models.MapStringDouble +} + +func (o *StorageServiceLoadMapGetOK) GetPayload() []*models.MapStringDouble { + return o.Payload +} + +func (o *StorageServiceLoadMapGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceLoadMapGetDefault creates a StorageServiceLoadMapGetDefault with default headers values +func NewStorageServiceLoadMapGetDefault(code int) *StorageServiceLoadMapGetDefault { + return &StorageServiceLoadMapGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceLoadMapGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceLoadMapGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service load map get default response +func (o *StorageServiceLoadMapGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceLoadMapGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceLoadMapGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceLoadMapGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_get_parameters.go new file mode 100644 index 00000000000..f52d345887e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceLoggingLevelGetParams creates a new StorageServiceLoggingLevelGetParams object +// with the default values initialized. 
+func NewStorageServiceLoggingLevelGetParams() *StorageServiceLoggingLevelGetParams { + + return &StorageServiceLoggingLevelGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceLoggingLevelGetParamsWithTimeout creates a new StorageServiceLoggingLevelGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceLoggingLevelGetParamsWithTimeout(timeout time.Duration) *StorageServiceLoggingLevelGetParams { + + return &StorageServiceLoggingLevelGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceLoggingLevelGetParamsWithContext creates a new StorageServiceLoggingLevelGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceLoggingLevelGetParamsWithContext(ctx context.Context) *StorageServiceLoggingLevelGetParams { + + return &StorageServiceLoggingLevelGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceLoggingLevelGetParamsWithHTTPClient creates a new StorageServiceLoggingLevelGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceLoggingLevelGetParamsWithHTTPClient(client *http.Client) *StorageServiceLoggingLevelGetParams { + + return &StorageServiceLoggingLevelGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceLoggingLevelGetParams contains all the parameters to send to the API endpoint +for the storage service logging level get operation typically these are written to a http.Request +*/ +type StorageServiceLoggingLevelGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service logging level get params +func (o *StorageServiceLoggingLevelGetParams) WithTimeout(timeout time.Duration) *StorageServiceLoggingLevelGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service logging level get params +func (o *StorageServiceLoggingLevelGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service logging level get params +func (o *StorageServiceLoggingLevelGetParams) WithContext(ctx context.Context) *StorageServiceLoggingLevelGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service logging level get params +func (o *StorageServiceLoggingLevelGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service logging level get params +func (o *StorageServiceLoggingLevelGetParams) WithHTTPClient(client *http.Client) *StorageServiceLoggingLevelGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service logging level get params +func (o *StorageServiceLoggingLevelGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceLoggingLevelGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_get_responses.go new file mode 100644 index 00000000000..cde3b9924a7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceLoggingLevelGetReader is a Reader for the StorageServiceLoggingLevelGet structure. +type StorageServiceLoggingLevelGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceLoggingLevelGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceLoggingLevelGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceLoggingLevelGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceLoggingLevelGetOK creates a StorageServiceLoggingLevelGetOK with default headers values +func NewStorageServiceLoggingLevelGetOK() *StorageServiceLoggingLevelGetOK { + return &StorageServiceLoggingLevelGetOK{} +} + +/* +StorageServiceLoggingLevelGetOK handles this case with default header values. + +Success +*/ +type StorageServiceLoggingLevelGetOK struct { + Payload []*models.Mapper +} + +func (o *StorageServiceLoggingLevelGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *StorageServiceLoggingLevelGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceLoggingLevelGetDefault creates a StorageServiceLoggingLevelGetDefault with default headers values +func NewStorageServiceLoggingLevelGetDefault(code int) *StorageServiceLoggingLevelGetDefault { + return &StorageServiceLoggingLevelGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceLoggingLevelGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceLoggingLevelGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service logging level get default response +func (o *StorageServiceLoggingLevelGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceLoggingLevelGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceLoggingLevelGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceLoggingLevelGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_post_parameters.go new file mode 100644 index 00000000000..baa69463cc2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_post_parameters.go @@ -0,0 +1,165 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceLoggingLevelPostParams creates a new StorageServiceLoggingLevelPostParams object +// with the default values initialized. 
+func NewStorageServiceLoggingLevelPostParams() *StorageServiceLoggingLevelPostParams {
+	var ()
+	return &StorageServiceLoggingLevelPostParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceLoggingLevelPostParamsWithTimeout creates a new StorageServiceLoggingLevelPostParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceLoggingLevelPostParamsWithTimeout(timeout time.Duration) *StorageServiceLoggingLevelPostParams {
+	var ()
+	return &StorageServiceLoggingLevelPostParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceLoggingLevelPostParamsWithContext creates a new StorageServiceLoggingLevelPostParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceLoggingLevelPostParamsWithContext(ctx context.Context) *StorageServiceLoggingLevelPostParams {
+	var ()
+	return &StorageServiceLoggingLevelPostParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceLoggingLevelPostParamsWithHTTPClient creates a new StorageServiceLoggingLevelPostParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceLoggingLevelPostParamsWithHTTPClient(client *http.Client) *StorageServiceLoggingLevelPostParams {
+	var ()
+	return &StorageServiceLoggingLevelPostParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceLoggingLevelPostParams contains all the parameters to send to the API endpoint
+for the storage service logging level post operation typically these are written to a http.Request
+*/
+type StorageServiceLoggingLevelPostParams struct {
+
+	/*ClassQualifier
+	  The logger's classQualifier
+
+	*/
+	ClassQualifier string
+	/*Level
+	  The log level
+
+	*/
+	Level string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service logging level post params
+func (o *StorageServiceLoggingLevelPostParams) WithTimeout(timeout time.Duration) *StorageServiceLoggingLevelPostParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service logging level post params
+func (o *StorageServiceLoggingLevelPostParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service logging level post params
+func (o *StorageServiceLoggingLevelPostParams) WithContext(ctx context.Context) *StorageServiceLoggingLevelPostParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service logging level post params
+func (o *StorageServiceLoggingLevelPostParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service logging level post params
+func (o *StorageServiceLoggingLevelPostParams) WithHTTPClient(client *http.Client) *StorageServiceLoggingLevelPostParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service logging level post params
+func (o *StorageServiceLoggingLevelPostParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithClassQualifier adds the classQualifier to the storage service logging level post params
+func (o *StorageServiceLoggingLevelPostParams) WithClassQualifier(classQualifier string) *StorageServiceLoggingLevelPostParams {
+	o.SetClassQualifier(classQualifier)
+	return o
+}
+
+// SetClassQualifier adds the classQualifier to the storage service
logging level post params +func (o *StorageServiceLoggingLevelPostParams) SetClassQualifier(classQualifier string) { + o.ClassQualifier = classQualifier +} + +// WithLevel adds the level to the storage service logging level post params +func (o *StorageServiceLoggingLevelPostParams) WithLevel(level string) *StorageServiceLoggingLevelPostParams { + o.SetLevel(level) + return o +} + +// SetLevel adds the level to the storage service logging level post params +func (o *StorageServiceLoggingLevelPostParams) SetLevel(level string) { + o.Level = level +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceLoggingLevelPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param class_qualifier + qrClassQualifier := o.ClassQualifier + qClassQualifier := qrClassQualifier + if qClassQualifier != "" { + if err := r.SetQueryParam("class_qualifier", qClassQualifier); err != nil { + return err + } + } + + // query param level + qrLevel := o.Level + qLevel := qrLevel + if qLevel != "" { + if err := r.SetQueryParam("level", qLevel); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_post_responses.go new file mode 100644 index 00000000000..f9dae77e43a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_logging_level_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceLoggingLevelPostReader is a Reader for the StorageServiceLoggingLevelPost structure. +type StorageServiceLoggingLevelPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceLoggingLevelPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceLoggingLevelPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceLoggingLevelPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceLoggingLevelPostOK creates a StorageServiceLoggingLevelPostOK with default headers values +func NewStorageServiceLoggingLevelPostOK() *StorageServiceLoggingLevelPostOK { + return &StorageServiceLoggingLevelPostOK{} +} + +/* +StorageServiceLoggingLevelPostOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceLoggingLevelPostOK struct { +} + +func (o *StorageServiceLoggingLevelPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceLoggingLevelPostDefault creates a StorageServiceLoggingLevelPostDefault with default headers values +func NewStorageServiceLoggingLevelPostDefault(code int) *StorageServiceLoggingLevelPostDefault { + return &StorageServiceLoggingLevelPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceLoggingLevelPostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceLoggingLevelPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service logging level post default response +func (o *StorageServiceLoggingLevelPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceLoggingLevelPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceLoggingLevelPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceLoggingLevelPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_exceptions_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_exceptions_get_parameters.go new file mode 100644 index 00000000000..95d6b8f7605 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_exceptions_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceMetricsExceptionsGetParams creates a new StorageServiceMetricsExceptionsGetParams object +// with the default values initialized. 
+func NewStorageServiceMetricsExceptionsGetParams() *StorageServiceMetricsExceptionsGetParams { + + return &StorageServiceMetricsExceptionsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceMetricsExceptionsGetParamsWithTimeout creates a new StorageServiceMetricsExceptionsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceMetricsExceptionsGetParamsWithTimeout(timeout time.Duration) *StorageServiceMetricsExceptionsGetParams { + + return &StorageServiceMetricsExceptionsGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceMetricsExceptionsGetParamsWithContext creates a new StorageServiceMetricsExceptionsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceMetricsExceptionsGetParamsWithContext(ctx context.Context) *StorageServiceMetricsExceptionsGetParams { + + return &StorageServiceMetricsExceptionsGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceMetricsExceptionsGetParamsWithHTTPClient creates a new StorageServiceMetricsExceptionsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceMetricsExceptionsGetParamsWithHTTPClient(client *http.Client) *StorageServiceMetricsExceptionsGetParams { + + return &StorageServiceMetricsExceptionsGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceMetricsExceptionsGetParams contains all the parameters to send to the API endpoint +for the storage service metrics exceptions get operation typically these are written to a http.Request +*/ +type StorageServiceMetricsExceptionsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service metrics exceptions get params +func (o *StorageServiceMetricsExceptionsGetParams) WithTimeout(timeout time.Duration) *StorageServiceMetricsExceptionsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service metrics exceptions get params +func (o *StorageServiceMetricsExceptionsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service metrics exceptions get params +func (o *StorageServiceMetricsExceptionsGetParams) WithContext(ctx context.Context) *StorageServiceMetricsExceptionsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service metrics exceptions get params +func (o *StorageServiceMetricsExceptionsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service metrics exceptions get params +func (o *StorageServiceMetricsExceptionsGetParams) WithHTTPClient(client *http.Client) *StorageServiceMetricsExceptionsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service metrics exceptions get params +func (o *StorageServiceMetricsExceptionsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceMetricsExceptionsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_exceptions_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_exceptions_get_responses.go new file mode 100644 index 00000000000..a9dc2a997d9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_exceptions_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceMetricsExceptionsGetReader is a Reader for the StorageServiceMetricsExceptionsGet structure. +type StorageServiceMetricsExceptionsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceMetricsExceptionsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceMetricsExceptionsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceMetricsExceptionsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceMetricsExceptionsGetOK creates a StorageServiceMetricsExceptionsGetOK with default headers values +func NewStorageServiceMetricsExceptionsGetOK() *StorageServiceMetricsExceptionsGetOK { + return &StorageServiceMetricsExceptionsGetOK{} +} + +/* +StorageServiceMetricsExceptionsGetOK handles this case with default header values. + +Success +*/ +type StorageServiceMetricsExceptionsGetOK struct { + Payload int32 +} + +func (o *StorageServiceMetricsExceptionsGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceMetricsExceptionsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceMetricsExceptionsGetDefault creates a StorageServiceMetricsExceptionsGetDefault with default headers values +func NewStorageServiceMetricsExceptionsGetDefault(code int) *StorageServiceMetricsExceptionsGetDefault { + return &StorageServiceMetricsExceptionsGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceMetricsExceptionsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceMetricsExceptionsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service metrics exceptions get default response +func (o *StorageServiceMetricsExceptionsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceMetricsExceptionsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceMetricsExceptionsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceMetricsExceptionsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_hints_in_progress_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_hints_in_progress_get_parameters.go new file mode 100644 index 00000000000..8828fde126a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_hints_in_progress_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceMetricsHintsInProgressGetParams creates a new StorageServiceMetricsHintsInProgressGetParams object +// with the default values initialized. 
+func NewStorageServiceMetricsHintsInProgressGetParams() *StorageServiceMetricsHintsInProgressGetParams { + + return &StorageServiceMetricsHintsInProgressGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceMetricsHintsInProgressGetParamsWithTimeout creates a new StorageServiceMetricsHintsInProgressGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceMetricsHintsInProgressGetParamsWithTimeout(timeout time.Duration) *StorageServiceMetricsHintsInProgressGetParams { + + return &StorageServiceMetricsHintsInProgressGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceMetricsHintsInProgressGetParamsWithContext creates a new StorageServiceMetricsHintsInProgressGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceMetricsHintsInProgressGetParamsWithContext(ctx context.Context) *StorageServiceMetricsHintsInProgressGetParams { + + return &StorageServiceMetricsHintsInProgressGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceMetricsHintsInProgressGetParamsWithHTTPClient creates a new StorageServiceMetricsHintsInProgressGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceMetricsHintsInProgressGetParamsWithHTTPClient(client *http.Client) *StorageServiceMetricsHintsInProgressGetParams { + + return &StorageServiceMetricsHintsInProgressGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceMetricsHintsInProgressGetParams contains all the parameters to send to the API endpoint +for the storage service metrics hints in progress get operation typically these are written to a http.Request +*/ +type StorageServiceMetricsHintsInProgressGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service metrics hints in progress get params +func (o *StorageServiceMetricsHintsInProgressGetParams) WithTimeout(timeout time.Duration) *StorageServiceMetricsHintsInProgressGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service metrics hints in progress get params +func (o *StorageServiceMetricsHintsInProgressGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service metrics hints in progress get params +func (o *StorageServiceMetricsHintsInProgressGetParams) WithContext(ctx context.Context) *StorageServiceMetricsHintsInProgressGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service metrics hints in progress get params +func (o *StorageServiceMetricsHintsInProgressGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service metrics hints in progress get params +func (o *StorageServiceMetricsHintsInProgressGetParams) WithHTTPClient(client *http.Client) *StorageServiceMetricsHintsInProgressGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service metrics hints in progress get params +func (o *StorageServiceMetricsHintsInProgressGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceMetricsHintsInProgressGetParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_hints_in_progress_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_hints_in_progress_get_responses.go new file mode 100644 index 00000000000..33d8e9fa52c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_hints_in_progress_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceMetricsHintsInProgressGetReader is a Reader for the StorageServiceMetricsHintsInProgressGet structure. +type StorageServiceMetricsHintsInProgressGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceMetricsHintsInProgressGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceMetricsHintsInProgressGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceMetricsHintsInProgressGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceMetricsHintsInProgressGetOK creates a StorageServiceMetricsHintsInProgressGetOK with default headers values +func NewStorageServiceMetricsHintsInProgressGetOK() *StorageServiceMetricsHintsInProgressGetOK { + return &StorageServiceMetricsHintsInProgressGetOK{} +} + +/* +StorageServiceMetricsHintsInProgressGetOK handles this case with default header values. + +Success +*/ +type StorageServiceMetricsHintsInProgressGetOK struct { + Payload int32 +} + +func (o *StorageServiceMetricsHintsInProgressGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceMetricsHintsInProgressGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceMetricsHintsInProgressGetDefault creates a StorageServiceMetricsHintsInProgressGetDefault with default headers values +func NewStorageServiceMetricsHintsInProgressGetDefault(code int) *StorageServiceMetricsHintsInProgressGetDefault { + return &StorageServiceMetricsHintsInProgressGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceMetricsHintsInProgressGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceMetricsHintsInProgressGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service metrics hints in progress get default response +func (o *StorageServiceMetricsHintsInProgressGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceMetricsHintsInProgressGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceMetricsHintsInProgressGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceMetricsHintsInProgressGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_load_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_load_get_parameters.go new file mode 100644 index 00000000000..2632ea1b792 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_load_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceMetricsLoadGetParams creates a new StorageServiceMetricsLoadGetParams object +// with the default values initialized. 
+func NewStorageServiceMetricsLoadGetParams() *StorageServiceMetricsLoadGetParams { + + return &StorageServiceMetricsLoadGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceMetricsLoadGetParamsWithTimeout creates a new StorageServiceMetricsLoadGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceMetricsLoadGetParamsWithTimeout(timeout time.Duration) *StorageServiceMetricsLoadGetParams { + + return &StorageServiceMetricsLoadGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceMetricsLoadGetParamsWithContext creates a new StorageServiceMetricsLoadGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceMetricsLoadGetParamsWithContext(ctx context.Context) *StorageServiceMetricsLoadGetParams { + + return &StorageServiceMetricsLoadGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceMetricsLoadGetParamsWithHTTPClient creates a new StorageServiceMetricsLoadGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceMetricsLoadGetParamsWithHTTPClient(client *http.Client) *StorageServiceMetricsLoadGetParams { + + return &StorageServiceMetricsLoadGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceMetricsLoadGetParams contains all the parameters to send to the API endpoint +for the storage service metrics load get operation typically these are written to a http.Request +*/ +type StorageServiceMetricsLoadGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service metrics load get params +func (o *StorageServiceMetricsLoadGetParams) WithTimeout(timeout time.Duration) *StorageServiceMetricsLoadGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service metrics load get params +func (o *StorageServiceMetricsLoadGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service metrics load get params +func (o *StorageServiceMetricsLoadGetParams) WithContext(ctx context.Context) *StorageServiceMetricsLoadGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service metrics load get params +func (o *StorageServiceMetricsLoadGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service metrics load get params +func (o *StorageServiceMetricsLoadGetParams) WithHTTPClient(client *http.Client) *StorageServiceMetricsLoadGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service metrics load get params +func (o *StorageServiceMetricsLoadGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceMetricsLoadGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_load_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_load_get_responses.go new file mode 100644 index 00000000000..e563f1288f3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_load_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceMetricsLoadGetReader is a Reader for the StorageServiceMetricsLoadGet structure. +type StorageServiceMetricsLoadGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceMetricsLoadGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceMetricsLoadGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceMetricsLoadGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceMetricsLoadGetOK creates a StorageServiceMetricsLoadGetOK with default headers values +func NewStorageServiceMetricsLoadGetOK() *StorageServiceMetricsLoadGetOK { + return &StorageServiceMetricsLoadGetOK{} +} + +/* +StorageServiceMetricsLoadGetOK handles this case with default header values. + +Success +*/ +type StorageServiceMetricsLoadGetOK struct { + Payload int32 +} + +func (o *StorageServiceMetricsLoadGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceMetricsLoadGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceMetricsLoadGetDefault creates a StorageServiceMetricsLoadGetDefault with default headers values +func NewStorageServiceMetricsLoadGetDefault(code int) *StorageServiceMetricsLoadGetDefault { + return &StorageServiceMetricsLoadGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceMetricsLoadGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceMetricsLoadGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service metrics load get default response +func (o *StorageServiceMetricsLoadGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceMetricsLoadGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceMetricsLoadGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceMetricsLoadGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_total_hints_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_total_hints_get_parameters.go new file mode 100644 index 00000000000..3e26ad677cc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_total_hints_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceMetricsTotalHintsGetParams creates a new StorageServiceMetricsTotalHintsGetParams object +// with the default values initialized. 
+func NewStorageServiceMetricsTotalHintsGetParams() *StorageServiceMetricsTotalHintsGetParams { + + return &StorageServiceMetricsTotalHintsGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceMetricsTotalHintsGetParamsWithTimeout creates a new StorageServiceMetricsTotalHintsGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceMetricsTotalHintsGetParamsWithTimeout(timeout time.Duration) *StorageServiceMetricsTotalHintsGetParams { + + return &StorageServiceMetricsTotalHintsGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceMetricsTotalHintsGetParamsWithContext creates a new StorageServiceMetricsTotalHintsGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceMetricsTotalHintsGetParamsWithContext(ctx context.Context) *StorageServiceMetricsTotalHintsGetParams { + + return &StorageServiceMetricsTotalHintsGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceMetricsTotalHintsGetParamsWithHTTPClient creates a new StorageServiceMetricsTotalHintsGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceMetricsTotalHintsGetParamsWithHTTPClient(client *http.Client) *StorageServiceMetricsTotalHintsGetParams { + + return &StorageServiceMetricsTotalHintsGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceMetricsTotalHintsGetParams contains all the parameters to send to the API endpoint +for the storage service metrics total hints get operation typically these are written to a http.Request +*/ +type StorageServiceMetricsTotalHintsGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service metrics total hints get params +func (o *StorageServiceMetricsTotalHintsGetParams) WithTimeout(timeout time.Duration) *StorageServiceMetricsTotalHintsGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service metrics total hints get params +func (o *StorageServiceMetricsTotalHintsGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service metrics total hints get params +func (o *StorageServiceMetricsTotalHintsGetParams) WithContext(ctx context.Context) *StorageServiceMetricsTotalHintsGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service metrics total hints get params +func (o *StorageServiceMetricsTotalHintsGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service metrics total hints get params +func (o *StorageServiceMetricsTotalHintsGetParams) WithHTTPClient(client *http.Client) *StorageServiceMetricsTotalHintsGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service metrics total hints get params +func (o *StorageServiceMetricsTotalHintsGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceMetricsTotalHintsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_total_hints_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_total_hints_get_responses.go new file mode 100644 index 00000000000..22baca2d1ab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_metrics_total_hints_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceMetricsTotalHintsGetReader is a Reader for the StorageServiceMetricsTotalHintsGet structure. +type StorageServiceMetricsTotalHintsGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceMetricsTotalHintsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceMetricsTotalHintsGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceMetricsTotalHintsGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceMetricsTotalHintsGetOK creates a StorageServiceMetricsTotalHintsGetOK with default headers values +func NewStorageServiceMetricsTotalHintsGetOK() *StorageServiceMetricsTotalHintsGetOK { + return &StorageServiceMetricsTotalHintsGetOK{} +} + +/* +StorageServiceMetricsTotalHintsGetOK handles this case with default header values. + +Success +*/ +type StorageServiceMetricsTotalHintsGetOK struct { + Payload int32 +} + +func (o *StorageServiceMetricsTotalHintsGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceMetricsTotalHintsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceMetricsTotalHintsGetDefault creates a StorageServiceMetricsTotalHintsGetDefault with default headers values +func NewStorageServiceMetricsTotalHintsGetDefault(code int) *StorageServiceMetricsTotalHintsGetDefault { + return &StorageServiceMetricsTotalHintsGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceMetricsTotalHintsGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceMetricsTotalHintsGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service metrics total hints get default response +func (o *StorageServiceMetricsTotalHintsGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceMetricsTotalHintsGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceMetricsTotalHintsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceMetricsTotalHintsGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_move_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_move_post_parameters.go new file mode 100644 index 00000000000..72590b8a182 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_move_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceMovePostParams creates a new StorageServiceMovePostParams object +// with the default values initialized. 
+func NewStorageServiceMovePostParams() *StorageServiceMovePostParams { + var () + return &StorageServiceMovePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceMovePostParamsWithTimeout creates a new StorageServiceMovePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceMovePostParamsWithTimeout(timeout time.Duration) *StorageServiceMovePostParams { + var () + return &StorageServiceMovePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceMovePostParamsWithContext creates a new StorageServiceMovePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceMovePostParamsWithContext(ctx context.Context) *StorageServiceMovePostParams { + var () + return &StorageServiceMovePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceMovePostParamsWithHTTPClient creates a new StorageServiceMovePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceMovePostParamsWithHTTPClient(client *http.Client) *StorageServiceMovePostParams { + var () + return &StorageServiceMovePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceMovePostParams contains all the parameters to send to the API endpoint +for the storage service move post operation typically these are written to a http.Request +*/ +type StorageServiceMovePostParams struct { + + /*NewToken + token to move this node to + + */ + NewToken string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service move post params +func (o *StorageServiceMovePostParams) WithTimeout(timeout time.Duration) *StorageServiceMovePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service move post params +func (o *StorageServiceMovePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service move post params +func (o *StorageServiceMovePostParams) WithContext(ctx context.Context) *StorageServiceMovePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service move post params +func (o *StorageServiceMovePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service move post params +func (o *StorageServiceMovePostParams) WithHTTPClient(client *http.Client) *StorageServiceMovePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service move post params +func (o *StorageServiceMovePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithNewToken adds the newToken to the storage service move post params +func (o *StorageServiceMovePostParams) WithNewToken(newToken string) *StorageServiceMovePostParams { + o.SetNewToken(newToken) + return o +} + +// SetNewToken adds the newToken to the storage service move post params +func (o *StorageServiceMovePostParams) SetNewToken(newToken string) { + o.NewToken = newToken +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceMovePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param new_token + qrNewToken := o.NewToken + 
qNewToken := qrNewToken + if qNewToken != "" { + if err := r.SetQueryParam("new_token", qNewToken); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_move_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_move_post_responses.go new file mode 100644 index 00000000000..811c4ab9430 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_move_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceMovePostReader is a Reader for the StorageServiceMovePost structure. +type StorageServiceMovePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceMovePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceMovePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceMovePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceMovePostOK creates a StorageServiceMovePostOK with default headers values +func NewStorageServiceMovePostOK() *StorageServiceMovePostOK { + return &StorageServiceMovePostOK{} +} + +/* +StorageServiceMovePostOK handles this case with default header values. + +Success +*/ +type StorageServiceMovePostOK struct { +} + +func (o *StorageServiceMovePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceMovePostDefault creates a StorageServiceMovePostDefault with default headers values +func NewStorageServiceMovePostDefault(code int) *StorageServiceMovePostDefault { + return &StorageServiceMovePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceMovePostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceMovePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service move post default response +func (o *StorageServiceMovePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceMovePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceMovePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceMovePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_delete_parameters.go new file mode 100644 index 00000000000..6c8d0b8871e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_delete_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceNativeTransportDeleteParams creates a new StorageServiceNativeTransportDeleteParams object +// with the default values initialized. 
+func NewStorageServiceNativeTransportDeleteParams() *StorageServiceNativeTransportDeleteParams { + + return &StorageServiceNativeTransportDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceNativeTransportDeleteParamsWithTimeout creates a new StorageServiceNativeTransportDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceNativeTransportDeleteParamsWithTimeout(timeout time.Duration) *StorageServiceNativeTransportDeleteParams { + + return &StorageServiceNativeTransportDeleteParams{ + + timeout: timeout, + } +} + +// NewStorageServiceNativeTransportDeleteParamsWithContext creates a new StorageServiceNativeTransportDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceNativeTransportDeleteParamsWithContext(ctx context.Context) *StorageServiceNativeTransportDeleteParams { + + return &StorageServiceNativeTransportDeleteParams{ + + Context: ctx, + } +} + +// NewStorageServiceNativeTransportDeleteParamsWithHTTPClient creates a new StorageServiceNativeTransportDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceNativeTransportDeleteParamsWithHTTPClient(client *http.Client) *StorageServiceNativeTransportDeleteParams { + + return &StorageServiceNativeTransportDeleteParams{ + HTTPClient: client, + } +} + +/* +StorageServiceNativeTransportDeleteParams contains all the parameters to send to the API endpoint +for the storage service native transport delete operation typically these are written to a http.Request +*/ +type StorageServiceNativeTransportDeleteParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service native transport delete params +func (o *StorageServiceNativeTransportDeleteParams) WithTimeout(timeout time.Duration) *StorageServiceNativeTransportDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service native transport delete params +func (o *StorageServiceNativeTransportDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service native transport delete params +func (o *StorageServiceNativeTransportDeleteParams) WithContext(ctx context.Context) *StorageServiceNativeTransportDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service native transport delete params +func (o *StorageServiceNativeTransportDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service native transport delete params +func (o *StorageServiceNativeTransportDeleteParams) WithHTTPClient(client *http.Client) *StorageServiceNativeTransportDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service native transport delete params +func (o *StorageServiceNativeTransportDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceNativeTransportDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_delete_responses.go new file mode 100644 index 00000000000..8f5fbcc88dd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_delete_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceNativeTransportDeleteReader is a Reader for the StorageServiceNativeTransportDelete structure. +type StorageServiceNativeTransportDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceNativeTransportDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceNativeTransportDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceNativeTransportDeleteDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceNativeTransportDeleteOK creates a StorageServiceNativeTransportDeleteOK with default headers values +func NewStorageServiceNativeTransportDeleteOK() *StorageServiceNativeTransportDeleteOK { + return &StorageServiceNativeTransportDeleteOK{} +} + +/* +StorageServiceNativeTransportDeleteOK handles this case with default header values. + +Success +*/ +type StorageServiceNativeTransportDeleteOK struct { +} + +func (o *StorageServiceNativeTransportDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceNativeTransportDeleteDefault creates a StorageServiceNativeTransportDeleteDefault with default headers values +func NewStorageServiceNativeTransportDeleteDefault(code int) *StorageServiceNativeTransportDeleteDefault { + return &StorageServiceNativeTransportDeleteDefault{ + _statusCode: code, + } +} + +/* +StorageServiceNativeTransportDeleteDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceNativeTransportDeleteDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service native transport delete default response +func (o *StorageServiceNativeTransportDeleteDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceNativeTransportDeleteDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceNativeTransportDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceNativeTransportDeleteDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_get_parameters.go new file mode 100644 index 00000000000..ad2914b9bfd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceNativeTransportGetParams creates a new StorageServiceNativeTransportGetParams object +// with the default values initialized. 
+func NewStorageServiceNativeTransportGetParams() *StorageServiceNativeTransportGetParams { + + return &StorageServiceNativeTransportGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceNativeTransportGetParamsWithTimeout creates a new StorageServiceNativeTransportGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceNativeTransportGetParamsWithTimeout(timeout time.Duration) *StorageServiceNativeTransportGetParams { + + return &StorageServiceNativeTransportGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceNativeTransportGetParamsWithContext creates a new StorageServiceNativeTransportGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceNativeTransportGetParamsWithContext(ctx context.Context) *StorageServiceNativeTransportGetParams { + + return &StorageServiceNativeTransportGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceNativeTransportGetParamsWithHTTPClient creates a new StorageServiceNativeTransportGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceNativeTransportGetParamsWithHTTPClient(client *http.Client) *StorageServiceNativeTransportGetParams { + + return &StorageServiceNativeTransportGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceNativeTransportGetParams contains all the parameters to send to the API endpoint +for the storage service native transport get operation typically these are written to a http.Request +*/ +type StorageServiceNativeTransportGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service native transport get params +func (o *StorageServiceNativeTransportGetParams) WithTimeout(timeout time.Duration) *StorageServiceNativeTransportGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service native transport get params +func (o *StorageServiceNativeTransportGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service native transport get params +func (o *StorageServiceNativeTransportGetParams) WithContext(ctx context.Context) *StorageServiceNativeTransportGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service native transport get params +func (o *StorageServiceNativeTransportGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service native transport get params +func (o *StorageServiceNativeTransportGetParams) WithHTTPClient(client *http.Client) *StorageServiceNativeTransportGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service native transport get params +func (o *StorageServiceNativeTransportGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceNativeTransportGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_get_responses.go new file mode 100644 index 00000000000..964fc4a06bb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceNativeTransportGetReader is a Reader for the StorageServiceNativeTransportGet structure. +type StorageServiceNativeTransportGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceNativeTransportGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceNativeTransportGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceNativeTransportGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceNativeTransportGetOK creates a StorageServiceNativeTransportGetOK with default headers values +func NewStorageServiceNativeTransportGetOK() *StorageServiceNativeTransportGetOK { + return &StorageServiceNativeTransportGetOK{} +} + +/* +StorageServiceNativeTransportGetOK handles this case with default header values. + +Success +*/ +type StorageServiceNativeTransportGetOK struct { + Payload bool +} + +func (o *StorageServiceNativeTransportGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageServiceNativeTransportGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceNativeTransportGetDefault creates a StorageServiceNativeTransportGetDefault with default headers values +func NewStorageServiceNativeTransportGetDefault(code int) *StorageServiceNativeTransportGetDefault { + return &StorageServiceNativeTransportGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceNativeTransportGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceNativeTransportGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service native transport get default response +func (o *StorageServiceNativeTransportGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceNativeTransportGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceNativeTransportGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceNativeTransportGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_post_parameters.go new file mode 100644 index 00000000000..270786e2720 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceNativeTransportPostParams creates a new StorageServiceNativeTransportPostParams object +// with the default values initialized. 
+func NewStorageServiceNativeTransportPostParams() *StorageServiceNativeTransportPostParams { + + return &StorageServiceNativeTransportPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceNativeTransportPostParamsWithTimeout creates a new StorageServiceNativeTransportPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceNativeTransportPostParamsWithTimeout(timeout time.Duration) *StorageServiceNativeTransportPostParams { + + return &StorageServiceNativeTransportPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceNativeTransportPostParamsWithContext creates a new StorageServiceNativeTransportPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceNativeTransportPostParamsWithContext(ctx context.Context) *StorageServiceNativeTransportPostParams { + + return &StorageServiceNativeTransportPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceNativeTransportPostParamsWithHTTPClient creates a new StorageServiceNativeTransportPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceNativeTransportPostParamsWithHTTPClient(client *http.Client) *StorageServiceNativeTransportPostParams { + + return &StorageServiceNativeTransportPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceNativeTransportPostParams contains all the parameters to send to the API endpoint +for the storage service native transport post operation typically these are written to a http.Request +*/ +type StorageServiceNativeTransportPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service native transport post params +func (o *StorageServiceNativeTransportPostParams) WithTimeout(timeout time.Duration) *StorageServiceNativeTransportPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service native transport post params +func (o *StorageServiceNativeTransportPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service native transport post params +func (o *StorageServiceNativeTransportPostParams) WithContext(ctx context.Context) *StorageServiceNativeTransportPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service native transport post params +func (o *StorageServiceNativeTransportPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service native transport post params +func (o *StorageServiceNativeTransportPostParams) WithHTTPClient(client *http.Client) *StorageServiceNativeTransportPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service native transport post params +func (o *StorageServiceNativeTransportPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceNativeTransportPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_post_responses.go new file mode 100644 index 00000000000..96c994a1303 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_native_transport_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceNativeTransportPostReader is a Reader for the StorageServiceNativeTransportPost structure. +type StorageServiceNativeTransportPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceNativeTransportPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceNativeTransportPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceNativeTransportPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceNativeTransportPostOK creates a StorageServiceNativeTransportPostOK with default headers values +func NewStorageServiceNativeTransportPostOK() *StorageServiceNativeTransportPostOK { + return &StorageServiceNativeTransportPostOK{} +} + +/* +StorageServiceNativeTransportPostOK handles this case with default header values. + +Success +*/ +type StorageServiceNativeTransportPostOK struct { +} + +func (o *StorageServiceNativeTransportPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceNativeTransportPostDefault creates a StorageServiceNativeTransportPostDefault with default headers values +func NewStorageServiceNativeTransportPostDefault(code int) *StorageServiceNativeTransportPostDefault { + return &StorageServiceNativeTransportPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceNativeTransportPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceNativeTransportPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service native transport post default response +func (o *StorageServiceNativeTransportPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceNativeTransportPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceNativeTransportPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceNativeTransportPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_natural_endpoints_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_natural_endpoints_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..c6827063fc6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_natural_endpoints_by_keyspace_get_parameters.go @@ -0,0 +1,186 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceNaturalEndpointsByKeyspaceGetParams creates a new StorageServiceNaturalEndpointsByKeyspaceGetParams object +// with the default values initialized. 
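+//
+// All three parameters (keyspace, cf, key) are generated as plain strings,
+// i.e. required by the endpoint; keyspace travels as a path parameter while
+// cf and key become query parameters in WriteToRequest below. A minimal
+// sketch of building them:
+//
+//	params := operations.NewStorageServiceNaturalEndpointsByKeyspaceGetParams().
+//		WithKeyspace("ks1").
+//		WithCf("table1").
+//		WithKey("partition-key")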
+func NewStorageServiceNaturalEndpointsByKeyspaceGetParams() *StorageServiceNaturalEndpointsByKeyspaceGetParams { + var () + return &StorageServiceNaturalEndpointsByKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceNaturalEndpointsByKeyspaceGetParamsWithTimeout creates a new StorageServiceNaturalEndpointsByKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceNaturalEndpointsByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + var () + return &StorageServiceNaturalEndpointsByKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceNaturalEndpointsByKeyspaceGetParamsWithContext creates a new StorageServiceNaturalEndpointsByKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceNaturalEndpointsByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + var () + return &StorageServiceNaturalEndpointsByKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceNaturalEndpointsByKeyspaceGetParamsWithHTTPClient creates a new StorageServiceNaturalEndpointsByKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceNaturalEndpointsByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + var () + return &StorageServiceNaturalEndpointsByKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceNaturalEndpointsByKeyspaceGetParams contains all the parameters to send to the API endpoint +for the storage service natural endpoints by keyspace get operation typically these are written to a http.Request +*/ +type StorageServiceNaturalEndpointsByKeyspaceGetParams struct { + + /*Cf + Column family name + + */ + Cf string + /*Key + key for which we need to find the endpoint return value - the endpoint responsible for this key + + */ + Key string + /*Keyspace + The keyspace to query about + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + o.SetHTTPClient(client) 
+ return o +} + +// SetHTTPClient adds the HTTPClient to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) WithCf(cf string) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) SetCf(cf string) { + o.Cf = cf +} + +// WithKey adds the key to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) WithKey(key string) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + o.SetKey(key) + return o +} + +// SetKey adds the key to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) SetKey(key string) { + o.Key = key +} + +// WithKeyspace adds the keyspace to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) WithKeyspace(keyspace string) *StorageServiceNaturalEndpointsByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service natural endpoints by keyspace get params +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceNaturalEndpointsByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param cf + qrCf := o.Cf + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + // query param key + qrKey := o.Key + qKey := qrKey + if qKey != "" { + if err := r.SetQueryParam("key", qKey); err != nil { + return err + } + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_natural_endpoints_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_natural_endpoints_by_keyspace_get_responses.go new file mode 100644 index 00000000000..a6e4279bb87 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_natural_endpoints_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceNaturalEndpointsByKeyspaceGetReader is a Reader for the StorageServiceNaturalEndpointsByKeyspaceGet structure. 
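+//
+// ReadResponse below maps HTTP 200 to the OK result (a []string payload of
+// endpoint addresses) and any other status to the Default result, which
+// implements error. A hedged sketch of consuming it, assuming the usual
+// go-swagger client method for this operation:
+//
+//	res, err := client.StorageServiceNaturalEndpointsByKeyspaceGet(params)
+//	if err != nil {
+//		var def *operations.StorageServiceNaturalEndpointsByKeyspaceGetDefault
+//		if errors.As(err, &def) {
+//			log.Printf("agent returned HTTP %d", def.Code())
+//		}
+//		return err
+//	}
+//	for _, endpoint := range res.Payload {
+//		fmt.Println(endpoint)
+//	}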
+type StorageServiceNaturalEndpointsByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceNaturalEndpointsByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceNaturalEndpointsByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceNaturalEndpointsByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceNaturalEndpointsByKeyspaceGetOK creates a StorageServiceNaturalEndpointsByKeyspaceGetOK with default headers values +func NewStorageServiceNaturalEndpointsByKeyspaceGetOK() *StorageServiceNaturalEndpointsByKeyspaceGetOK { + return &StorageServiceNaturalEndpointsByKeyspaceGetOK{} +} + +/* +StorageServiceNaturalEndpointsByKeyspaceGetOK handles this case with default header values. + +Success +*/ +type StorageServiceNaturalEndpointsByKeyspaceGetOK struct { + Payload []string +} + +func (o *StorageServiceNaturalEndpointsByKeyspaceGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceNaturalEndpointsByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceNaturalEndpointsByKeyspaceGetDefault creates a StorageServiceNaturalEndpointsByKeyspaceGetDefault with default headers values +func NewStorageServiceNaturalEndpointsByKeyspaceGetDefault(code int) *StorageServiceNaturalEndpointsByKeyspaceGetDefault { + return &StorageServiceNaturalEndpointsByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceNaturalEndpointsByKeyspaceGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceNaturalEndpointsByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service natural endpoints by keyspace get default response +func (o *StorageServiceNaturalEndpointsByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceNaturalEndpointsByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceNaturalEndpointsByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceNaturalEndpointsByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_joining_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_joining_get_parameters.go new file mode 100644 index 00000000000..21efe66ad2b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_joining_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceNodesJoiningGetParams creates a new StorageServiceNodesJoiningGetParams object +// with the default values initialized. 
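+//
+// The nodes joining / leaving / moving operations in this package all follow
+// this same zero-parameter shape and return the node list as a []string
+// payload. A minimal sketch for this one, with the client method name
+// assumed from the go-swagger convention:
+//
+//	res, err := client.StorageServiceNodesJoiningGet(
+//		operations.NewStorageServiceNodesJoiningGetParams().WithTimeout(10 * time.Second))
+//	if err != nil {
+//		return err
+//	}
+//	joining := res.Payload // addresses of nodes currently joining the ring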
+func NewStorageServiceNodesJoiningGetParams() *StorageServiceNodesJoiningGetParams { + + return &StorageServiceNodesJoiningGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceNodesJoiningGetParamsWithTimeout creates a new StorageServiceNodesJoiningGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceNodesJoiningGetParamsWithTimeout(timeout time.Duration) *StorageServiceNodesJoiningGetParams { + + return &StorageServiceNodesJoiningGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceNodesJoiningGetParamsWithContext creates a new StorageServiceNodesJoiningGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceNodesJoiningGetParamsWithContext(ctx context.Context) *StorageServiceNodesJoiningGetParams { + + return &StorageServiceNodesJoiningGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceNodesJoiningGetParamsWithHTTPClient creates a new StorageServiceNodesJoiningGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceNodesJoiningGetParamsWithHTTPClient(client *http.Client) *StorageServiceNodesJoiningGetParams { + + return &StorageServiceNodesJoiningGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceNodesJoiningGetParams contains all the parameters to send to the API endpoint +for the storage service nodes joining get operation typically these are written to a http.Request +*/ +type StorageServiceNodesJoiningGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service nodes joining get params +func (o *StorageServiceNodesJoiningGetParams) WithTimeout(timeout time.Duration) *StorageServiceNodesJoiningGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service nodes joining get params +func (o *StorageServiceNodesJoiningGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service nodes joining get params +func (o *StorageServiceNodesJoiningGetParams) WithContext(ctx context.Context) *StorageServiceNodesJoiningGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service nodes joining get params +func (o *StorageServiceNodesJoiningGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service nodes joining get params +func (o *StorageServiceNodesJoiningGetParams) WithHTTPClient(client *http.Client) *StorageServiceNodesJoiningGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service nodes joining get params +func (o *StorageServiceNodesJoiningGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceNodesJoiningGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_joining_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_joining_get_responses.go new file mode 100644 index 00000000000..cb1f5dacbfc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_joining_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceNodesJoiningGetReader is a Reader for the StorageServiceNodesJoiningGet structure. +type StorageServiceNodesJoiningGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceNodesJoiningGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceNodesJoiningGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceNodesJoiningGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceNodesJoiningGetOK creates a StorageServiceNodesJoiningGetOK with default headers values +func NewStorageServiceNodesJoiningGetOK() *StorageServiceNodesJoiningGetOK { + return &StorageServiceNodesJoiningGetOK{} +} + +/* +StorageServiceNodesJoiningGetOK handles this case with default header values. + +Success +*/ +type StorageServiceNodesJoiningGetOK struct { + Payload []string +} + +func (o *StorageServiceNodesJoiningGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceNodesJoiningGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceNodesJoiningGetDefault creates a StorageServiceNodesJoiningGetDefault with default headers values +func NewStorageServiceNodesJoiningGetDefault(code int) *StorageServiceNodesJoiningGetDefault { + return &StorageServiceNodesJoiningGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceNodesJoiningGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceNodesJoiningGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service nodes joining get default response +func (o *StorageServiceNodesJoiningGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceNodesJoiningGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceNodesJoiningGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceNodesJoiningGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_leaving_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_leaving_get_parameters.go new file mode 100644 index 00000000000..23a43545206 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_leaving_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceNodesLeavingGetParams creates a new StorageServiceNodesLeavingGetParams object +// with the default values initialized. 
+func NewStorageServiceNodesLeavingGetParams() *StorageServiceNodesLeavingGetParams { + + return &StorageServiceNodesLeavingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceNodesLeavingGetParamsWithTimeout creates a new StorageServiceNodesLeavingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceNodesLeavingGetParamsWithTimeout(timeout time.Duration) *StorageServiceNodesLeavingGetParams { + + return &StorageServiceNodesLeavingGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceNodesLeavingGetParamsWithContext creates a new StorageServiceNodesLeavingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceNodesLeavingGetParamsWithContext(ctx context.Context) *StorageServiceNodesLeavingGetParams { + + return &StorageServiceNodesLeavingGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceNodesLeavingGetParamsWithHTTPClient creates a new StorageServiceNodesLeavingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceNodesLeavingGetParamsWithHTTPClient(client *http.Client) *StorageServiceNodesLeavingGetParams { + + return &StorageServiceNodesLeavingGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceNodesLeavingGetParams contains all the parameters to send to the API endpoint +for the storage service nodes leaving get operation typically these are written to a http.Request +*/ +type StorageServiceNodesLeavingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service nodes leaving get params +func (o *StorageServiceNodesLeavingGetParams) WithTimeout(timeout time.Duration) *StorageServiceNodesLeavingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service nodes leaving get params +func (o *StorageServiceNodesLeavingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service nodes leaving get params +func (o *StorageServiceNodesLeavingGetParams) WithContext(ctx context.Context) *StorageServiceNodesLeavingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service nodes leaving get params +func (o *StorageServiceNodesLeavingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service nodes leaving get params +func (o *StorageServiceNodesLeavingGetParams) WithHTTPClient(client *http.Client) *StorageServiceNodesLeavingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service nodes leaving get params +func (o *StorageServiceNodesLeavingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceNodesLeavingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_leaving_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_leaving_get_responses.go new file mode 100644 index 00000000000..14c17a751a1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_leaving_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceNodesLeavingGetReader is a Reader for the StorageServiceNodesLeavingGet structure. +type StorageServiceNodesLeavingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceNodesLeavingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceNodesLeavingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceNodesLeavingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceNodesLeavingGetOK creates a StorageServiceNodesLeavingGetOK with default headers values +func NewStorageServiceNodesLeavingGetOK() *StorageServiceNodesLeavingGetOK { + return &StorageServiceNodesLeavingGetOK{} +} + +/* +StorageServiceNodesLeavingGetOK handles this case with default header values. + +Success +*/ +type StorageServiceNodesLeavingGetOK struct { + Payload []string +} + +func (o *StorageServiceNodesLeavingGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceNodesLeavingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceNodesLeavingGetDefault creates a StorageServiceNodesLeavingGetDefault with default headers values +func NewStorageServiceNodesLeavingGetDefault(code int) *StorageServiceNodesLeavingGetDefault { + return &StorageServiceNodesLeavingGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceNodesLeavingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceNodesLeavingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service nodes leaving get default response +func (o *StorageServiceNodesLeavingGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceNodesLeavingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceNodesLeavingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceNodesLeavingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_moving_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_moving_get_parameters.go new file mode 100644 index 00000000000..be9eeb46997 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_moving_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceNodesMovingGetParams creates a new StorageServiceNodesMovingGetParams object +// with the default values initialized. 
+func NewStorageServiceNodesMovingGetParams() *StorageServiceNodesMovingGetParams { + + return &StorageServiceNodesMovingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceNodesMovingGetParamsWithTimeout creates a new StorageServiceNodesMovingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceNodesMovingGetParamsWithTimeout(timeout time.Duration) *StorageServiceNodesMovingGetParams { + + return &StorageServiceNodesMovingGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceNodesMovingGetParamsWithContext creates a new StorageServiceNodesMovingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceNodesMovingGetParamsWithContext(ctx context.Context) *StorageServiceNodesMovingGetParams { + + return &StorageServiceNodesMovingGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceNodesMovingGetParamsWithHTTPClient creates a new StorageServiceNodesMovingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceNodesMovingGetParamsWithHTTPClient(client *http.Client) *StorageServiceNodesMovingGetParams { + + return &StorageServiceNodesMovingGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceNodesMovingGetParams contains all the parameters to send to the API endpoint +for the storage service nodes moving get operation typically these are written to a http.Request +*/ +type StorageServiceNodesMovingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service nodes moving get params +func (o *StorageServiceNodesMovingGetParams) WithTimeout(timeout time.Duration) *StorageServiceNodesMovingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service nodes moving get params +func (o *StorageServiceNodesMovingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service nodes moving get params +func (o *StorageServiceNodesMovingGetParams) WithContext(ctx context.Context) *StorageServiceNodesMovingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service nodes moving get params +func (o *StorageServiceNodesMovingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service nodes moving get params +func (o *StorageServiceNodesMovingGetParams) WithHTTPClient(client *http.Client) *StorageServiceNodesMovingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service nodes moving get params +func (o *StorageServiceNodesMovingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceNodesMovingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_moving_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_moving_get_responses.go new file mode 100644 index 00000000000..72b2e1acbc7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_nodes_moving_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceNodesMovingGetReader is a Reader for the StorageServiceNodesMovingGet structure. +type StorageServiceNodesMovingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceNodesMovingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceNodesMovingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceNodesMovingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceNodesMovingGetOK creates a StorageServiceNodesMovingGetOK with default headers values +func NewStorageServiceNodesMovingGetOK() *StorageServiceNodesMovingGetOK { + return &StorageServiceNodesMovingGetOK{} +} + +/* +StorageServiceNodesMovingGetOK handles this case with default header values. + +Success +*/ +type StorageServiceNodesMovingGetOK struct { + Payload []string +} + +func (o *StorageServiceNodesMovingGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceNodesMovingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceNodesMovingGetDefault creates a StorageServiceNodesMovingGetDefault with default headers values +func NewStorageServiceNodesMovingGetDefault(code int) *StorageServiceNodesMovingGetDefault { + return &StorageServiceNodesMovingGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceNodesMovingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceNodesMovingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service nodes moving get default response +func (o *StorageServiceNodesMovingGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceNodesMovingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceNodesMovingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceNodesMovingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_operation_mode_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_operation_mode_get_parameters.go new file mode 100644 index 00000000000..0306bcae1c3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_operation_mode_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceOperationModeGetParams creates a new StorageServiceOperationModeGetParams object +// with the default values initialized. 
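+//
+// Beyond the standard timeout/context/HTTP-client knobs this operation takes
+// no parameters; the 200 response carries the node's operation mode as a
+// plain string. A minimal usage sketch (client method name assumed):
+//
+//	res, err := client.StorageServiceOperationModeGet(
+//		operations.NewStorageServiceOperationModeGetParamsWithContext(ctx))
+//	if err != nil {
+//		return err
+//	}
+//	mode := res.Payload // e.g. "NORMAL" on a node that is serving requests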
+func NewStorageServiceOperationModeGetParams() *StorageServiceOperationModeGetParams { + + return &StorageServiceOperationModeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceOperationModeGetParamsWithTimeout creates a new StorageServiceOperationModeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceOperationModeGetParamsWithTimeout(timeout time.Duration) *StorageServiceOperationModeGetParams { + + return &StorageServiceOperationModeGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceOperationModeGetParamsWithContext creates a new StorageServiceOperationModeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceOperationModeGetParamsWithContext(ctx context.Context) *StorageServiceOperationModeGetParams { + + return &StorageServiceOperationModeGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceOperationModeGetParamsWithHTTPClient creates a new StorageServiceOperationModeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceOperationModeGetParamsWithHTTPClient(client *http.Client) *StorageServiceOperationModeGetParams { + + return &StorageServiceOperationModeGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceOperationModeGetParams contains all the parameters to send to the API endpoint +for the storage service operation mode get operation typically these are written to a http.Request +*/ +type StorageServiceOperationModeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service operation mode get params +func (o *StorageServiceOperationModeGetParams) WithTimeout(timeout time.Duration) *StorageServiceOperationModeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service operation mode get params +func (o *StorageServiceOperationModeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service operation mode get params +func (o *StorageServiceOperationModeGetParams) WithContext(ctx context.Context) *StorageServiceOperationModeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service operation mode get params +func (o *StorageServiceOperationModeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service operation mode get params +func (o *StorageServiceOperationModeGetParams) WithHTTPClient(client *http.Client) *StorageServiceOperationModeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service operation mode get params +func (o *StorageServiceOperationModeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceOperationModeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_operation_mode_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_operation_mode_get_responses.go new file mode 100644 index 00000000000..e2171a2f3f6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_operation_mode_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceOperationModeGetReader is a Reader for the StorageServiceOperationModeGet structure. +type StorageServiceOperationModeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceOperationModeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceOperationModeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceOperationModeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceOperationModeGetOK creates a StorageServiceOperationModeGetOK with default headers values +func NewStorageServiceOperationModeGetOK() *StorageServiceOperationModeGetOK { + return &StorageServiceOperationModeGetOK{} +} + +/* +StorageServiceOperationModeGetOK handles this case with default header values. + +Success +*/ +type StorageServiceOperationModeGetOK struct { + Payload string +} + +func (o *StorageServiceOperationModeGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceOperationModeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceOperationModeGetDefault creates a StorageServiceOperationModeGetDefault with default headers values +func NewStorageServiceOperationModeGetDefault(code int) *StorageServiceOperationModeGetDefault { + return &StorageServiceOperationModeGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceOperationModeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceOperationModeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service operation mode get default response +func (o *StorageServiceOperationModeGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceOperationModeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceOperationModeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceOperationModeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..8eb4fafe61d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_by_keyspace_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceOwnershipByKeyspaceGetParams creates a new StorageServiceOwnershipByKeyspaceGetParams object +// with the default values initialized. 
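+//
+// Keyspace is the only parameter and is sent as a path parameter; the
+// matching responses file decodes a 200 body into []*models.Mapper. A hedged
+// sketch (the client method name and the Mapper Key/Value field names are
+// assumptions, not defined in this file):
+//
+//	params := operations.NewStorageServiceOwnershipByKeyspaceGetParams().WithKeyspace("ks1")
+//	res, err := client.StorageServiceOwnershipByKeyspaceGet(params)
+//	if err != nil {
+//		return err
+//	}
+//	for _, m := range res.Payload {
+//		fmt.Printf("endpoint %s owns %s\n", m.Key, m.Value)
+//	}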
+func NewStorageServiceOwnershipByKeyspaceGetParams() *StorageServiceOwnershipByKeyspaceGetParams { + var () + return &StorageServiceOwnershipByKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceOwnershipByKeyspaceGetParamsWithTimeout creates a new StorageServiceOwnershipByKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceOwnershipByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServiceOwnershipByKeyspaceGetParams { + var () + return &StorageServiceOwnershipByKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceOwnershipByKeyspaceGetParamsWithContext creates a new StorageServiceOwnershipByKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceOwnershipByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServiceOwnershipByKeyspaceGetParams { + var () + return &StorageServiceOwnershipByKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceOwnershipByKeyspaceGetParamsWithHTTPClient creates a new StorageServiceOwnershipByKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceOwnershipByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServiceOwnershipByKeyspaceGetParams { + var () + return &StorageServiceOwnershipByKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceOwnershipByKeyspaceGetParams contains all the parameters to send to the API endpoint +for the storage service ownership by keyspace get operation typically these are written to a http.Request +*/ +type StorageServiceOwnershipByKeyspaceGetParams struct { + + /*Keyspace + The keyspace to fetch information about + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServiceOwnershipByKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServiceOwnershipByKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServiceOwnershipByKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithKeyspace adds the keyspace to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) WithKeyspace(keyspace string) 
*StorageServiceOwnershipByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service ownership by keyspace get params +func (o *StorageServiceOwnershipByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceOwnershipByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_by_keyspace_get_responses.go new file mode 100644 index 00000000000..467b23fdc6f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceOwnershipByKeyspaceGetReader is a Reader for the StorageServiceOwnershipByKeyspaceGet structure. +type StorageServiceOwnershipByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceOwnershipByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceOwnershipByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceOwnershipByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceOwnershipByKeyspaceGetOK creates a StorageServiceOwnershipByKeyspaceGetOK with default headers values +func NewStorageServiceOwnershipByKeyspaceGetOK() *StorageServiceOwnershipByKeyspaceGetOK { + return &StorageServiceOwnershipByKeyspaceGetOK{} +} + +/* +StorageServiceOwnershipByKeyspaceGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceOwnershipByKeyspaceGetOK struct { + Payload []*models.Mapper +} + +func (o *StorageServiceOwnershipByKeyspaceGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *StorageServiceOwnershipByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceOwnershipByKeyspaceGetDefault creates a StorageServiceOwnershipByKeyspaceGetDefault with default headers values +func NewStorageServiceOwnershipByKeyspaceGetDefault(code int) *StorageServiceOwnershipByKeyspaceGetDefault { + return &StorageServiceOwnershipByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceOwnershipByKeyspaceGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceOwnershipByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service ownership by keyspace get default response +func (o *StorageServiceOwnershipByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceOwnershipByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceOwnershipByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceOwnershipByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_get_parameters.go new file mode 100644 index 00000000000..0c680737cb2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceOwnershipGetParams creates a new StorageServiceOwnershipGetParams object +// with the default values initialized. 
+func NewStorageServiceOwnershipGetParams() *StorageServiceOwnershipGetParams { + + return &StorageServiceOwnershipGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceOwnershipGetParamsWithTimeout creates a new StorageServiceOwnershipGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceOwnershipGetParamsWithTimeout(timeout time.Duration) *StorageServiceOwnershipGetParams { + + return &StorageServiceOwnershipGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceOwnershipGetParamsWithContext creates a new StorageServiceOwnershipGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceOwnershipGetParamsWithContext(ctx context.Context) *StorageServiceOwnershipGetParams { + + return &StorageServiceOwnershipGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceOwnershipGetParamsWithHTTPClient creates a new StorageServiceOwnershipGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceOwnershipGetParamsWithHTTPClient(client *http.Client) *StorageServiceOwnershipGetParams { + + return &StorageServiceOwnershipGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceOwnershipGetParams contains all the parameters to send to the API endpoint +for the storage service ownership get operation typically these are written to a http.Request +*/ +type StorageServiceOwnershipGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service ownership get params +func (o *StorageServiceOwnershipGetParams) WithTimeout(timeout time.Duration) *StorageServiceOwnershipGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service ownership get params +func (o *StorageServiceOwnershipGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service ownership get params +func (o *StorageServiceOwnershipGetParams) WithContext(ctx context.Context) *StorageServiceOwnershipGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service ownership get params +func (o *StorageServiceOwnershipGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service ownership get params +func (o *StorageServiceOwnershipGetParams) WithHTTPClient(client *http.Client) *StorageServiceOwnershipGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service ownership get params +func (o *StorageServiceOwnershipGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceOwnershipGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_get_responses.go new file mode 100644 index 00000000000..ebe20215d10 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_ownership_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceOwnershipGetReader is a Reader for the StorageServiceOwnershipGet structure. +type StorageServiceOwnershipGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceOwnershipGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceOwnershipGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceOwnershipGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceOwnershipGetOK creates a StorageServiceOwnershipGetOK with default headers values +func NewStorageServiceOwnershipGetOK() *StorageServiceOwnershipGetOK { + return &StorageServiceOwnershipGetOK{} +} + +/* +StorageServiceOwnershipGetOK handles this case with default header values. + +Success +*/ +type StorageServiceOwnershipGetOK struct { + Payload []*models.Mapper +} + +func (o *StorageServiceOwnershipGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *StorageServiceOwnershipGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceOwnershipGetDefault creates a StorageServiceOwnershipGetDefault with default headers values +func NewStorageServiceOwnershipGetDefault(code int) *StorageServiceOwnershipGetDefault { + return &StorageServiceOwnershipGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceOwnershipGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceOwnershipGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service ownership get default response +func (o *StorageServiceOwnershipGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceOwnershipGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceOwnershipGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceOwnershipGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_partitioner_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_partitioner_name_get_parameters.go new file mode 100644 index 00000000000..63221528df2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_partitioner_name_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServicePartitionerNameGetParams creates a new StorageServicePartitionerNameGetParams object +// with the default values initialized. 
+func NewStorageServicePartitionerNameGetParams() *StorageServicePartitionerNameGetParams { + + return &StorageServicePartitionerNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServicePartitionerNameGetParamsWithTimeout creates a new StorageServicePartitionerNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServicePartitionerNameGetParamsWithTimeout(timeout time.Duration) *StorageServicePartitionerNameGetParams { + + return &StorageServicePartitionerNameGetParams{ + + timeout: timeout, + } +} + +// NewStorageServicePartitionerNameGetParamsWithContext creates a new StorageServicePartitionerNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServicePartitionerNameGetParamsWithContext(ctx context.Context) *StorageServicePartitionerNameGetParams { + + return &StorageServicePartitionerNameGetParams{ + + Context: ctx, + } +} + +// NewStorageServicePartitionerNameGetParamsWithHTTPClient creates a new StorageServicePartitionerNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServicePartitionerNameGetParamsWithHTTPClient(client *http.Client) *StorageServicePartitionerNameGetParams { + + return &StorageServicePartitionerNameGetParams{ + HTTPClient: client, + } +} + +/* +StorageServicePartitionerNameGetParams contains all the parameters to send to the API endpoint +for the storage service partitioner name get operation typically these are written to a http.Request +*/ +type StorageServicePartitionerNameGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service partitioner name get params +func (o *StorageServicePartitionerNameGetParams) WithTimeout(timeout time.Duration) *StorageServicePartitionerNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service partitioner name get params +func (o *StorageServicePartitionerNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service partitioner name get params +func (o *StorageServicePartitionerNameGetParams) WithContext(ctx context.Context) *StorageServicePartitionerNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service partitioner name get params +func (o *StorageServicePartitionerNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service partitioner name get params +func (o *StorageServicePartitionerNameGetParams) WithHTTPClient(client *http.Client) *StorageServicePartitionerNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service partitioner name get params +func (o *StorageServicePartitionerNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServicePartitionerNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_partitioner_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_partitioner_name_get_responses.go new file mode 100644 index 00000000000..3eeb9e21a64 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_partitioner_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServicePartitionerNameGetReader is a Reader for the StorageServicePartitionerNameGet structure. +type StorageServicePartitionerNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServicePartitionerNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServicePartitionerNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServicePartitionerNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServicePartitionerNameGetOK creates a StorageServicePartitionerNameGetOK with default headers values +func NewStorageServicePartitionerNameGetOK() *StorageServicePartitionerNameGetOK { + return &StorageServicePartitionerNameGetOK{} +} + +/* +StorageServicePartitionerNameGetOK handles this case with default header values. + +Success +*/ +type StorageServicePartitionerNameGetOK struct { + Payload string +} + +func (o *StorageServicePartitionerNameGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServicePartitionerNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServicePartitionerNameGetDefault creates a StorageServicePartitionerNameGetDefault with default headers values +func NewStorageServicePartitionerNameGetDefault(code int) *StorageServicePartitionerNameGetDefault { + return &StorageServicePartitionerNameGetDefault{ + _statusCode: code, + } +} + +/* +StorageServicePartitionerNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServicePartitionerNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service partitioner name get default response +func (o *StorageServicePartitionerNameGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServicePartitionerNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServicePartitionerNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServicePartitionerNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_pending_range_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_pending_range_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..59745aaab9d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_pending_range_by_keyspace_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServicePendingRangeByKeyspaceGetParams creates a new StorageServicePendingRangeByKeyspaceGetParams object +// with the default values initialized. 
+func NewStorageServicePendingRangeByKeyspaceGetParams() *StorageServicePendingRangeByKeyspaceGetParams { + var () + return &StorageServicePendingRangeByKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServicePendingRangeByKeyspaceGetParamsWithTimeout creates a new StorageServicePendingRangeByKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServicePendingRangeByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServicePendingRangeByKeyspaceGetParams { + var () + return &StorageServicePendingRangeByKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewStorageServicePendingRangeByKeyspaceGetParamsWithContext creates a new StorageServicePendingRangeByKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServicePendingRangeByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServicePendingRangeByKeyspaceGetParams { + var () + return &StorageServicePendingRangeByKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewStorageServicePendingRangeByKeyspaceGetParamsWithHTTPClient creates a new StorageServicePendingRangeByKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServicePendingRangeByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServicePendingRangeByKeyspaceGetParams { + var () + return &StorageServicePendingRangeByKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +StorageServicePendingRangeByKeyspaceGetParams contains all the parameters to send to the API endpoint +for the storage service pending range by keyspace get operation typically these are written to a http.Request +*/ +type StorageServicePendingRangeByKeyspaceGetParams struct { + + /*Keyspace + The keyspace to query about + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service pending range by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServicePendingRangeByKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service pending range by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service pending range by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServicePendingRangeByKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service pending range by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service pending range by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServicePendingRangeByKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service pending range by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithKeyspace adds the keyspace to the storage service pending range 
by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) WithKeyspace(keyspace string) *StorageServicePendingRangeByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service pending range by keyspace get params +func (o *StorageServicePendingRangeByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServicePendingRangeByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_pending_range_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_pending_range_by_keyspace_get_responses.go new file mode 100644 index 00000000000..67a3f877dc5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_pending_range_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServicePendingRangeByKeyspaceGetReader is a Reader for the StorageServicePendingRangeByKeyspaceGet structure. +type StorageServicePendingRangeByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServicePendingRangeByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServicePendingRangeByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServicePendingRangeByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServicePendingRangeByKeyspaceGetOK creates a StorageServicePendingRangeByKeyspaceGetOK with default headers values +func NewStorageServicePendingRangeByKeyspaceGetOK() *StorageServicePendingRangeByKeyspaceGetOK { + return &StorageServicePendingRangeByKeyspaceGetOK{} +} + +/* +StorageServicePendingRangeByKeyspaceGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServicePendingRangeByKeyspaceGetOK struct { + Payload []*models.MaplistMapper +} + +func (o *StorageServicePendingRangeByKeyspaceGetOK) GetPayload() []*models.MaplistMapper { + return o.Payload +} + +func (o *StorageServicePendingRangeByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServicePendingRangeByKeyspaceGetDefault creates a StorageServicePendingRangeByKeyspaceGetDefault with default headers values +func NewStorageServicePendingRangeByKeyspaceGetDefault(code int) *StorageServicePendingRangeByKeyspaceGetDefault { + return &StorageServicePendingRangeByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServicePendingRangeByKeyspaceGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServicePendingRangeByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service pending range by keyspace get default response +func (o *StorageServicePendingRangeByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServicePendingRangeByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServicePendingRangeByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServicePendingRangeByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_range_to_endpoint_map_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_range_to_endpoint_map_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..10e3b7b5187 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_range_to_endpoint_map_by_keyspace_get_parameters.go @@ -0,0 +1,169 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceRangeToEndpointMapByKeyspaceGetParams creates a new StorageServiceRangeToEndpointMapByKeyspaceGetParams object +// with the default values initialized. 
+func NewStorageServiceRangeToEndpointMapByKeyspaceGetParams() *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + var () + return &StorageServiceRangeToEndpointMapByKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRangeToEndpointMapByKeyspaceGetParamsWithTimeout creates a new StorageServiceRangeToEndpointMapByKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRangeToEndpointMapByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + var () + return &StorageServiceRangeToEndpointMapByKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRangeToEndpointMapByKeyspaceGetParamsWithContext creates a new StorageServiceRangeToEndpointMapByKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRangeToEndpointMapByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + var () + return &StorageServiceRangeToEndpointMapByKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceRangeToEndpointMapByKeyspaceGetParamsWithHTTPClient creates a new StorageServiceRangeToEndpointMapByKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRangeToEndpointMapByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + var () + return &StorageServiceRangeToEndpointMapByKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRangeToEndpointMapByKeyspaceGetParams contains all the parameters to send to the API endpoint +for the storage service range to endpoint map by keyspace get operation typically these are written to a http.Request +*/ +type StorageServiceRangeToEndpointMapByKeyspaceGetParams struct { + + /*Keyspace + The keyspace to query about + + */ + Keyspace string + /*RPC + When set to true, return the rpc address + + */ + RPC *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// 
SetHTTPClient adds the HTTPClient to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithKeyspace adds the keyspace to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) WithKeyspace(keyspace string) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WithRPC adds the rpc to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) WithRPC(rpc *bool) *StorageServiceRangeToEndpointMapByKeyspaceGetParams { + o.SetRPC(rpc) + return o +} + +// SetRPC adds the rpc to the storage service range to endpoint map by keyspace get params +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) SetRPC(rpc *bool) { + o.RPC = rpc +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if o.RPC != nil { + + // query param rpc + var qrRPC bool + if o.RPC != nil { + qrRPC = *o.RPC + } + qRPC := swag.FormatBool(qrRPC) + if qRPC != "" { + if err := r.SetQueryParam("rpc", qRPC); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_range_to_endpoint_map_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_range_to_endpoint_map_by_keyspace_get_responses.go new file mode 100644 index 00000000000..6610a294725 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_range_to_endpoint_map_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRangeToEndpointMapByKeyspaceGetReader is a Reader for the StorageServiceRangeToEndpointMapByKeyspaceGet structure. +type StorageServiceRangeToEndpointMapByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *StorageServiceRangeToEndpointMapByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRangeToEndpointMapByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRangeToEndpointMapByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRangeToEndpointMapByKeyspaceGetOK creates a StorageServiceRangeToEndpointMapByKeyspaceGetOK with default headers values +func NewStorageServiceRangeToEndpointMapByKeyspaceGetOK() *StorageServiceRangeToEndpointMapByKeyspaceGetOK { + return &StorageServiceRangeToEndpointMapByKeyspaceGetOK{} +} + +/* +StorageServiceRangeToEndpointMapByKeyspaceGetOK handles this case with default header values. + +Success +*/ +type StorageServiceRangeToEndpointMapByKeyspaceGetOK struct { + Payload []*models.MaplistMapper +} + +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetOK) GetPayload() []*models.MaplistMapper { + return o.Payload +} + +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceRangeToEndpointMapByKeyspaceGetDefault creates a StorageServiceRangeToEndpointMapByKeyspaceGetDefault with default headers values +func NewStorageServiceRangeToEndpointMapByKeyspaceGetDefault(code int) *StorageServiceRangeToEndpointMapByKeyspaceGetDefault { + return &StorageServiceRangeToEndpointMapByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRangeToEndpointMapByKeyspaceGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRangeToEndpointMapByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service range to endpoint map by keyspace get default response +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRangeToEndpointMapByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rebuild_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rebuild_post_parameters.go new file mode 100644 index 00000000000..f4853c92817 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rebuild_post_parameters.go @@ -0,0 +1,147 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRebuildPostParams creates a new StorageServiceRebuildPostParams object +// with the default values initialized. 
+func NewStorageServiceRebuildPostParams() *StorageServiceRebuildPostParams { + var () + return &StorageServiceRebuildPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRebuildPostParamsWithTimeout creates a new StorageServiceRebuildPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRebuildPostParamsWithTimeout(timeout time.Duration) *StorageServiceRebuildPostParams { + var () + return &StorageServiceRebuildPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRebuildPostParamsWithContext creates a new StorageServiceRebuildPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRebuildPostParamsWithContext(ctx context.Context) *StorageServiceRebuildPostParams { + var () + return &StorageServiceRebuildPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceRebuildPostParamsWithHTTPClient creates a new StorageServiceRebuildPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRebuildPostParamsWithHTTPClient(client *http.Client) *StorageServiceRebuildPostParams { + var () + return &StorageServiceRebuildPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRebuildPostParams contains all the parameters to send to the API endpoint +for the storage service rebuild post operation typically these are written to a http.Request +*/ +type StorageServiceRebuildPostParams struct { + + /*SourceDc + Name of DC from which to select sources for streaming or none to pick any node + + */ + SourceDc *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) WithTimeout(timeout time.Duration) *StorageServiceRebuildPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) WithContext(ctx context.Context) *StorageServiceRebuildPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) WithHTTPClient(client *http.Client) *StorageServiceRebuildPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithSourceDc adds the sourceDc to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) WithSourceDc(sourceDc *string) *StorageServiceRebuildPostParams { + o.SetSourceDc(sourceDc) + return o +} + +// SetSourceDc adds the sourceDc to the storage service rebuild post params +func (o *StorageServiceRebuildPostParams) SetSourceDc(sourceDc *string) { + o.SourceDc = sourceDc +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRebuildPostParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.SourceDc != nil { + + // query param source_dc + var qrSourceDc string + if o.SourceDc != nil { + qrSourceDc = *o.SourceDc + } + qSourceDc := qrSourceDc + if qSourceDc != "" { + if err := r.SetQueryParam("source_dc", qSourceDc); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rebuild_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rebuild_post_responses.go new file mode 100644 index 00000000000..b433f62afc3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rebuild_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRebuildPostReader is a Reader for the StorageServiceRebuildPost structure. +type StorageServiceRebuildPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRebuildPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRebuildPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRebuildPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRebuildPostOK creates a StorageServiceRebuildPostOK with default headers values +func NewStorageServiceRebuildPostOK() *StorageServiceRebuildPostOK { + return &StorageServiceRebuildPostOK{} +} + +/* +StorageServiceRebuildPostOK handles this case with default header values. + +Success +*/ +type StorageServiceRebuildPostOK struct { +} + +func (o *StorageServiceRebuildPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceRebuildPostDefault creates a StorageServiceRebuildPostDefault with default headers values +func NewStorageServiceRebuildPostDefault(code int) *StorageServiceRebuildPostDefault { + return &StorageServiceRebuildPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRebuildPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRebuildPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service rebuild post default response +func (o *StorageServiceRebuildPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRebuildPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRebuildPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRebuildPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_release_version_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_release_version_get_parameters.go new file mode 100644 index 00000000000..ba74ad2f462 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_release_version_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceReleaseVersionGetParams creates a new StorageServiceReleaseVersionGetParams object +// with the default values initialized. 
+func NewStorageServiceReleaseVersionGetParams() *StorageServiceReleaseVersionGetParams { + + return &StorageServiceReleaseVersionGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceReleaseVersionGetParamsWithTimeout creates a new StorageServiceReleaseVersionGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceReleaseVersionGetParamsWithTimeout(timeout time.Duration) *StorageServiceReleaseVersionGetParams { + + return &StorageServiceReleaseVersionGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceReleaseVersionGetParamsWithContext creates a new StorageServiceReleaseVersionGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceReleaseVersionGetParamsWithContext(ctx context.Context) *StorageServiceReleaseVersionGetParams { + + return &StorageServiceReleaseVersionGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceReleaseVersionGetParamsWithHTTPClient creates a new StorageServiceReleaseVersionGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceReleaseVersionGetParamsWithHTTPClient(client *http.Client) *StorageServiceReleaseVersionGetParams { + + return &StorageServiceReleaseVersionGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceReleaseVersionGetParams contains all the parameters to send to the API endpoint +for the storage service release version get operation typically these are written to a http.Request +*/ +type StorageServiceReleaseVersionGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service release version get params +func (o *StorageServiceReleaseVersionGetParams) WithTimeout(timeout time.Duration) *StorageServiceReleaseVersionGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service release version get params +func (o *StorageServiceReleaseVersionGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service release version get params +func (o *StorageServiceReleaseVersionGetParams) WithContext(ctx context.Context) *StorageServiceReleaseVersionGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service release version get params +func (o *StorageServiceReleaseVersionGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service release version get params +func (o *StorageServiceReleaseVersionGetParams) WithHTTPClient(client *http.Client) *StorageServiceReleaseVersionGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service release version get params +func (o *StorageServiceReleaseVersionGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceReleaseVersionGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_release_version_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_release_version_get_responses.go new file mode 100644 index 00000000000..a8e2b11a82b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_release_version_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceReleaseVersionGetReader is a Reader for the StorageServiceReleaseVersionGet structure. +type StorageServiceReleaseVersionGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceReleaseVersionGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceReleaseVersionGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceReleaseVersionGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceReleaseVersionGetOK creates a StorageServiceReleaseVersionGetOK with default headers values +func NewStorageServiceReleaseVersionGetOK() *StorageServiceReleaseVersionGetOK { + return &StorageServiceReleaseVersionGetOK{} +} + +/* +StorageServiceReleaseVersionGetOK handles this case with default header values. + +Success +*/ +type StorageServiceReleaseVersionGetOK struct { + Payload string +} + +func (o *StorageServiceReleaseVersionGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceReleaseVersionGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceReleaseVersionGetDefault creates a StorageServiceReleaseVersionGetDefault with default headers values +func NewStorageServiceReleaseVersionGetDefault(code int) *StorageServiceReleaseVersionGetDefault { + return &StorageServiceReleaseVersionGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceReleaseVersionGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceReleaseVersionGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service release version get default response +func (o *StorageServiceReleaseVersionGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceReleaseVersionGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceReleaseVersionGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceReleaseVersionGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_relocal_schema_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_relocal_schema_post_parameters.go new file mode 100644 index 00000000000..2d97124b803 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_relocal_schema_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRelocalSchemaPostParams creates a new StorageServiceRelocalSchemaPostParams object +// with the default values initialized. 
+func NewStorageServiceRelocalSchemaPostParams() *StorageServiceRelocalSchemaPostParams { + + return &StorageServiceRelocalSchemaPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRelocalSchemaPostParamsWithTimeout creates a new StorageServiceRelocalSchemaPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRelocalSchemaPostParamsWithTimeout(timeout time.Duration) *StorageServiceRelocalSchemaPostParams { + + return &StorageServiceRelocalSchemaPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRelocalSchemaPostParamsWithContext creates a new StorageServiceRelocalSchemaPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRelocalSchemaPostParamsWithContext(ctx context.Context) *StorageServiceRelocalSchemaPostParams { + + return &StorageServiceRelocalSchemaPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceRelocalSchemaPostParamsWithHTTPClient creates a new StorageServiceRelocalSchemaPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRelocalSchemaPostParamsWithHTTPClient(client *http.Client) *StorageServiceRelocalSchemaPostParams { + + return &StorageServiceRelocalSchemaPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRelocalSchemaPostParams contains all the parameters to send to the API endpoint +for the storage service relocal schema post operation typically these are written to a http.Request +*/ +type StorageServiceRelocalSchemaPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service relocal schema post params +func (o *StorageServiceRelocalSchemaPostParams) WithTimeout(timeout time.Duration) *StorageServiceRelocalSchemaPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service relocal schema post params +func (o *StorageServiceRelocalSchemaPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service relocal schema post params +func (o *StorageServiceRelocalSchemaPostParams) WithContext(ctx context.Context) *StorageServiceRelocalSchemaPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service relocal schema post params +func (o *StorageServiceRelocalSchemaPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service relocal schema post params +func (o *StorageServiceRelocalSchemaPostParams) WithHTTPClient(client *http.Client) *StorageServiceRelocalSchemaPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service relocal schema post params +func (o *StorageServiceRelocalSchemaPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRelocalSchemaPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_relocal_schema_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_relocal_schema_post_responses.go new file mode 100644 index 00000000000..502b058fbb4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_relocal_schema_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRelocalSchemaPostReader is a Reader for the StorageServiceRelocalSchemaPost structure. +type StorageServiceRelocalSchemaPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRelocalSchemaPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRelocalSchemaPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRelocalSchemaPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRelocalSchemaPostOK creates a StorageServiceRelocalSchemaPostOK with default headers values +func NewStorageServiceRelocalSchemaPostOK() *StorageServiceRelocalSchemaPostOK { + return &StorageServiceRelocalSchemaPostOK{} +} + +/* +StorageServiceRelocalSchemaPostOK handles this case with default header values. + +Success +*/ +type StorageServiceRelocalSchemaPostOK struct { +} + +func (o *StorageServiceRelocalSchemaPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceRelocalSchemaPostDefault creates a StorageServiceRelocalSchemaPostDefault with default headers values +func NewStorageServiceRelocalSchemaPostDefault(code int) *StorageServiceRelocalSchemaPostDefault { + return &StorageServiceRelocalSchemaPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRelocalSchemaPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRelocalSchemaPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service relocal schema post default response +func (o *StorageServiceRelocalSchemaPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRelocalSchemaPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRelocalSchemaPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRelocalSchemaPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_removal_status_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_removal_status_get_parameters.go new file mode 100644 index 00000000000..235e573f4ae --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_removal_status_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRemovalStatusGetParams creates a new StorageServiceRemovalStatusGetParams object +// with the default values initialized. 
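For orientation, a minimal sketch of driving the storage_service_relocal_schema_post operation defined above end to end. It assumes the package's generated operations client (operations.New and a StorageServiceRelocalSchemaPost method) follows the usual go-swagger shape; the host, scheme, and timeout values are placeholders, not part of this diff.

package main

import (
	"log"
	"time"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Placeholder endpoint; a real caller would point this at a Scylla API host.
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	client := operations.New(transport, strfmt.Default)

	// This operation takes no API parameters, so the params object only
	// carries transport knobs such as the per-request timeout.
	params := operations.NewStorageServiceRelocalSchemaPostParamsWithTimeout(30 * time.Second)

	if _, err := client.StorageServiceRelocalSchemaPost(params); err != nil {
		// Non-2xx responses come back as *StorageServiceRelocalSchemaPostDefault,
		// whose Error() embeds the HTTP status and the agent's error message.
		log.Fatal(err)
	}
	log.Println("schema reload requested")
}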
+func NewStorageServiceRemovalStatusGetParams() *StorageServiceRemovalStatusGetParams { + + return &StorageServiceRemovalStatusGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRemovalStatusGetParamsWithTimeout creates a new StorageServiceRemovalStatusGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRemovalStatusGetParamsWithTimeout(timeout time.Duration) *StorageServiceRemovalStatusGetParams { + + return &StorageServiceRemovalStatusGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRemovalStatusGetParamsWithContext creates a new StorageServiceRemovalStatusGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRemovalStatusGetParamsWithContext(ctx context.Context) *StorageServiceRemovalStatusGetParams { + + return &StorageServiceRemovalStatusGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceRemovalStatusGetParamsWithHTTPClient creates a new StorageServiceRemovalStatusGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRemovalStatusGetParamsWithHTTPClient(client *http.Client) *StorageServiceRemovalStatusGetParams { + + return &StorageServiceRemovalStatusGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRemovalStatusGetParams contains all the parameters to send to the API endpoint +for the storage service removal status get operation typically these are written to a http.Request +*/ +type StorageServiceRemovalStatusGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service removal status get params +func (o *StorageServiceRemovalStatusGetParams) WithTimeout(timeout time.Duration) *StorageServiceRemovalStatusGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service removal status get params +func (o *StorageServiceRemovalStatusGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service removal status get params +func (o *StorageServiceRemovalStatusGetParams) WithContext(ctx context.Context) *StorageServiceRemovalStatusGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service removal status get params +func (o *StorageServiceRemovalStatusGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service removal status get params +func (o *StorageServiceRemovalStatusGetParams) WithHTTPClient(client *http.Client) *StorageServiceRemovalStatusGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service removal status get params +func (o *StorageServiceRemovalStatusGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRemovalStatusGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_removal_status_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_removal_status_get_responses.go new file mode 100644 index 00000000000..41923ee362f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_removal_status_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRemovalStatusGetReader is a Reader for the StorageServiceRemovalStatusGet structure. +type StorageServiceRemovalStatusGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRemovalStatusGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRemovalStatusGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRemovalStatusGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRemovalStatusGetOK creates a StorageServiceRemovalStatusGetOK with default headers values +func NewStorageServiceRemovalStatusGetOK() *StorageServiceRemovalStatusGetOK { + return &StorageServiceRemovalStatusGetOK{} +} + +/* +StorageServiceRemovalStatusGetOK handles this case with default header values. + +Success +*/ +type StorageServiceRemovalStatusGetOK struct { + Payload string +} + +func (o *StorageServiceRemovalStatusGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceRemovalStatusGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceRemovalStatusGetDefault creates a StorageServiceRemovalStatusGetDefault with default headers values +func NewStorageServiceRemovalStatusGetDefault(code int) *StorageServiceRemovalStatusGetDefault { + return &StorageServiceRemovalStatusGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRemovalStatusGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRemovalStatusGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service removal status get default response +func (o *StorageServiceRemovalStatusGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRemovalStatusGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRemovalStatusGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRemovalStatusGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_remove_node_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_remove_node_post_parameters.go new file mode 100644 index 00000000000..708be26ee45 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_remove_node_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRemoveNodePostParams creates a new StorageServiceRemoveNodePostParams object +// with the default values initialized. 
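A companion sketch for the removal-status operation above, whose 200 body is decoded straight into a string by readResponse. As before, the generated operations client method (StorageServiceRemovalStatusGet) is assumed to have the standard go-swagger signature, and the endpoint is illustrative.

package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	client := operations.New(transport, strfmt.Default)

	res, err := client.StorageServiceRemovalStatusGet(operations.NewStorageServiceRemovalStatusGetParams())
	if err != nil {
		log.Fatal(err)
	}
	// GetPayload returns the raw status text consumed from the response body.
	fmt.Println("removenode status:", res.GetPayload())
}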
+func NewStorageServiceRemoveNodePostParams() *StorageServiceRemoveNodePostParams { + var () + return &StorageServiceRemoveNodePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRemoveNodePostParamsWithTimeout creates a new StorageServiceRemoveNodePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRemoveNodePostParamsWithTimeout(timeout time.Duration) *StorageServiceRemoveNodePostParams { + var () + return &StorageServiceRemoveNodePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRemoveNodePostParamsWithContext creates a new StorageServiceRemoveNodePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRemoveNodePostParamsWithContext(ctx context.Context) *StorageServiceRemoveNodePostParams { + var () + return &StorageServiceRemoveNodePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceRemoveNodePostParamsWithHTTPClient creates a new StorageServiceRemoveNodePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRemoveNodePostParamsWithHTTPClient(client *http.Client) *StorageServiceRemoveNodePostParams { + var () + return &StorageServiceRemoveNodePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRemoveNodePostParams contains all the parameters to send to the API endpoint +for the storage service remove node post operation typically these are written to a http.Request +*/ +type StorageServiceRemoveNodePostParams struct { + + /*HostID + Remove the node with host_id from the cluster + + */ + HostID string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) WithTimeout(timeout time.Duration) *StorageServiceRemoveNodePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) WithContext(ctx context.Context) *StorageServiceRemoveNodePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) WithHTTPClient(client *http.Client) *StorageServiceRemoveNodePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithHostID adds the hostID to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) WithHostID(hostID string) *StorageServiceRemoveNodePostParams { + o.SetHostID(hostID) + return o +} + +// SetHostID adds the hostId to the storage service remove node post params +func (o *StorageServiceRemoveNodePostParams) SetHostID(hostID string) { + o.HostID = hostID +} + +// WriteToRequest writes these params to a swagger request +func 
(o *StorageServiceRemoveNodePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param host_id + qrHostID := o.HostID + qHostID := qrHostID + if qHostID != "" { + if err := r.SetQueryParam("host_id", qHostID); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_remove_node_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_remove_node_post_responses.go new file mode 100644 index 00000000000..97fb90700dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_remove_node_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRemoveNodePostReader is a Reader for the StorageServiceRemoveNodePost structure. +type StorageServiceRemoveNodePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRemoveNodePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRemoveNodePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRemoveNodePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRemoveNodePostOK creates a StorageServiceRemoveNodePostOK with default headers values +func NewStorageServiceRemoveNodePostOK() *StorageServiceRemoveNodePostOK { + return &StorageServiceRemoveNodePostOK{} +} + +/* +StorageServiceRemoveNodePostOK handles this case with default header values. + +Success +*/ +type StorageServiceRemoveNodePostOK struct { +} + +func (o *StorageServiceRemoveNodePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceRemoveNodePostDefault creates a StorageServiceRemoveNodePostDefault with default headers values +func NewStorageServiceRemoveNodePostDefault(code int) *StorageServiceRemoveNodePostDefault { + return &StorageServiceRemoveNodePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRemoveNodePostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRemoveNodePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service remove node post default response +func (o *StorageServiceRemoveNodePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRemoveNodePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRemoveNodePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRemoveNodePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_get_parameters.go new file mode 100644 index 00000000000..fbd9f705cd6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_get_parameters.go @@ -0,0 +1,162 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceRepairAsyncByKeyspaceGetParams creates a new StorageServiceRepairAsyncByKeyspaceGetParams object +// with the default values initialized. 
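The remove-node operation above has a single required query parameter, host_id, which WriteToRequest serializes with SetQueryParam. A sketch under the same assumptions as the earlier ones; the UUID is a placeholder, not a real host.

package main

import (
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	client := operations.New(transport, strfmt.Default)

	// host_id identifies the node to remove from the cluster.
	params := operations.NewStorageServiceRemoveNodePostParams().
		WithHostID("00000000-0000-0000-0000-000000000000") // placeholder host ID

	if _, err := client.StorageServiceRemoveNodePost(params); err != nil {
		log.Fatal(err)
	}
}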
+func NewStorageServiceRepairAsyncByKeyspaceGetParams() *StorageServiceRepairAsyncByKeyspaceGetParams { + var () + return &StorageServiceRepairAsyncByKeyspaceGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRepairAsyncByKeyspaceGetParamsWithTimeout creates a new StorageServiceRepairAsyncByKeyspaceGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRepairAsyncByKeyspaceGetParamsWithTimeout(timeout time.Duration) *StorageServiceRepairAsyncByKeyspaceGetParams { + var () + return &StorageServiceRepairAsyncByKeyspaceGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRepairAsyncByKeyspaceGetParamsWithContext creates a new StorageServiceRepairAsyncByKeyspaceGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRepairAsyncByKeyspaceGetParamsWithContext(ctx context.Context) *StorageServiceRepairAsyncByKeyspaceGetParams { + var () + return &StorageServiceRepairAsyncByKeyspaceGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceRepairAsyncByKeyspaceGetParamsWithHTTPClient creates a new StorageServiceRepairAsyncByKeyspaceGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRepairAsyncByKeyspaceGetParamsWithHTTPClient(client *http.Client) *StorageServiceRepairAsyncByKeyspaceGetParams { + var () + return &StorageServiceRepairAsyncByKeyspaceGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRepairAsyncByKeyspaceGetParams contains all the parameters to send to the API endpoint +for the storage service repair async by keyspace get operation typically these are written to a http.Request +*/ +type StorageServiceRepairAsyncByKeyspaceGetParams struct { + + /*ID + The repair ID to check for status + + */ + ID int32 + /*Keyspace + The keyspace repair is running on + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) WithTimeout(timeout time.Duration) *StorageServiceRepairAsyncByKeyspaceGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) WithContext(ctx context.Context) *StorageServiceRepairAsyncByKeyspaceGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) WithHTTPClient(client *http.Client) *StorageServiceRepairAsyncByKeyspaceGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the storage 
service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) WithID(id int32) *StorageServiceRepairAsyncByKeyspaceGetParams { + o.SetID(id) + return o +} + +// SetID adds the id to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) SetID(id int32) { + o.ID = id +} + +// WithKeyspace adds the keyspace to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) WithKeyspace(keyspace string) *StorageServiceRepairAsyncByKeyspaceGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service repair async by keyspace get params +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRepairAsyncByKeyspaceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param id + qrID := o.ID + qID := swag.FormatInt32(qrID) + if qID != "" { + if err := r.SetQueryParam("id", qID); err != nil { + return err + } + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_get_responses.go new file mode 100644 index 00000000000..76017594502 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRepairAsyncByKeyspaceGetReader is a Reader for the StorageServiceRepairAsyncByKeyspaceGet structure. +type StorageServiceRepairAsyncByKeyspaceGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *StorageServiceRepairAsyncByKeyspaceGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRepairAsyncByKeyspaceGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRepairAsyncByKeyspaceGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRepairAsyncByKeyspaceGetOK creates a StorageServiceRepairAsyncByKeyspaceGetOK with default headers values +func NewStorageServiceRepairAsyncByKeyspaceGetOK() *StorageServiceRepairAsyncByKeyspaceGetOK { + return &StorageServiceRepairAsyncByKeyspaceGetOK{} +} + +/* +StorageServiceRepairAsyncByKeyspaceGetOK handles this case with default header values. + +Success +*/ +type StorageServiceRepairAsyncByKeyspaceGetOK struct { + Payload models.RepairAsyncStatusResponse +} + +func (o *StorageServiceRepairAsyncByKeyspaceGetOK) GetPayload() models.RepairAsyncStatusResponse { + return o.Payload +} + +func (o *StorageServiceRepairAsyncByKeyspaceGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceRepairAsyncByKeyspaceGetDefault creates a StorageServiceRepairAsyncByKeyspaceGetDefault with default headers values +func NewStorageServiceRepairAsyncByKeyspaceGetDefault(code int) *StorageServiceRepairAsyncByKeyspaceGetDefault { + return &StorageServiceRepairAsyncByKeyspaceGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRepairAsyncByKeyspaceGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRepairAsyncByKeyspaceGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service repair async by keyspace get default response +func (o *StorageServiceRepairAsyncByKeyspaceGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRepairAsyncByKeyspaceGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRepairAsyncByKeyspaceGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRepairAsyncByKeyspaceGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_post_parameters.go new file mode 100644 index 00000000000..2b25936ceab --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_post_parameters.go @@ -0,0 +1,488 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRepairAsyncByKeyspacePostParams creates a new StorageServiceRepairAsyncByKeyspacePostParams object +// with the default values initialized. 
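For the keyspace-scoped status GET above, keyspace becomes a path parameter and id a query parameter (see its WriteToRequest). A sketch with illustrative keyspace and id values, assuming the generated StorageServiceRepairAsyncByKeyspaceGet client method:

package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	client := operations.New(transport, strfmt.Default)

	params := operations.NewStorageServiceRepairAsyncByKeyspaceGetParams().
		WithKeyspace("my_keyspace"). // illustrative keyspace name
		WithID(42)                   // id returned by the matching POST operation

	res, err := client.StorageServiceRepairAsyncByKeyspaceGet(params)
	if err != nil {
		log.Fatal(err)
	}
	// Payload is a models.RepairAsyncStatusResponse, e.g. a RUNNING/SUCCESSFUL/FAILED value.
	fmt.Println("repair status:", res.GetPayload())
}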
+func NewStorageServiceRepairAsyncByKeyspacePostParams() *StorageServiceRepairAsyncByKeyspacePostParams { + var () + return &StorageServiceRepairAsyncByKeyspacePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRepairAsyncByKeyspacePostParamsWithTimeout creates a new StorageServiceRepairAsyncByKeyspacePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRepairAsyncByKeyspacePostParamsWithTimeout(timeout time.Duration) *StorageServiceRepairAsyncByKeyspacePostParams { + var () + return &StorageServiceRepairAsyncByKeyspacePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRepairAsyncByKeyspacePostParamsWithContext creates a new StorageServiceRepairAsyncByKeyspacePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRepairAsyncByKeyspacePostParamsWithContext(ctx context.Context) *StorageServiceRepairAsyncByKeyspacePostParams { + var () + return &StorageServiceRepairAsyncByKeyspacePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceRepairAsyncByKeyspacePostParamsWithHTTPClient creates a new StorageServiceRepairAsyncByKeyspacePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRepairAsyncByKeyspacePostParamsWithHTTPClient(client *http.Client) *StorageServiceRepairAsyncByKeyspacePostParams { + var () + return &StorageServiceRepairAsyncByKeyspacePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRepairAsyncByKeyspacePostParams contains all the parameters to send to the API endpoint +for the storage service repair async by keyspace post operation typically these are written to a http.Request +*/ +type StorageServiceRepairAsyncByKeyspacePostParams struct { + + /*ColumnFamilies + Which column families to repair in the given keyspace. Multiple columns families can be named separated by commas. If this option is missing, all column families in the keyspace are repaired. + + */ + ColumnFamilies *string + /*DataCenters + Which data centers are to participate in this repair. Multiple data centers can be listed separated by commas. + + */ + DataCenters *string + /*EndToken + Token on which to end repair + + */ + EndToken *string + /*Hosts + Which hosts are to participate in this repair. Multiple hosts can be listed separated by commas. + + */ + Hosts *string + /*Incremental + If the value is the string 'true' with any capitalization, perform incremental repair. + + */ + Incremental *string + /*JobThreads + An integer specifying the parallelism on each node. + + */ + JobThreads *string + /*Keyspace + The keyspace to repair + + */ + Keyspace string + /*Parallelism + Repair parallelism, can be 0 (sequential), 1 (parallel) or 2 (datacenter-aware). + + */ + Parallelism *string + /*PrimaryRange + If the value is the string 'true' with any capitalization, repair only the first range returned by the partitioner. + + */ + PrimaryRange *string + /*Ranges + An explicit list of ranges to repair, overriding the default choice. Each range is expressed as token1:token2, and multiple ranges can be given as a comma separated list. + + */ + Ranges *string + /*StartToken + Token on which to begin repair + + */ + StartToken *string + /*Trace + If the value is the string 'true' with any capitalization, enable tracing of the repair. 
+ + */ + Trace *string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithTimeout(timeout time.Duration) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithContext(ctx context.Context) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithHTTPClient(client *http.Client) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithColumnFamilies adds the columnFamilies to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithColumnFamilies(columnFamilies *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetColumnFamilies(columnFamilies) + return o +} + +// SetColumnFamilies adds the columnFamilies to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetColumnFamilies(columnFamilies *string) { + o.ColumnFamilies = columnFamilies +} + +// WithDataCenters adds the dataCenters to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithDataCenters(dataCenters *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetDataCenters(dataCenters) + return o +} + +// SetDataCenters adds the dataCenters to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetDataCenters(dataCenters *string) { + o.DataCenters = dataCenters +} + +// WithEndToken adds the endToken to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithEndToken(endToken *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetEndToken(endToken) + return o +} + +// SetEndToken adds the endToken to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetEndToken(endToken *string) { + o.EndToken = endToken +} + +// WithHosts adds the hosts to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithHosts(hosts *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetHosts(hosts) + return o +} + +// SetHosts adds the hosts to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetHosts(hosts 
*string) { + o.Hosts = hosts +} + +// WithIncremental adds the incremental to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithIncremental(incremental *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetIncremental(incremental) + return o +} + +// SetIncremental adds the incremental to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetIncremental(incremental *string) { + o.Incremental = incremental +} + +// WithJobThreads adds the jobThreads to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithJobThreads(jobThreads *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetJobThreads(jobThreads) + return o +} + +// SetJobThreads adds the jobThreads to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetJobThreads(jobThreads *string) { + o.JobThreads = jobThreads +} + +// WithKeyspace adds the keyspace to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithKeyspace(keyspace string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WithParallelism adds the parallelism to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithParallelism(parallelism *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetParallelism(parallelism) + return o +} + +// SetParallelism adds the parallelism to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetParallelism(parallelism *string) { + o.Parallelism = parallelism +} + +// WithPrimaryRange adds the primaryRange to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithPrimaryRange(primaryRange *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetPrimaryRange(primaryRange) + return o +} + +// SetPrimaryRange adds the primaryRange to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetPrimaryRange(primaryRange *string) { + o.PrimaryRange = primaryRange +} + +// WithRanges adds the ranges to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithRanges(ranges *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetRanges(ranges) + return o +} + +// SetRanges adds the ranges to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetRanges(ranges *string) { + o.Ranges = ranges +} + +// WithStartToken adds the startToken to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithStartToken(startToken *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetStartToken(startToken) + return o +} + +// SetStartToken adds the startToken to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetStartToken(startToken *string) { + 
o.StartToken = startToken +} + +// WithTrace adds the trace to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WithTrace(trace *string) *StorageServiceRepairAsyncByKeyspacePostParams { + o.SetTrace(trace) + return o +} + +// SetTrace adds the trace to the storage service repair async by keyspace post params +func (o *StorageServiceRepairAsyncByKeyspacePostParams) SetTrace(trace *string) { + o.Trace = trace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRepairAsyncByKeyspacePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.ColumnFamilies != nil { + + // query param columnFamilies + var qrColumnFamilies string + if o.ColumnFamilies != nil { + qrColumnFamilies = *o.ColumnFamilies + } + qColumnFamilies := qrColumnFamilies + if qColumnFamilies != "" { + if err := r.SetQueryParam("columnFamilies", qColumnFamilies); err != nil { + return err + } + } + + } + + if o.DataCenters != nil { + + // query param dataCenters + var qrDataCenters string + if o.DataCenters != nil { + qrDataCenters = *o.DataCenters + } + qDataCenters := qrDataCenters + if qDataCenters != "" { + if err := r.SetQueryParam("dataCenters", qDataCenters); err != nil { + return err + } + } + + } + + if o.EndToken != nil { + + // query param endToken + var qrEndToken string + if o.EndToken != nil { + qrEndToken = *o.EndToken + } + qEndToken := qrEndToken + if qEndToken != "" { + if err := r.SetQueryParam("endToken", qEndToken); err != nil { + return err + } + } + + } + + if o.Hosts != nil { + + // query param hosts + var qrHosts string + if o.Hosts != nil { + qrHosts = *o.Hosts + } + qHosts := qrHosts + if qHosts != "" { + if err := r.SetQueryParam("hosts", qHosts); err != nil { + return err + } + } + + } + + if o.Incremental != nil { + + // query param incremental + var qrIncremental string + if o.Incremental != nil { + qrIncremental = *o.Incremental + } + qIncremental := qrIncremental + if qIncremental != "" { + if err := r.SetQueryParam("incremental", qIncremental); err != nil { + return err + } + } + + } + + if o.JobThreads != nil { + + // query param jobThreads + var qrJobThreads string + if o.JobThreads != nil { + qrJobThreads = *o.JobThreads + } + qJobThreads := qrJobThreads + if qJobThreads != "" { + if err := r.SetQueryParam("jobThreads", qJobThreads); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if o.Parallelism != nil { + + // query param parallelism + var qrParallelism string + if o.Parallelism != nil { + qrParallelism = *o.Parallelism + } + qParallelism := qrParallelism + if qParallelism != "" { + if err := r.SetQueryParam("parallelism", qParallelism); err != nil { + return err + } + } + + } + + if o.PrimaryRange != nil { + + // query param primaryRange + var qrPrimaryRange string + if o.PrimaryRange != nil { + qrPrimaryRange = *o.PrimaryRange + } + qPrimaryRange := qrPrimaryRange + if qPrimaryRange != "" { + if err := r.SetQueryParam("primaryRange", qPrimaryRange); err != nil { + return err + } + } + + } + + if o.Ranges != nil { + + // query param ranges + var qrRanges string + if o.Ranges != nil { + qrRanges = *o.Ranges + } + qRanges := qrRanges + if qRanges != "" { + if err := r.SetQueryParam("ranges", qRanges); err != nil { + return err + } + } + + } + + if o.StartToken != nil { + 
+ // query param startToken + var qrStartToken string + if o.StartToken != nil { + qrStartToken = *o.StartToken + } + qStartToken := qrStartToken + if qStartToken != "" { + if err := r.SetQueryParam("startToken", qStartToken); err != nil { + return err + } + } + + } + + if o.Trace != nil { + + // query param trace + var qrTrace string + if o.Trace != nil { + qrTrace = *o.Trace + } + qTrace := qrTrace + if qTrace != "" { + if err := r.SetQueryParam("trace", qTrace); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_post_responses.go new file mode 100644 index 00000000000..cb0b212e0d5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_async_by_keyspace_post_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRepairAsyncByKeyspacePostReader is a Reader for the StorageServiceRepairAsyncByKeyspacePost structure. +type StorageServiceRepairAsyncByKeyspacePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRepairAsyncByKeyspacePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRepairAsyncByKeyspacePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRepairAsyncByKeyspacePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRepairAsyncByKeyspacePostOK creates a StorageServiceRepairAsyncByKeyspacePostOK with default headers values +func NewStorageServiceRepairAsyncByKeyspacePostOK() *StorageServiceRepairAsyncByKeyspacePostOK { + return &StorageServiceRepairAsyncByKeyspacePostOK{} +} + +/* +StorageServiceRepairAsyncByKeyspacePostOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceRepairAsyncByKeyspacePostOK struct { + Payload int32 +} + +func (o *StorageServiceRepairAsyncByKeyspacePostOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceRepairAsyncByKeyspacePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceRepairAsyncByKeyspacePostDefault creates a StorageServiceRepairAsyncByKeyspacePostDefault with default headers values +func NewStorageServiceRepairAsyncByKeyspacePostDefault(code int) *StorageServiceRepairAsyncByKeyspacePostDefault { + return &StorageServiceRepairAsyncByKeyspacePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRepairAsyncByKeyspacePostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceRepairAsyncByKeyspacePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service repair async by keyspace post default response +func (o *StorageServiceRepairAsyncByKeyspacePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRepairAsyncByKeyspacePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRepairAsyncByKeyspacePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRepairAsyncByKeyspacePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_status_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_status_parameters.go new file mode 100644 index 00000000000..32d8e629649 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_status_parameters.go @@ -0,0 +1,173 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceRepairStatusParams creates a new StorageServiceRepairStatusParams object +// with the default values initialized. 
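The keyspace repair POST above is the richest of these operations: every optional parameter is a *string, set through the With* builder chain, and a successful call returns the int32 repair id in the OK payload. A sketch, assuming the generated StorageServiceRepairAsyncByKeyspacePost client method; swag.String is the go-openapi helper for taking a string's address, and all values are illustrative.

package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	client := operations.New(transport, strfmt.Default)

	params := operations.NewStorageServiceRepairAsyncByKeyspacePostParams().
		WithKeyspace("my_keyspace").
		WithPrimaryRange(swag.String("true")). // repair only the first partitioner range
		WithParallelism(swag.String("2"))      // 2 = datacenter-aware, per the field docs

	res, err := client.StorageServiceRepairAsyncByKeyspacePost(params)
	if err != nil {
		log.Fatal(err)
	}
	// The payload is the repair id to poll with the status operations.
	fmt.Println("repair id:", res.GetPayload())
}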
+func NewStorageServiceRepairStatusParams() *StorageServiceRepairStatusParams { + var () + return &StorageServiceRepairStatusParams{ + + requestTimeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRepairStatusParamsWithTimeout creates a new StorageServiceRepairStatusParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRepairStatusParamsWithTimeout(timeout time.Duration) *StorageServiceRepairStatusParams { + var () + return &StorageServiceRepairStatusParams{ + + requestTimeout: timeout, + } +} + +// NewStorageServiceRepairStatusParamsWithContext creates a new StorageServiceRepairStatusParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRepairStatusParamsWithContext(ctx context.Context) *StorageServiceRepairStatusParams { + var () + return &StorageServiceRepairStatusParams{ + + Context: ctx, + } +} + +// NewStorageServiceRepairStatusParamsWithHTTPClient creates a new StorageServiceRepairStatusParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRepairStatusParamsWithHTTPClient(client *http.Client) *StorageServiceRepairStatusParams { + var () + return &StorageServiceRepairStatusParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRepairStatusParams contains all the parameters to send to the API endpoint +for the storage service repair status operation typically these are written to a http.Request +*/ +type StorageServiceRepairStatusParams struct { + + /*ID + The repair ID to check for status + + */ + ID int32 + /*Timeout + Seconds to wait before the query returns even if the repair is not finished. The value -1 or not providing this parameter means no timeout + + */ + Timeout *int64 + + requestTimeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithRequestTimeout adds the timeout to the storage service repair status params +func (o *StorageServiceRepairStatusParams) WithRequestTimeout(timeout time.Duration) *StorageServiceRepairStatusParams { + o.SetRequestTimeout(timeout) + return o +} + +// SetRequestTimeout adds the timeout to the storage service repair status params +func (o *StorageServiceRepairStatusParams) SetRequestTimeout(timeout time.Duration) { + o.requestTimeout = timeout +} + +// WithContext adds the context to the storage service repair status params +func (o *StorageServiceRepairStatusParams) WithContext(ctx context.Context) *StorageServiceRepairStatusParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service repair status params +func (o *StorageServiceRepairStatusParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service repair status params +func (o *StorageServiceRepairStatusParams) WithHTTPClient(client *http.Client) *StorageServiceRepairStatusParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service repair status params +func (o *StorageServiceRepairStatusParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithID adds the id to the storage service repair status params +func (o *StorageServiceRepairStatusParams) WithID(id int32) *StorageServiceRepairStatusParams { + o.SetID(id) + return o +} + +// SetID adds the id to the storage service repair status params +func (o *StorageServiceRepairStatusParams) SetID(id int32) { + o.ID 
= id +} + +// WithTimeout adds the timeout to the storage service repair status params +func (o *StorageServiceRepairStatusParams) WithTimeout(timeout *int64) *StorageServiceRepairStatusParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service repair status params +func (o *StorageServiceRepairStatusParams) SetTimeout(timeout *int64) { + o.Timeout = timeout +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRepairStatusParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.requestTimeout); err != nil { + return err + } + var res []error + + // query param id + qrID := o.ID + qID := swag.FormatInt32(qrID) + if qID != "" { + if err := r.SetQueryParam("id", qID); err != nil { + return err + } + } + + if o.Timeout != nil { + + // query param timeout + var qrTimeout int64 + if o.Timeout != nil { + qrTimeout = *o.Timeout + } + qTimeout := swag.FormatInt64(qrTimeout) + if qTimeout != "" { + if err := r.SetQueryParam("timeout", qTimeout); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_status_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_status_responses.go new file mode 100644 index 00000000000..f3d224cd480 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_repair_status_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRepairStatusReader is a Reader for the StorageServiceRepairStatus structure. +type StorageServiceRepairStatusReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRepairStatusReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRepairStatusOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRepairStatusDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRepairStatusOK creates a StorageServiceRepairStatusOK with default headers values +func NewStorageServiceRepairStatusOK() *StorageServiceRepairStatusOK { + return &StorageServiceRepairStatusOK{} +} + +/* +StorageServiceRepairStatusOK handles this case with default header values. 
+ +Repair status value +*/ +type StorageServiceRepairStatusOK struct { + Payload models.RepairAsyncStatusResponse +} + +func (o *StorageServiceRepairStatusOK) GetPayload() models.RepairAsyncStatusResponse { + return o.Payload +} + +func (o *StorageServiceRepairStatusOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceRepairStatusDefault creates a StorageServiceRepairStatusDefault with default headers values +func NewStorageServiceRepairStatusDefault(code int) *StorageServiceRepairStatusDefault { + return &StorageServiceRepairStatusDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRepairStatusDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceRepairStatusDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service repair status default response +func (o *StorageServiceRepairStatusDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRepairStatusDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRepairStatusDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRepairStatusDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_reschedule_failed_deletions_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_reschedule_failed_deletions_post_parameters.go new file mode 100644 index 00000000000..094ac4e6849 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_reschedule_failed_deletions_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRescheduleFailedDeletionsPostParams creates a new StorageServiceRescheduleFailedDeletionsPostParams object +// with the default values initialized. 
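Note: a minimal usage sketch for the repair-status params/responses pair above. The StorageServiceRepairStatus method on the generated operations.Client is an assumption here (go-swagger emits it alongside these files, but it is not part of this hunk), as is the pre-built `ops` client; the same assumptions carry through the sketches below. Only the types visible in the hunks are taken as given.

package example

import (
	"context"

	"github.com/go-openapi/swag"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

// repairStatus polls a previously started repair. WithTimeout sets the
// wire-level "timeout" query parameter (seconds the server may wait before
// answering even if the repair is unfinished; -1 or nil means no timeout),
// while WithRequestTimeout would bound the HTTP round trip itself.
func repairStatus(ctx context.Context, ops *operations.Client, id int32) (models.RepairAsyncStatusResponse, error) {
	params := operations.NewStorageServiceRepairStatusParams().
		WithContext(ctx).
		WithID(id).
		WithTimeout(swag.Int64(30)) // hypothetical 30s server-side wait
	resp, err := ops.StorageServiceRepairStatus(params) // assumed method
	if err != nil {
		// Non-2xx answers surface as *StorageServiceRepairStatusDefault,
		// whose Error() reads "agent [HTTP <code>] <message>".
		var zero models.RepairAsyncStatusResponse
		return zero, err
	}
	return resp.GetPayload(), nil
}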
+func NewStorageServiceRescheduleFailedDeletionsPostParams() *StorageServiceRescheduleFailedDeletionsPostParams { + + return &StorageServiceRescheduleFailedDeletionsPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRescheduleFailedDeletionsPostParamsWithTimeout creates a new StorageServiceRescheduleFailedDeletionsPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRescheduleFailedDeletionsPostParamsWithTimeout(timeout time.Duration) *StorageServiceRescheduleFailedDeletionsPostParams { + + return &StorageServiceRescheduleFailedDeletionsPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRescheduleFailedDeletionsPostParamsWithContext creates a new StorageServiceRescheduleFailedDeletionsPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRescheduleFailedDeletionsPostParamsWithContext(ctx context.Context) *StorageServiceRescheduleFailedDeletionsPostParams { + + return &StorageServiceRescheduleFailedDeletionsPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceRescheduleFailedDeletionsPostParamsWithHTTPClient creates a new StorageServiceRescheduleFailedDeletionsPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRescheduleFailedDeletionsPostParamsWithHTTPClient(client *http.Client) *StorageServiceRescheduleFailedDeletionsPostParams { + + return &StorageServiceRescheduleFailedDeletionsPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRescheduleFailedDeletionsPostParams contains all the parameters to send to the API endpoint +for the storage service reschedule failed deletions post operation typically these are written to a http.Request +*/ +type StorageServiceRescheduleFailedDeletionsPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service reschedule failed deletions post params +func (o *StorageServiceRescheduleFailedDeletionsPostParams) WithTimeout(timeout time.Duration) *StorageServiceRescheduleFailedDeletionsPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service reschedule failed deletions post params +func (o *StorageServiceRescheduleFailedDeletionsPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service reschedule failed deletions post params +func (o *StorageServiceRescheduleFailedDeletionsPostParams) WithContext(ctx context.Context) *StorageServiceRescheduleFailedDeletionsPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service reschedule failed deletions post params +func (o *StorageServiceRescheduleFailedDeletionsPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service reschedule failed deletions post params +func (o *StorageServiceRescheduleFailedDeletionsPostParams) WithHTTPClient(client *http.Client) *StorageServiceRescheduleFailedDeletionsPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service reschedule failed deletions post params +func (o *StorageServiceRescheduleFailedDeletionsPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these 
params to a swagger request +func (o *StorageServiceRescheduleFailedDeletionsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_reschedule_failed_deletions_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_reschedule_failed_deletions_post_responses.go new file mode 100644 index 00000000000..747d18db568 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_reschedule_failed_deletions_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRescheduleFailedDeletionsPostReader is a Reader for the StorageServiceRescheduleFailedDeletionsPost structure. +type StorageServiceRescheduleFailedDeletionsPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRescheduleFailedDeletionsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRescheduleFailedDeletionsPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRescheduleFailedDeletionsPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRescheduleFailedDeletionsPostOK creates a StorageServiceRescheduleFailedDeletionsPostOK with default headers values +func NewStorageServiceRescheduleFailedDeletionsPostOK() *StorageServiceRescheduleFailedDeletionsPostOK { + return &StorageServiceRescheduleFailedDeletionsPostOK{} +} + +/* +StorageServiceRescheduleFailedDeletionsPostOK handles this case with default header values. + +Success +*/ +type StorageServiceRescheduleFailedDeletionsPostOK struct { +} + +func (o *StorageServiceRescheduleFailedDeletionsPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceRescheduleFailedDeletionsPostDefault creates a StorageServiceRescheduleFailedDeletionsPostDefault with default headers values +func NewStorageServiceRescheduleFailedDeletionsPostDefault(code int) *StorageServiceRescheduleFailedDeletionsPostDefault { + return &StorageServiceRescheduleFailedDeletionsPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRescheduleFailedDeletionsPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRescheduleFailedDeletionsPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service reschedule failed deletions post default response +func (o *StorageServiceRescheduleFailedDeletionsPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRescheduleFailedDeletionsPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRescheduleFailedDeletionsPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRescheduleFailedDeletionsPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_delete_parameters.go new file mode 100644 index 00000000000..01805f5ffd5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_delete_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRPCServerDeleteParams creates a new StorageServiceRPCServerDeleteParams object +// with the default values initialized. 
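Note: the reschedule-failed-deletions pair above shows why the repair-status params earlier carry a renamed requestTimeout field: that operation has a wire parameter literally called "timeout", so the generator moved the transport deadline to WithRequestTimeout, whereas parameterless operations like this POST keep the conventional timeout/WithTimeout(time.Duration). Sketch under the same assumptions as the first one, plus "time":

// rescheduleFailedDeletions asks the node to retry deletions that
// previously failed (per the operation name; no payload on success).
func rescheduleFailedDeletions(ctx context.Context, ops *operations.Client) error {
	params := operations.NewStorageServiceRescheduleFailedDeletionsPostParams().
		WithContext(ctx).
		WithTimeout(10 * time.Second) // client-side HTTP deadline
	_, err := ops.StorageServiceRescheduleFailedDeletionsPost(params) // assumed method
	return err
}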
+func NewStorageServiceRPCServerDeleteParams() *StorageServiceRPCServerDeleteParams { + + return &StorageServiceRPCServerDeleteParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRPCServerDeleteParamsWithTimeout creates a new StorageServiceRPCServerDeleteParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRPCServerDeleteParamsWithTimeout(timeout time.Duration) *StorageServiceRPCServerDeleteParams { + + return &StorageServiceRPCServerDeleteParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRPCServerDeleteParamsWithContext creates a new StorageServiceRPCServerDeleteParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRPCServerDeleteParamsWithContext(ctx context.Context) *StorageServiceRPCServerDeleteParams { + + return &StorageServiceRPCServerDeleteParams{ + + Context: ctx, + } +} + +// NewStorageServiceRPCServerDeleteParamsWithHTTPClient creates a new StorageServiceRPCServerDeleteParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRPCServerDeleteParamsWithHTTPClient(client *http.Client) *StorageServiceRPCServerDeleteParams { + + return &StorageServiceRPCServerDeleteParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRPCServerDeleteParams contains all the parameters to send to the API endpoint +for the storage service Rpc server delete operation typically these are written to a http.Request +*/ +type StorageServiceRPCServerDeleteParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service Rpc server delete params +func (o *StorageServiceRPCServerDeleteParams) WithTimeout(timeout time.Duration) *StorageServiceRPCServerDeleteParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service Rpc server delete params +func (o *StorageServiceRPCServerDeleteParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service Rpc server delete params +func (o *StorageServiceRPCServerDeleteParams) WithContext(ctx context.Context) *StorageServiceRPCServerDeleteParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service Rpc server delete params +func (o *StorageServiceRPCServerDeleteParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service Rpc server delete params +func (o *StorageServiceRPCServerDeleteParams) WithHTTPClient(client *http.Client) *StorageServiceRPCServerDeleteParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service Rpc server delete params +func (o *StorageServiceRPCServerDeleteParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRPCServerDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_delete_responses.go new file mode 100644 index 00000000000..74b2ee53dbf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_delete_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRPCServerDeleteReader is a Reader for the StorageServiceRPCServerDelete structure. +type StorageServiceRPCServerDeleteReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRPCServerDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRPCServerDeleteOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRPCServerDeleteDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRPCServerDeleteOK creates a StorageServiceRPCServerDeleteOK with default headers values +func NewStorageServiceRPCServerDeleteOK() *StorageServiceRPCServerDeleteOK { + return &StorageServiceRPCServerDeleteOK{} +} + +/* +StorageServiceRPCServerDeleteOK handles this case with default header values. + +Success +*/ +type StorageServiceRPCServerDeleteOK struct { +} + +func (o *StorageServiceRPCServerDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceRPCServerDeleteDefault creates a StorageServiceRPCServerDeleteDefault with default headers values +func NewStorageServiceRPCServerDeleteDefault(code int) *StorageServiceRPCServerDeleteDefault { + return &StorageServiceRPCServerDeleteDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRPCServerDeleteDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRPCServerDeleteDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service Rpc server delete default response +func (o *StorageServiceRPCServerDeleteDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRPCServerDeleteDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRPCServerDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRPCServerDeleteDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_get_parameters.go new file mode 100644 index 00000000000..1c3a0ea5814 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRPCServerGetParams creates a new StorageServiceRPCServerGetParams object +// with the default values initialized. 
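Note: the rpc_server DELETE pair above, together with the GET and POST pairs that follow, reads as the usual stop/status/start triple for the node's RPC server (an inference from the operation names only). A stop call, under the same assumptions:

// stopRPCServer issues the DELETE; the OK response carries no payload.
func stopRPCServer(ctx context.Context, ops *operations.Client) error {
	_, err := ops.StorageServiceRPCServerDelete( // assumed method
		operations.NewStorageServiceRPCServerDeleteParamsWithContext(ctx))
	return err
}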
+func NewStorageServiceRPCServerGetParams() *StorageServiceRPCServerGetParams { + + return &StorageServiceRPCServerGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRPCServerGetParamsWithTimeout creates a new StorageServiceRPCServerGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRPCServerGetParamsWithTimeout(timeout time.Duration) *StorageServiceRPCServerGetParams { + + return &StorageServiceRPCServerGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRPCServerGetParamsWithContext creates a new StorageServiceRPCServerGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRPCServerGetParamsWithContext(ctx context.Context) *StorageServiceRPCServerGetParams { + + return &StorageServiceRPCServerGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceRPCServerGetParamsWithHTTPClient creates a new StorageServiceRPCServerGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRPCServerGetParamsWithHTTPClient(client *http.Client) *StorageServiceRPCServerGetParams { + + return &StorageServiceRPCServerGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRPCServerGetParams contains all the parameters to send to the API endpoint +for the storage service Rpc server get operation typically these are written to a http.Request +*/ +type StorageServiceRPCServerGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service Rpc server get params +func (o *StorageServiceRPCServerGetParams) WithTimeout(timeout time.Duration) *StorageServiceRPCServerGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service Rpc server get params +func (o *StorageServiceRPCServerGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service Rpc server get params +func (o *StorageServiceRPCServerGetParams) WithContext(ctx context.Context) *StorageServiceRPCServerGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service Rpc server get params +func (o *StorageServiceRPCServerGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service Rpc server get params +func (o *StorageServiceRPCServerGetParams) WithHTTPClient(client *http.Client) *StorageServiceRPCServerGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service Rpc server get params +func (o *StorageServiceRPCServerGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRPCServerGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_get_responses.go new file mode 100644 index 00000000000..841a23ed6a9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRPCServerGetReader is a Reader for the StorageServiceRPCServerGet structure. +type StorageServiceRPCServerGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRPCServerGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRPCServerGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRPCServerGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRPCServerGetOK creates a StorageServiceRPCServerGetOK with default headers values +func NewStorageServiceRPCServerGetOK() *StorageServiceRPCServerGetOK { + return &StorageServiceRPCServerGetOK{} +} + +/* +StorageServiceRPCServerGetOK handles this case with default header values. + +Success +*/ +type StorageServiceRPCServerGetOK struct { + Payload bool +} + +func (o *StorageServiceRPCServerGetOK) GetPayload() bool { + return o.Payload +} + +func (o *StorageServiceRPCServerGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceRPCServerGetDefault creates a StorageServiceRPCServerGetDefault with default headers values +func NewStorageServiceRPCServerGetDefault(code int) *StorageServiceRPCServerGetDefault { + return &StorageServiceRPCServerGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRPCServerGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRPCServerGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service Rpc server get default response +func (o *StorageServiceRPCServerGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRPCServerGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRPCServerGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRPCServerGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_post_parameters.go new file mode 100644 index 00000000000..33b7f87d7ca --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceRPCServerPostParams creates a new StorageServiceRPCServerPostParams object +// with the default values initialized. 
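Note: the GET variant is the only one of the triple with a payload, a bare bool per StorageServiceRPCServerGetOK above, so a status probe reduces to:

// rpcServerRunning reports the bool payload of the GET (assumed method).
func rpcServerRunning(ctx context.Context, ops *operations.Client) (bool, error) {
	resp, err := ops.StorageServiceRPCServerGet(
		operations.NewStorageServiceRPCServerGetParamsWithContext(ctx))
	if err != nil {
		return false, err
	}
	return resp.GetPayload(), nil
}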
+func NewStorageServiceRPCServerPostParams() *StorageServiceRPCServerPostParams { + + return &StorageServiceRPCServerPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceRPCServerPostParamsWithTimeout creates a new StorageServiceRPCServerPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceRPCServerPostParamsWithTimeout(timeout time.Duration) *StorageServiceRPCServerPostParams { + + return &StorageServiceRPCServerPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceRPCServerPostParamsWithContext creates a new StorageServiceRPCServerPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceRPCServerPostParamsWithContext(ctx context.Context) *StorageServiceRPCServerPostParams { + + return &StorageServiceRPCServerPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceRPCServerPostParamsWithHTTPClient creates a new StorageServiceRPCServerPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceRPCServerPostParamsWithHTTPClient(client *http.Client) *StorageServiceRPCServerPostParams { + + return &StorageServiceRPCServerPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceRPCServerPostParams contains all the parameters to send to the API endpoint +for the storage service Rpc server post operation typically these are written to a http.Request +*/ +type StorageServiceRPCServerPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service Rpc server post params +func (o *StorageServiceRPCServerPostParams) WithTimeout(timeout time.Duration) *StorageServiceRPCServerPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service Rpc server post params +func (o *StorageServiceRPCServerPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service Rpc server post params +func (o *StorageServiceRPCServerPostParams) WithContext(ctx context.Context) *StorageServiceRPCServerPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service Rpc server post params +func (o *StorageServiceRPCServerPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service Rpc server post params +func (o *StorageServiceRPCServerPostParams) WithHTTPClient(client *http.Client) *StorageServiceRPCServerPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service Rpc server post params +func (o *StorageServiceRPCServerPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceRPCServerPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_post_responses.go new file mode 100644 index 00000000000..fe01555e696 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_rpc_server_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceRPCServerPostReader is a Reader for the StorageServiceRPCServerPost structure. +type StorageServiceRPCServerPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceRPCServerPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceRPCServerPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceRPCServerPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceRPCServerPostOK creates a StorageServiceRPCServerPostOK with default headers values +func NewStorageServiceRPCServerPostOK() *StorageServiceRPCServerPostOK { + return &StorageServiceRPCServerPostOK{} +} + +/* +StorageServiceRPCServerPostOK handles this case with default header values. + +Success +*/ +type StorageServiceRPCServerPostOK struct { +} + +func (o *StorageServiceRPCServerPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceRPCServerPostDefault creates a StorageServiceRPCServerPostDefault with default headers values +func NewStorageServiceRPCServerPostDefault(code int) *StorageServiceRPCServerPostDefault { + return &StorageServiceRPCServerPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceRPCServerPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceRPCServerPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service Rpc server post default response +func (o *StorageServiceRPCServerPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceRPCServerPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceRPCServerPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceRPCServerPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sample_key_range_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sample_key_range_get_parameters.go new file mode 100644 index 00000000000..2f5f5557f46 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sample_key_range_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceSampleKeyRangeGetParams creates a new StorageServiceSampleKeyRangeGetParams object +// with the default values initialized. 
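Note: every *Default type in these hunks implements error (Error() formats "agent [HTTP <code>] <message>"), and each ReadResponse returns it for any non-2xx status while letting unexpected 2xx codes pass as success. Callers that want the structured payload rather than the formatted string can unwrap with the standard library; hypothetical sketch, using stdlib "errors" and "fmt" (not the go-openapi errors package the generated files import):

// describeFailure recovers the models.ErrorModel behind a failed call.
func describeFailure(err error) string {
	var def *operations.StorageServiceRPCServerPostDefault
	if errors.As(err, &def) {
		return fmt.Sprintf("code=%d message=%q", def.Code(), def.GetPayload().Message)
	}
	return err.Error()
}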
+func NewStorageServiceSampleKeyRangeGetParams() *StorageServiceSampleKeyRangeGetParams { + + return &StorageServiceSampleKeyRangeGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceSampleKeyRangeGetParamsWithTimeout creates a new StorageServiceSampleKeyRangeGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceSampleKeyRangeGetParamsWithTimeout(timeout time.Duration) *StorageServiceSampleKeyRangeGetParams { + + return &StorageServiceSampleKeyRangeGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceSampleKeyRangeGetParamsWithContext creates a new StorageServiceSampleKeyRangeGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceSampleKeyRangeGetParamsWithContext(ctx context.Context) *StorageServiceSampleKeyRangeGetParams { + + return &StorageServiceSampleKeyRangeGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceSampleKeyRangeGetParamsWithHTTPClient creates a new StorageServiceSampleKeyRangeGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceSampleKeyRangeGetParamsWithHTTPClient(client *http.Client) *StorageServiceSampleKeyRangeGetParams { + + return &StorageServiceSampleKeyRangeGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceSampleKeyRangeGetParams contains all the parameters to send to the API endpoint +for the storage service sample key range get operation typically these are written to a http.Request +*/ +type StorageServiceSampleKeyRangeGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service sample key range get params +func (o *StorageServiceSampleKeyRangeGetParams) WithTimeout(timeout time.Duration) *StorageServiceSampleKeyRangeGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service sample key range get params +func (o *StorageServiceSampleKeyRangeGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service sample key range get params +func (o *StorageServiceSampleKeyRangeGetParams) WithContext(ctx context.Context) *StorageServiceSampleKeyRangeGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service sample key range get params +func (o *StorageServiceSampleKeyRangeGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service sample key range get params +func (o *StorageServiceSampleKeyRangeGetParams) WithHTTPClient(client *http.Client) *StorageServiceSampleKeyRangeGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service sample key range get params +func (o *StorageServiceSampleKeyRangeGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceSampleKeyRangeGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sample_key_range_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sample_key_range_get_responses.go new file mode 100644 index 00000000000..3da0c7914f6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sample_key_range_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceSampleKeyRangeGetReader is a Reader for the StorageServiceSampleKeyRangeGet structure. +type StorageServiceSampleKeyRangeGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceSampleKeyRangeGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceSampleKeyRangeGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceSampleKeyRangeGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceSampleKeyRangeGetOK creates a StorageServiceSampleKeyRangeGetOK with default headers values +func NewStorageServiceSampleKeyRangeGetOK() *StorageServiceSampleKeyRangeGetOK { + return &StorageServiceSampleKeyRangeGetOK{} +} + +/* +StorageServiceSampleKeyRangeGetOK handles this case with default header values. + +Success +*/ +type StorageServiceSampleKeyRangeGetOK struct { + Payload []string +} + +func (o *StorageServiceSampleKeyRangeGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceSampleKeyRangeGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceSampleKeyRangeGetDefault creates a StorageServiceSampleKeyRangeGetDefault with default headers values +func NewStorageServiceSampleKeyRangeGetDefault(code int) *StorageServiceSampleKeyRangeGetDefault { + return &StorageServiceSampleKeyRangeGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceSampleKeyRangeGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceSampleKeyRangeGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service sample key range get default response +func (o *StorageServiceSampleKeyRangeGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceSampleKeyRangeGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceSampleKeyRangeGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceSampleKeyRangeGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_saved_caches_location_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_saved_caches_location_get_parameters.go new file mode 100644 index 00000000000..f216a226aa3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_saved_caches_location_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceSavedCachesLocationGetParams creates a new StorageServiceSavedCachesLocationGetParams object +// with the default values initialized. 
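Note: sample_key_range's OK payload is a []string of sampled keys; the call shape is identical to the RPC-server probe above, differing only in payload type:

// sampleKeys fetches the sampled keys (assumed method).
func sampleKeys(ctx context.Context, ops *operations.Client) ([]string, error) {
	resp, err := ops.StorageServiceSampleKeyRangeGet(
		operations.NewStorageServiceSampleKeyRangeGetParamsWithContext(ctx))
	if err != nil {
		return nil, err
	}
	return resp.GetPayload(), nil
}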
+func NewStorageServiceSavedCachesLocationGetParams() *StorageServiceSavedCachesLocationGetParams { + + return &StorageServiceSavedCachesLocationGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceSavedCachesLocationGetParamsWithTimeout creates a new StorageServiceSavedCachesLocationGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceSavedCachesLocationGetParamsWithTimeout(timeout time.Duration) *StorageServiceSavedCachesLocationGetParams { + + return &StorageServiceSavedCachesLocationGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceSavedCachesLocationGetParamsWithContext creates a new StorageServiceSavedCachesLocationGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceSavedCachesLocationGetParamsWithContext(ctx context.Context) *StorageServiceSavedCachesLocationGetParams { + + return &StorageServiceSavedCachesLocationGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceSavedCachesLocationGetParamsWithHTTPClient creates a new StorageServiceSavedCachesLocationGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceSavedCachesLocationGetParamsWithHTTPClient(client *http.Client) *StorageServiceSavedCachesLocationGetParams { + + return &StorageServiceSavedCachesLocationGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceSavedCachesLocationGetParams contains all the parameters to send to the API endpoint +for the storage service saved caches location get operation typically these are written to a http.Request +*/ +type StorageServiceSavedCachesLocationGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service saved caches location get params +func (o *StorageServiceSavedCachesLocationGetParams) WithTimeout(timeout time.Duration) *StorageServiceSavedCachesLocationGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service saved caches location get params +func (o *StorageServiceSavedCachesLocationGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service saved caches location get params +func (o *StorageServiceSavedCachesLocationGetParams) WithContext(ctx context.Context) *StorageServiceSavedCachesLocationGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service saved caches location get params +func (o *StorageServiceSavedCachesLocationGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service saved caches location get params +func (o *StorageServiceSavedCachesLocationGetParams) WithHTTPClient(client *http.Client) *StorageServiceSavedCachesLocationGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service saved caches location get params +func (o *StorageServiceSavedCachesLocationGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceSavedCachesLocationGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 
0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_saved_caches_location_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_saved_caches_location_get_responses.go new file mode 100644 index 00000000000..f9d56ad2d73 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_saved_caches_location_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceSavedCachesLocationGetReader is a Reader for the StorageServiceSavedCachesLocationGet structure. +type StorageServiceSavedCachesLocationGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceSavedCachesLocationGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceSavedCachesLocationGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceSavedCachesLocationGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceSavedCachesLocationGetOK creates a StorageServiceSavedCachesLocationGetOK with default headers values +func NewStorageServiceSavedCachesLocationGetOK() *StorageServiceSavedCachesLocationGetOK { + return &StorageServiceSavedCachesLocationGetOK{} +} + +/* +StorageServiceSavedCachesLocationGetOK handles this case with default header values. + +Success +*/ +type StorageServiceSavedCachesLocationGetOK struct { + Payload string +} + +func (o *StorageServiceSavedCachesLocationGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceSavedCachesLocationGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceSavedCachesLocationGetDefault creates a StorageServiceSavedCachesLocationGetDefault with default headers values +func NewStorageServiceSavedCachesLocationGetDefault(code int) *StorageServiceSavedCachesLocationGetDefault { + return &StorageServiceSavedCachesLocationGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceSavedCachesLocationGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceSavedCachesLocationGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service saved caches location get default response +func (o *StorageServiceSavedCachesLocationGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceSavedCachesLocationGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceSavedCachesLocationGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceSavedCachesLocationGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_schema_version_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_schema_version_get_parameters.go new file mode 100644 index 00000000000..4050f488638 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_schema_version_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceSchemaVersionGetParams creates a new StorageServiceSchemaVersionGetParams object +// with the default values initialized. 
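Note: for parameterless operations such as the saved-caches-location GET above, WriteToRequest only applies the transport timeout; the res slice is never appended to, so the CompositeValidationError branch is unreachable scaffolding that the generator emits regardless. Fetching the value follows the now-familiar shape, e.g. inside a function with ctx and ops in scope:

resp, err := ops.StorageServiceSavedCachesLocationGet( // assumed method
	operations.NewStorageServiceSavedCachesLocationGetParamsWithContext(ctx))
// on success, resp.GetPayload() is the saved-caches directory as a string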
+func NewStorageServiceSchemaVersionGetParams() *StorageServiceSchemaVersionGetParams { + + return &StorageServiceSchemaVersionGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceSchemaVersionGetParamsWithTimeout creates a new StorageServiceSchemaVersionGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceSchemaVersionGetParamsWithTimeout(timeout time.Duration) *StorageServiceSchemaVersionGetParams { + + return &StorageServiceSchemaVersionGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceSchemaVersionGetParamsWithContext creates a new StorageServiceSchemaVersionGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceSchemaVersionGetParamsWithContext(ctx context.Context) *StorageServiceSchemaVersionGetParams { + + return &StorageServiceSchemaVersionGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceSchemaVersionGetParamsWithHTTPClient creates a new StorageServiceSchemaVersionGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceSchemaVersionGetParamsWithHTTPClient(client *http.Client) *StorageServiceSchemaVersionGetParams { + + return &StorageServiceSchemaVersionGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceSchemaVersionGetParams contains all the parameters to send to the API endpoint +for the storage service schema version get operation typically these are written to a http.Request +*/ +type StorageServiceSchemaVersionGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service schema version get params +func (o *StorageServiceSchemaVersionGetParams) WithTimeout(timeout time.Duration) *StorageServiceSchemaVersionGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service schema version get params +func (o *StorageServiceSchemaVersionGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service schema version get params +func (o *StorageServiceSchemaVersionGetParams) WithContext(ctx context.Context) *StorageServiceSchemaVersionGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service schema version get params +func (o *StorageServiceSchemaVersionGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service schema version get params +func (o *StorageServiceSchemaVersionGetParams) WithHTTPClient(client *http.Client) *StorageServiceSchemaVersionGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service schema version get params +func (o *StorageServiceSchemaVersionGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceSchemaVersionGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_schema_version_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_schema_version_get_responses.go new file mode 100644 index 00000000000..f7c12a66342 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_schema_version_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceSchemaVersionGetReader is a Reader for the StorageServiceSchemaVersionGet structure. +type StorageServiceSchemaVersionGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceSchemaVersionGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceSchemaVersionGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceSchemaVersionGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceSchemaVersionGetOK creates a StorageServiceSchemaVersionGetOK with default headers values +func NewStorageServiceSchemaVersionGetOK() *StorageServiceSchemaVersionGetOK { + return &StorageServiceSchemaVersionGetOK{} +} + +/* +StorageServiceSchemaVersionGetOK handles this case with default header values. + +Success +*/ +type StorageServiceSchemaVersionGetOK struct { + Payload string +} + +func (o *StorageServiceSchemaVersionGetOK) GetPayload() string { + return o.Payload +} + +func (o *StorageServiceSchemaVersionGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceSchemaVersionGetDefault creates a StorageServiceSchemaVersionGetDefault with default headers values +func NewStorageServiceSchemaVersionGetDefault(code int) *StorageServiceSchemaVersionGetDefault { + return &StorageServiceSchemaVersionGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceSchemaVersionGetDefault handles this case with default header values. 
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_scylla_release_version_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_scylla_release_version_get_parameters.go
new file mode 100644
index 00000000000..4298278f553
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_scylla_release_version_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageServiceScyllaReleaseVersionGetParams creates a new StorageServiceScyllaReleaseVersionGetParams object
+// with the default values initialized.
+func NewStorageServiceScyllaReleaseVersionGetParams() *StorageServiceScyllaReleaseVersionGetParams {
+
+	return &StorageServiceScyllaReleaseVersionGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceScyllaReleaseVersionGetParamsWithTimeout creates a new StorageServiceScyllaReleaseVersionGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceScyllaReleaseVersionGetParamsWithTimeout(timeout time.Duration) *StorageServiceScyllaReleaseVersionGetParams {
+
+	return &StorageServiceScyllaReleaseVersionGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceScyllaReleaseVersionGetParamsWithContext creates a new StorageServiceScyllaReleaseVersionGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceScyllaReleaseVersionGetParamsWithContext(ctx context.Context) *StorageServiceScyllaReleaseVersionGetParams {
+
+	return &StorageServiceScyllaReleaseVersionGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceScyllaReleaseVersionGetParamsWithHTTPClient creates a new StorageServiceScyllaReleaseVersionGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceScyllaReleaseVersionGetParamsWithHTTPClient(client *http.Client) *StorageServiceScyllaReleaseVersionGetParams {
+
+	return &StorageServiceScyllaReleaseVersionGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceScyllaReleaseVersionGetParams contains all the parameters to send to the API endpoint
+for the storage service scylla release version get operation typically these are written to a http.Request
+*/
+type StorageServiceScyllaReleaseVersionGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service scylla release version get params
+func (o *StorageServiceScyllaReleaseVersionGetParams) WithTimeout(timeout time.Duration) *StorageServiceScyllaReleaseVersionGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service scylla release version get params
+func (o *StorageServiceScyllaReleaseVersionGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service scylla release version get params
+func (o *StorageServiceScyllaReleaseVersionGetParams) WithContext(ctx context.Context) *StorageServiceScyllaReleaseVersionGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service scylla release version get params
+func (o *StorageServiceScyllaReleaseVersionGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service scylla release version get params
+func (o *StorageServiceScyllaReleaseVersionGetParams) WithHTTPClient(client *http.Client) *StorageServiceScyllaReleaseVersionGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service scylla release version get params
+func (o *StorageServiceScyllaReleaseVersionGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageServiceScyllaReleaseVersionGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_scylla_release_version_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_scylla_release_version_get_responses.go
new file mode 100644
index 00000000000..6ebb9a24504
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_scylla_release_version_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageServiceScyllaReleaseVersionGetReader is a Reader for the StorageServiceScyllaReleaseVersionGet structure.
+type StorageServiceScyllaReleaseVersionGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageServiceScyllaReleaseVersionGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageServiceScyllaReleaseVersionGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageServiceScyllaReleaseVersionGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageServiceScyllaReleaseVersionGetOK creates a StorageServiceScyllaReleaseVersionGetOK with default headers values
+func NewStorageServiceScyllaReleaseVersionGetOK() *StorageServiceScyllaReleaseVersionGetOK {
+	return &StorageServiceScyllaReleaseVersionGetOK{}
+}
+
+/*
+StorageServiceScyllaReleaseVersionGetOK handles this case with default header values.
+
+Success
+*/
+type StorageServiceScyllaReleaseVersionGetOK struct {
+	Payload string
+}
+
+func (o *StorageServiceScyllaReleaseVersionGetOK) GetPayload() string {
+	return o.Payload
+}
+
+func (o *StorageServiceScyllaReleaseVersionGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewStorageServiceScyllaReleaseVersionGetDefault creates a StorageServiceScyllaReleaseVersionGetDefault with default headers values
+func NewStorageServiceScyllaReleaseVersionGetDefault(code int) *StorageServiceScyllaReleaseVersionGetDefault {
+	return &StorageServiceScyllaReleaseVersionGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageServiceScyllaReleaseVersionGetDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageServiceScyllaReleaseVersionGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage service scylla release version get default response
+func (o *StorageServiceScyllaReleaseVersionGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageServiceScyllaReleaseVersionGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageServiceScyllaReleaseVersionGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageServiceScyllaReleaseVersionGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_get_parameters.go
new file mode 100644
index 00000000000..5809eac4b0f
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageServiceSlowQueryGetParams creates a new StorageServiceSlowQueryGetParams object
+// with the default values initialized.
+func NewStorageServiceSlowQueryGetParams() *StorageServiceSlowQueryGetParams {
+
+	return &StorageServiceSlowQueryGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceSlowQueryGetParamsWithTimeout creates a new StorageServiceSlowQueryGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceSlowQueryGetParamsWithTimeout(timeout time.Duration) *StorageServiceSlowQueryGetParams {
+
+	return &StorageServiceSlowQueryGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceSlowQueryGetParamsWithContext creates a new StorageServiceSlowQueryGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceSlowQueryGetParamsWithContext(ctx context.Context) *StorageServiceSlowQueryGetParams {
+
+	return &StorageServiceSlowQueryGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceSlowQueryGetParamsWithHTTPClient creates a new StorageServiceSlowQueryGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceSlowQueryGetParamsWithHTTPClient(client *http.Client) *StorageServiceSlowQueryGetParams {
+
+	return &StorageServiceSlowQueryGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceSlowQueryGetParams contains all the parameters to send to the API endpoint
+for the storage service slow query get operation typically these are written to a http.Request
+*/
+type StorageServiceSlowQueryGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service slow query get params
+func (o *StorageServiceSlowQueryGetParams) WithTimeout(timeout time.Duration) *StorageServiceSlowQueryGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service slow query get params
+func (o *StorageServiceSlowQueryGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service slow query get params
+func (o *StorageServiceSlowQueryGetParams) WithContext(ctx context.Context) *StorageServiceSlowQueryGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service slow query get params
+func (o *StorageServiceSlowQueryGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service slow query get params
+func (o *StorageServiceSlowQueryGetParams) WithHTTPClient(client *http.Client) *StorageServiceSlowQueryGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service slow query get params
+func (o *StorageServiceSlowQueryGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageServiceSlowQueryGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_get_responses.go
new file mode 100644
index 00000000000..d63ee9057fb
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_get_responses.go
@@ -0,0 +1,116 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageServiceSlowQueryGetReader is a Reader for the StorageServiceSlowQueryGet structure.
+type StorageServiceSlowQueryGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageServiceSlowQueryGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageServiceSlowQueryGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageServiceSlowQueryGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageServiceSlowQueryGetOK creates a StorageServiceSlowQueryGetOK with default headers values
+func NewStorageServiceSlowQueryGetOK() *StorageServiceSlowQueryGetOK {
+	return &StorageServiceSlowQueryGetOK{}
+}
+
+/*
+StorageServiceSlowQueryGetOK handles this case with default header values.
+
+Success
+*/
+type StorageServiceSlowQueryGetOK struct {
+	Payload *models.SlowQueryInfo
+}
+
+func (o *StorageServiceSlowQueryGetOK) GetPayload() *models.SlowQueryInfo {
+	return o.Payload
+}
+
+func (o *StorageServiceSlowQueryGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.SlowQueryInfo)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewStorageServiceSlowQueryGetDefault creates a StorageServiceSlowQueryGetDefault with default headers values
+func NewStorageServiceSlowQueryGetDefault(code int) *StorageServiceSlowQueryGetDefault {
+	return &StorageServiceSlowQueryGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageServiceSlowQueryGetDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageServiceSlowQueryGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage service slow query get default response
+func (o *StorageServiceSlowQueryGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageServiceSlowQueryGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageServiceSlowQueryGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageServiceSlowQueryGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_post_parameters.go
new file mode 100644
index 00000000000..48c9347ceb2
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_post_parameters.go
@@ -0,0 +1,212 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+)
+
+// NewStorageServiceSlowQueryPostParams creates a new StorageServiceSlowQueryPostParams object
+// with the default values initialized.
+func NewStorageServiceSlowQueryPostParams() *StorageServiceSlowQueryPostParams {
+	var ()
+	return &StorageServiceSlowQueryPostParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceSlowQueryPostParamsWithTimeout creates a new StorageServiceSlowQueryPostParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceSlowQueryPostParamsWithTimeout(timeout time.Duration) *StorageServiceSlowQueryPostParams {
+	var ()
+	return &StorageServiceSlowQueryPostParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceSlowQueryPostParamsWithContext creates a new StorageServiceSlowQueryPostParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceSlowQueryPostParamsWithContext(ctx context.Context) *StorageServiceSlowQueryPostParams {
+	var ()
+	return &StorageServiceSlowQueryPostParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceSlowQueryPostParamsWithHTTPClient creates a new StorageServiceSlowQueryPostParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceSlowQueryPostParamsWithHTTPClient(client *http.Client) *StorageServiceSlowQueryPostParams {
+	var ()
+	return &StorageServiceSlowQueryPostParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceSlowQueryPostParams contains all the parameters to send to the API endpoint
+for the storage service slow query post operation typically these are written to a http.Request
+*/
+type StorageServiceSlowQueryPostParams struct {
+
+	/*Enable
+	  set it to true to enable, anything else to disable
+
+	*/
+	Enable *bool
+	/*Threshold
+	  Slow query record threshold in microseconds
+
+	*/
+	Threshold *string
+	/*TTL
+	  TTL in seconds
+
+	*/
+	TTL *string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) WithTimeout(timeout time.Duration) *StorageServiceSlowQueryPostParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) WithContext(ctx context.Context) *StorageServiceSlowQueryPostParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) WithHTTPClient(client *http.Client) *StorageServiceSlowQueryPostParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithEnable adds the enable to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) WithEnable(enable *bool) *StorageServiceSlowQueryPostParams {
+	o.SetEnable(enable)
+	return o
+}
+
+// SetEnable adds the enable to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) SetEnable(enable *bool) {
+	o.Enable = enable
+}
+
+// WithThreshold adds the threshold to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) WithThreshold(threshold *string) *StorageServiceSlowQueryPostParams {
+	o.SetThreshold(threshold)
+	return o
+}
+
+// SetThreshold adds the threshold to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) SetThreshold(threshold *string) {
+	o.Threshold = threshold
+}
+
+// WithTTL adds the ttl to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) WithTTL(ttl *string) *StorageServiceSlowQueryPostParams {
+	o.SetTTL(ttl)
+	return o
+}
+
+// SetTTL adds the ttl to the storage service slow query post params
+func (o *StorageServiceSlowQueryPostParams) SetTTL(ttl *string) {
+	o.TTL = ttl
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageServiceSlowQueryPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if o.Enable != nil {
+
+		// query param enable
+		var qrEnable bool
+		if o.Enable != nil {
+			qrEnable = *o.Enable
+		}
+		qEnable := swag.FormatBool(qrEnable)
+		if qEnable != "" {
+			if err := r.SetQueryParam("enable", qEnable); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if o.Threshold != nil {
+
+		// query param threshold
+		var qrThreshold string
+		if o.Threshold != nil {
+			qrThreshold = *o.Threshold
+		}
+		qThreshold := qrThreshold
+		if qThreshold != "" {
+			if err := r.SetQueryParam("threshold", qThreshold); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if o.TTL != nil {
+
+		// query param ttl
+		var qrTTL string
+		if o.TTL != nil {
+			qrTTL = *o.TTL
+		}
+		qTTL := qrTTL
+		if qTTL != "" {
+			if err := r.SetQueryParam("ttl", qTTL); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_post_responses.go
new file mode 100644
index 00000000000..1fd31e358cc
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_slow_query_post_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageServiceSlowQueryPostReader is a Reader for the StorageServiceSlowQueryPost structure.
+type StorageServiceSlowQueryPostReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageServiceSlowQueryPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageServiceSlowQueryPostOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageServiceSlowQueryPostDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageServiceSlowQueryPostOK creates a StorageServiceSlowQueryPostOK with default headers values
+func NewStorageServiceSlowQueryPostOK() *StorageServiceSlowQueryPostOK {
+	return &StorageServiceSlowQueryPostOK{}
+}
+
+/*
+StorageServiceSlowQueryPostOK handles this case with default header values.
+
+Success
+*/
+type StorageServiceSlowQueryPostOK struct {
+}
+
+func (o *StorageServiceSlowQueryPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewStorageServiceSlowQueryPostDefault creates a StorageServiceSlowQueryPostDefault with default headers values
+func NewStorageServiceSlowQueryPostDefault(code int) *StorageServiceSlowQueryPostDefault {
+	return &StorageServiceSlowQueryPostDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageServiceSlowQueryPostDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageServiceSlowQueryPostDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage service slow query post default response
+func (o *StorageServiceSlowQueryPostDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageServiceSlowQueryPostDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageServiceSlowQueryPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageServiceSlowQueryPostDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
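[Editor's note: continuing the hypothetical client sketch from above, this is roughly how the slow-query POST parameters would be used; the operation method name follows the usual go-swagger convention and is an assumption.]

	// Enable slow-query tracking for statements slower than 500ms, kept for an hour.
	enable := true
	threshold := "500000" // the API takes the threshold as a string of microseconds
	ttl := "3600"         // record TTL in seconds

	params := operations.NewStorageServiceSlowQueryPostParams().
		WithTimeout(10 * time.Second).
		WithEnable(&enable).
		WithThreshold(&threshold).
		WithTTL(&ttl)

	// The 200 response carries no payload, so only the error matters here.
	if _, err := ops.StorageServiceSlowQueryPost(params); err != nil {
		log.Fatal(err)
	}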
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_delete_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_delete_parameters.go
new file mode 100644
index 00000000000..2d442506b59
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_delete_parameters.go
@@ -0,0 +1,211 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageServiceSnapshotsDeleteParams creates a new StorageServiceSnapshotsDeleteParams object
+// with the default values initialized.
+func NewStorageServiceSnapshotsDeleteParams() *StorageServiceSnapshotsDeleteParams {
+	var ()
+	return &StorageServiceSnapshotsDeleteParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceSnapshotsDeleteParamsWithTimeout creates a new StorageServiceSnapshotsDeleteParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceSnapshotsDeleteParamsWithTimeout(timeout time.Duration) *StorageServiceSnapshotsDeleteParams {
+	var ()
+	return &StorageServiceSnapshotsDeleteParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceSnapshotsDeleteParamsWithContext creates a new StorageServiceSnapshotsDeleteParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceSnapshotsDeleteParamsWithContext(ctx context.Context) *StorageServiceSnapshotsDeleteParams {
+	var ()
+	return &StorageServiceSnapshotsDeleteParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceSnapshotsDeleteParamsWithHTTPClient creates a new StorageServiceSnapshotsDeleteParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceSnapshotsDeleteParamsWithHTTPClient(client *http.Client) *StorageServiceSnapshotsDeleteParams {
+	var ()
+	return &StorageServiceSnapshotsDeleteParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceSnapshotsDeleteParams contains all the parameters to send to the API endpoint
+for the storage service snapshots delete operation typically these are written to a http.Request
+*/
+type StorageServiceSnapshotsDeleteParams struct {
+
+	/*Cf
+	  an optional table name whose snapshot will be deleted
+
+	*/
+	Cf *string
+	/*Kn
+	  Comma separated keyspace names to snapshot
+
+	*/
+	Kn *string
+	/*Tag
+	  the tag given to the snapshot
+
+	*/
+	Tag *string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) WithTimeout(timeout time.Duration) *StorageServiceSnapshotsDeleteParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) WithContext(ctx context.Context) *StorageServiceSnapshotsDeleteParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) WithHTTPClient(client *http.Client) *StorageServiceSnapshotsDeleteParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithCf adds the cf to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) WithCf(cf *string) *StorageServiceSnapshotsDeleteParams {
+	o.SetCf(cf)
+	return o
+}
+
+// SetCf adds the cf to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) SetCf(cf *string) {
+	o.Cf = cf
+}
+
+// WithKn adds the kn to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) WithKn(kn *string) *StorageServiceSnapshotsDeleteParams {
+	o.SetKn(kn)
+	return o
+}
+
+// SetKn adds the kn to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) SetKn(kn *string) {
+	o.Kn = kn
+}
+
+// WithTag adds the tag to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) WithTag(tag *string) *StorageServiceSnapshotsDeleteParams {
+	o.SetTag(tag)
+	return o
+}
+
+// SetTag adds the tag to the storage service snapshots delete params
+func (o *StorageServiceSnapshotsDeleteParams) SetTag(tag *string) {
+	o.Tag = tag
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageServiceSnapshotsDeleteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if o.Cf != nil {
+
+		// query param cf
+		var qrCf string
+		if o.Cf != nil {
+			qrCf = *o.Cf
+		}
+		qCf := qrCf
+		if qCf != "" {
+			if err := r.SetQueryParam("cf", qCf); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if o.Kn != nil {
+
+		// query param kn
+		var qrKn string
+		if o.Kn != nil {
+			qrKn = *o.Kn
+		}
+		qKn := qrKn
+		if qKn != "" {
+			if err := r.SetQueryParam("kn", qKn); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if o.Tag != nil {
+
+		// query param tag
+		var qrTag string
+		if o.Tag != nil {
+			qrTag = *o.Tag
+		}
+		qTag := qrTag
+		if qTag != "" {
+			if err := r.SetQueryParam("tag", qTag); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_delete_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_delete_responses.go
new file mode 100644
index 00000000000..1423834d18d
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_delete_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageServiceSnapshotsDeleteReader is a Reader for the StorageServiceSnapshotsDelete structure.
+type StorageServiceSnapshotsDeleteReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageServiceSnapshotsDeleteReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageServiceSnapshotsDeleteOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageServiceSnapshotsDeleteDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageServiceSnapshotsDeleteOK creates a StorageServiceSnapshotsDeleteOK with default headers values
+func NewStorageServiceSnapshotsDeleteOK() *StorageServiceSnapshotsDeleteOK {
+	return &StorageServiceSnapshotsDeleteOK{}
+}
+
+/*
+StorageServiceSnapshotsDeleteOK handles this case with default header values.
+
+Success
+*/
+type StorageServiceSnapshotsDeleteOK struct {
+}
+
+func (o *StorageServiceSnapshotsDeleteOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewStorageServiceSnapshotsDeleteDefault creates a StorageServiceSnapshotsDeleteDefault with default headers values
+func NewStorageServiceSnapshotsDeleteDefault(code int) *StorageServiceSnapshotsDeleteDefault {
+	return &StorageServiceSnapshotsDeleteDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageServiceSnapshotsDeleteDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageServiceSnapshotsDeleteDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage service snapshots delete default response
+func (o *StorageServiceSnapshotsDeleteDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageServiceSnapshotsDeleteDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageServiceSnapshotsDeleteDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageServiceSnapshotsDeleteDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_get_parameters.go
new file mode 100644
index 00000000000..0b826f2db98
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageServiceSnapshotsGetParams creates a new StorageServiceSnapshotsGetParams object
+// with the default values initialized.
+func NewStorageServiceSnapshotsGetParams() *StorageServiceSnapshotsGetParams {
+
+	return &StorageServiceSnapshotsGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceSnapshotsGetParamsWithTimeout creates a new StorageServiceSnapshotsGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceSnapshotsGetParamsWithTimeout(timeout time.Duration) *StorageServiceSnapshotsGetParams {
+
+	return &StorageServiceSnapshotsGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceSnapshotsGetParamsWithContext creates a new StorageServiceSnapshotsGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceSnapshotsGetParamsWithContext(ctx context.Context) *StorageServiceSnapshotsGetParams {
+
+	return &StorageServiceSnapshotsGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceSnapshotsGetParamsWithHTTPClient creates a new StorageServiceSnapshotsGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceSnapshotsGetParamsWithHTTPClient(client *http.Client) *StorageServiceSnapshotsGetParams {
+
+	return &StorageServiceSnapshotsGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceSnapshotsGetParams contains all the parameters to send to the API endpoint
+for the storage service snapshots get operation typically these are written to a http.Request
+*/
+type StorageServiceSnapshotsGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service snapshots get params
+func (o *StorageServiceSnapshotsGetParams) WithTimeout(timeout time.Duration) *StorageServiceSnapshotsGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service snapshots get params
+func (o *StorageServiceSnapshotsGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service snapshots get params
+func (o *StorageServiceSnapshotsGetParams) WithContext(ctx context.Context) *StorageServiceSnapshotsGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service snapshots get params
+func (o *StorageServiceSnapshotsGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service snapshots get params
+func (o *StorageServiceSnapshotsGetParams) WithHTTPClient(client *http.Client) *StorageServiceSnapshotsGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service snapshots get params
+func (o *StorageServiceSnapshotsGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageServiceSnapshotsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_get_responses.go
new file mode 100644
index 00000000000..cbd784d1c1c
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageServiceSnapshotsGetReader is a Reader for the StorageServiceSnapshotsGet structure.
+type StorageServiceSnapshotsGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageServiceSnapshotsGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageServiceSnapshotsGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageServiceSnapshotsGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageServiceSnapshotsGetOK creates a StorageServiceSnapshotsGetOK with default headers values
+func NewStorageServiceSnapshotsGetOK() *StorageServiceSnapshotsGetOK {
+	return &StorageServiceSnapshotsGetOK{}
+}
+
+/*
+StorageServiceSnapshotsGetOK handles this case with default header values.
+
+Success
+*/
+type StorageServiceSnapshotsGetOK struct {
+	Payload []*models.Snapshots
+}
+
+func (o *StorageServiceSnapshotsGetOK) GetPayload() []*models.Snapshots {
+	return o.Payload
+}
+
+func (o *StorageServiceSnapshotsGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewStorageServiceSnapshotsGetDefault creates a StorageServiceSnapshotsGetDefault with default headers values
+func NewStorageServiceSnapshotsGetDefault(code int) *StorageServiceSnapshotsGetDefault {
+	return &StorageServiceSnapshotsGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageServiceSnapshotsGetDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageServiceSnapshotsGetDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage service snapshots get default response
+func (o *StorageServiceSnapshotsGetDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageServiceSnapshotsGetDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageServiceSnapshotsGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageServiceSnapshotsGetDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
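[Editor's note: a short hedged sketch, continuing the same hypothetical client, of listing snapshots; the OK payload is the []*models.Snapshots slice declared above, and models.Snapshots itself is defined outside this diff.]

	res, err := ops.StorageServiceSnapshotsGet(operations.NewStorageServiceSnapshotsGetParams())
	if err != nil {
		log.Fatal(err)
	}
	for _, snap := range res.GetPayload() {
		// Field names live in models.Snapshots, which this diff does not show,
		// so just dump each entry.
		fmt.Printf("%+v\n", snap)
	}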
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_post_parameters.go
new file mode 100644
index 00000000000..169a3a9ba1e
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_post_parameters.go
@@ -0,0 +1,211 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageServiceSnapshotsPostParams creates a new StorageServiceSnapshotsPostParams object
+// with the default values initialized.
+func NewStorageServiceSnapshotsPostParams() *StorageServiceSnapshotsPostParams {
+	var ()
+	return &StorageServiceSnapshotsPostParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceSnapshotsPostParamsWithTimeout creates a new StorageServiceSnapshotsPostParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceSnapshotsPostParamsWithTimeout(timeout time.Duration) *StorageServiceSnapshotsPostParams {
+	var ()
+	return &StorageServiceSnapshotsPostParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceSnapshotsPostParamsWithContext creates a new StorageServiceSnapshotsPostParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceSnapshotsPostParamsWithContext(ctx context.Context) *StorageServiceSnapshotsPostParams {
+	var ()
+	return &StorageServiceSnapshotsPostParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceSnapshotsPostParamsWithHTTPClient creates a new StorageServiceSnapshotsPostParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceSnapshotsPostParamsWithHTTPClient(client *http.Client) *StorageServiceSnapshotsPostParams {
+	var ()
+	return &StorageServiceSnapshotsPostParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceSnapshotsPostParams contains all the parameters to send to the API endpoint
+for the storage service snapshots post operation typically these are written to a http.Request
+*/
+type StorageServiceSnapshotsPostParams struct {
+
+	/*Cf
+	  the column family to snapshot
+
+	*/
+	Cf *string
+	/*Kn
+	  Comma separated keyspace names to snapshot
+
+	*/
+	Kn *string
+	/*Tag
+	  the tag given to the snapshot
+
+	*/
+	Tag *string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) WithTimeout(timeout time.Duration) *StorageServiceSnapshotsPostParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) WithContext(ctx context.Context) *StorageServiceSnapshotsPostParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) WithHTTPClient(client *http.Client) *StorageServiceSnapshotsPostParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithCf adds the cf to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) WithCf(cf *string) *StorageServiceSnapshotsPostParams {
+	o.SetCf(cf)
+	return o
+}
+
+// SetCf adds the cf to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) SetCf(cf *string) {
+	o.Cf = cf
+}
+
+// WithKn adds the kn to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) WithKn(kn *string) *StorageServiceSnapshotsPostParams {
+	o.SetKn(kn)
+	return o
+}
+
+// SetKn adds the kn to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) SetKn(kn *string) {
+	o.Kn = kn
+}
+
+// WithTag adds the tag to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) WithTag(tag *string) *StorageServiceSnapshotsPostParams {
+	o.SetTag(tag)
+	return o
+}
+
+// SetTag adds the tag to the storage service snapshots post params
+func (o *StorageServiceSnapshotsPostParams) SetTag(tag *string) {
+	o.Tag = tag
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageServiceSnapshotsPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if o.Cf != nil {
+
+		// query param cf
+		var qrCf string
+		if o.Cf != nil {
+			qrCf = *o.Cf
+		}
+		qCf := qrCf
+		if qCf != "" {
+			if err := r.SetQueryParam("cf", qCf); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if o.Kn != nil {
+
+		// query param kn
+		var qrKn string
+		if o.Kn != nil {
+			qrKn = *o.Kn
+		}
+		qKn := qrKn
+		if qKn != "" {
+			if err := r.SetQueryParam("kn", qKn); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if o.Tag != nil {
+
+		// query param tag
+		var qrTag string
+		if o.Tag != nil {
+			qrTag = *o.Tag
+		}
+		qTag := qrTag
+		if qTag != "" {
+			if err := r.SetQueryParam("tag", qTag); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_post_responses.go
new file mode 100644
index 00000000000..f35738ccfb2
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_post_responses.go
@@ -0,0 +1,104 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageServiceSnapshotsPostReader is a Reader for the StorageServiceSnapshotsPost structure.
+type StorageServiceSnapshotsPostReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageServiceSnapshotsPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageServiceSnapshotsPostOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageServiceSnapshotsPostDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageServiceSnapshotsPostOK creates a StorageServiceSnapshotsPostOK with default headers values
+func NewStorageServiceSnapshotsPostOK() *StorageServiceSnapshotsPostOK {
+	return &StorageServiceSnapshotsPostOK{}
+}
+
+/*
+StorageServiceSnapshotsPostOK handles this case with default header values.
+
+Success
+*/
+type StorageServiceSnapshotsPostOK struct {
+}
+
+func (o *StorageServiceSnapshotsPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	return nil
+}
+
+// NewStorageServiceSnapshotsPostDefault creates a StorageServiceSnapshotsPostDefault with default headers values
+func NewStorageServiceSnapshotsPostDefault(code int) *StorageServiceSnapshotsPostDefault {
+	return &StorageServiceSnapshotsPostDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageServiceSnapshotsPostDefault handles this case with default header values.
+
+internal server error
+*/
+type StorageServiceSnapshotsPostDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the storage service snapshots post default response
+func (o *StorageServiceSnapshotsPostDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *StorageServiceSnapshotsPostDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *StorageServiceSnapshotsPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *StorageServiceSnapshotsPostDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
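[Editor's note: a sketch, under the same assumptions as the earlier examples, of taking and later deleting a tagged snapshot with the params above; kn takes a comma-separated keyspace list, and cf optionally narrows the snapshot to one table.]

	tag, kn := "sm_backup_2024", "ks1,ks2"

	create := operations.NewStorageServiceSnapshotsPostParams().
		WithTag(&tag).
		WithKn(&kn)
	if _, err := ops.StorageServiceSnapshotsPost(create); err != nil {
		log.Fatal(err)
	}

	// ... copy the snapshot files somewhere safe, then drop the snapshot:
	del := operations.NewStorageServiceSnapshotsDeleteParams().WithTag(&tag)
	if _, err := ops.StorageServiceSnapshotsDelete(del); err != nil {
		log.Fatal(err)
	}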
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_size_true_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_size_true_get_parameters.go
new file mode 100644
index 00000000000..209ba422532
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_size_true_get_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewStorageServiceSnapshotsSizeTrueGetParams creates a new StorageServiceSnapshotsSizeTrueGetParams object
+// with the default values initialized.
+func NewStorageServiceSnapshotsSizeTrueGetParams() *StorageServiceSnapshotsSizeTrueGetParams {
+
+	return &StorageServiceSnapshotsSizeTrueGetParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewStorageServiceSnapshotsSizeTrueGetParamsWithTimeout creates a new StorageServiceSnapshotsSizeTrueGetParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewStorageServiceSnapshotsSizeTrueGetParamsWithTimeout(timeout time.Duration) *StorageServiceSnapshotsSizeTrueGetParams {
+
+	return &StorageServiceSnapshotsSizeTrueGetParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewStorageServiceSnapshotsSizeTrueGetParamsWithContext creates a new StorageServiceSnapshotsSizeTrueGetParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewStorageServiceSnapshotsSizeTrueGetParamsWithContext(ctx context.Context) *StorageServiceSnapshotsSizeTrueGetParams {
+
+	return &StorageServiceSnapshotsSizeTrueGetParams{
+
+		Context: ctx,
+	}
+}
+
+// NewStorageServiceSnapshotsSizeTrueGetParamsWithHTTPClient creates a new StorageServiceSnapshotsSizeTrueGetParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewStorageServiceSnapshotsSizeTrueGetParamsWithHTTPClient(client *http.Client) *StorageServiceSnapshotsSizeTrueGetParams {
+
+	return &StorageServiceSnapshotsSizeTrueGetParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+StorageServiceSnapshotsSizeTrueGetParams contains all the parameters to send to the API endpoint
+for the storage service snapshots size true get operation typically these are written to a http.Request
+*/
+type StorageServiceSnapshotsSizeTrueGetParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the storage service snapshots size true get params
+func (o *StorageServiceSnapshotsSizeTrueGetParams) WithTimeout(timeout time.Duration) *StorageServiceSnapshotsSizeTrueGetParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the storage service snapshots size true get params
+func (o *StorageServiceSnapshotsSizeTrueGetParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the storage service snapshots size true get params
+func (o *StorageServiceSnapshotsSizeTrueGetParams) WithContext(ctx context.Context) *StorageServiceSnapshotsSizeTrueGetParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the storage service snapshots size true get params
+func (o *StorageServiceSnapshotsSizeTrueGetParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the storage service snapshots size true get params
+func (o *StorageServiceSnapshotsSizeTrueGetParams) WithHTTPClient(client *http.Client) *StorageServiceSnapshotsSizeTrueGetParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the storage service snapshots size true get params
+func (o *StorageServiceSnapshotsSizeTrueGetParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *StorageServiceSnapshotsSizeTrueGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_size_true_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_size_true_get_responses.go
new file mode 100644
index 00000000000..4dc40630121
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_snapshots_size_true_get_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package operations
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
+)
+
+// StorageServiceSnapshotsSizeTrueGetReader is a Reader for the StorageServiceSnapshotsSizeTrueGet structure.
+type StorageServiceSnapshotsSizeTrueGetReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *StorageServiceSnapshotsSizeTrueGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewStorageServiceSnapshotsSizeTrueGetOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewStorageServiceSnapshotsSizeTrueGetDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewStorageServiceSnapshotsSizeTrueGetOK creates a StorageServiceSnapshotsSizeTrueGetOK with default headers values
+func NewStorageServiceSnapshotsSizeTrueGetOK() *StorageServiceSnapshotsSizeTrueGetOK {
+	return &StorageServiceSnapshotsSizeTrueGetOK{}
+}
+
+/*
+StorageServiceSnapshotsSizeTrueGetOK handles this case with default header values.
+
+Success
+*/
+type StorageServiceSnapshotsSizeTrueGetOK struct {
+	Payload interface{}
+}
+
+func (o *StorageServiceSnapshotsSizeTrueGetOK) GetPayload() interface{} {
+	return o.Payload
+}
+
+func (o *StorageServiceSnapshotsSizeTrueGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewStorageServiceSnapshotsSizeTrueGetDefault creates a StorageServiceSnapshotsSizeTrueGetDefault with default headers values
+func NewStorageServiceSnapshotsSizeTrueGetDefault(code int) *StorageServiceSnapshotsSizeTrueGetDefault {
+	return &StorageServiceSnapshotsSizeTrueGetDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+StorageServiceSnapshotsSizeTrueGetDefault handles this case with default header values.
+ +internal server error +*/ +type StorageServiceSnapshotsSizeTrueGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service snapshots size true get default response +func (o *StorageServiceSnapshotsSizeTrueGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceSnapshotsSizeTrueGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceSnapshotsSizeTrueGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceSnapshotsSizeTrueGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sstables_by_keyspace_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sstables_by_keyspace_post_parameters.go new file mode 100644 index 00000000000..7298674e2a5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sstables_by_keyspace_post_parameters.go @@ -0,0 +1,226 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceSstablesByKeyspacePostParams creates a new StorageServiceSstablesByKeyspacePostParams object +// with the default values initialized. 
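// The size=true variant above is declared without a response schema, so its
// OK payload is decoded into a bare interface{}. With the stock go-openapi
// JSON consumer (which calls dec.UseNumber()), a numeric body typically
// arrives as a json.Number; a caller-side sketch, with the client wiring
// assumed:
//
//	res, err := client.StorageServiceSnapshotsSizeTrueGet(NewStorageServiceSnapshotsSizeTrueGetParams())
//	if err == nil {
//		if n, ok := res.GetPayload().(json.Number); ok {
//			fmt.Println("true snapshot size:", n)
//		}
//	}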
+func NewStorageServiceSstablesByKeyspacePostParams() *StorageServiceSstablesByKeyspacePostParams { + var () + return &StorageServiceSstablesByKeyspacePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceSstablesByKeyspacePostParamsWithTimeout creates a new StorageServiceSstablesByKeyspacePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceSstablesByKeyspacePostParamsWithTimeout(timeout time.Duration) *StorageServiceSstablesByKeyspacePostParams { + var () + return &StorageServiceSstablesByKeyspacePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceSstablesByKeyspacePostParamsWithContext creates a new StorageServiceSstablesByKeyspacePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceSstablesByKeyspacePostParamsWithContext(ctx context.Context) *StorageServiceSstablesByKeyspacePostParams { + var () + return &StorageServiceSstablesByKeyspacePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceSstablesByKeyspacePostParamsWithHTTPClient creates a new StorageServiceSstablesByKeyspacePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceSstablesByKeyspacePostParamsWithHTTPClient(client *http.Client) *StorageServiceSstablesByKeyspacePostParams { + var () + return &StorageServiceSstablesByKeyspacePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceSstablesByKeyspacePostParams contains all the parameters to send to the API endpoint +for the storage service sstables by keyspace post operation typically these are written to a http.Request +*/ +type StorageServiceSstablesByKeyspacePostParams struct { + + /*Cf + Column family name + + */ + Cf string + /*Keyspace + The keyspace + + */ + Keyspace string + /*LoadAndStream + Load the sstables and stream to all replica nodes that owns the data + + */ + LoadAndStream *bool + /*PrimaryReplicaOnly + Load the sstables and stream to primary replica node that owns the data. 
Repair is needed after the load and stream process + + */ + PrimaryReplicaOnly *bool + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) WithTimeout(timeout time.Duration) *StorageServiceSstablesByKeyspacePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) WithContext(ctx context.Context) *StorageServiceSstablesByKeyspacePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) WithHTTPClient(client *http.Client) *StorageServiceSstablesByKeyspacePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) WithCf(cf string) *StorageServiceSstablesByKeyspacePostParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) SetCf(cf string) { + o.Cf = cf +} + +// WithKeyspace adds the keyspace to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) WithKeyspace(keyspace string) *StorageServiceSstablesByKeyspacePostParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WithLoadAndStream adds the loadAndStream to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) WithLoadAndStream(loadAndStream *bool) *StorageServiceSstablesByKeyspacePostParams { + o.SetLoadAndStream(loadAndStream) + return o +} + +// SetLoadAndStream adds the loadAndStream to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) SetLoadAndStream(loadAndStream *bool) { + o.LoadAndStream = loadAndStream +} + +// WithPrimaryReplicaOnly adds the primaryReplicaOnly to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) WithPrimaryReplicaOnly(primaryReplicaOnly *bool) *StorageServiceSstablesByKeyspacePostParams { + o.SetPrimaryReplicaOnly(primaryReplicaOnly) + return o +} + +// SetPrimaryReplicaOnly adds the primaryReplicaOnly to the storage service sstables by keyspace post params +func (o *StorageServiceSstablesByKeyspacePostParams) SetPrimaryReplicaOnly(primaryReplicaOnly *bool) { + o.PrimaryReplicaOnly = primaryReplicaOnly +} + 
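// Taken together, these setters support a load-and-stream call such as the
// following sketch; the transport address and table names are assumptions,
// and the client method shape follows the usual go-swagger convention:
//
//	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
//	client := operations.New(transport, strfmt.Default)
//	params := NewStorageServiceSstablesByKeyspacePostParams().
//		WithKeyspace("ks1").
//		WithCf("table1").
//		WithLoadAndStream(swag.Bool(true))
//	if _, err := client.StorageServiceSstablesByKeyspacePost(params); err != nil {
//		// handle *StorageServiceSstablesByKeyspacePostDefault
//	}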
+// WriteToRequest writes these params to a swagger request +func (o *StorageServiceSstablesByKeyspacePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param cf + qrCf := o.Cf + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if o.LoadAndStream != nil { + + // query param load_and_stream + var qrLoadAndStream bool + if o.LoadAndStream != nil { + qrLoadAndStream = *o.LoadAndStream + } + qLoadAndStream := swag.FormatBool(qrLoadAndStream) + if qLoadAndStream != "" { + if err := r.SetQueryParam("load_and_stream", qLoadAndStream); err != nil { + return err + } + } + + } + + if o.PrimaryReplicaOnly != nil { + + // query param primary_replica_only + var qrPrimaryReplicaOnly bool + if o.PrimaryReplicaOnly != nil { + qrPrimaryReplicaOnly = *o.PrimaryReplicaOnly + } + qPrimaryReplicaOnly := swag.FormatBool(qrPrimaryReplicaOnly) + if qPrimaryReplicaOnly != "" { + if err := r.SetQueryParam("primary_replica_only", qPrimaryReplicaOnly); err != nil { + return err + } + } + + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sstables_by_keyspace_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sstables_by_keyspace_post_responses.go new file mode 100644 index 00000000000..0c0c3037e10 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_sstables_by_keyspace_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceSstablesByKeyspacePostReader is a Reader for the StorageServiceSstablesByKeyspacePost structure. +type StorageServiceSstablesByKeyspacePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceSstablesByKeyspacePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceSstablesByKeyspacePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceSstablesByKeyspacePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceSstablesByKeyspacePostOK creates a StorageServiceSstablesByKeyspacePostOK with default headers values +func NewStorageServiceSstablesByKeyspacePostOK() *StorageServiceSstablesByKeyspacePostOK { + return &StorageServiceSstablesByKeyspacePostOK{} +} + +/* +StorageServiceSstablesByKeyspacePostOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceSstablesByKeyspacePostOK struct { +} + +func (o *StorageServiceSstablesByKeyspacePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceSstablesByKeyspacePostDefault creates a StorageServiceSstablesByKeyspacePostDefault with default headers values +func NewStorageServiceSstablesByKeyspacePostDefault(code int) *StorageServiceSstablesByKeyspacePostDefault { + return &StorageServiceSstablesByKeyspacePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceSstablesByKeyspacePostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceSstablesByKeyspacePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service sstables by keyspace post default response +func (o *StorageServiceSstablesByKeyspacePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceSstablesByKeyspacePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceSstablesByKeyspacePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceSstablesByKeyspacePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stop_daemon_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stop_daemon_post_parameters.go new file mode 100644 index 00000000000..07572379cf4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stop_daemon_post_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceStopDaemonPostParams creates a new StorageServiceStopDaemonPostParams object +// with the default values initialized. 
+func NewStorageServiceStopDaemonPostParams() *StorageServiceStopDaemonPostParams { + + return &StorageServiceStopDaemonPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceStopDaemonPostParamsWithTimeout creates a new StorageServiceStopDaemonPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceStopDaemonPostParamsWithTimeout(timeout time.Duration) *StorageServiceStopDaemonPostParams { + + return &StorageServiceStopDaemonPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceStopDaemonPostParamsWithContext creates a new StorageServiceStopDaemonPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceStopDaemonPostParamsWithContext(ctx context.Context) *StorageServiceStopDaemonPostParams { + + return &StorageServiceStopDaemonPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceStopDaemonPostParamsWithHTTPClient creates a new StorageServiceStopDaemonPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceStopDaemonPostParamsWithHTTPClient(client *http.Client) *StorageServiceStopDaemonPostParams { + + return &StorageServiceStopDaemonPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceStopDaemonPostParams contains all the parameters to send to the API endpoint +for the storage service stop daemon post operation typically these are written to a http.Request +*/ +type StorageServiceStopDaemonPostParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service stop daemon post params +func (o *StorageServiceStopDaemonPostParams) WithTimeout(timeout time.Duration) *StorageServiceStopDaemonPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service stop daemon post params +func (o *StorageServiceStopDaemonPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service stop daemon post params +func (o *StorageServiceStopDaemonPostParams) WithContext(ctx context.Context) *StorageServiceStopDaemonPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service stop daemon post params +func (o *StorageServiceStopDaemonPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service stop daemon post params +func (o *StorageServiceStopDaemonPostParams) WithHTTPClient(client *http.Client) *StorageServiceStopDaemonPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service stop daemon post params +func (o *StorageServiceStopDaemonPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceStopDaemonPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stop_daemon_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stop_daemon_post_responses.go new file mode 100644 index 00000000000..e0686eb1eb3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stop_daemon_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceStopDaemonPostReader is a Reader for the StorageServiceStopDaemonPost structure. +type StorageServiceStopDaemonPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceStopDaemonPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceStopDaemonPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceStopDaemonPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceStopDaemonPostOK creates a StorageServiceStopDaemonPostOK with default headers values +func NewStorageServiceStopDaemonPostOK() *StorageServiceStopDaemonPostOK { + return &StorageServiceStopDaemonPostOK{} +} + +/* +StorageServiceStopDaemonPostOK handles this case with default header values. + +Success +*/ +type StorageServiceStopDaemonPostOK struct { +} + +func (o *StorageServiceStopDaemonPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceStopDaemonPostDefault creates a StorageServiceStopDaemonPostDefault with default headers values +func NewStorageServiceStopDaemonPostDefault(code int) *StorageServiceStopDaemonPostDefault { + return &StorageServiceStopDaemonPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceStopDaemonPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceStopDaemonPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service stop daemon post default response +func (o *StorageServiceStopDaemonPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceStopDaemonPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceStopDaemonPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceStopDaemonPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_get_parameters.go new file mode 100644 index 00000000000..581005330a8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceStreamThroughputGetParams creates a new StorageServiceStreamThroughputGetParams object +// with the default values initialized. 
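// Every reader in this package dispatches the same way: HTTP 200 maps to the
// typed OK value, any other status is decoded into the Default type, and an
// unexpected 2xx is still returned as a success. Since the Default type
// implements error, the agent's error payload can be recovered with a type
// assertion (a sketch; the client value is assumed):
//
//	if _, err := client.StorageServiceStopDaemonPost(NewStorageServiceStopDaemonPostParams()); err != nil {
//		if d, ok := err.(*StorageServiceStopDaemonPostDefault); ok {
//			log.Printf("agent returned HTTP %d: %s", d.Code(), d.GetPayload().Message)
//		}
//	}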
+func NewStorageServiceStreamThroughputGetParams() *StorageServiceStreamThroughputGetParams { + + return &StorageServiceStreamThroughputGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceStreamThroughputGetParamsWithTimeout creates a new StorageServiceStreamThroughputGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceStreamThroughputGetParamsWithTimeout(timeout time.Duration) *StorageServiceStreamThroughputGetParams { + + return &StorageServiceStreamThroughputGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceStreamThroughputGetParamsWithContext creates a new StorageServiceStreamThroughputGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceStreamThroughputGetParamsWithContext(ctx context.Context) *StorageServiceStreamThroughputGetParams { + + return &StorageServiceStreamThroughputGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceStreamThroughputGetParamsWithHTTPClient creates a new StorageServiceStreamThroughputGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceStreamThroughputGetParamsWithHTTPClient(client *http.Client) *StorageServiceStreamThroughputGetParams { + + return &StorageServiceStreamThroughputGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceStreamThroughputGetParams contains all the parameters to send to the API endpoint +for the storage service stream throughput get operation typically these are written to a http.Request +*/ +type StorageServiceStreamThroughputGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service stream throughput get params +func (o *StorageServiceStreamThroughputGetParams) WithTimeout(timeout time.Duration) *StorageServiceStreamThroughputGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service stream throughput get params +func (o *StorageServiceStreamThroughputGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service stream throughput get params +func (o *StorageServiceStreamThroughputGetParams) WithContext(ctx context.Context) *StorageServiceStreamThroughputGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service stream throughput get params +func (o *StorageServiceStreamThroughputGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service stream throughput get params +func (o *StorageServiceStreamThroughputGetParams) WithHTTPClient(client *http.Client) *StorageServiceStreamThroughputGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service stream throughput get params +func (o *StorageServiceStreamThroughputGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceStreamThroughputGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_get_responses.go new file mode 100644 index 00000000000..094c63d709b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceStreamThroughputGetReader is a Reader for the StorageServiceStreamThroughputGet structure. +type StorageServiceStreamThroughputGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceStreamThroughputGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceStreamThroughputGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceStreamThroughputGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceStreamThroughputGetOK creates a StorageServiceStreamThroughputGetOK with default headers values +func NewStorageServiceStreamThroughputGetOK() *StorageServiceStreamThroughputGetOK { + return &StorageServiceStreamThroughputGetOK{} +} + +/* +StorageServiceStreamThroughputGetOK handles this case with default header values. + +Success +*/ +type StorageServiceStreamThroughputGetOK struct { + Payload int32 +} + +func (o *StorageServiceStreamThroughputGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceStreamThroughputGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceStreamThroughputGetDefault creates a StorageServiceStreamThroughputGetDefault with default headers values +func NewStorageServiceStreamThroughputGetDefault(code int) *StorageServiceStreamThroughputGetDefault { + return &StorageServiceStreamThroughputGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceStreamThroughputGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceStreamThroughputGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service stream throughput get default response +func (o *StorageServiceStreamThroughputGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceStreamThroughputGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceStreamThroughputGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceStreamThroughputGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_post_parameters.go new file mode 100644 index 00000000000..9ec5bf7205f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceStreamThroughputPostParams creates a new StorageServiceStreamThroughputPostParams object +// with the default values initialized. 
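// The GET above returns the node's current stream throughput setting as an
// int32 payload, and the POST defined below replaces it. A read-modify-write
// sketch, assuming the usual go-swagger client wiring and a hypothetical
// cap of 400:
//
//	ok, err := client.StorageServiceStreamThroughputGet(NewStorageServiceStreamThroughputGetParams())
//	if err == nil && ok.GetPayload() > 400 {
//		_, err = client.StorageServiceStreamThroughputPost(
//			NewStorageServiceStreamThroughputPostParams().WithValue(400))
//	}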
+func NewStorageServiceStreamThroughputPostParams() *StorageServiceStreamThroughputPostParams { + var () + return &StorageServiceStreamThroughputPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceStreamThroughputPostParamsWithTimeout creates a new StorageServiceStreamThroughputPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceStreamThroughputPostParamsWithTimeout(timeout time.Duration) *StorageServiceStreamThroughputPostParams { + var () + return &StorageServiceStreamThroughputPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceStreamThroughputPostParamsWithContext creates a new StorageServiceStreamThroughputPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceStreamThroughputPostParamsWithContext(ctx context.Context) *StorageServiceStreamThroughputPostParams { + var () + return &StorageServiceStreamThroughputPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceStreamThroughputPostParamsWithHTTPClient creates a new StorageServiceStreamThroughputPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceStreamThroughputPostParamsWithHTTPClient(client *http.Client) *StorageServiceStreamThroughputPostParams { + var () + return &StorageServiceStreamThroughputPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceStreamThroughputPostParams contains all the parameters to send to the API endpoint +for the storage service stream throughput post operation typically these are written to a http.Request +*/ +type StorageServiceStreamThroughputPostParams struct { + + /*Value + Stream throughput + + */ + Value int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) WithTimeout(timeout time.Duration) *StorageServiceStreamThroughputPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) WithContext(ctx context.Context) *StorageServiceStreamThroughputPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) WithHTTPClient(client *http.Client) *StorageServiceStreamThroughputPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithValue adds the value to the storage service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) WithValue(value int32) *StorageServiceStreamThroughputPostParams { + o.SetValue(value) + return o +} + +// SetValue adds the value to the storage 
service stream throughput post params +func (o *StorageServiceStreamThroughputPostParams) SetValue(value int32) { + o.Value = value +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceStreamThroughputPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param value + qrValue := o.Value + qValue := swag.FormatInt32(qrValue) + if qValue != "" { + if err := r.SetQueryParam("value", qValue); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_post_responses.go new file mode 100644 index 00000000000..2aea7b6c38d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_stream_throughput_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceStreamThroughputPostReader is a Reader for the StorageServiceStreamThroughputPost structure. +type StorageServiceStreamThroughputPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceStreamThroughputPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceStreamThroughputPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceStreamThroughputPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceStreamThroughputPostOK creates a StorageServiceStreamThroughputPostOK with default headers values +func NewStorageServiceStreamThroughputPostOK() *StorageServiceStreamThroughputPostOK { + return &StorageServiceStreamThroughputPostOK{} +} + +/* +StorageServiceStreamThroughputPostOK handles this case with default header values. + +Success +*/ +type StorageServiceStreamThroughputPostOK struct { +} + +func (o *StorageServiceStreamThroughputPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceStreamThroughputPostDefault creates a StorageServiceStreamThroughputPostDefault with default headers values +func NewStorageServiceStreamThroughputPostDefault(code int) *StorageServiceStreamThroughputPostDefault { + return &StorageServiceStreamThroughputPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceStreamThroughputPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceStreamThroughputPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service stream throughput post default response +func (o *StorageServiceStreamThroughputPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceStreamThroughputPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceStreamThroughputPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceStreamThroughputPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_by_endpoint_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_by_endpoint_get_parameters.go new file mode 100644 index 00000000000..08332b224ee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_by_endpoint_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTokensByEndpointGetParams creates a new StorageServiceTokensByEndpointGetParams object +// with the default values initialized. 
+func NewStorageServiceTokensByEndpointGetParams() *StorageServiceTokensByEndpointGetParams { + var () + return &StorageServiceTokensByEndpointGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTokensByEndpointGetParamsWithTimeout creates a new StorageServiceTokensByEndpointGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTokensByEndpointGetParamsWithTimeout(timeout time.Duration) *StorageServiceTokensByEndpointGetParams { + var () + return &StorageServiceTokensByEndpointGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTokensByEndpointGetParamsWithContext creates a new StorageServiceTokensByEndpointGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTokensByEndpointGetParamsWithContext(ctx context.Context) *StorageServiceTokensByEndpointGetParams { + var () + return &StorageServiceTokensByEndpointGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceTokensByEndpointGetParamsWithHTTPClient creates a new StorageServiceTokensByEndpointGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTokensByEndpointGetParamsWithHTTPClient(client *http.Client) *StorageServiceTokensByEndpointGetParams { + var () + return &StorageServiceTokensByEndpointGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTokensByEndpointGetParams contains all the parameters to send to the API endpoint +for the storage service tokens by endpoint get operation typically these are written to a http.Request +*/ +type StorageServiceTokensByEndpointGetParams struct { + + /*Endpoint + The endpoint + + */ + Endpoint string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) WithTimeout(timeout time.Duration) *StorageServiceTokensByEndpointGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) WithContext(ctx context.Context) *StorageServiceTokensByEndpointGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) WithHTTPClient(client *http.Client) *StorageServiceTokensByEndpointGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithEndpoint adds the endpoint to the storage service tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) WithEndpoint(endpoint string) *StorageServiceTokensByEndpointGetParams { + o.SetEndpoint(endpoint) + return o +} + +// SetEndpoint adds the endpoint to the storage service 
tokens by endpoint get params +func (o *StorageServiceTokensByEndpointGetParams) SetEndpoint(endpoint string) { + o.Endpoint = endpoint +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTokensByEndpointGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param endpoint + if err := r.SetPathParam("endpoint", o.Endpoint); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_by_endpoint_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_by_endpoint_get_responses.go new file mode 100644 index 00000000000..1a16a90a567 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_by_endpoint_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTokensByEndpointGetReader is a Reader for the StorageServiceTokensByEndpointGet structure. +type StorageServiceTokensByEndpointGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTokensByEndpointGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTokensByEndpointGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTokensByEndpointGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTokensByEndpointGetOK creates a StorageServiceTokensByEndpointGetOK with default headers values +func NewStorageServiceTokensByEndpointGetOK() *StorageServiceTokensByEndpointGetOK { + return &StorageServiceTokensByEndpointGetOK{} +} + +/* +StorageServiceTokensByEndpointGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceTokensByEndpointGetOK struct { + Payload []string +} + +func (o *StorageServiceTokensByEndpointGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceTokensByEndpointGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceTokensByEndpointGetDefault creates a StorageServiceTokensByEndpointGetDefault with default headers values +func NewStorageServiceTokensByEndpointGetDefault(code int) *StorageServiceTokensByEndpointGetDefault { + return &StorageServiceTokensByEndpointGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTokensByEndpointGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceTokensByEndpointGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service tokens by endpoint get default response +func (o *StorageServiceTokensByEndpointGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTokensByEndpointGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTokensByEndpointGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTokensByEndpointGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_endpoint_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_endpoint_get_parameters.go new file mode 100644 index 00000000000..79f625bb7b1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_endpoint_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTokensEndpointGetParams creates a new StorageServiceTokensEndpointGetParams object +// with the default values initialized. 
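// The OK payload above is the raw list of tokens owned by a single endpoint,
// so callers can fetch and inspect it directly (a sketch; the client value
// and the endpoint address are assumptions):
//
//	params := NewStorageServiceTokensByEndpointGetParams().WithEndpoint("192.168.100.11")
//	res, err := client.StorageServiceTokensByEndpointGet(params)
//	if err == nil {
//		fmt.Printf("endpoint owns %d tokens\n", len(res.GetPayload()))
//	}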
+func NewStorageServiceTokensEndpointGetParams() *StorageServiceTokensEndpointGetParams { + + return &StorageServiceTokensEndpointGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTokensEndpointGetParamsWithTimeout creates a new StorageServiceTokensEndpointGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTokensEndpointGetParamsWithTimeout(timeout time.Duration) *StorageServiceTokensEndpointGetParams { + + return &StorageServiceTokensEndpointGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTokensEndpointGetParamsWithContext creates a new StorageServiceTokensEndpointGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTokensEndpointGetParamsWithContext(ctx context.Context) *StorageServiceTokensEndpointGetParams { + + return &StorageServiceTokensEndpointGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceTokensEndpointGetParamsWithHTTPClient creates a new StorageServiceTokensEndpointGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTokensEndpointGetParamsWithHTTPClient(client *http.Client) *StorageServiceTokensEndpointGetParams { + + return &StorageServiceTokensEndpointGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTokensEndpointGetParams contains all the parameters to send to the API endpoint +for the storage service tokens endpoint get operation typically these are written to a http.Request +*/ +type StorageServiceTokensEndpointGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service tokens endpoint get params +func (o *StorageServiceTokensEndpointGetParams) WithTimeout(timeout time.Duration) *StorageServiceTokensEndpointGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service tokens endpoint get params +func (o *StorageServiceTokensEndpointGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service tokens endpoint get params +func (o *StorageServiceTokensEndpointGetParams) WithContext(ctx context.Context) *StorageServiceTokensEndpointGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service tokens endpoint get params +func (o *StorageServiceTokensEndpointGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service tokens endpoint get params +func (o *StorageServiceTokensEndpointGetParams) WithHTTPClient(client *http.Client) *StorageServiceTokensEndpointGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service tokens endpoint get params +func (o *StorageServiceTokensEndpointGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTokensEndpointGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_endpoint_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_endpoint_get_responses.go new file mode 100644 index 00000000000..73c60f941af --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_endpoint_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTokensEndpointGetReader is a Reader for the StorageServiceTokensEndpointGet structure. +type StorageServiceTokensEndpointGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTokensEndpointGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTokensEndpointGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTokensEndpointGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTokensEndpointGetOK creates a StorageServiceTokensEndpointGetOK with default headers values +func NewStorageServiceTokensEndpointGetOK() *StorageServiceTokensEndpointGetOK { + return &StorageServiceTokensEndpointGetOK{} +} + +/* +StorageServiceTokensEndpointGetOK handles this case with default header values. + +Success +*/ +type StorageServiceTokensEndpointGetOK struct { + Payload []*models.Mapper +} + +func (o *StorageServiceTokensEndpointGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *StorageServiceTokensEndpointGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceTokensEndpointGetDefault creates a StorageServiceTokensEndpointGetDefault with default headers values +func NewStorageServiceTokensEndpointGetDefault(code int) *StorageServiceTokensEndpointGetDefault { + return &StorageServiceTokensEndpointGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTokensEndpointGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceTokensEndpointGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service tokens endpoint get default response +func (o *StorageServiceTokensEndpointGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTokensEndpointGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTokensEndpointGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTokensEndpointGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_get_parameters.go new file mode 100644 index 00000000000..8a7efa2cdb3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTokensGetParams creates a new StorageServiceTokensGetParams object +// with the default values initialized. 
+func NewStorageServiceTokensGetParams() *StorageServiceTokensGetParams { + + return &StorageServiceTokensGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTokensGetParamsWithTimeout creates a new StorageServiceTokensGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTokensGetParamsWithTimeout(timeout time.Duration) *StorageServiceTokensGetParams { + + return &StorageServiceTokensGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTokensGetParamsWithContext creates a new StorageServiceTokensGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTokensGetParamsWithContext(ctx context.Context) *StorageServiceTokensGetParams { + + return &StorageServiceTokensGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceTokensGetParamsWithHTTPClient creates a new StorageServiceTokensGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTokensGetParamsWithHTTPClient(client *http.Client) *StorageServiceTokensGetParams { + + return &StorageServiceTokensGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTokensGetParams contains all the parameters to send to the API endpoint +for the storage service tokens get operation typically these are written to a http.Request +*/ +type StorageServiceTokensGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service tokens get params +func (o *StorageServiceTokensGetParams) WithTimeout(timeout time.Duration) *StorageServiceTokensGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service tokens get params +func (o *StorageServiceTokensGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service tokens get params +func (o *StorageServiceTokensGetParams) WithContext(ctx context.Context) *StorageServiceTokensGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service tokens get params +func (o *StorageServiceTokensGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service tokens get params +func (o *StorageServiceTokensGetParams) WithHTTPClient(client *http.Client) *StorageServiceTokensGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service tokens get params +func (o *StorageServiceTokensGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTokensGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_get_responses.go new file mode 100644 index 00000000000..71c4dcd3e1d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tokens_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTokensGetReader is a Reader for the StorageServiceTokensGet structure. +type StorageServiceTokensGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTokensGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTokensGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTokensGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTokensGetOK creates a StorageServiceTokensGetOK with default headers values +func NewStorageServiceTokensGetOK() *StorageServiceTokensGetOK { + return &StorageServiceTokensGetOK{} +} + +/* +StorageServiceTokensGetOK handles this case with default header values. + +Success +*/ +type StorageServiceTokensGetOK struct { + Payload []string +} + +func (o *StorageServiceTokensGetOK) GetPayload() []string { + return o.Payload +} + +func (o *StorageServiceTokensGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceTokensGetDefault creates a StorageServiceTokensGetDefault with default headers values +func NewStorageServiceTokensGetDefault(code int) *StorageServiceTokensGetDefault { + return &StorageServiceTokensGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTokensGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceTokensGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service tokens get default response +func (o *StorageServiceTokensGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTokensGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTokensGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTokensGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_get_parameters.go new file mode 100644 index 00000000000..4cb08c1d81d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTombstoneFailureThresholdGetParams creates a new StorageServiceTombstoneFailureThresholdGetParams object +// with the default values initialized. 
+func NewStorageServiceTombstoneFailureThresholdGetParams() *StorageServiceTombstoneFailureThresholdGetParams { + + return &StorageServiceTombstoneFailureThresholdGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTombstoneFailureThresholdGetParamsWithTimeout creates a new StorageServiceTombstoneFailureThresholdGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTombstoneFailureThresholdGetParamsWithTimeout(timeout time.Duration) *StorageServiceTombstoneFailureThresholdGetParams { + + return &StorageServiceTombstoneFailureThresholdGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTombstoneFailureThresholdGetParamsWithContext creates a new StorageServiceTombstoneFailureThresholdGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTombstoneFailureThresholdGetParamsWithContext(ctx context.Context) *StorageServiceTombstoneFailureThresholdGetParams { + + return &StorageServiceTombstoneFailureThresholdGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceTombstoneFailureThresholdGetParamsWithHTTPClient creates a new StorageServiceTombstoneFailureThresholdGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTombstoneFailureThresholdGetParamsWithHTTPClient(client *http.Client) *StorageServiceTombstoneFailureThresholdGetParams { + + return &StorageServiceTombstoneFailureThresholdGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTombstoneFailureThresholdGetParams contains all the parameters to send to the API endpoint +for the storage service tombstone failure threshold get operation typically these are written to a http.Request +*/ +type StorageServiceTombstoneFailureThresholdGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service tombstone failure threshold get params +func (o *StorageServiceTombstoneFailureThresholdGetParams) WithTimeout(timeout time.Duration) *StorageServiceTombstoneFailureThresholdGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service tombstone failure threshold get params +func (o *StorageServiceTombstoneFailureThresholdGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service tombstone failure threshold get params +func (o *StorageServiceTombstoneFailureThresholdGetParams) WithContext(ctx context.Context) *StorageServiceTombstoneFailureThresholdGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service tombstone failure threshold get params +func (o *StorageServiceTombstoneFailureThresholdGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service tombstone failure threshold get params +func (o *StorageServiceTombstoneFailureThresholdGetParams) WithHTTPClient(client *http.Client) *StorageServiceTombstoneFailureThresholdGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service tombstone failure threshold get params +func (o *StorageServiceTombstoneFailureThresholdGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*StorageServiceTombstoneFailureThresholdGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_get_responses.go new file mode 100644 index 00000000000..0eaea9895c7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTombstoneFailureThresholdGetReader is a Reader for the StorageServiceTombstoneFailureThresholdGet structure. +type StorageServiceTombstoneFailureThresholdGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTombstoneFailureThresholdGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTombstoneFailureThresholdGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTombstoneFailureThresholdGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTombstoneFailureThresholdGetOK creates a StorageServiceTombstoneFailureThresholdGetOK with default headers values +func NewStorageServiceTombstoneFailureThresholdGetOK() *StorageServiceTombstoneFailureThresholdGetOK { + return &StorageServiceTombstoneFailureThresholdGetOK{} +} + +/* +StorageServiceTombstoneFailureThresholdGetOK handles this case with default header values. + +Success +*/ +type StorageServiceTombstoneFailureThresholdGetOK struct { + Payload int32 +} + +func (o *StorageServiceTombstoneFailureThresholdGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceTombstoneFailureThresholdGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceTombstoneFailureThresholdGetDefault creates a StorageServiceTombstoneFailureThresholdGetDefault with default headers values +func NewStorageServiceTombstoneFailureThresholdGetDefault(code int) *StorageServiceTombstoneFailureThresholdGetDefault { + return &StorageServiceTombstoneFailureThresholdGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTombstoneFailureThresholdGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceTombstoneFailureThresholdGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service tombstone failure threshold get default response +func (o *StorageServiceTombstoneFailureThresholdGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTombstoneFailureThresholdGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTombstoneFailureThresholdGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTombstoneFailureThresholdGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_post_parameters.go new file mode 100644 index 00000000000..3084d0f871e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceTombstoneFailureThresholdPostParams creates a new StorageServiceTombstoneFailureThresholdPostParams object +// with the default values initialized. 
+func NewStorageServiceTombstoneFailureThresholdPostParams() *StorageServiceTombstoneFailureThresholdPostParams { + var () + return &StorageServiceTombstoneFailureThresholdPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTombstoneFailureThresholdPostParamsWithTimeout creates a new StorageServiceTombstoneFailureThresholdPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTombstoneFailureThresholdPostParamsWithTimeout(timeout time.Duration) *StorageServiceTombstoneFailureThresholdPostParams { + var () + return &StorageServiceTombstoneFailureThresholdPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTombstoneFailureThresholdPostParamsWithContext creates a new StorageServiceTombstoneFailureThresholdPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTombstoneFailureThresholdPostParamsWithContext(ctx context.Context) *StorageServiceTombstoneFailureThresholdPostParams { + var () + return &StorageServiceTombstoneFailureThresholdPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceTombstoneFailureThresholdPostParamsWithHTTPClient creates a new StorageServiceTombstoneFailureThresholdPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTombstoneFailureThresholdPostParamsWithHTTPClient(client *http.Client) *StorageServiceTombstoneFailureThresholdPostParams { + var () + return &StorageServiceTombstoneFailureThresholdPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTombstoneFailureThresholdPostParams contains all the parameters to send to the API endpoint +for the storage service tombstone failure threshold post operation typically these are written to a http.Request +*/ +type StorageServiceTombstoneFailureThresholdPostParams struct { + + /*TombstoneDebugThreshold + tombstone debug threshold + + */ + TombstoneDebugThreshold int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service tombstone failure threshold post params +func (o *StorageServiceTombstoneFailureThresholdPostParams) WithTimeout(timeout time.Duration) *StorageServiceTombstoneFailureThresholdPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service tombstone failure threshold post params +func (o *StorageServiceTombstoneFailureThresholdPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service tombstone failure threshold post params +func (o *StorageServiceTombstoneFailureThresholdPostParams) WithContext(ctx context.Context) *StorageServiceTombstoneFailureThresholdPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service tombstone failure threshold post params +func (o *StorageServiceTombstoneFailureThresholdPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service tombstone failure threshold post params +func (o *StorageServiceTombstoneFailureThresholdPostParams) WithHTTPClient(client *http.Client) *StorageServiceTombstoneFailureThresholdPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service tombstone failure threshold post params +func (o 
*StorageServiceTombstoneFailureThresholdPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithTombstoneDebugThreshold adds the tombstoneDebugThreshold to the storage service tombstone failure threshold post params +func (o *StorageServiceTombstoneFailureThresholdPostParams) WithTombstoneDebugThreshold(tombstoneDebugThreshold int32) *StorageServiceTombstoneFailureThresholdPostParams { + o.SetTombstoneDebugThreshold(tombstoneDebugThreshold) + return o +} + +// SetTombstoneDebugThreshold adds the tombstoneDebugThreshold to the storage service tombstone failure threshold post params +func (o *StorageServiceTombstoneFailureThresholdPostParams) SetTombstoneDebugThreshold(tombstoneDebugThreshold int32) { + o.TombstoneDebugThreshold = tombstoneDebugThreshold +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTombstoneFailureThresholdPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param tombstone_debug_threshold + qrTombstoneDebugThreshold := o.TombstoneDebugThreshold + qTombstoneDebugThreshold := swag.FormatInt32(qrTombstoneDebugThreshold) + if qTombstoneDebugThreshold != "" { + if err := r.SetQueryParam("tombstone_debug_threshold", qTombstoneDebugThreshold); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_post_responses.go new file mode 100644 index 00000000000..a6637271231 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_failure_threshold_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTombstoneFailureThresholdPostReader is a Reader for the StorageServiceTombstoneFailureThresholdPost structure. +type StorageServiceTombstoneFailureThresholdPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *StorageServiceTombstoneFailureThresholdPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTombstoneFailureThresholdPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTombstoneFailureThresholdPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTombstoneFailureThresholdPostOK creates a StorageServiceTombstoneFailureThresholdPostOK with default headers values +func NewStorageServiceTombstoneFailureThresholdPostOK() *StorageServiceTombstoneFailureThresholdPostOK { + return &StorageServiceTombstoneFailureThresholdPostOK{} +} + +/* +StorageServiceTombstoneFailureThresholdPostOK handles this case with default header values. + +Success +*/ +type StorageServiceTombstoneFailureThresholdPostOK struct { +} + +func (o *StorageServiceTombstoneFailureThresholdPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceTombstoneFailureThresholdPostDefault creates a StorageServiceTombstoneFailureThresholdPostDefault with default headers values +func NewStorageServiceTombstoneFailureThresholdPostDefault(code int) *StorageServiceTombstoneFailureThresholdPostDefault { + return &StorageServiceTombstoneFailureThresholdPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTombstoneFailureThresholdPostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceTombstoneFailureThresholdPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service tombstone failure threshold post default response +func (o *StorageServiceTombstoneFailureThresholdPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTombstoneFailureThresholdPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTombstoneFailureThresholdPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTombstoneFailureThresholdPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_get_parameters.go new file mode 100644 index 00000000000..cf1bba0dd15 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTombstoneWarnThresholdGetParams creates a new StorageServiceTombstoneWarnThresholdGetParams object +// with the default values initialized. +func NewStorageServiceTombstoneWarnThresholdGetParams() *StorageServiceTombstoneWarnThresholdGetParams { + + return &StorageServiceTombstoneWarnThresholdGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTombstoneWarnThresholdGetParamsWithTimeout creates a new StorageServiceTombstoneWarnThresholdGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTombstoneWarnThresholdGetParamsWithTimeout(timeout time.Duration) *StorageServiceTombstoneWarnThresholdGetParams { + + return &StorageServiceTombstoneWarnThresholdGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTombstoneWarnThresholdGetParamsWithContext creates a new StorageServiceTombstoneWarnThresholdGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTombstoneWarnThresholdGetParamsWithContext(ctx context.Context) *StorageServiceTombstoneWarnThresholdGetParams { + + return &StorageServiceTombstoneWarnThresholdGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceTombstoneWarnThresholdGetParamsWithHTTPClient creates a new StorageServiceTombstoneWarnThresholdGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTombstoneWarnThresholdGetParamsWithHTTPClient(client *http.Client) *StorageServiceTombstoneWarnThresholdGetParams { + + return &StorageServiceTombstoneWarnThresholdGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTombstoneWarnThresholdGetParams contains all the parameters to send to the API endpoint +for the storage service tombstone warn threshold get operation typically these are written to a http.Request +*/ +type StorageServiceTombstoneWarnThresholdGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service tombstone warn threshold get params +func (o *StorageServiceTombstoneWarnThresholdGetParams) WithTimeout(timeout time.Duration) *StorageServiceTombstoneWarnThresholdGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service tombstone warn threshold get params +func (o *StorageServiceTombstoneWarnThresholdGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service tombstone warn threshold get params +func (o *StorageServiceTombstoneWarnThresholdGetParams) WithContext(ctx context.Context) *StorageServiceTombstoneWarnThresholdGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service tombstone warn threshold get params +func (o *StorageServiceTombstoneWarnThresholdGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service tombstone warn threshold get params +func (o *StorageServiceTombstoneWarnThresholdGetParams) WithHTTPClient(client *http.Client) *StorageServiceTombstoneWarnThresholdGetParams 
{ + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service tombstone warn threshold get params +func (o *StorageServiceTombstoneWarnThresholdGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTombstoneWarnThresholdGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_get_responses.go new file mode 100644 index 00000000000..1dc27ec9f08 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTombstoneWarnThresholdGetReader is a Reader for the StorageServiceTombstoneWarnThresholdGet structure. +type StorageServiceTombstoneWarnThresholdGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTombstoneWarnThresholdGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTombstoneWarnThresholdGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTombstoneWarnThresholdGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTombstoneWarnThresholdGetOK creates a StorageServiceTombstoneWarnThresholdGetOK with default headers values +func NewStorageServiceTombstoneWarnThresholdGetOK() *StorageServiceTombstoneWarnThresholdGetOK { + return &StorageServiceTombstoneWarnThresholdGetOK{} +} + +/* +StorageServiceTombstoneWarnThresholdGetOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceTombstoneWarnThresholdGetOK struct { + Payload int32 +} + +func (o *StorageServiceTombstoneWarnThresholdGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StorageServiceTombstoneWarnThresholdGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceTombstoneWarnThresholdGetDefault creates a StorageServiceTombstoneWarnThresholdGetDefault with default headers values +func NewStorageServiceTombstoneWarnThresholdGetDefault(code int) *StorageServiceTombstoneWarnThresholdGetDefault { + return &StorageServiceTombstoneWarnThresholdGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTombstoneWarnThresholdGetDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceTombstoneWarnThresholdGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service tombstone warn threshold get default response +func (o *StorageServiceTombstoneWarnThresholdGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTombstoneWarnThresholdGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTombstoneWarnThresholdGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTombstoneWarnThresholdGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_post_parameters.go new file mode 100644 index 00000000000..e1fcf893cad --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_post_parameters.go @@ -0,0 +1,141 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceTombstoneWarnThresholdPostParams creates a new StorageServiceTombstoneWarnThresholdPostParams object +// with the default values initialized. 
+func NewStorageServiceTombstoneWarnThresholdPostParams() *StorageServiceTombstoneWarnThresholdPostParams { + var () + return &StorageServiceTombstoneWarnThresholdPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTombstoneWarnThresholdPostParamsWithTimeout creates a new StorageServiceTombstoneWarnThresholdPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTombstoneWarnThresholdPostParamsWithTimeout(timeout time.Duration) *StorageServiceTombstoneWarnThresholdPostParams { + var () + return &StorageServiceTombstoneWarnThresholdPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTombstoneWarnThresholdPostParamsWithContext creates a new StorageServiceTombstoneWarnThresholdPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTombstoneWarnThresholdPostParamsWithContext(ctx context.Context) *StorageServiceTombstoneWarnThresholdPostParams { + var () + return &StorageServiceTombstoneWarnThresholdPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceTombstoneWarnThresholdPostParamsWithHTTPClient creates a new StorageServiceTombstoneWarnThresholdPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTombstoneWarnThresholdPostParamsWithHTTPClient(client *http.Client) *StorageServiceTombstoneWarnThresholdPostParams { + var () + return &StorageServiceTombstoneWarnThresholdPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTombstoneWarnThresholdPostParams contains all the parameters to send to the API endpoint +for the storage service tombstone warn threshold post operation typically these are written to a http.Request +*/ +type StorageServiceTombstoneWarnThresholdPostParams struct { + + /*DebugThreshold + tombstone debug threshold + + */ + DebugThreshold int32 + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) WithTimeout(timeout time.Duration) *StorageServiceTombstoneWarnThresholdPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) WithContext(ctx context.Context) *StorageServiceTombstoneWarnThresholdPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) WithHTTPClient(client *http.Client) *StorageServiceTombstoneWarnThresholdPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithDebugThreshold adds the 
debugThreshold to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) WithDebugThreshold(debugThreshold int32) *StorageServiceTombstoneWarnThresholdPostParams { + o.SetDebugThreshold(debugThreshold) + return o +} + +// SetDebugThreshold adds the debugThreshold to the storage service tombstone warn threshold post params +func (o *StorageServiceTombstoneWarnThresholdPostParams) SetDebugThreshold(debugThreshold int32) { + o.DebugThreshold = debugThreshold +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTombstoneWarnThresholdPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param debug_threshold + qrDebugThreshold := o.DebugThreshold + qDebugThreshold := swag.FormatInt32(qrDebugThreshold) + if qDebugThreshold != "" { + if err := r.SetQueryParam("debug_threshold", qDebugThreshold); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_post_responses.go new file mode 100644 index 00000000000..f11dcefb44a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_tombstone_warn_threshold_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTombstoneWarnThresholdPostReader is a Reader for the StorageServiceTombstoneWarnThresholdPost structure. +type StorageServiceTombstoneWarnThresholdPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTombstoneWarnThresholdPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTombstoneWarnThresholdPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTombstoneWarnThresholdPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTombstoneWarnThresholdPostOK creates a StorageServiceTombstoneWarnThresholdPostOK with default headers values +func NewStorageServiceTombstoneWarnThresholdPostOK() *StorageServiceTombstoneWarnThresholdPostOK { + return &StorageServiceTombstoneWarnThresholdPostOK{} +} + +/* +StorageServiceTombstoneWarnThresholdPostOK handles this case with default header values. 
+ +Success +*/ +type StorageServiceTombstoneWarnThresholdPostOK struct { +} + +func (o *StorageServiceTombstoneWarnThresholdPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceTombstoneWarnThresholdPostDefault creates a StorageServiceTombstoneWarnThresholdPostDefault with default headers values +func NewStorageServiceTombstoneWarnThresholdPostDefault(code int) *StorageServiceTombstoneWarnThresholdPostDefault { + return &StorageServiceTombstoneWarnThresholdPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTombstoneWarnThresholdPostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceTombstoneWarnThresholdPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service tombstone warn threshold post default response +func (o *StorageServiceTombstoneWarnThresholdPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTombstoneWarnThresholdPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTombstoneWarnThresholdPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTombstoneWarnThresholdPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_get_parameters.go new file mode 100644 index 00000000000..42995660741 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTraceProbabilityGetParams creates a new StorageServiceTraceProbabilityGetParams object +// with the default values initialized. 
+func NewStorageServiceTraceProbabilityGetParams() *StorageServiceTraceProbabilityGetParams { + + return &StorageServiceTraceProbabilityGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTraceProbabilityGetParamsWithTimeout creates a new StorageServiceTraceProbabilityGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTraceProbabilityGetParamsWithTimeout(timeout time.Duration) *StorageServiceTraceProbabilityGetParams { + + return &StorageServiceTraceProbabilityGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTraceProbabilityGetParamsWithContext creates a new StorageServiceTraceProbabilityGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTraceProbabilityGetParamsWithContext(ctx context.Context) *StorageServiceTraceProbabilityGetParams { + + return &StorageServiceTraceProbabilityGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceTraceProbabilityGetParamsWithHTTPClient creates a new StorageServiceTraceProbabilityGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTraceProbabilityGetParamsWithHTTPClient(client *http.Client) *StorageServiceTraceProbabilityGetParams { + + return &StorageServiceTraceProbabilityGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTraceProbabilityGetParams contains all the parameters to send to the API endpoint +for the storage service trace probability get operation typically these are written to a http.Request +*/ +type StorageServiceTraceProbabilityGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service trace probability get params +func (o *StorageServiceTraceProbabilityGetParams) WithTimeout(timeout time.Duration) *StorageServiceTraceProbabilityGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service trace probability get params +func (o *StorageServiceTraceProbabilityGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service trace probability get params +func (o *StorageServiceTraceProbabilityGetParams) WithContext(ctx context.Context) *StorageServiceTraceProbabilityGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service trace probability get params +func (o *StorageServiceTraceProbabilityGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service trace probability get params +func (o *StorageServiceTraceProbabilityGetParams) WithHTTPClient(client *http.Client) *StorageServiceTraceProbabilityGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service trace probability get params +func (o *StorageServiceTraceProbabilityGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTraceProbabilityGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_get_responses.go new file mode 100644 index 00000000000..90e872d3e87 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTraceProbabilityGetReader is a Reader for the StorageServiceTraceProbabilityGet structure. +type StorageServiceTraceProbabilityGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTraceProbabilityGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTraceProbabilityGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTraceProbabilityGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTraceProbabilityGetOK creates a StorageServiceTraceProbabilityGetOK with default headers values +func NewStorageServiceTraceProbabilityGetOK() *StorageServiceTraceProbabilityGetOK { + return &StorageServiceTraceProbabilityGetOK{} +} + +/* +StorageServiceTraceProbabilityGetOK handles this case with default header values. + +Success +*/ +type StorageServiceTraceProbabilityGetOK struct { + Payload interface{} +} + +func (o *StorageServiceTraceProbabilityGetOK) GetPayload() interface{} { + return o.Payload +} + +func (o *StorageServiceTraceProbabilityGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceTraceProbabilityGetDefault creates a StorageServiceTraceProbabilityGetDefault with default headers values +func NewStorageServiceTraceProbabilityGetDefault(code int) *StorageServiceTraceProbabilityGetDefault { + return &StorageServiceTraceProbabilityGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTraceProbabilityGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceTraceProbabilityGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service trace probability get default response +func (o *StorageServiceTraceProbabilityGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTraceProbabilityGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTraceProbabilityGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTraceProbabilityGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_post_parameters.go new file mode 100644 index 00000000000..f0fb066b1d8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTraceProbabilityPostParams creates a new StorageServiceTraceProbabilityPostParams object +// with the default values initialized. 
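+//
+// Usage sketch, assuming the conventional go-swagger Client method
+// StorageServiceTraceProbabilityPost(params) (*StorageServiceTraceProbabilityPostOK, error)
+// and a configured `client` (both are assumptions, for illustration only):
+//
+//	params := NewStorageServiceTraceProbabilityPostParams().WithProbability("0.1")
+//	if _, err := client.StorageServiceTraceProbabilityPost(params); err != nil {
+//		return err // non-2xx codes surface as *StorageServiceTraceProbabilityPostDefault
+//	}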
+func NewStorageServiceTraceProbabilityPostParams() *StorageServiceTraceProbabilityPostParams { + var () + return &StorageServiceTraceProbabilityPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTraceProbabilityPostParamsWithTimeout creates a new StorageServiceTraceProbabilityPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTraceProbabilityPostParamsWithTimeout(timeout time.Duration) *StorageServiceTraceProbabilityPostParams { + var () + return &StorageServiceTraceProbabilityPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTraceProbabilityPostParamsWithContext creates a new StorageServiceTraceProbabilityPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTraceProbabilityPostParamsWithContext(ctx context.Context) *StorageServiceTraceProbabilityPostParams { + var () + return &StorageServiceTraceProbabilityPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceTraceProbabilityPostParamsWithHTTPClient creates a new StorageServiceTraceProbabilityPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTraceProbabilityPostParamsWithHTTPClient(client *http.Client) *StorageServiceTraceProbabilityPostParams { + var () + return &StorageServiceTraceProbabilityPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTraceProbabilityPostParams contains all the parameters to send to the API endpoint +for the storage service trace probability post operation typically these are written to a http.Request +*/ +type StorageServiceTraceProbabilityPostParams struct { + + /*Probability + [0,1] will enable tracing on a partial number of requests with the provided probability. 
0 will disable tracing and 1 will enable tracing for all requests (which might severely cripple the system) + + */ + Probability string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) WithTimeout(timeout time.Duration) *StorageServiceTraceProbabilityPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) WithContext(ctx context.Context) *StorageServiceTraceProbabilityPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) WithHTTPClient(client *http.Client) *StorageServiceTraceProbabilityPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithProbability adds the probability to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) WithProbability(probability string) *StorageServiceTraceProbabilityPostParams { + o.SetProbability(probability) + return o +} + +// SetProbability adds the probability to the storage service trace probability post params +func (o *StorageServiceTraceProbabilityPostParams) SetProbability(probability string) { + o.Probability = probability +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTraceProbabilityPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param probability + qrProbability := o.Probability + qProbability := qrProbability + if qProbability != "" { + if err := r.SetQueryParam("probability", qProbability); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_post_responses.go new file mode 100644 index 00000000000..ac922c2adc8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_trace_probability_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTraceProbabilityPostReader is a Reader for the StorageServiceTraceProbabilityPost structure. +type StorageServiceTraceProbabilityPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceTraceProbabilityPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTraceProbabilityPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTraceProbabilityPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTraceProbabilityPostOK creates a StorageServiceTraceProbabilityPostOK with default headers values +func NewStorageServiceTraceProbabilityPostOK() *StorageServiceTraceProbabilityPostOK { + return &StorageServiceTraceProbabilityPostOK{} +} + +/* +StorageServiceTraceProbabilityPostOK handles this case with default header values. + +Success +*/ +type StorageServiceTraceProbabilityPostOK struct { +} + +func (o *StorageServiceTraceProbabilityPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceTraceProbabilityPostDefault creates a StorageServiceTraceProbabilityPostDefault with default headers values +func NewStorageServiceTraceProbabilityPostDefault(code int) *StorageServiceTraceProbabilityPostDefault { + return &StorageServiceTraceProbabilityPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTraceProbabilityPostDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceTraceProbabilityPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service trace probability post default response +func (o *StorageServiceTraceProbabilityPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTraceProbabilityPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTraceProbabilityPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTraceProbabilityPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_truncate_by_keyspace_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_truncate_by_keyspace_post_parameters.go new file mode 100644 index 00000000000..3cdc0cb7843 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_truncate_by_keyspace_post_parameters.go @@ -0,0 +1,168 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceTruncateByKeyspacePostParams creates a new StorageServiceTruncateByKeyspacePostParams object +// with the default values initialized. 
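+//
+// Usage sketch, assuming the conventional go-swagger Client method
+// StorageServiceTruncateByKeyspacePost and a configured `client` (both assumptions):
+//
+//	cf := "my_table"
+//	params := NewStorageServiceTruncateByKeyspacePostParams().
+//		WithKeyspace("my_keyspace").
+//		WithCf(&cf) // Cf is optional; when nil, no cf query param is sent
+//	_, err := client.StorageServiceTruncateByKeyspacePost(params)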
+func NewStorageServiceTruncateByKeyspacePostParams() *StorageServiceTruncateByKeyspacePostParams { + var () + return &StorageServiceTruncateByKeyspacePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceTruncateByKeyspacePostParamsWithTimeout creates a new StorageServiceTruncateByKeyspacePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceTruncateByKeyspacePostParamsWithTimeout(timeout time.Duration) *StorageServiceTruncateByKeyspacePostParams { + var () + return &StorageServiceTruncateByKeyspacePostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceTruncateByKeyspacePostParamsWithContext creates a new StorageServiceTruncateByKeyspacePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceTruncateByKeyspacePostParamsWithContext(ctx context.Context) *StorageServiceTruncateByKeyspacePostParams { + var () + return &StorageServiceTruncateByKeyspacePostParams{ + + Context: ctx, + } +} + +// NewStorageServiceTruncateByKeyspacePostParamsWithHTTPClient creates a new StorageServiceTruncateByKeyspacePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceTruncateByKeyspacePostParamsWithHTTPClient(client *http.Client) *StorageServiceTruncateByKeyspacePostParams { + var () + return &StorageServiceTruncateByKeyspacePostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceTruncateByKeyspacePostParams contains all the parameters to send to the API endpoint +for the storage service truncate by keyspace post operation typically these are written to a http.Request +*/ +type StorageServiceTruncateByKeyspacePostParams struct { + + /*Cf + Column family name + + */ + Cf *string + /*Keyspace + The keyspace + + */ + Keyspace string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) WithTimeout(timeout time.Duration) *StorageServiceTruncateByKeyspacePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) WithContext(ctx context.Context) *StorageServiceTruncateByKeyspacePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) WithHTTPClient(client *http.Client) *StorageServiceTruncateByKeyspacePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithCf adds the cf to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) WithCf(cf *string) 
*StorageServiceTruncateByKeyspacePostParams { + o.SetCf(cf) + return o +} + +// SetCf adds the cf to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) SetCf(cf *string) { + o.Cf = cf +} + +// WithKeyspace adds the keyspace to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) WithKeyspace(keyspace string) *StorageServiceTruncateByKeyspacePostParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service truncate by keyspace post params +func (o *StorageServiceTruncateByKeyspacePostParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceTruncateByKeyspacePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if o.Cf != nil { + + // query param cf + var qrCf string + if o.Cf != nil { + qrCf = *o.Cf + } + qCf := qrCf + if qCf != "" { + if err := r.SetQueryParam("cf", qCf); err != nil { + return err + } + } + + } + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_truncate_by_keyspace_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_truncate_by_keyspace_post_responses.go new file mode 100644 index 00000000000..a6def4a087e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_truncate_by_keyspace_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceTruncateByKeyspacePostReader is a Reader for the StorageServiceTruncateByKeyspacePost structure. +type StorageServiceTruncateByKeyspacePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
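+//
+// A 200 decodes into StorageServiceTruncateByKeyspacePostOK; any other status decodes
+// into the Default variant, which is returned as a value for 2xx codes and as an error
+// otherwise.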
+func (o *StorageServiceTruncateByKeyspacePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceTruncateByKeyspacePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceTruncateByKeyspacePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceTruncateByKeyspacePostOK creates a StorageServiceTruncateByKeyspacePostOK with default headers values +func NewStorageServiceTruncateByKeyspacePostOK() *StorageServiceTruncateByKeyspacePostOK { + return &StorageServiceTruncateByKeyspacePostOK{} +} + +/* +StorageServiceTruncateByKeyspacePostOK handles this case with default header values. + +Success +*/ +type StorageServiceTruncateByKeyspacePostOK struct { +} + +func (o *StorageServiceTruncateByKeyspacePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceTruncateByKeyspacePostDefault creates a StorageServiceTruncateByKeyspacePostDefault with default headers values +func NewStorageServiceTruncateByKeyspacePostDefault(code int) *StorageServiceTruncateByKeyspacePostDefault { + return &StorageServiceTruncateByKeyspacePostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceTruncateByKeyspacePostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceTruncateByKeyspacePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service truncate by keyspace post default response +func (o *StorageServiceTruncateByKeyspacePostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceTruncateByKeyspacePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceTruncateByKeyspacePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceTruncateByKeyspacePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_update_snitch_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_update_snitch_post_parameters.go new file mode 100644 index 00000000000..2a3f2a59da4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_update_snitch_post_parameters.go @@ -0,0 +1,262 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// NewStorageServiceUpdateSnitchPostParams creates a new StorageServiceUpdateSnitchPostParams object +// with the default values initialized. +func NewStorageServiceUpdateSnitchPostParams() *StorageServiceUpdateSnitchPostParams { + var () + return &StorageServiceUpdateSnitchPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceUpdateSnitchPostParamsWithTimeout creates a new StorageServiceUpdateSnitchPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceUpdateSnitchPostParamsWithTimeout(timeout time.Duration) *StorageServiceUpdateSnitchPostParams { + var () + return &StorageServiceUpdateSnitchPostParams{ + + timeout: timeout, + } +} + +// NewStorageServiceUpdateSnitchPostParamsWithContext creates a new StorageServiceUpdateSnitchPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceUpdateSnitchPostParamsWithContext(ctx context.Context) *StorageServiceUpdateSnitchPostParams { + var () + return &StorageServiceUpdateSnitchPostParams{ + + Context: ctx, + } +} + +// NewStorageServiceUpdateSnitchPostParamsWithHTTPClient creates a new StorageServiceUpdateSnitchPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceUpdateSnitchPostParamsWithHTTPClient(client *http.Client) *StorageServiceUpdateSnitchPostParams { + var () + return &StorageServiceUpdateSnitchPostParams{ + HTTPClient: client, + } +} + +/* +StorageServiceUpdateSnitchPostParams contains all the parameters to send to the API endpoint +for the storage service update snitch post operation typically these are written to a http.Request +*/ +type StorageServiceUpdateSnitchPostParams struct { + + /*Dynamic + When true, the dynamic snitch is used + + */ + Dynamic bool + /*DynamicBadnessThreshold + Dynamic badness threshold (default 0.0) + + */ + DynamicBadnessThreshold *string + /*DynamicResetInterval + integer, in ms (default 600,000) + + */ + DynamicResetInterval *int32 + /*DynamicUpdateInterval + integer, in ms (default 100) + + */ + DynamicUpdateInterval *int32 + /*EpSnitchClassName + The canonical path name for a class implementing IEndpointSnitch + + */ + EpSnitchClassName string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithTimeout(timeout time.Duration) *StorageServiceUpdateSnitchPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithContext(ctx context.Context) *StorageServiceUpdateSnitchPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetContext(ctx context.Context) { +
o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithHTTPClient(client *http.Client) *StorageServiceUpdateSnitchPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithDynamic adds the dynamic to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithDynamic(dynamic bool) *StorageServiceUpdateSnitchPostParams { + o.SetDynamic(dynamic) + return o +} + +// SetDynamic adds the dynamic to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetDynamic(dynamic bool) { + o.Dynamic = dynamic +} + +// WithDynamicBadnessThreshold adds the dynamicBadnessThreshold to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithDynamicBadnessThreshold(dynamicBadnessThreshold *string) *StorageServiceUpdateSnitchPostParams { + o.SetDynamicBadnessThreshold(dynamicBadnessThreshold) + return o +} + +// SetDynamicBadnessThreshold adds the dynamicBadnessThreshold to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetDynamicBadnessThreshold(dynamicBadnessThreshold *string) { + o.DynamicBadnessThreshold = dynamicBadnessThreshold +} + +// WithDynamicResetInterval adds the dynamicResetInterval to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithDynamicResetInterval(dynamicResetInterval *int32) *StorageServiceUpdateSnitchPostParams { + o.SetDynamicResetInterval(dynamicResetInterval) + return o +} + +// SetDynamicResetInterval adds the dynamicResetInterval to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetDynamicResetInterval(dynamicResetInterval *int32) { + o.DynamicResetInterval = dynamicResetInterval +} + +// WithDynamicUpdateInterval adds the dynamicUpdateInterval to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithDynamicUpdateInterval(dynamicUpdateInterval *int32) *StorageServiceUpdateSnitchPostParams { + o.SetDynamicUpdateInterval(dynamicUpdateInterval) + return o +} + +// SetDynamicUpdateInterval adds the dynamicUpdateInterval to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetDynamicUpdateInterval(dynamicUpdateInterval *int32) { + o.DynamicUpdateInterval = dynamicUpdateInterval +} + +// WithEpSnitchClassName adds the epSnitchClassName to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) WithEpSnitchClassName(epSnitchClassName string) *StorageServiceUpdateSnitchPostParams { + o.SetEpSnitchClassName(epSnitchClassName) + return o +} + +// SetEpSnitchClassName adds the epSnitchClassName to the storage service update snitch post params +func (o *StorageServiceUpdateSnitchPostParams) SetEpSnitchClassName(epSnitchClassName string) { + o.EpSnitchClassName = epSnitchClassName +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceUpdateSnitchPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param dynamic + qrDynamic := o.Dynamic + qDynamic := 
swag.FormatBool(qrDynamic) + if qDynamic != "" { + if err := r.SetQueryParam("dynamic", qDynamic); err != nil { + return err + } + } + + if o.DynamicBadnessThreshold != nil { + + // query param dynamic_badness_threshold + var qrDynamicBadnessThreshold string + if o.DynamicBadnessThreshold != nil { + qrDynamicBadnessThreshold = *o.DynamicBadnessThreshold + } + qDynamicBadnessThreshold := qrDynamicBadnessThreshold + if qDynamicBadnessThreshold != "" { + if err := r.SetQueryParam("dynamic_badness_threshold", qDynamicBadnessThreshold); err != nil { + return err + } + } + + } + + if o.DynamicResetInterval != nil { + + // query param dynamic_reset_interval + var qrDynamicResetInterval int32 + if o.DynamicResetInterval != nil { + qrDynamicResetInterval = *o.DynamicResetInterval + } + qDynamicResetInterval := swag.FormatInt32(qrDynamicResetInterval) + if qDynamicResetInterval != "" { + if err := r.SetQueryParam("dynamic_reset_interval", qDynamicResetInterval); err != nil { + return err + } + } + + } + + if o.DynamicUpdateInterval != nil { + + // query param dynamic_update_interval + var qrDynamicUpdateInterval int32 + if o.DynamicUpdateInterval != nil { + qrDynamicUpdateInterval = *o.DynamicUpdateInterval + } + qDynamicUpdateInterval := swag.FormatInt32(qrDynamicUpdateInterval) + if qDynamicUpdateInterval != "" { + if err := r.SetQueryParam("dynamic_update_interval", qDynamicUpdateInterval); err != nil { + return err + } + } + + } + + // query param ep_snitch_class_name + qrEpSnitchClassName := o.EpSnitchClassName + qEpSnitchClassName := qrEpSnitchClassName + if qEpSnitchClassName != "" { + if err := r.SetQueryParam("ep_snitch_class_name", qEpSnitchClassName); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_update_snitch_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_update_snitch_post_responses.go new file mode 100644 index 00000000000..d9da7488006 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_update_snitch_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceUpdateSnitchPostReader is a Reader for the StorageServiceUpdateSnitchPost structure. +type StorageServiceUpdateSnitchPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
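+//
+// As in the other generated readers, only a 200 yields StorageServiceUpdateSnitchPostOK;
+// any other status is read into the Default response, whose Error() renders as
+// "agent [HTTP <code>] <message>".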
+func (o *StorageServiceUpdateSnitchPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceUpdateSnitchPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceUpdateSnitchPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceUpdateSnitchPostOK creates a StorageServiceUpdateSnitchPostOK with default headers values +func NewStorageServiceUpdateSnitchPostOK() *StorageServiceUpdateSnitchPostOK { + return &StorageServiceUpdateSnitchPostOK{} +} + +/* +StorageServiceUpdateSnitchPostOK handles this case with default header values. + +Success +*/ +type StorageServiceUpdateSnitchPostOK struct { +} + +func (o *StorageServiceUpdateSnitchPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewStorageServiceUpdateSnitchPostDefault creates a StorageServiceUpdateSnitchPostDefault with default headers values +func NewStorageServiceUpdateSnitchPostDefault(code int) *StorageServiceUpdateSnitchPostDefault { + return &StorageServiceUpdateSnitchPostDefault{ + _statusCode: code, + } +} + +/* +StorageServiceUpdateSnitchPostDefault handles this case with default header values. + +internal server error +*/ +type StorageServiceUpdateSnitchPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service update snitch post default response +func (o *StorageServiceUpdateSnitchPostDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceUpdateSnitchPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceUpdateSnitchPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceUpdateSnitchPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_view_build_statuses_by_keyspace_and_view_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_view_build_statuses_by_keyspace_and_view_get_parameters.go new file mode 100644 index 00000000000..01bd82b4720 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_view_build_statuses_by_keyspace_and_view_get_parameters.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParams creates a new StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams object +// with the default values initialized. +func NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParams() *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + var () + return &StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParamsWithTimeout creates a new StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParamsWithTimeout(timeout time.Duration) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + var () + return &StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams{ + + timeout: timeout, + } +} + +// NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParamsWithContext creates a new StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParamsWithContext(ctx context.Context) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + var () + return &StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams{ + + Context: ctx, + } +} + +// NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParamsWithHTTPClient creates a new StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetParamsWithHTTPClient(client *http.Client) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + var () + return &StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams{ + HTTPClient: client, + } +} + +/* +StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams contains all the parameters to send to the API endpoint +for the storage service view build statuses by keyspace and view get operation typically these are written to a http.Request +*/ +type StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams struct { + + /*Keyspace + The keyspace + + */ + Keyspace string + /*View + View name + + */ + View string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) WithTimeout(timeout time.Duration) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) WithContext(ctx context.Context) 
*StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) WithHTTPClient(client *http.Client) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithKeyspace adds the keyspace to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) WithKeyspace(keyspace string) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + o.SetKeyspace(keyspace) + return o +} + +// SetKeyspace adds the keyspace to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) SetKeyspace(keyspace string) { + o.Keyspace = keyspace +} + +// WithView adds the view to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) WithView(view string) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams { + o.SetView(view) + return o +} + +// SetView adds the view to the storage service view build statuses by keyspace and view get params +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) SetView(view string) { + o.View = view +} + +// WriteToRequest writes these params to a swagger request +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param keyspace + if err := r.SetPathParam("keyspace", o.Keyspace); err != nil { + return err + } + + // path param view + if err := r.SetPathParam("view", o.View); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_view_build_statuses_by_keyspace_and_view_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_view_build_statuses_by_keyspace_and_view_get_responses.go new file mode 100644 index 00000000000..6a9f27ec528 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/storage_service_view_build_statuses_by_keyspace_and_view_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StorageServiceViewBuildStatusesByKeyspaceAndViewGetReader is a Reader for the StorageServiceViewBuildStatusesByKeyspaceAndViewGet structure. +type StorageServiceViewBuildStatusesByKeyspaceAndViewGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetOK creates a StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK with default headers values +func NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetOK() *StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK { + return &StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK{} +} + +/* +StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK handles this case with default header values. + +Success +*/ +type StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK struct { + Payload []*models.Mapper +} + +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK) GetPayload() []*models.Mapper { + return o.Payload +} + +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault creates a StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault with default headers values +func NewStorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault(code int) *StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault { + return &StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault{ + _statusCode: code, + } +} + +/* +StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the storage service view build statuses by keyspace and view get default response +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault) Code() int { + return o._statusCode +} + +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StorageServiceViewBuildStatusesByKeyspaceAndViewGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_get_parameters.go new file mode 100644 index 00000000000..57bf515d034 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStreamManagerGetParams creates a new StreamManagerGetParams object +// with the default values initialized. 
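+//
+// Usage sketch, assuming the conventional go-swagger Client method StreamManagerGet
+// and a configured `client` (both assumptions); the OK payload is []*models.StreamState:
+//
+//	ok, err := client.StreamManagerGet(NewStreamManagerGetParams().WithTimeout(5 * time.Second))
+//	if err != nil {
+//		return err
+//	}
+//	for _, state := range ok.Payload {
+//		_ = state // inspect active stream sessions
+//	}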
+func NewStreamManagerGetParams() *StreamManagerGetParams { + + return &StreamManagerGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStreamManagerGetParamsWithTimeout creates a new StreamManagerGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStreamManagerGetParamsWithTimeout(timeout time.Duration) *StreamManagerGetParams { + + return &StreamManagerGetParams{ + + timeout: timeout, + } +} + +// NewStreamManagerGetParamsWithContext creates a new StreamManagerGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStreamManagerGetParamsWithContext(ctx context.Context) *StreamManagerGetParams { + + return &StreamManagerGetParams{ + + Context: ctx, + } +} + +// NewStreamManagerGetParamsWithHTTPClient creates a new StreamManagerGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStreamManagerGetParamsWithHTTPClient(client *http.Client) *StreamManagerGetParams { + + return &StreamManagerGetParams{ + HTTPClient: client, + } +} + +/* +StreamManagerGetParams contains all the parameters to send to the API endpoint +for the stream manager get operation typically these are written to a http.Request +*/ +type StreamManagerGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the stream manager get params +func (o *StreamManagerGetParams) WithTimeout(timeout time.Duration) *StreamManagerGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the stream manager get params +func (o *StreamManagerGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the stream manager get params +func (o *StreamManagerGetParams) WithContext(ctx context.Context) *StreamManagerGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the stream manager get params +func (o *StreamManagerGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the stream manager get params +func (o *StreamManagerGetParams) WithHTTPClient(client *http.Client) *StreamManagerGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the stream manager get params +func (o *StreamManagerGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StreamManagerGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_get_responses.go new file mode 100644 index 00000000000..3ac7909dbd6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StreamManagerGetReader is a Reader for the StreamManagerGet structure. +type StreamManagerGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StreamManagerGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStreamManagerGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStreamManagerGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStreamManagerGetOK creates a StreamManagerGetOK with default headers values +func NewStreamManagerGetOK() *StreamManagerGetOK { + return &StreamManagerGetOK{} +} + +/* +StreamManagerGetOK handles this case with default header values. + +Success +*/ +type StreamManagerGetOK struct { + Payload []*models.StreamState +} + +func (o *StreamManagerGetOK) GetPayload() []*models.StreamState { + return o.Payload +} + +func (o *StreamManagerGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStreamManagerGetDefault creates a StreamManagerGetDefault with default headers values +func NewStreamManagerGetDefault(code int) *StreamManagerGetDefault { + return &StreamManagerGetDefault{ + _statusCode: code, + } +} + +/* +StreamManagerGetDefault handles this case with default header values. + +internal server error +*/ +type StreamManagerGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the stream manager get default response +func (o *StreamManagerGetDefault) Code() int { + return o._statusCode +} + +func (o *StreamManagerGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StreamManagerGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StreamManagerGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_by_peer_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_by_peer_get_parameters.go new file mode 100644 index 00000000000..8800970c185 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_by_peer_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStreamManagerMetricsIncomingByPeerGetParams creates a new StreamManagerMetricsIncomingByPeerGetParams object +// with the default values initialized. +func NewStreamManagerMetricsIncomingByPeerGetParams() *StreamManagerMetricsIncomingByPeerGetParams { + var () + return &StreamManagerMetricsIncomingByPeerGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStreamManagerMetricsIncomingByPeerGetParamsWithTimeout creates a new StreamManagerMetricsIncomingByPeerGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStreamManagerMetricsIncomingByPeerGetParamsWithTimeout(timeout time.Duration) *StreamManagerMetricsIncomingByPeerGetParams { + var () + return &StreamManagerMetricsIncomingByPeerGetParams{ + + timeout: timeout, + } +} + +// NewStreamManagerMetricsIncomingByPeerGetParamsWithContext creates a new StreamManagerMetricsIncomingByPeerGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStreamManagerMetricsIncomingByPeerGetParamsWithContext(ctx context.Context) *StreamManagerMetricsIncomingByPeerGetParams { + var () + return &StreamManagerMetricsIncomingByPeerGetParams{ + + Context: ctx, + } +} + +// NewStreamManagerMetricsIncomingByPeerGetParamsWithHTTPClient creates a new StreamManagerMetricsIncomingByPeerGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStreamManagerMetricsIncomingByPeerGetParamsWithHTTPClient(client *http.Client) *StreamManagerMetricsIncomingByPeerGetParams { + var () + return &StreamManagerMetricsIncomingByPeerGetParams{ + HTTPClient: client, + } +} + +/* +StreamManagerMetricsIncomingByPeerGetParams contains all the parameters to send to the API endpoint +for the stream manager metrics incoming by peer get operation typically these are written to a http.Request +*/ +type StreamManagerMetricsIncomingByPeerGetParams struct { + + /*Peer + The stream peer + + */ + Peer string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the stream manager metrics incoming by peer get params +func (o *StreamManagerMetricsIncomingByPeerGetParams) WithTimeout(timeout time.Duration) *StreamManagerMetricsIncomingByPeerGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the stream manager metrics incoming by peer get params +func (o *StreamManagerMetricsIncomingByPeerGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the stream manager metrics incoming by peer get params +func (o *StreamManagerMetricsIncomingByPeerGetParams) WithContext(ctx context.Context) *StreamManagerMetricsIncomingByPeerGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the stream manager metrics incoming by peer get params +func (o *StreamManagerMetricsIncomingByPeerGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the stream manager metrics incoming by peer get params +func (o 
*StreamManagerMetricsIncomingByPeerGetParams) WithHTTPClient(client *http.Client) *StreamManagerMetricsIncomingByPeerGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the stream manager metrics incoming by peer get params +func (o *StreamManagerMetricsIncomingByPeerGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPeer adds the peer to the stream manager metrics incoming by peer get params +func (o *StreamManagerMetricsIncomingByPeerGetParams) WithPeer(peer string) *StreamManagerMetricsIncomingByPeerGetParams { + o.SetPeer(peer) + return o +} + +// SetPeer adds the peer to the stream manager metrics incoming by peer get params +func (o *StreamManagerMetricsIncomingByPeerGetParams) SetPeer(peer string) { + o.Peer = peer +} + +// WriteToRequest writes these params to a swagger request +func (o *StreamManagerMetricsIncomingByPeerGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param peer + if err := r.SetPathParam("peer", o.Peer); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_by_peer_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_by_peer_get_responses.go new file mode 100644 index 00000000000..de5e5679e73 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_by_peer_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StreamManagerMetricsIncomingByPeerGetReader is a Reader for the StreamManagerMetricsIncomingByPeerGet structure. +type StreamManagerMetricsIncomingByPeerGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StreamManagerMetricsIncomingByPeerGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStreamManagerMetricsIncomingByPeerGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStreamManagerMetricsIncomingByPeerGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStreamManagerMetricsIncomingByPeerGetOK creates a StreamManagerMetricsIncomingByPeerGetOK with default headers values +func NewStreamManagerMetricsIncomingByPeerGetOK() *StreamManagerMetricsIncomingByPeerGetOK { + return &StreamManagerMetricsIncomingByPeerGetOK{} +} + +/* +StreamManagerMetricsIncomingByPeerGetOK handles this case with default header values. 
+ +Success +*/ +type StreamManagerMetricsIncomingByPeerGetOK struct { + Payload int32 +} + +func (o *StreamManagerMetricsIncomingByPeerGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StreamManagerMetricsIncomingByPeerGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStreamManagerMetricsIncomingByPeerGetDefault creates a StreamManagerMetricsIncomingByPeerGetDefault with default headers values +func NewStreamManagerMetricsIncomingByPeerGetDefault(code int) *StreamManagerMetricsIncomingByPeerGetDefault { + return &StreamManagerMetricsIncomingByPeerGetDefault{ + _statusCode: code, + } +} + +/* +StreamManagerMetricsIncomingByPeerGetDefault handles this case with default header values. + +internal server error +*/ +type StreamManagerMetricsIncomingByPeerGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the stream manager metrics incoming by peer get default response +func (o *StreamManagerMetricsIncomingByPeerGetDefault) Code() int { + return o._statusCode +} + +func (o *StreamManagerMetricsIncomingByPeerGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StreamManagerMetricsIncomingByPeerGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StreamManagerMetricsIncomingByPeerGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_get_parameters.go new file mode 100644 index 00000000000..8bafb284fd9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStreamManagerMetricsIncomingGetParams creates a new StreamManagerMetricsIncomingGetParams object +// with the default values initialized. 
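Taken together, the constructors and With*/Set* methods above form the usual go-swagger fluent builder for request parameters. A call site might assemble the by-peer params like this (a sketch only; the peer address is an arbitrary example and the surrounding client wiring is not part of this diff):

    params := operations.NewStreamManagerMetricsIncomingByPeerGetParams().
        WithTimeout(30 * time.Second).
        WithPeer("10.4.0.1") // example peer; becomes the {peer} path parameter via WriteToRequest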
+func NewStreamManagerMetricsIncomingGetParams() *StreamManagerMetricsIncomingGetParams { + + return &StreamManagerMetricsIncomingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStreamManagerMetricsIncomingGetParamsWithTimeout creates a new StreamManagerMetricsIncomingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStreamManagerMetricsIncomingGetParamsWithTimeout(timeout time.Duration) *StreamManagerMetricsIncomingGetParams { + + return &StreamManagerMetricsIncomingGetParams{ + + timeout: timeout, + } +} + +// NewStreamManagerMetricsIncomingGetParamsWithContext creates a new StreamManagerMetricsIncomingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStreamManagerMetricsIncomingGetParamsWithContext(ctx context.Context) *StreamManagerMetricsIncomingGetParams { + + return &StreamManagerMetricsIncomingGetParams{ + + Context: ctx, + } +} + +// NewStreamManagerMetricsIncomingGetParamsWithHTTPClient creates a new StreamManagerMetricsIncomingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStreamManagerMetricsIncomingGetParamsWithHTTPClient(client *http.Client) *StreamManagerMetricsIncomingGetParams { + + return &StreamManagerMetricsIncomingGetParams{ + HTTPClient: client, + } +} + +/* +StreamManagerMetricsIncomingGetParams contains all the parameters to send to the API endpoint +for the stream manager metrics incoming get operation typically these are written to a http.Request +*/ +type StreamManagerMetricsIncomingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the stream manager metrics incoming get params +func (o *StreamManagerMetricsIncomingGetParams) WithTimeout(timeout time.Duration) *StreamManagerMetricsIncomingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the stream manager metrics incoming get params +func (o *StreamManagerMetricsIncomingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the stream manager metrics incoming get params +func (o *StreamManagerMetricsIncomingGetParams) WithContext(ctx context.Context) *StreamManagerMetricsIncomingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the stream manager metrics incoming get params +func (o *StreamManagerMetricsIncomingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the stream manager metrics incoming get params +func (o *StreamManagerMetricsIncomingGetParams) WithHTTPClient(client *http.Client) *StreamManagerMetricsIncomingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the stream manager metrics incoming get params +func (o *StreamManagerMetricsIncomingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StreamManagerMetricsIncomingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_get_responses.go new file mode 100644 index 00000000000..9469907ee79 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_incoming_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StreamManagerMetricsIncomingGetReader is a Reader for the StreamManagerMetricsIncomingGet structure. +type StreamManagerMetricsIncomingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StreamManagerMetricsIncomingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStreamManagerMetricsIncomingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStreamManagerMetricsIncomingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStreamManagerMetricsIncomingGetOK creates a StreamManagerMetricsIncomingGetOK with default headers values +func NewStreamManagerMetricsIncomingGetOK() *StreamManagerMetricsIncomingGetOK { + return &StreamManagerMetricsIncomingGetOK{} +} + +/* +StreamManagerMetricsIncomingGetOK handles this case with default header values. + +Success +*/ +type StreamManagerMetricsIncomingGetOK struct { + Payload int32 +} + +func (o *StreamManagerMetricsIncomingGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StreamManagerMetricsIncomingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStreamManagerMetricsIncomingGetDefault creates a StreamManagerMetricsIncomingGetDefault with default headers values +func NewStreamManagerMetricsIncomingGetDefault(code int) *StreamManagerMetricsIncomingGetDefault { + return &StreamManagerMetricsIncomingGetDefault{ + _statusCode: code, + } +} + +/* +StreamManagerMetricsIncomingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StreamManagerMetricsIncomingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the stream manager metrics incoming get default response +func (o *StreamManagerMetricsIncomingGetDefault) Code() int { + return o._statusCode +} + +func (o *StreamManagerMetricsIncomingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StreamManagerMetricsIncomingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StreamManagerMetricsIncomingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outbound_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outbound_get_parameters.go new file mode 100644 index 00000000000..fef090ab38f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outbound_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStreamManagerMetricsOutboundGetParams creates a new StreamManagerMetricsOutboundGetParams object +// with the default values initialized. 
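The reader above follows the standard go-swagger shape: a 200 is decoded into the OK type carrying an int32 payload, and every other status is decoded into the Default type, which is returned as an error for non-2xx codes. Assuming the conventional generated Client method of the same name (not shown in this diff), handling looks roughly like:

    ok, err := client.StreamManagerMetricsIncomingGet(params) // hypothetical wiring, per go-swagger convention
    if err != nil {
        // non-2xx responses surface here as *StreamManagerMetricsIncomingGetDefault;
        // its Error() renders "agent [HTTP <code>] <message>"
        return err
    }
    count := ok.GetPayload() // int32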
+func NewStreamManagerMetricsOutboundGetParams() *StreamManagerMetricsOutboundGetParams { + + return &StreamManagerMetricsOutboundGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStreamManagerMetricsOutboundGetParamsWithTimeout creates a new StreamManagerMetricsOutboundGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStreamManagerMetricsOutboundGetParamsWithTimeout(timeout time.Duration) *StreamManagerMetricsOutboundGetParams { + + return &StreamManagerMetricsOutboundGetParams{ + + timeout: timeout, + } +} + +// NewStreamManagerMetricsOutboundGetParamsWithContext creates a new StreamManagerMetricsOutboundGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStreamManagerMetricsOutboundGetParamsWithContext(ctx context.Context) *StreamManagerMetricsOutboundGetParams { + + return &StreamManagerMetricsOutboundGetParams{ + + Context: ctx, + } +} + +// NewStreamManagerMetricsOutboundGetParamsWithHTTPClient creates a new StreamManagerMetricsOutboundGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStreamManagerMetricsOutboundGetParamsWithHTTPClient(client *http.Client) *StreamManagerMetricsOutboundGetParams { + + return &StreamManagerMetricsOutboundGetParams{ + HTTPClient: client, + } +} + +/* +StreamManagerMetricsOutboundGetParams contains all the parameters to send to the API endpoint +for the stream manager metrics outbound get operation typically these are written to a http.Request +*/ +type StreamManagerMetricsOutboundGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the stream manager metrics outbound get params +func (o *StreamManagerMetricsOutboundGetParams) WithTimeout(timeout time.Duration) *StreamManagerMetricsOutboundGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the stream manager metrics outbound get params +func (o *StreamManagerMetricsOutboundGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the stream manager metrics outbound get params +func (o *StreamManagerMetricsOutboundGetParams) WithContext(ctx context.Context) *StreamManagerMetricsOutboundGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the stream manager metrics outbound get params +func (o *StreamManagerMetricsOutboundGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the stream manager metrics outbound get params +func (o *StreamManagerMetricsOutboundGetParams) WithHTTPClient(client *http.Client) *StreamManagerMetricsOutboundGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the stream manager metrics outbound get params +func (o *StreamManagerMetricsOutboundGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StreamManagerMetricsOutboundGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outbound_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outbound_get_responses.go new file mode 100644 index 00000000000..ec2cb119068 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outbound_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StreamManagerMetricsOutboundGetReader is a Reader for the StreamManagerMetricsOutboundGet structure. +type StreamManagerMetricsOutboundGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StreamManagerMetricsOutboundGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStreamManagerMetricsOutboundGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStreamManagerMetricsOutboundGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStreamManagerMetricsOutboundGetOK creates a StreamManagerMetricsOutboundGetOK with default headers values +func NewStreamManagerMetricsOutboundGetOK() *StreamManagerMetricsOutboundGetOK { + return &StreamManagerMetricsOutboundGetOK{} +} + +/* +StreamManagerMetricsOutboundGetOK handles this case with default header values. + +Success +*/ +type StreamManagerMetricsOutboundGetOK struct { + Payload int32 +} + +func (o *StreamManagerMetricsOutboundGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StreamManagerMetricsOutboundGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStreamManagerMetricsOutboundGetDefault creates a StreamManagerMetricsOutboundGetDefault with default headers values +func NewStreamManagerMetricsOutboundGetDefault(code int) *StreamManagerMetricsOutboundGetDefault { + return &StreamManagerMetricsOutboundGetDefault{ + _statusCode: code, + } +} + +/* +StreamManagerMetricsOutboundGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StreamManagerMetricsOutboundGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the stream manager metrics outbound get default response +func (o *StreamManagerMetricsOutboundGetDefault) Code() int { + return o._statusCode +} + +func (o *StreamManagerMetricsOutboundGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StreamManagerMetricsOutboundGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StreamManagerMetricsOutboundGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_by_peer_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_by_peer_get_parameters.go new file mode 100644 index 00000000000..0b2c4fddd45 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_by_peer_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStreamManagerMetricsOutgoingByPeerGetParams creates a new StreamManagerMetricsOutgoingByPeerGetParams object +// with the default values initialized. 
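One small API note: each parameter has both a chainable With* form and an in-place Set* form, so callers that already hold a params value can mutate it directly. For the outbound variant defined earlier, for instance:

    p := operations.NewStreamManagerMetricsOutboundGetParams()
    p.SetTimeout(5 * time.Second) // Set* mutates in place; With* returns the receiver for chaining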
+func NewStreamManagerMetricsOutgoingByPeerGetParams() *StreamManagerMetricsOutgoingByPeerGetParams { + var () + return &StreamManagerMetricsOutgoingByPeerGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStreamManagerMetricsOutgoingByPeerGetParamsWithTimeout creates a new StreamManagerMetricsOutgoingByPeerGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStreamManagerMetricsOutgoingByPeerGetParamsWithTimeout(timeout time.Duration) *StreamManagerMetricsOutgoingByPeerGetParams { + var () + return &StreamManagerMetricsOutgoingByPeerGetParams{ + + timeout: timeout, + } +} + +// NewStreamManagerMetricsOutgoingByPeerGetParamsWithContext creates a new StreamManagerMetricsOutgoingByPeerGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStreamManagerMetricsOutgoingByPeerGetParamsWithContext(ctx context.Context) *StreamManagerMetricsOutgoingByPeerGetParams { + var () + return &StreamManagerMetricsOutgoingByPeerGetParams{ + + Context: ctx, + } +} + +// NewStreamManagerMetricsOutgoingByPeerGetParamsWithHTTPClient creates a new StreamManagerMetricsOutgoingByPeerGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStreamManagerMetricsOutgoingByPeerGetParamsWithHTTPClient(client *http.Client) *StreamManagerMetricsOutgoingByPeerGetParams { + var () + return &StreamManagerMetricsOutgoingByPeerGetParams{ + HTTPClient: client, + } +} + +/* +StreamManagerMetricsOutgoingByPeerGetParams contains all the parameters to send to the API endpoint +for the stream manager metrics outgoing by peer get operation typically these are written to a http.Request +*/ +type StreamManagerMetricsOutgoingByPeerGetParams struct { + + /*Peer + The stream peer + + */ + Peer string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) WithTimeout(timeout time.Duration) *StreamManagerMetricsOutgoingByPeerGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) WithContext(ctx context.Context) *StreamManagerMetricsOutgoingByPeerGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) WithHTTPClient(client *http.Client) *StreamManagerMetricsOutgoingByPeerGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithPeer adds the peer to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) WithPeer(peer string) 
*StreamManagerMetricsOutgoingByPeerGetParams { + o.SetPeer(peer) + return o +} + +// SetPeer adds the peer to the stream manager metrics outgoing by peer get params +func (o *StreamManagerMetricsOutgoingByPeerGetParams) SetPeer(peer string) { + o.Peer = peer +} + +// WriteToRequest writes these params to a swagger request +func (o *StreamManagerMetricsOutgoingByPeerGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param peer + if err := r.SetPathParam("peer", o.Peer); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_by_peer_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_by_peer_get_responses.go new file mode 100644 index 00000000000..c9a23f42dd2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_by_peer_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StreamManagerMetricsOutgoingByPeerGetReader is a Reader for the StreamManagerMetricsOutgoingByPeerGet structure. +type StreamManagerMetricsOutgoingByPeerGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StreamManagerMetricsOutgoingByPeerGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStreamManagerMetricsOutgoingByPeerGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStreamManagerMetricsOutgoingByPeerGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStreamManagerMetricsOutgoingByPeerGetOK creates a StreamManagerMetricsOutgoingByPeerGetOK with default headers values +func NewStreamManagerMetricsOutgoingByPeerGetOK() *StreamManagerMetricsOutgoingByPeerGetOK { + return &StreamManagerMetricsOutgoingByPeerGetOK{} +} + +/* +StreamManagerMetricsOutgoingByPeerGetOK handles this case with default header values. 
+ +Success +*/ +type StreamManagerMetricsOutgoingByPeerGetOK struct { + Payload int32 +} + +func (o *StreamManagerMetricsOutgoingByPeerGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StreamManagerMetricsOutgoingByPeerGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStreamManagerMetricsOutgoingByPeerGetDefault creates a StreamManagerMetricsOutgoingByPeerGetDefault with default headers values +func NewStreamManagerMetricsOutgoingByPeerGetDefault(code int) *StreamManagerMetricsOutgoingByPeerGetDefault { + return &StreamManagerMetricsOutgoingByPeerGetDefault{ + _statusCode: code, + } +} + +/* +StreamManagerMetricsOutgoingByPeerGetDefault handles this case with default header values. + +internal server error +*/ +type StreamManagerMetricsOutgoingByPeerGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the stream manager metrics outgoing by peer get default response +func (o *StreamManagerMetricsOutgoingByPeerGetDefault) Code() int { + return o._statusCode +} + +func (o *StreamManagerMetricsOutgoingByPeerGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StreamManagerMetricsOutgoingByPeerGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StreamManagerMetricsOutgoingByPeerGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_get_parameters.go new file mode 100644 index 00000000000..b7f963d4c50 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewStreamManagerMetricsOutgoingGetParams creates a new StreamManagerMetricsOutgoingGetParams object +// with the default values initialized. 
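Besides the per-request timeout, the generated constructors also accept a caller-supplied *http.Client, which is how custom TLS or proxy settings reach these calls. A minimal sketch using the outgoing-by-peer variant defined above (example values throughout):

    httpClient := &http.Client{Timeout: 15 * time.Second} // swap in a transport with TLS config as needed
    params := operations.NewStreamManagerMetricsOutgoingByPeerGetParamsWithHTTPClient(httpClient).
        WithPeer("10.4.0.2") // example peer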
+func NewStreamManagerMetricsOutgoingGetParams() *StreamManagerMetricsOutgoingGetParams { + + return &StreamManagerMetricsOutgoingGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewStreamManagerMetricsOutgoingGetParamsWithTimeout creates a new StreamManagerMetricsOutgoingGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewStreamManagerMetricsOutgoingGetParamsWithTimeout(timeout time.Duration) *StreamManagerMetricsOutgoingGetParams { + + return &StreamManagerMetricsOutgoingGetParams{ + + timeout: timeout, + } +} + +// NewStreamManagerMetricsOutgoingGetParamsWithContext creates a new StreamManagerMetricsOutgoingGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewStreamManagerMetricsOutgoingGetParamsWithContext(ctx context.Context) *StreamManagerMetricsOutgoingGetParams { + + return &StreamManagerMetricsOutgoingGetParams{ + + Context: ctx, + } +} + +// NewStreamManagerMetricsOutgoingGetParamsWithHTTPClient creates a new StreamManagerMetricsOutgoingGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewStreamManagerMetricsOutgoingGetParamsWithHTTPClient(client *http.Client) *StreamManagerMetricsOutgoingGetParams { + + return &StreamManagerMetricsOutgoingGetParams{ + HTTPClient: client, + } +} + +/* +StreamManagerMetricsOutgoingGetParams contains all the parameters to send to the API endpoint +for the stream manager metrics outgoing get operation typically these are written to a http.Request +*/ +type StreamManagerMetricsOutgoingGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the stream manager metrics outgoing get params +func (o *StreamManagerMetricsOutgoingGetParams) WithTimeout(timeout time.Duration) *StreamManagerMetricsOutgoingGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the stream manager metrics outgoing get params +func (o *StreamManagerMetricsOutgoingGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the stream manager metrics outgoing get params +func (o *StreamManagerMetricsOutgoingGetParams) WithContext(ctx context.Context) *StreamManagerMetricsOutgoingGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the stream manager metrics outgoing get params +func (o *StreamManagerMetricsOutgoingGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the stream manager metrics outgoing get params +func (o *StreamManagerMetricsOutgoingGetParams) WithHTTPClient(client *http.Client) *StreamManagerMetricsOutgoingGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the stream manager metrics outgoing get params +func (o *StreamManagerMetricsOutgoingGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *StreamManagerMetricsOutgoingGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_get_responses.go new file mode 100644 index 00000000000..26124af9167 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/stream_manager_metrics_outgoing_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// StreamManagerMetricsOutgoingGetReader is a Reader for the StreamManagerMetricsOutgoingGet structure. +type StreamManagerMetricsOutgoingGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *StreamManagerMetricsOutgoingGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewStreamManagerMetricsOutgoingGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewStreamManagerMetricsOutgoingGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewStreamManagerMetricsOutgoingGetOK creates a StreamManagerMetricsOutgoingGetOK with default headers values +func NewStreamManagerMetricsOutgoingGetOK() *StreamManagerMetricsOutgoingGetOK { + return &StreamManagerMetricsOutgoingGetOK{} +} + +/* +StreamManagerMetricsOutgoingGetOK handles this case with default header values. + +Success +*/ +type StreamManagerMetricsOutgoingGetOK struct { + Payload int32 +} + +func (o *StreamManagerMetricsOutgoingGetOK) GetPayload() int32 { + return o.Payload +} + +func (o *StreamManagerMetricsOutgoingGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewStreamManagerMetricsOutgoingGetDefault creates a StreamManagerMetricsOutgoingGetDefault with default headers values +func NewStreamManagerMetricsOutgoingGetDefault(code int) *StreamManagerMetricsOutgoingGetDefault { + return &StreamManagerMetricsOutgoingGetDefault{ + _statusCode: code, + } +} + +/* +StreamManagerMetricsOutgoingGetDefault handles this case with default header values. 
+ +internal server error +*/ +type StreamManagerMetricsOutgoingGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the stream manager metrics outgoing get default response +func (o *StreamManagerMetricsOutgoingGetDefault) Code() int { + return o._statusCode +} + +func (o *StreamManagerMetricsOutgoingGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *StreamManagerMetricsOutgoingGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *StreamManagerMetricsOutgoingGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_get_parameters.go new file mode 100644 index 00000000000..5e0a082c56e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_get_parameters.go @@ -0,0 +1,136 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSystemLoggerByNameGetParams creates a new SystemLoggerByNameGetParams object +// with the default values initialized. 
+func NewSystemLoggerByNameGetParams() *SystemLoggerByNameGetParams { + var () + return &SystemLoggerByNameGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewSystemLoggerByNameGetParamsWithTimeout creates a new SystemLoggerByNameGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSystemLoggerByNameGetParamsWithTimeout(timeout time.Duration) *SystemLoggerByNameGetParams { + var () + return &SystemLoggerByNameGetParams{ + + timeout: timeout, + } +} + +// NewSystemLoggerByNameGetParamsWithContext creates a new SystemLoggerByNameGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewSystemLoggerByNameGetParamsWithContext(ctx context.Context) *SystemLoggerByNameGetParams { + var () + return &SystemLoggerByNameGetParams{ + + Context: ctx, + } +} + +// NewSystemLoggerByNameGetParamsWithHTTPClient creates a new SystemLoggerByNameGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSystemLoggerByNameGetParamsWithHTTPClient(client *http.Client) *SystemLoggerByNameGetParams { + var () + return &SystemLoggerByNameGetParams{ + HTTPClient: client, + } +} + +/* +SystemLoggerByNameGetParams contains all the parameters to send to the API endpoint +for the system logger by name get operation typically these are written to a http.Request +*/ +type SystemLoggerByNameGetParams struct { + + /*Name + The logger to query about + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the system logger by name get params +func (o *SystemLoggerByNameGetParams) WithTimeout(timeout time.Duration) *SystemLoggerByNameGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the system logger by name get params +func (o *SystemLoggerByNameGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the system logger by name get params +func (o *SystemLoggerByNameGetParams) WithContext(ctx context.Context) *SystemLoggerByNameGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the system logger by name get params +func (o *SystemLoggerByNameGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the system logger by name get params +func (o *SystemLoggerByNameGetParams) WithHTTPClient(client *http.Client) *SystemLoggerByNameGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the system logger by name get params +func (o *SystemLoggerByNameGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithName adds the name to the system logger by name get params +func (o *SystemLoggerByNameGetParams) WithName(name string) *SystemLoggerByNameGetParams { + o.SetName(name) + return o +} + +// SetName adds the name to the system logger by name get params +func (o *SystemLoggerByNameGetParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *SystemLoggerByNameGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_get_responses.go new file mode 100644 index 00000000000..afc8b57db72 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// SystemLoggerByNameGetReader is a Reader for the SystemLoggerByNameGet structure. +type SystemLoggerByNameGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SystemLoggerByNameGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSystemLoggerByNameGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSystemLoggerByNameGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSystemLoggerByNameGetOK creates a SystemLoggerByNameGetOK with default headers values +func NewSystemLoggerByNameGetOK() *SystemLoggerByNameGetOK { + return &SystemLoggerByNameGetOK{} +} + +/* +SystemLoggerByNameGetOK handles this case with default header values. + +Success +*/ +type SystemLoggerByNameGetOK struct { + Payload string +} + +func (o *SystemLoggerByNameGetOK) GetPayload() string { + return o.Payload +} + +func (o *SystemLoggerByNameGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSystemLoggerByNameGetDefault creates a SystemLoggerByNameGetDefault with default headers values +func NewSystemLoggerByNameGetDefault(code int) *SystemLoggerByNameGetDefault { + return &SystemLoggerByNameGetDefault{ + _statusCode: code, + } +} + +/* +SystemLoggerByNameGetDefault handles this case with default header values. 
+ +internal server error +*/ +type SystemLoggerByNameGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the system logger by name get default response +func (o *SystemLoggerByNameGetDefault) Code() int { + return o._statusCode +} + +func (o *SystemLoggerByNameGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *SystemLoggerByNameGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *SystemLoggerByNameGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_post_parameters.go new file mode 100644 index 00000000000..65db85ad8cb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_post_parameters.go @@ -0,0 +1,161 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSystemLoggerByNamePostParams creates a new SystemLoggerByNamePostParams object +// with the default values initialized. 
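For cancellation, the context-taking constructor threads a context.Context through the request. A sketch against the logger-by-name getter above ("compaction" is just an illustrative logger name):

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    params := operations.NewSystemLoggerByNameGetParamsWithContext(ctx).
        WithName("compaction")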
+func NewSystemLoggerByNamePostParams() *SystemLoggerByNamePostParams { + var () + return &SystemLoggerByNamePostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewSystemLoggerByNamePostParamsWithTimeout creates a new SystemLoggerByNamePostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSystemLoggerByNamePostParamsWithTimeout(timeout time.Duration) *SystemLoggerByNamePostParams { + var () + return &SystemLoggerByNamePostParams{ + + timeout: timeout, + } +} + +// NewSystemLoggerByNamePostParamsWithContext creates a new SystemLoggerByNamePostParams object +// with the default values initialized, and the ability to set a context for a request +func NewSystemLoggerByNamePostParamsWithContext(ctx context.Context) *SystemLoggerByNamePostParams { + var () + return &SystemLoggerByNamePostParams{ + + Context: ctx, + } +} + +// NewSystemLoggerByNamePostParamsWithHTTPClient creates a new SystemLoggerByNamePostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSystemLoggerByNamePostParamsWithHTTPClient(client *http.Client) *SystemLoggerByNamePostParams { + var () + return &SystemLoggerByNamePostParams{ + HTTPClient: client, + } +} + +/* +SystemLoggerByNamePostParams contains all the parameters to send to the API endpoint +for the system logger by name post operation typically these are written to a http.Request +*/ +type SystemLoggerByNamePostParams struct { + + /*Level + The new log level + + */ + Level string + /*Name + The logger to query about + + */ + Name string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the system logger by name post params +func (o *SystemLoggerByNamePostParams) WithTimeout(timeout time.Duration) *SystemLoggerByNamePostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the system logger by name post params +func (o *SystemLoggerByNamePostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the system logger by name post params +func (o *SystemLoggerByNamePostParams) WithContext(ctx context.Context) *SystemLoggerByNamePostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the system logger by name post params +func (o *SystemLoggerByNamePostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the system logger by name post params +func (o *SystemLoggerByNamePostParams) WithHTTPClient(client *http.Client) *SystemLoggerByNamePostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the system logger by name post params +func (o *SystemLoggerByNamePostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithLevel adds the level to the system logger by name post params +func (o *SystemLoggerByNamePostParams) WithLevel(level string) *SystemLoggerByNamePostParams { + o.SetLevel(level) + return o +} + +// SetLevel adds the level to the system logger by name post params +func (o *SystemLoggerByNamePostParams) SetLevel(level string) { + o.Level = level +} + +// WithName adds the name to the system logger by name post params +func (o *SystemLoggerByNamePostParams) WithName(name string) *SystemLoggerByNamePostParams { + o.SetName(name) + return o +} + +// SetName adds the name to the system logger by name post params +func (o 
*SystemLoggerByNamePostParams) SetName(name string) { + o.Name = name +} + +// WriteToRequest writes these params to a swagger request +func (o *SystemLoggerByNamePostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + // query param level + qrLevel := o.Level + qLevel := qrLevel + if qLevel != "" { + if err := r.SetQueryParam("level", qLevel); err != nil { + return err + } + } + + // path param name + if err := r.SetPathParam("name", o.Name); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_post_responses.go new file mode 100644 index 00000000000..4215ecaad63 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_by_name_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// SystemLoggerByNamePostReader is a Reader for the SystemLoggerByNamePost structure. +type SystemLoggerByNamePostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SystemLoggerByNamePostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSystemLoggerByNamePostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSystemLoggerByNamePostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSystemLoggerByNamePostOK creates a SystemLoggerByNamePostOK with default headers values +func NewSystemLoggerByNamePostOK() *SystemLoggerByNamePostOK { + return &SystemLoggerByNamePostOK{} +} + +/* +SystemLoggerByNamePostOK handles this case with default header values. + +Success +*/ +type SystemLoggerByNamePostOK struct { +} + +func (o *SystemLoggerByNamePostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSystemLoggerByNamePostDefault creates a SystemLoggerByNamePostDefault with default headers values +func NewSystemLoggerByNamePostDefault(code int) *SystemLoggerByNamePostDefault { + return &SystemLoggerByNamePostDefault{ + _statusCode: code, + } +} + +/* +SystemLoggerByNamePostDefault handles this case with default header values. 
+ +internal server error +*/ +type SystemLoggerByNamePostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the system logger by name post default response +func (o *SystemLoggerByNamePostDefault) Code() int { + return o._statusCode +} + +func (o *SystemLoggerByNamePostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *SystemLoggerByNamePostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *SystemLoggerByNamePostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_get_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_get_parameters.go new file mode 100644 index 00000000000..f70d1eea53d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_get_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSystemLoggerGetParams creates a new SystemLoggerGetParams object +// with the default values initialized. 
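Note how WriteToRequest in the POST variant above routes its two parameters differently: name becomes the {name} path segment, while level is sent as a query parameter, and only when it is non-empty. A sketch (logger name illustrative):

    params := operations.NewSystemLoggerByNamePostParams().
        WithName("compaction"). // path parameter {name}
        WithLevel("debug")      // "level" query parameter; skipped entirely when ""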
+func NewSystemLoggerGetParams() *SystemLoggerGetParams { + + return &SystemLoggerGetParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewSystemLoggerGetParamsWithTimeout creates a new SystemLoggerGetParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSystemLoggerGetParamsWithTimeout(timeout time.Duration) *SystemLoggerGetParams { + + return &SystemLoggerGetParams{ + + timeout: timeout, + } +} + +// NewSystemLoggerGetParamsWithContext creates a new SystemLoggerGetParams object +// with the default values initialized, and the ability to set a context for a request +func NewSystemLoggerGetParamsWithContext(ctx context.Context) *SystemLoggerGetParams { + + return &SystemLoggerGetParams{ + + Context: ctx, + } +} + +// NewSystemLoggerGetParamsWithHTTPClient creates a new SystemLoggerGetParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSystemLoggerGetParamsWithHTTPClient(client *http.Client) *SystemLoggerGetParams { + + return &SystemLoggerGetParams{ + HTTPClient: client, + } +} + +/* +SystemLoggerGetParams contains all the parameters to send to the API endpoint +for the system logger get operation typically these are written to a http.Request +*/ +type SystemLoggerGetParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the system logger get params +func (o *SystemLoggerGetParams) WithTimeout(timeout time.Duration) *SystemLoggerGetParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the system logger get params +func (o *SystemLoggerGetParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the system logger get params +func (o *SystemLoggerGetParams) WithContext(ctx context.Context) *SystemLoggerGetParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the system logger get params +func (o *SystemLoggerGetParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the system logger get params +func (o *SystemLoggerGetParams) WithHTTPClient(client *http.Client) *SystemLoggerGetParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the system logger get params +func (o *SystemLoggerGetParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *SystemLoggerGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_get_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_get_responses.go new file mode 100644 index 00000000000..36a0c46fb6a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_get_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// SystemLoggerGetReader is a Reader for the SystemLoggerGet structure. +type SystemLoggerGetReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SystemLoggerGetReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSystemLoggerGetOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSystemLoggerGetDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSystemLoggerGetOK creates a SystemLoggerGetOK with default headers values +func NewSystemLoggerGetOK() *SystemLoggerGetOK { + return &SystemLoggerGetOK{} +} + +/* +SystemLoggerGetOK handles this case with default header values. + +Success +*/ +type SystemLoggerGetOK struct { + Payload []string +} + +func (o *SystemLoggerGetOK) GetPayload() []string { + return o.Payload +} + +func (o *SystemLoggerGetOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewSystemLoggerGetDefault creates a SystemLoggerGetDefault with default headers values +func NewSystemLoggerGetDefault(code int) *SystemLoggerGetDefault { + return &SystemLoggerGetDefault{ + _statusCode: code, + } +} + +/* +SystemLoggerGetDefault handles this case with default header values. + +internal server error +*/ +type SystemLoggerGetDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the system logger get default response +func (o *SystemLoggerGetDefault) Code() int { + return o._statusCode +} + +func (o *SystemLoggerGetDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *SystemLoggerGetDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *SystemLoggerGetDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_post_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_post_parameters.go new file mode 100644 index 00000000000..4106bce392a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_post_parameters.go @@ -0,0 +1,140 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
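The SystemLoggerGet params/reader pair above follows the standard go-swagger calling pattern. Below is a minimal, illustrative sketch of how these pieces are typically wired together; it is not part of the vendored diff, and the `SystemLoggerGet` method on the generated `ClientService` is assumed by go-swagger naming convention, since the operations client file itself is outside this excerpt.

```go
package main

import (
	"context"
	"fmt"
	"time"

	scyllav1 "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Default targets the spec's host (scylla-manager.magic.host) over HTTP.
	c := scyllav1.Default

	params := operations.NewSystemLoggerGetParamsWithTimeout(10 * time.Second).
		WithContext(context.Background())

	// Assumed method name, per go-swagger convention; the generated
	// operations client is not shown in this excerpt.
	ok, err := c.Operations.SystemLoggerGet(params)
	if err != nil {
		// Non-2xx responses surface as *operations.SystemLoggerGetDefault,
		// whose Error() embeds the HTTP status and models.ErrorModel message.
		fmt.Println("system_logger get failed:", err)
		return
	}
	fmt.Println("loggers:", ok.GetPayload()) // []string payload on 200 OK
}
```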
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewSystemLoggerPostParams creates a new SystemLoggerPostParams object +// with the default values initialized. +func NewSystemLoggerPostParams() *SystemLoggerPostParams { + var () + return &SystemLoggerPostParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewSystemLoggerPostParamsWithTimeout creates a new SystemLoggerPostParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewSystemLoggerPostParamsWithTimeout(timeout time.Duration) *SystemLoggerPostParams { + var () + return &SystemLoggerPostParams{ + + timeout: timeout, + } +} + +// NewSystemLoggerPostParamsWithContext creates a new SystemLoggerPostParams object +// with the default values initialized, and the ability to set a context for a request +func NewSystemLoggerPostParamsWithContext(ctx context.Context) *SystemLoggerPostParams { + var () + return &SystemLoggerPostParams{ + + Context: ctx, + } +} + +// NewSystemLoggerPostParamsWithHTTPClient creates a new SystemLoggerPostParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewSystemLoggerPostParamsWithHTTPClient(client *http.Client) *SystemLoggerPostParams { + var () + return &SystemLoggerPostParams{ + HTTPClient: client, + } +} + +/* +SystemLoggerPostParams contains all the parameters to send to the API endpoint +for the system logger post operation typically these are written to a http.Request +*/ +type SystemLoggerPostParams struct { + + /*Level + The new log level + + */ + Level string + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the system logger post params +func (o *SystemLoggerPostParams) WithTimeout(timeout time.Duration) *SystemLoggerPostParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the system logger post params +func (o *SystemLoggerPostParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the system logger post params +func (o *SystemLoggerPostParams) WithContext(ctx context.Context) *SystemLoggerPostParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the system logger post params +func (o *SystemLoggerPostParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the system logger post params +func (o *SystemLoggerPostParams) WithHTTPClient(client *http.Client) *SystemLoggerPostParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the system logger post params +func (o *SystemLoggerPostParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithLevel adds the level to the system logger post params +func (o *SystemLoggerPostParams) WithLevel(level string) *SystemLoggerPostParams { + o.SetLevel(level) + return o +} + +// SetLevel adds the level to the system logger post params +func (o *SystemLoggerPostParams) SetLevel(level string) { + o.Level = level +} + +// WriteToRequest writes these params to a swagger request +func (o *SystemLoggerPostParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + 
return err + } + var res []error + + // query param level + qrLevel := o.Level + qLevel := qrLevel + if qLevel != "" { + if err := r.SetQueryParam("level", qLevel); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_post_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_post_responses.go new file mode 100644 index 00000000000..849bd9a3133 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations/system_logger_post_responses.go @@ -0,0 +1,104 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models" +) + +// SystemLoggerPostReader is a Reader for the SystemLoggerPost structure. +type SystemLoggerPostReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *SystemLoggerPostReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewSystemLoggerPostOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewSystemLoggerPostDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewSystemLoggerPostOK creates a SystemLoggerPostOK with default headers values +func NewSystemLoggerPostOK() *SystemLoggerPostOK { + return &SystemLoggerPostOK{} +} + +/* +SystemLoggerPostOK handles this case with default header values. + +Success +*/ +type SystemLoggerPostOK struct { +} + +func (o *SystemLoggerPostOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewSystemLoggerPostDefault creates a SystemLoggerPostDefault with default headers values +func NewSystemLoggerPostDefault(code int) *SystemLoggerPostDefault { + return &SystemLoggerPostDefault{ + _statusCode: code, + } +} + +/* +SystemLoggerPostDefault handles this case with default header values. 
+ +internal server error +*/ +type SystemLoggerPostDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the system logger post default response +func (o *SystemLoggerPostDefault) Code() int { + return o._statusCode +} + +func (o *SystemLoggerPostDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *SystemLoggerPostDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *SystemLoggerPostDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/scylla_v1_client.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/scylla_v1_client.go new file mode 100644 index 00000000000..593127d2477 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/scylla_v1_client.go @@ -0,0 +1,111 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package client + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations" +) + +// Default scylla v1 HTTP client. +var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "scylla-manager.magic.host" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http"} + +// NewHTTPClient creates a new scylla v1 HTTP client. +func NewHTTPClient(formats strfmt.Registry) *ScyllaV1 { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new scylla v1 HTTP client, +// using a customizable transport config. +func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *ScyllaV1 { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new scylla v1 client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *ScyllaV1 { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(ScyllaV1) + cli.Transport = transport + cli.Operations = operations.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. +func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. 
+type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// ScyllaV1 is a client for scylla v1 +type ScyllaV1 struct { + Operations operations.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *ScyllaV1) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.Operations.SetTransport(transport) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/collectd_metric_status.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/collectd_metric_status.go new file mode 100644 index 00000000000..d2b5d5961ee --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/collectd_metric_status.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// CollectdMetricStatus collectd_metric_status +// +// # Holds a collectd id and an enable flag +// +// swagger:model collectd_metric_status +type CollectdMetricStatus struct { + + // Is the metric enabled + Enable bool `json:"enable,omitempty"` + + // id + ID *TypeInstanceID `json:"id,omitempty"` +} + +// Validate validates this collectd metric status +func (m *CollectdMetricStatus) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *CollectdMetricStatus) validateID(formats strfmt.Registry) error { + + if swag.IsZero(m.ID) { // not required + return nil + } + + if m.ID != nil { + if err := m.ID.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("id") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *CollectdMetricStatus) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CollectdMetricStatus) UnmarshalBinary(b []byte) error { + var res CollectdMetricStatus + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/collectd_value.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/collectd_value.go new file mode 100644 index 00000000000..ef166e52e28 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/collectd_value.go @@ -0,0 +1,45 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// CollectdValue collectd_value +// +// # Holds a collectd value +// +// swagger:model collectd_value +type CollectdValue struct { + + // An array of values + Values []interface{} `json:"values"` +} + +// Validate validates this collectd value +func (m *CollectdValue) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *CollectdValue) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CollectdValue) UnmarshalBinary(b []byte) error { + var res CollectdValue + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/column_family_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/column_family_info.go new file mode 100644 index 00000000000..386b8b99084 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/column_family_info.go @@ -0,0 +1,51 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
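For the client wiring added in scylla_v1_client.go above, a rough usage sketch follows. The host value is hypothetical, and the `SystemLoggerPost` method on the operations service is an assumption based on go-swagger naming; only the transport/config builders shown in the diff are taken as given.

```go
package main

import (
	"fmt"
	"time"

	scyllav1 "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations"
)

func main() {
	// Override the spec defaults (scylla-manager.magic.host, "/", http).
	cfg := scyllav1.DefaultTransportConfig().
		WithHost("10.0.0.1:10000"). // hypothetical agent address
		WithSchemes([]string{"http"})
	c := scyllav1.NewHTTPClientWithConfig(nil, cfg) // nil registry falls back to strfmt.Default

	params := operations.NewSystemLoggerPostParamsWithTimeout(5 * time.Second).
		WithLevel("debug") // sent as the "level" query parameter

	// Assumed operation method, per go-swagger convention.
	if _, err := c.Operations.SystemLoggerPost(params); err != nil {
		fmt.Println("failed to set log level:", err)
	}
}
```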
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ColumnFamilyInfo column_family_info +// +// # Information about column family +// +// swagger:model column_family_info +type ColumnFamilyInfo struct { + + // The column family + Cf string `json:"cf,omitempty"` + + // The Keyspace + Ks string `json:"ks,omitempty"` + + // The column family type + Type string `json:"type,omitempty"` +} + +// Validate validates this column family info +func (m *ColumnFamilyInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ColumnFamilyInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ColumnFamilyInfo) UnmarshalBinary(b []byte) error { + var res ColumnFamilyInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/compaction_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/compaction_info.go new file mode 100644 index 00000000000..2595a70d3c9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/compaction_info.go @@ -0,0 +1,54 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// CompactionInfo compaction_info +// +// # A key value mapping +// +// swagger:model compaction_info +type CompactionInfo struct { + + // The current completed + Completed interface{} `json:"completed,omitempty"` + + // The operation type + OperationType string `json:"operation_type,omitempty"` + + // The total to compact + Total interface{} `json:"total,omitempty"` + + // The compacted unit + Unit string `json:"unit,omitempty"` +} + +// Validate validates this compaction info +func (m *CompactionInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *CompactionInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *CompactionInfo) UnmarshalBinary(b []byte) error { + var res CompactionInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/direction.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/direction.go new file mode 100644 index 00000000000..cdb2114628a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/direction.go @@ -0,0 +1,65 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// Direction Direction +// +// # The transfer direction +// +// swagger:model Direction +type Direction string + +const ( + + // DirectionOUT captures enum value "OUT" + DirectionOUT Direction = "OUT" + + // DirectionIN captures enum value "IN" + DirectionIN Direction = "IN" +) + +// for schema +var directionEnum []interface{} + +func init() { + var res []Direction + if err := json.Unmarshal([]byte(`["OUT","IN"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + directionEnum = append(directionEnum, v) + } +} + +func (m Direction) validateDirectionEnum(path, location string, value Direction) error { + if err := validate.EnumCase(path, location, value, directionEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this direction +func (m Direction) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateDirectionEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_detail.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_detail.go new file mode 100644 index 00000000000..3613a66da1a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_detail.go @@ -0,0 +1,51 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// EndpointDetail endpoint_detail +// +// # Endpoint detail +// +// swagger:model endpoint_detail +type EndpointDetail struct { + + // The endpoint datacenter + Datacenter string `json:"datacenter,omitempty"` + + // The endpoint host + Host string `json:"host,omitempty"` + + // The endpoint rack + Rack string `json:"rack,omitempty"` +} + +// Validate validates this endpoint detail +func (m *EndpointDetail) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *EndpointDetail) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *EndpointDetail) UnmarshalBinary(b []byte) error { + var res EndpointDetail + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_phi_value.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_phi_value.go new file mode 100644 index 00000000000..7476f83fda4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_phi_value.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// EndpointPhiValue endpoint_phi_value +// +// # Holds the phi value for a single endpoint +// +// swagger:model endpoint_phi_value +type EndpointPhiValue struct { + + // The endpoint address + Endpoint string `json:"endpoint,omitempty"` + + // Phi value + Phi interface{} `json:"phi,omitempty"` +} + +// Validate validates this endpoint phi value +func (m *EndpointPhiValue) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *EndpointPhiValue) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *EndpointPhiValue) UnmarshalBinary(b []byte) error { + var res EndpointPhiValue + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_state.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_state.go new file mode 100644 index 00000000000..8b93814a6e6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/endpoint_state.go @@ -0,0 +1,97 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// EndpointState endpoint_state +// +// # Holds an endpoint state +// +// swagger:model endpoint_state +type EndpointState struct { + + // The endpoint address + Addrs string `json:"addrs,omitempty"` + + // The endpoint application state + ApplicationState []*VersionValue `json:"application_state"` + + // The heartbeat generation + Generation int32 `json:"generation,omitempty"` + + // Is the endpoint alive + IsAlive bool `json:"is_alive,omitempty"` + + // The update timestamp + UpdateTime interface{} `json:"update_time,omitempty"` + + // The heartbeat version + Version int32 `json:"version,omitempty"` +} + +// Validate validates this endpoint state +func (m *EndpointState) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateApplicationState(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *EndpointState) validateApplicationState(formats strfmt.Registry) error { + + if swag.IsZero(m.ApplicationState) { // not required + return nil + } + + for i := 0; i < len(m.ApplicationState); i++ { + if swag.IsZero(m.ApplicationState[i]) { // not required + continue + } + + if m.ApplicationState[i] != nil { + if err := m.ApplicationState[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("application_state" + "."
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *EndpointState) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *EndpointState) UnmarshalBinary(b []byte) error { + var res EndpointState + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/error_model.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/error_model.go new file mode 100644 index 00000000000..9887571ce72 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/error_model.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ErrorModel error model +// +// swagger:model ErrorModel +type ErrorModel struct { + + // code + Code int64 `json:"code,omitempty"` + + // message + Message string `json:"message,omitempty"` +} + +// Validate validates this error model +func (m *ErrorModel) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ErrorModel) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ErrorModel) UnmarshalBinary(b []byte) error { + var res ErrorModel + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/estimated_histogram.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/estimated_histogram.go new file mode 100644 index 00000000000..acccc928e07 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/estimated_histogram.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// EstimatedHistogram estimated_histogram +// +// # Estimated histogram values +// +// swagger:model estimated_histogram +type EstimatedHistogram struct { + + // The series of values to which the counts in `buckets` correspond + BucketOffsets []interface{} `json:"bucket_offsets"` + + // The histogram buckets + Buckets []interface{} `json:"buckets"` +} + +// Validate validates this estimated histogram +func (m *EstimatedHistogram) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *EstimatedHistogram) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *EstimatedHistogram) UnmarshalBinary(b []byte) error { + var res EstimatedHistogram + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/histogram.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/histogram.go new file mode 100644 index 00000000000..54b8102a4c4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/histogram.go @@ -0,0 +1,63 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Histogram histogram +// +// # Histogram values +// +// swagger:model histogram +type Histogram struct { + + // Total count so far + Count interface{} `json:"count,omitempty"` + + // The max so far + Max interface{} `json:"max,omitempty"` + + // The mean + Mean interface{} `json:"mean,omitempty"` + + // The min so far + Min interface{} `json:"min,omitempty"` + + // A sample containing the last n elements + Sample []interface{} `json:"sample"` + + // Total sum so far + Sum interface{} `json:"sum,omitempty"` + + // The variance + Variance interface{} `json:"variance,omitempty"` +} + +// Validate validates this histogram +func (m *Histogram) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Histogram) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Histogram) UnmarshalBinary(b []byte) error { + var res Histogram + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/history.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/history.go new file mode 100644 index 00000000000..3ea3fff6277 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/history.go @@ -0,0 +1,100 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// History history +// +// # Compaction history information +// +// swagger:model history +type History struct { + + // Bytes in + BytesIn interface{} `json:"bytes_in,omitempty"` + + // Bytes out + BytesOut interface{} `json:"bytes_out,omitempty"` + + // The column family name + Cf string `json:"cf,omitempty"` + + // The time of compaction + CompactedAt interface{} `json:"compacted_at,omitempty"` + + // The UUID + ID string `json:"id,omitempty"` + + // The keyspace name + Ks string `json:"ks,omitempty"` + + // The merged rows + RowsMerged []*RowMerged `json:"rows_merged"` +} + +// Validate validates this history +func (m *History) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateRowsMerged(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *History) validateRowsMerged(formats strfmt.Registry) error { + + if swag.IsZero(m.RowsMerged) { // not required + return nil + } + + for i := 0; i < len(m.RowsMerged); i++ { + if swag.IsZero(m.RowsMerged[i]) { // not required + continue + } + + if m.RowsMerged[i] != nil { + if err := m.RowsMerged[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("rows_merged" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *History) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *History) UnmarshalBinary(b []byte) error { + var res History + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/level.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/level.go new file mode 100644 index 00000000000..038a049e2a0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/level.go @@ -0,0 +1,72 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// Level level +// +// swagger:model level +type Level string + +const ( + + // LevelError captures enum value "error" + LevelError Level = "error" + + // LevelWarn captures enum value "warn" + LevelWarn Level = "warn" + + // LevelInfo captures enum value "info" + LevelInfo Level = "info" + + // LevelDebug captures enum value "debug" + LevelDebug Level = "debug" + + // LevelTrace captures enum value "trace" + LevelTrace Level = "trace" +) + +// for schema +var levelEnum []interface{} + +func init() { + var res []Level + if err := json.Unmarshal([]byte(`["error","warn","info","debug","trace"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + levelEnum = append(levelEnum, v) + } +} + +func (m Level) validateLevelEnum(path, location string, value Level) error { + if err := validate.EnumCase(path, location, value, levelEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this level +func (m Level) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateLevelEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/map_string_double.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/map_string_double.go new file mode 100644 index 00000000000..133adcb96c3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/map_string_double.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MapStringDouble map_string_double +// +// # A key value mapping between a string and a double +// +// swagger:model map_string_double +type MapStringDouble struct { + + // The key + Key string `json:"key,omitempty"` + + // The value + Value interface{} `json:"value,omitempty"` +} + +// Validate validates this map string double +func (m *MapStringDouble) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MapStringDouble) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MapStringDouble) UnmarshalBinary(b []byte) error { + var res MapStringDouble + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/maplist_mapper.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/maplist_mapper.go new file mode 100644 index 00000000000..9d8950b968c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/maplist_mapper.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
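The Level and Direction enums above share the same generated shape: the allowed values are unmarshalled once into a package-level slice at init time, and Validate checks membership via validate.EnumCase. A small sketch of the resulting behavior, using only code shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

func main() {
	// LevelDebug is one of the generated Level enum constants; it validates cleanly.
	if err := models.LevelDebug.Validate(strfmt.Default); err != nil {
		fmt.Println("unexpected:", err)
	}

	// An unknown value fails the generated enum check before it reaches the wire.
	if err := models.Level("verbose").Validate(strfmt.Default); err != nil {
		fmt.Println("rejected:", err)
	}
}
```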
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MaplistMapper maplist_mapper +// +// # A key value mapping, where key and value are lists +// +// swagger:model maplist_mapper +type MaplistMapper struct { + + // The key + Key []string `json:"key"` + + // The value + Value []string `json:"value"` +} + +// Validate validates this maplist mapper +func (m *MaplistMapper) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MaplistMapper) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MaplistMapper) UnmarshalBinary(b []byte) error { + var res MaplistMapper + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/mapper.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/mapper.go new file mode 100644 index 00000000000..f7b7426538b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/mapper.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Mapper mapper +// +// # A key value mapping +// +// swagger:model mapper +type Mapper struct { + + // The key + Key string `json:"key,omitempty"` + + // The value + Value string `json:"value,omitempty"` +} + +// Validate validates this mapper +func (m *Mapper) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Mapper) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Mapper) UnmarshalBinary(b []byte) error { + var res Mapper + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/mapper_list.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/mapper_list.go new file mode 100644 index 00000000000..63295b88500 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/mapper_list.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MapperList mapper_list +// +// # Holds a key value which is a list +// +// swagger:model mapper_list +type MapperList struct { + + // The key + Key string `json:"key,omitempty"` + + // The value + Value []string `json:"value"` +} + +// Validate validates this mapper list +func (m *MapperList) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MapperList) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MapperList) UnmarshalBinary(b []byte) error { + var res MapperList + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/message_counter.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/message_counter.go new file mode 100644 index 00000000000..03f07f79a9c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/message_counter.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// MessageCounter message_counter +// +// # Holds command counters +// +// swagger:model message_counter +type MessageCounter struct { + + // key + Key string `json:"key,omitempty"` + + // value + Value interface{} `json:"value,omitempty"` +} + +// Validate validates this message counter +func (m *MessageCounter) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *MessageCounter) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *MessageCounter) UnmarshalBinary(b []byte) error { + var res MessageCounter + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/progress_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/progress_info.go new file mode 100644 index 00000000000..bf4028c3139 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/progress_info.go @@ -0,0 +1,86 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ProgressInfo progress_info +// +// # File transfer progress +// +// swagger:model progress_info +type ProgressInfo struct { + + // The current bytes + CurrentBytes interface{} `json:"current_bytes,omitempty"` + + // direction + Direction Direction `json:"direction,omitempty"` + + // The file name + FileName string `json:"file_name,omitempty"` + + // The peer address + Peer string `json:"peer,omitempty"` + + // The session index + SessionIndex int32 `json:"session_index,omitempty"` + + // The total bytes + TotalBytes interface{} `json:"total_bytes,omitempty"` +} + +// Validate validates this progress info +func (m *ProgressInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateDirection(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *ProgressInfo) validateDirection(formats strfmt.Registry) error { + + if swag.IsZero(m.Direction) { // not required + return nil + } + + if err := m.Direction.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("direction") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ProgressInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ProgressInfo) UnmarshalBinary(b []byte) error { + var res ProgressInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/progress_info_mapper.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/progress_info_mapper.go new file mode 100644 index 00000000000..92a1d86c8a7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/progress_info_mapper.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ProgressInfoMapper progress_info_mapper +// +// # A mapping between file and its progress info +// +// swagger:model progress_info_mapper +type ProgressInfoMapper struct { + + // The key + Key string `json:"key,omitempty"` + + // value + Value *ProgressInfo `json:"value,omitempty"` +} + +// Validate validates this progress info mapper +func (m *ProgressInfoMapper) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *ProgressInfoMapper) validateValue(formats strfmt.Registry) error { + + if swag.IsZero(m.Value) { // not required + return nil + } + + if m.Value != nil { + if err := m.Value.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("value") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *ProgressInfoMapper) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ProgressInfoMapper) UnmarshalBinary(b []byte) error { + var res ProgressInfoMapper + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/rate_moving_average.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/rate_moving_average.go new file mode 100644 index 00000000000..93eeb6173dd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/rate_moving_average.go @@ -0,0 +1,51 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RateMovingAverage rate_moving_average +// +// # A meter metric which measures mean throughput and one, five, and fifteen-minute exponentially-weighted moving average throughputs +// +// swagger:model rate_moving_average +type RateMovingAverage struct { + + // Total number of events from startup + Count interface{} `json:"count,omitempty"` + + // The mean rate from startup + MeanRate interface{} `json:"mean_rate,omitempty"` + + // One, five and fifteen minute rates + Rates []interface{} `json:"rates"` +} + +// Validate validates this rate moving average +func (m *RateMovingAverage) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RateMovingAverage) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RateMovingAverage) UnmarshalBinary(b []byte) error { + var res RateMovingAverage + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/rate_moving_average_and_histogram.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/rate_moving_average_and_histogram.go new file mode 100644 index 00000000000..5b5a5270165 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/rate_moving_average_and_histogram.go @@ -0,0 +1,98 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RateMovingAverageAndHistogram rate_moving_average_and_histogram +// +// # A timer metric which aggregates timing durations and provides duration statistics, plus throughput statistics +// +// swagger:model rate_moving_average_and_histogram +type RateMovingAverageAndHistogram struct { + + // hist + Hist *Histogram `json:"hist,omitempty"` + + // meter + Meter *RateMovingAverage `json:"meter,omitempty"` +} + +// Validate validates this rate moving average and histogram +func (m *RateMovingAverageAndHistogram) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateHist(formats); err != nil { + res = append(res, err) + } + + if err := m.validateMeter(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *RateMovingAverageAndHistogram) validateHist(formats strfmt.Registry) error { + + if swag.IsZero(m.Hist) { // not required + return nil + } + + if m.Hist != nil { + if err := m.Hist.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("hist") + } + return err + } + } + + return nil +} + +func (m *RateMovingAverageAndHistogram) validateMeter(formats strfmt.Registry) error { + + if swag.IsZero(m.Meter) { // not required + return nil + } + + if m.Meter != nil { + if err := m.Meter.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("meter") + } + return err + } + } + + return nil +} + +// MarshalBinary interface implementation +func (m *RateMovingAverageAndHistogram) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RateMovingAverageAndHistogram) UnmarshalBinary(b []byte) error { + var res RateMovingAverageAndHistogram + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/repair_async_status_response.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/repair_async_status_response.go new file mode 100644 index 00000000000..32777466661 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/repair_async_status_response.go @@ -0,0 +1,66 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
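The composite models above all follow the same generated pattern: Validate recurses into non-zero sub-models and wraps any failures in a CompositeValidationError, while MarshalBinary/UnmarshalBinary delegate to swag's JSON helpers. A brief sketch of that behavior (field values are arbitrary illustrations):

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

func main() {
	m := &models.RateMovingAverageAndHistogram{
		Hist:  &models.Histogram{Count: 42, Max: 10, Min: 1},
		Meter: &models.RateMovingAverage{Count: 42, Rates: []interface{}{1.0, 0.5, 0.1}},
	}

	// Validate walks into the nested Histogram and RateMovingAverage.
	if err := m.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid:", err)
		return
	}

	// MarshalBinary/UnmarshalBinary are thin wrappers over swag's JSON helpers.
	b, _ := m.MarshalBinary()
	var back models.RateMovingAverageAndHistogram
	if err := back.UnmarshalBinary(b); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("round-tripped: %s\n", b)
}
```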
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// RepairAsyncStatusResponse repair_async_statusResponse +// +// swagger:model repair_async_statusResponse +type RepairAsyncStatusResponse string + +const ( + + // RepairAsyncStatusResponseRUNNING captures enum value "RUNNING" + RepairAsyncStatusResponseRUNNING RepairAsyncStatusResponse = "RUNNING" + + // RepairAsyncStatusResponseSUCCESSFUL captures enum value "SUCCESSFUL" + RepairAsyncStatusResponseSUCCESSFUL RepairAsyncStatusResponse = "SUCCESSFUL" + + // RepairAsyncStatusResponseFAILED captures enum value "FAILED" + RepairAsyncStatusResponseFAILED RepairAsyncStatusResponse = "FAILED" +) + +// for schema +var repairAsyncStatusResponseEnum []interface{} + +func init() { + var res []RepairAsyncStatusResponse + if err := json.Unmarshal([]byte(`["RUNNING","SUCCESSFUL","FAILED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + repairAsyncStatusResponseEnum = append(repairAsyncStatusResponseEnum, v) + } +} + +func (m RepairAsyncStatusResponse) validateRepairAsyncStatusResponseEnum(path, location string, value RepairAsyncStatusResponse) error { + if err := validate.EnumCase(path, location, value, repairAsyncStatusResponseEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this repair async status response +func (m RepairAsyncStatusResponse) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateRepairAsyncStatusResponseEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/row_merged.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/row_merged.go new file mode 100644 index 00000000000..f24f950ba80 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/row_merged.go @@ -0,0 +1,48 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// RowMerged row_merged +// +// # Row merged information +// +// swagger:model row_merged +type RowMerged struct { + + // The number of sstables + Key int32 `json:"key,omitempty"` + + // The number of rows compacted + Value interface{} `json:"value,omitempty"` +} + +// Validate validates this row merged +func (m *RowMerged) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *RowMerged) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *RowMerged) UnmarshalBinary(b []byte) error { + var res RowMerged + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/slow_query_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/slow_query_info.go new file mode 100644 index 00000000000..1b0d9ffb314 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/slow_query_info.go @@ -0,0 +1,51 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// SlowQueryInfo slow_query_info +// +// # Slow query triggering information +// +// swagger:model slow_query_info +type SlowQueryInfo struct { + + // Is slow query logging enabled or disabled + Enable bool `json:"enable,omitempty"` + + // The slow query logging threshold in microseconds. Queries that take longer will be logged + Threshold interface{} `json:"threshold,omitempty"` + + // The slow query TTL in seconds + TTL interface{} `json:"ttl,omitempty"` +} + +// Validate validates this slow query info
func (m *SlowQueryInfo) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *SlowQueryInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *SlowQueryInfo) UnmarshalBinary(b []byte) error { + var res SlowQueryInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/snapshot.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/snapshot.go new file mode 100644 index 00000000000..f3cdcc1a9fe --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/snapshot.go @@ -0,0 +1,54 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Snapshot snapshot +// +// # Snapshot detail +// +// swagger:model snapshot +type Snapshot struct { + + // The column family + Cf string `json:"cf,omitempty"` + + // The keyspace of the snapshot + Ks string `json:"ks,omitempty"` + + // The live snapshot size + Live interface{} `json:"live,omitempty"` + + // The total snapshot size + Total interface{} `json:"total,omitempty"` +} + +// Validate validates this snapshot +func (m *Snapshot) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Snapshot) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Snapshot) UnmarshalBinary(b []byte) error { + var res Snapshot + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/snapshots.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/snapshots.go new file mode 100644 index 00000000000..b2964b65ef6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/snapshots.go @@ -0,0 +1,85 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Snapshots snapshots +// +// # List of Snapshot detail +// +// swagger:model snapshots +type Snapshots struct { + + // The snapshot key + Key string `json:"key,omitempty"` + + // The list of snapshot details + Value []*Snapshot `json:"value"` +} + +// Validate validates this snapshots +func (m *Snapshots) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateValue(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *Snapshots) validateValue(formats strfmt.Registry) error { + + if swag.IsZero(m.Value) { // not required + return nil + } + + for i := 0; i < len(m.Value); i++ { + if swag.IsZero(m.Value[i]) { // not required + continue + } + + if m.Value[i] != nil { + if err := m.Value[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("value" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *Snapshots) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Snapshots) UnmarshalBinary(b []byte) error { + var res Snapshots + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/state.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/state.go new file mode 100644 index 00000000000..15e50640a53 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/state.go @@ -0,0 +1,77 @@ +// Code generated by go-swagger; DO NOT EDIT.
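Snapshots is effectively one entry of a key-to-details map, and its Validate walks each Value element with an index-qualified error name. A short decode-and-validate sketch; the vendored import path is taken from this diff and the JSON payload is illustrative:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

func main() {
	raw := []byte(`{"key":"snap-1","value":[{"ks":"keyspace1","cf":"standard1","live":1024,"total":4096}]}`)

	var s models.Snapshots
	if err := s.UnmarshalBinary(raw); err != nil { // swag.ReadJSON under the hood
		fmt.Println("decode failed:", err)
		return
	}
	if err := s.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid snapshots entry:", err)
		return
	}
	fmt.Printf("snapshot %s covers %d table(s)\n", s.Key, len(s.Value))
}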
+ +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// State State +// +// # Current session state +// +// swagger:model State +type State string + +const ( + + // StateINITIALIZED captures enum value "INITIALIZED" + StateINITIALIZED State = "INITIALIZED" + + // StatePREPARING captures enum value "PREPARING" + StatePREPARING State = "PREPARING" + + // StateSTREAMING captures enum value "STREAMING" + StateSTREAMING State = "STREAMING" + + // StateWAITCOMPLETE captures enum value "WAIT_COMPLETE" + StateWAITCOMPLETE State = "WAIT_COMPLETE" + + // StateCOMPLETE captures enum value "COMPLETE" + StateCOMPLETE State = "COMPLETE" + + // StateFAILED captures enum value "FAILED" + StateFAILED State = "FAILED" +) + +// for schema +var stateEnum []interface{} + +func init() { + var res []State + if err := json.Unmarshal([]byte(`["INITIALIZED","PREPARING","STREAMING","WAIT_COMPLETE","COMPLETE","FAILED"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + stateEnum = append(stateEnum, v) + } +} + +func (m State) validateStateEnum(path, location string, value State) error { + if err := validate.EnumCase(path, location, value, stateEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this state +func (m State) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateStateEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_info.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_info.go new file mode 100644 index 00000000000..11000e7d414 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_info.go @@ -0,0 +1,210 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StreamInfo stream_info +// +// # Stream session info +// +// swagger:model stream_info +type StreamInfo struct { + + // connecting + Connecting string `json:"connecting,omitempty"` + + // The peer + Peer string `json:"peer,omitempty"` + + // Receiving files + ReceivingFiles []*ProgressInfoMapper `json:"receiving_files"` + + // Receiving summaries + ReceivingSummaries []*StreamSummary `json:"receiving_summaries"` + + // Sending files + SendingFiles []*ProgressInfoMapper `json:"sending_files"` + + // Sending summaries + SendingSummaries []*StreamSummary `json:"sending_summaries"` + + // The session index + SessionIndex int32 `json:"session_index,omitempty"` + + // state + State State `json:"state,omitempty"` +} + +// Validate validates this stream info +func (m *StreamInfo) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateReceivingFiles(formats); err != nil { + res = append(res, err) + } + + if err := m.validateReceivingSummaries(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSendingFiles(formats); err != nil { + res = append(res, err) + } + + if err := m.validateSendingSummaries(formats); err != nil { + res = append(res, err) + } + + if err := m.validateState(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StreamInfo) validateReceivingFiles(formats strfmt.Registry) error { + + if swag.IsZero(m.ReceivingFiles) { // not required + return nil + } + + for i := 0; i < len(m.ReceivingFiles); i++ { + if swag.IsZero(m.ReceivingFiles[i]) { // not required + continue + } + + if m.ReceivingFiles[i] != nil { + if err := m.ReceivingFiles[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("receiving_files" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *StreamInfo) validateReceivingSummaries(formats strfmt.Registry) error { + + if swag.IsZero(m.ReceivingSummaries) { // not required + return nil + } + + for i := 0; i < len(m.ReceivingSummaries); i++ { + if swag.IsZero(m.ReceivingSummaries[i]) { // not required + continue + } + + if m.ReceivingSummaries[i] != nil { + if err := m.ReceivingSummaries[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("receiving_summaries" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *StreamInfo) validateSendingFiles(formats strfmt.Registry) error { + + if swag.IsZero(m.SendingFiles) { // not required + return nil + } + + for i := 0; i < len(m.SendingFiles); i++ { + if swag.IsZero(m.SendingFiles[i]) { // not required + continue + } + + if m.SendingFiles[i] != nil { + if err := m.SendingFiles[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("sending_files" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *StreamInfo) validateSendingSummaries(formats strfmt.Registry) error { + + if swag.IsZero(m.SendingSummaries) { // not required + return nil + } + + for i := 0; i < len(m.SendingSummaries); i++ { + if swag.IsZero(m.SendingSummaries[i]) { // not required + continue + } + + if m.SendingSummaries[i] != nil { + if err := m.SendingSummaries[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("sending_summaries" + "." + strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +func (m *StreamInfo) validateState(formats strfmt.Registry) error { + + if swag.IsZero(m.State) { // not required + return nil + } + + if err := m.State.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("state") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *StreamInfo) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StreamInfo) UnmarshalBinary(b []byte) error { + var res StreamInfo + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_state.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_state.go new file mode 100644 index 00000000000..b9ace137ba5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_state.go @@ -0,0 +1,88 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StreamState stream_state +// +// # Current snapshot of streaming progress +// +// swagger:model stream_state +type StreamState struct { + + // The stream description + Description string `json:"description,omitempty"` + + // Plan UUID + PlanID string `json:"plan_id,omitempty"` + + // The sessions info + Sessions []*StreamInfo `json:"sessions"` +} + +// Validate validates this stream state +func (m *StreamState) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateSessions(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *StreamState) validateSessions(formats strfmt.Registry) error { + + if swag.IsZero(m.Sessions) { // not required + return nil + } + + for i := 0; i < len(m.Sessions); i++ { + if swag.IsZero(m.Sessions[i]) { // not required + continue + } + + if m.Sessions[i] != nil { + if err := m.Sessions[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("sessions" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *StreamState) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StreamState) UnmarshalBinary(b []byte) error { + var res StreamState + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_summary.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_summary.go new file mode 100644 index 00000000000..af9165a5768 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/stream_summary.go @@ -0,0 +1,51 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// StreamSummary stream_summary +// +// # Stream summary info +// +// swagger:model stream_summary +type StreamSummary struct { + + // The column family ID + CfID string `json:"cf_id,omitempty"` + + // The number of files to transfer; can be 0 if a streaming request has nothing to transfer + Files int32 `json:"files,omitempty"` + + // total size + TotalSize interface{} `json:"total_size,omitempty"` +} + +// Validate validates this stream summary +func (m *StreamSummary) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *StreamSummary) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *StreamSummary) UnmarshalBinary(b []byte) error { + var res StreamSummary + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/summary.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/summary.go new file mode 100644 index 00000000000..9dddef8016d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/summary.go @@ -0,0 +1,63 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool.
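StreamState nests StreamInfo sessions, which in turn carry the State enum, so a single Validate call covers the whole tree. A minimal sketch under the same import-path assumption, with a made-up payload:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

func main() {
	raw := []byte(`{"plan_id":"0e9c2b","description":"Bootstrap","sessions":[{"peer":"10.0.0.2","state":"STREAMING","session_index":0}]}`)

	var st models.StreamState
	if err := st.UnmarshalBinary(raw); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// Validate recurses into sessions[i] and, through it, the State enum;
	// an unknown state string would be reported as sessions.0.state.
	if err := st.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid stream state:", err)
		return
	}
	fmt.Println("sessions:", len(st.Sessions))
}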
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// Summary summary +// +// # A compaction summary object +// +// swagger:model summary +type Summary struct { + + // The column family name + Cf string `json:"cf,omitempty"` + + // The number of units completed + Completed interface{} `json:"completed,omitempty"` + + // The UUID + ID string `json:"id,omitempty"` + + // The keyspace name + Ks string `json:"ks,omitempty"` + + // The task compaction type + TaskType string `json:"task_type,omitempty"` + + // The total number of units + Total interface{} `json:"total,omitempty"` + + // The units being used + Unit string `json:"unit,omitempty"` +} + +// Validate validates this summary +func (m *Summary) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *Summary) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *Summary) UnmarshalBinary(b []byte) error { + var res Summary + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/token_range.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/token_range.go new file mode 100644 index 00000000000..3b05b32d472 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/token_range.go @@ -0,0 +1,94 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "strconv" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// TokenRange token_range +// +// # Endpoint range information +// +// swagger:model token_range +type TokenRange struct { + + // The range end token + EndToken string `json:"end_token,omitempty"` + + // The endpoint details + EndpointDetails []*EndpointDetail `json:"endpoint_details"` + + // The endpoints + Endpoints []string `json:"endpoints"` + + // The rpc endpoints + RPCEndpoints []string `json:"rpc_endpoints"` + + // The range start token + StartToken string `json:"start_token,omitempty"` +} + +// Validate validates this token range +func (m *TokenRange) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateEndpointDetails(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} + +func (m *TokenRange) validateEndpointDetails(formats strfmt.Registry) error { + + if swag.IsZero(m.EndpointDetails) { // not required + return nil + } + + for i := 0; i < len(m.EndpointDetails); i++ { + if swag.IsZero(m.EndpointDetails[i]) { // not required + continue + } + + if m.EndpointDetails[i] != nil { + if err := m.EndpointDetails[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("endpoint_details" + "."
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + +// MarshalBinary interface implementation +func (m *TokenRange) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TokenRange) UnmarshalBinary(b []byte) error { + var res TokenRange + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/type.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/type.go new file mode 100644 index 00000000000..14acdb8d31d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/type.go @@ -0,0 +1,66 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// Type type +// +// swagger:model type +type Type string + +const ( + + // TypeAll captures enum value "all" + TypeAll Type = "all" + + // TypeUser captures enum value "user" + TypeUser Type = "user" + + // TypeNonLocalStrategy captures enum value "non_local_strategy" + TypeNonLocalStrategy Type = "non_local_strategy" +) + +// for schema +var typeEnum []interface{} + +func init() { + var res []Type + if err := json.Unmarshal([]byte(`["all","user","non_local_strategy"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + typeEnum = append(typeEnum, v) + } +} + +func (m Type) validateTypeEnum(path, location string, value Type) error { + if err := validate.EnumCase(path, location, value, typeEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this type +func (m Type) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateTypeEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/type_instance_id.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/type_instance_id.go new file mode 100644 index 00000000000..dd00aa469d4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/type_instance_id.go @@ -0,0 +1,54 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
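Because Type's Validate delegates straight to validate.EnumCase, anything outside the three generated values is rejected with a descriptive error. A quick sketch under the same assumed import path:

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models"
)

func main() {
	// "user" is a generated constant; "bogus" is outside the enum and fails.
	for _, t := range []models.Type{models.TypeUser, models.Type("bogus")} {
		if err := t.Validate(strfmt.Default); err != nil {
			fmt.Printf("%q rejected: %v\n", string(t), err)
			continue
		}
		fmt.Printf("%q accepted\n", string(t))
	}
}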
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// TypeInstanceID type_instance_id +// +// # A type instance ID +// +// swagger:model type_instance_id +type TypeInstanceID struct { + + // The plugin ID + Plugin string `json:"plugin,omitempty"` + + // The plugin instance + PluginInstance string `json:"plugin_instance,omitempty"` + + // The plugin type + Type string `json:"type,omitempty"` + + // The plugin type instance + TypeInstance string `json:"type_instance,omitempty"` +} + +// Validate validates this type instance id +func (m *TypeInstanceID) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *TypeInstanceID) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *TypeInstanceID) UnmarshalBinary(b []byte) error { + var res TypeInstanceID + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/verb.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/verb.go new file mode 100644 index 00000000000..83fe15ea2c5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/verb.go @@ -0,0 +1,120 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "encoding/json" + + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/validate" +) + +// Verb Verb +// +// swagger:model Verb +type Verb string + +const ( + + // VerbCLIENTID captures enum value "CLIENT_ID" + VerbCLIENTID Verb = "CLIENT_ID" + + // VerbMUTATION captures enum value "MUTATION" + VerbMUTATION Verb = "MUTATION" + + // VerbMUTATIONDONE captures enum value "MUTATION_DONE" + VerbMUTATIONDONE Verb = "MUTATION_DONE" + + // VerbREADDATA captures enum value "READ_DATA" + VerbREADDATA Verb = "READ_DATA" + + // VerbREADMUTATIONDATA captures enum value "READ_MUTATION_DATA" + VerbREADMUTATIONDATA Verb = "READ_MUTATION_DATA" + + // VerbREADDIGEST captures enum value "READ_DIGEST" + VerbREADDIGEST Verb = "READ_DIGEST" + + // VerbGOSSIPECHO captures enum value "GOSSIP_ECHO" + VerbGOSSIPECHO Verb = "GOSSIP_ECHO" + + // VerbGOSSIPDIGESTSYN captures enum value "GOSSIP_DIGEST_SYN" + VerbGOSSIPDIGESTSYN Verb = "GOSSIP_DIGEST_SYN" + + // VerbGOSSIPDIGESTACK2 captures enum value "GOSSIP_DIGEST_ACK2" + VerbGOSSIPDIGESTACK2 Verb = "GOSSIP_DIGEST_ACK2" + + // VerbGOSSIPSHUTDOWN captures enum value "GOSSIP_SHUTDOWN" + VerbGOSSIPSHUTDOWN Verb = "GOSSIP_SHUTDOWN" + + // VerbDEFINITIONSUPDATE captures enum value "DEFINITIONS_UPDATE" + VerbDEFINITIONSUPDATE Verb = "DEFINITIONS_UPDATE" + + // VerbTRUNCATE captures enum value "TRUNCATE" + VerbTRUNCATE Verb = "TRUNCATE" + + // VerbREPLICATIONFINISHED captures enum value "REPLICATION_FINISHED" + VerbREPLICATIONFINISHED Verb = "REPLICATION_FINISHED" + + // VerbMIGRATIONREQUEST captures enum value "MIGRATION_REQUEST" + VerbMIGRATIONREQUEST Verb = "MIGRATION_REQUEST" + + // VerbPREPAREMESSAGE captures enum value "PREPARE_MESSAGE" + VerbPREPAREMESSAGE Verb = "PREPARE_MESSAGE" + + // VerbPREPAREDONEMESSAGE captures enum value "PREPARE_DONE_MESSAGE" + 
VerbPREPAREDONEMESSAGE Verb = "PREPARE_DONE_MESSAGE" + + // VerbSTREAMMUTATION captures enum value "STREAM_MUTATION" + VerbSTREAMMUTATION Verb = "STREAM_MUTATION" + + // VerbSTREAMMUTATIONDONE captures enum value "STREAM_MUTATION_DONE" + VerbSTREAMMUTATIONDONE Verb = "STREAM_MUTATION_DONE" + + // VerbCOMPLETEMESSAGE captures enum value "COMPLETE_MESSAGE" + VerbCOMPLETEMESSAGE Verb = "COMPLETE_MESSAGE" + + // VerbREPAIRCHECKSUMRANGE captures enum value "REPAIR_CHECKSUM_RANGE" + VerbREPAIRCHECKSUMRANGE Verb = "REPAIR_CHECKSUM_RANGE" + + // VerbGETSCHEMAVERSION captures enum value "GET_SCHEMA_VERSION" + VerbGETSCHEMAVERSION Verb = "GET_SCHEMA_VERSION" +) + +// for schema +var verbEnum []interface{} + +func init() { + var res []Verb + if err := json.Unmarshal([]byte(`["CLIENT_ID","MUTATION","MUTATION_DONE","READ_DATA","READ_MUTATION_DATA","READ_DIGEST","GOSSIP_ECHO","GOSSIP_DIGEST_SYN","GOSSIP_DIGEST_ACK2","GOSSIP_SHUTDOWN","DEFINITIONS_UPDATE","TRUNCATE","REPLICATION_FINISHED","MIGRATION_REQUEST","PREPARE_MESSAGE","PREPARE_DONE_MESSAGE","STREAM_MUTATION","STREAM_MUTATION_DONE","COMPLETE_MESSAGE","REPAIR_CHECKSUM_RANGE","GET_SCHEMA_VERSION"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + verbEnum = append(verbEnum, v) + } +} + +func (m Verb) validateVerbEnum(path, location string, value Verb) error { + if err := validate.EnumCase(path, location, value, verbEnum, true); err != nil { + return err + } + return nil +} + +// Validate validates this verb +func (m Verb) Validate(formats strfmt.Registry) error { + var res []error + + // value enum + if err := m.validateVerbEnum("", "body", m); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/verb_counter.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/verb_counter.go new file mode 100644 index 00000000000..7da0ed9318f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/verb_counter.go @@ -0,0 +1,74 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/errors" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// VerbCounter verb_counter +// +// # Holds verb counters +// +// swagger:model verb_counter +type VerbCounter struct { + + // count + Count interface{} `json:"count,omitempty"` + + // verb + Verb Verb `json:"verb,omitempty"` +} + +// Validate validates this verb counter +func (m *VerbCounter) Validate(formats strfmt.Registry) error { + var res []error + + if err := m.validateVerb(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (m *VerbCounter) validateVerb(formats strfmt.Registry) error { + + if swag.IsZero(m.Verb) { // not required + return nil + } + + if err := m.Verb.Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("verb") + } + return err + } + + return nil +} + +// MarshalBinary interface implementation +func (m *VerbCounter) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VerbCounter) UnmarshalBinary(b []byte) error { + var res VerbCounter + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/version_value.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/version_value.go new file mode 100644 index 00000000000..4f54d28e0ba --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models/version_value.go @@ -0,0 +1,51 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// VersionValue version_value +// +// # Holds a version value for an application state +// +// swagger:model version_value +type VersionValue struct { + + // The application state enum index + ApplicationState int32 `json:"application_state,omitempty"` + + // The version value + Value string `json:"value,omitempty"` + + // The application state version + Version int32 `json:"version,omitempty"` +} + +// Validate validates this version value +func (m *VersionValue) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *VersionValue) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *VersionValue) UnmarshalBinary(b []byte) error { + var res VersionValue + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/config_client.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/config_client.go new file mode 100644 index 00000000000..213ef58ed6d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/config_client.go @@ -0,0 +1,6687 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" +) + +// New creates a new config API client. 
+func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientService { + return &Client{transport: transport, formats: formats} +} + +/* +Client for config API +*/ +type Client struct { + transport runtime.ClientTransport + formats strfmt.Registry +} + +// ClientService is the interface for Client methods +type ClientService interface { + FindConfigAbortOnLsaBadAlloc(params *FindConfigAbortOnLsaBadAllocParams) (*FindConfigAbortOnLsaBadAllocOK, error) + + FindConfigAlternatorAddress(params *FindConfigAlternatorAddressParams) (*FindConfigAlternatorAddressOK, error) + + FindConfigAlternatorEnforceAuthorization(params *FindConfigAlternatorEnforceAuthorizationParams) (*FindConfigAlternatorEnforceAuthorizationOK, error) + + FindConfigAlternatorHTTPSPort(params *FindConfigAlternatorHTTPSPortParams) (*FindConfigAlternatorHTTPSPortOK, error) + + FindConfigAlternatorPort(params *FindConfigAlternatorPortParams) (*FindConfigAlternatorPortOK, error) + + FindConfigAPIAddress(params *FindConfigAPIAddressParams) (*FindConfigAPIAddressOK, error) + + FindConfigAPIDocDir(params *FindConfigAPIDocDirParams) (*FindConfigAPIDocDirOK, error) + + FindConfigAPIPort(params *FindConfigAPIPortParams) (*FindConfigAPIPortOK, error) + + FindConfigAPIUIDir(params *FindConfigAPIUIDirParams) (*FindConfigAPIUIDirOK, error) + + FindConfigAuthenticator(params *FindConfigAuthenticatorParams) (*FindConfigAuthenticatorOK, error) + + FindConfigAuthorizer(params *FindConfigAuthorizerParams) (*FindConfigAuthorizerOK, error) + + FindConfigAutoAdjustFlushQuota(params *FindConfigAutoAdjustFlushQuotaParams) (*FindConfigAutoAdjustFlushQuotaOK, error) + + FindConfigAutoBootstrap(params *FindConfigAutoBootstrapParams) (*FindConfigAutoBootstrapOK, error) + + FindConfigAutoSnapshot(params *FindConfigAutoSnapshotParams) (*FindConfigAutoSnapshotOK, error) + + FindConfigBackgroundWriterSchedulingQuota(params *FindConfigBackgroundWriterSchedulingQuotaParams) (*FindConfigBackgroundWriterSchedulingQuotaOK, error) + + FindConfigBatchSizeFailThresholdInKb(params *FindConfigBatchSizeFailThresholdInKbParams) (*FindConfigBatchSizeFailThresholdInKbOK, error) + + FindConfigBatchSizeWarnThresholdInKb(params *FindConfigBatchSizeWarnThresholdInKbParams) (*FindConfigBatchSizeWarnThresholdInKbOK, error) + + FindConfigBatchlogReplayThrottleInKb(params *FindConfigBatchlogReplayThrottleInKbParams) (*FindConfigBatchlogReplayThrottleInKbOK, error) + + FindConfigBroadcastAddress(params *FindConfigBroadcastAddressParams) (*FindConfigBroadcastAddressOK, error) + + FindConfigBroadcastRPCAddress(params *FindConfigBroadcastRPCAddressParams) (*FindConfigBroadcastRPCAddressOK, error) + + FindConfigCacheHitRateReadBalancing(params *FindConfigCacheHitRateReadBalancingParams) (*FindConfigCacheHitRateReadBalancingOK, error) + + FindConfigCasContentionTimeoutInMs(params *FindConfigCasContentionTimeoutInMsParams) (*FindConfigCasContentionTimeoutInMsOK, error) + + FindConfigClientEncryptionOptions(params *FindConfigClientEncryptionOptionsParams) (*FindConfigClientEncryptionOptionsOK, error) + + FindConfigClusterName(params *FindConfigClusterNameParams) (*FindConfigClusterNameOK, error) + + FindConfigColumnIndexSizeInKb(params *FindConfigColumnIndexSizeInKbParams) (*FindConfigColumnIndexSizeInKbOK, error) + + FindConfigCommitFailurePolicy(params *FindConfigCommitFailurePolicyParams) (*FindConfigCommitFailurePolicyOK, error) + + FindConfigCommitlogDirectory(params *FindConfigCommitlogDirectoryParams) (*FindConfigCommitlogDirectoryOK, error) + + 
FindConfigCommitlogReuseSegments(params *FindConfigCommitlogReuseSegmentsParams) (*FindConfigCommitlogReuseSegmentsOK, error) + + FindConfigCommitlogSegmentSizeInMb(params *FindConfigCommitlogSegmentSizeInMbParams) (*FindConfigCommitlogSegmentSizeInMbOK, error) + + FindConfigCommitlogSync(params *FindConfigCommitlogSyncParams) (*FindConfigCommitlogSyncOK, error) + + FindConfigCommitlogSyncBatchWindowInMs(params *FindConfigCommitlogSyncBatchWindowInMsParams) (*FindConfigCommitlogSyncBatchWindowInMsOK, error) + + FindConfigCommitlogSyncPeriodInMs(params *FindConfigCommitlogSyncPeriodInMsParams) (*FindConfigCommitlogSyncPeriodInMsOK, error) + + FindConfigCommitlogTotalSpaceInMb(params *FindConfigCommitlogTotalSpaceInMbParams) (*FindConfigCommitlogTotalSpaceInMbOK, error) + + FindConfigCommitlogUseoDsync(params *FindConfigCommitlogUseoDsyncParams) (*FindConfigCommitlogUseODsyncOK, error) + + FindConfigCompactionEnforceMinThreshold(params *FindConfigCompactionEnforceMinThresholdParams) (*FindConfigCompactionEnforceMinThresholdOK, error) + + FindConfigCompactionLargeCellWarningThresholdMb(params *FindConfigCompactionLargeCellWarningThresholdMbParams) (*FindConfigCompactionLargeCellWarningThresholdMbOK, error) + + FindConfigCompactionLargePartitionWarningThresholdMb(params *FindConfigCompactionLargePartitionWarningThresholdMbParams) (*FindConfigCompactionLargePartitionWarningThresholdMbOK, error) + + FindConfigCompactionLargeRowWarningThresholdMb(params *FindConfigCompactionLargeRowWarningThresholdMbParams) (*FindConfigCompactionLargeRowWarningThresholdMbOK, error) + + FindConfigCompactionPreheatKeyCache(params *FindConfigCompactionPreheatKeyCacheParams) (*FindConfigCompactionPreheatKeyCacheOK, error) + + FindConfigCompactionRowsCountWarningThreshold(params *FindConfigCompactionRowsCountWarningThresholdParams) (*FindConfigCompactionRowsCountWarningThresholdOK, error) + + FindConfigCompactionStaticShares(params *FindConfigCompactionStaticSharesParams) (*FindConfigCompactionStaticSharesOK, error) + + FindConfigCompactionThroughputMbPerSec(params *FindConfigCompactionThroughputMbPerSecParams) (*FindConfigCompactionThroughputMbPerSecOK, error) + + FindConfigConcurrentCompactors(params *FindConfigConcurrentCompactorsParams) (*FindConfigConcurrentCompactorsOK, error) + + FindConfigConcurrentCounterWrites(params *FindConfigConcurrentCounterWritesParams) (*FindConfigConcurrentCounterWritesOK, error) + + FindConfigConcurrentReads(params *FindConfigConcurrentReadsParams) (*FindConfigConcurrentReadsOK, error) + + FindConfigConcurrentWrites(params *FindConfigConcurrentWritesParams) (*FindConfigConcurrentWritesOK, error) + + FindConfigConsistentClusterManagement(params *FindConfigConsistentClusterManagementParams) (*FindConfigConsistentClusterManagementOK, error) + + FindConfigConsistentRangemovement(params *FindConfigConsistentRangemovementParams) (*FindConfigConsistentRangemovementOK, error) + + FindConfigCounterCacheKeysToSave(params *FindConfigCounterCacheKeysToSaveParams) (*FindConfigCounterCacheKeysToSaveOK, error) + + FindConfigCounterCacheSavePeriod(params *FindConfigCounterCacheSavePeriodParams) (*FindConfigCounterCacheSavePeriodOK, error) + + FindConfigCounterCacheSizeInMb(params *FindConfigCounterCacheSizeInMbParams) (*FindConfigCounterCacheSizeInMbOK, error) + + FindConfigCounterWriteRequestTimeoutInMs(params *FindConfigCounterWriteRequestTimeoutInMsParams) (*FindConfigCounterWriteRequestTimeoutInMsOK, error) + + FindConfigCPUScheduler(params *FindConfigCPUSchedulerParams) 
(*FindConfigCPUSchedulerOK, error) + + FindConfigCrossNodeTimeout(params *FindConfigCrossNodeTimeoutParams) (*FindConfigCrossNodeTimeoutOK, error) + + FindConfigDataFileDirectories(params *FindConfigDataFileDirectoriesParams) (*FindConfigDataFileDirectoriesOK, error) + + FindConfigDefaultLogLevel(params *FindConfigDefaultLogLevelParams) (*FindConfigDefaultLogLevelOK, error) + + FindConfigDefragmentMemoryOnIdle(params *FindConfigDefragmentMemoryOnIdleParams) (*FindConfigDefragmentMemoryOnIdleOK, error) + + FindConfigDeveloperMode(params *FindConfigDeveloperModeParams) (*FindConfigDeveloperModeOK, error) + + FindConfigDiskFailurePolicy(params *FindConfigDiskFailurePolicyParams) (*FindConfigDiskFailurePolicyOK, error) + + FindConfigDynamicSnitchBadnessThreshold(params *FindConfigDynamicSnitchBadnessThresholdParams) (*FindConfigDynamicSnitchBadnessThresholdOK, error) + + FindConfigDynamicSnitchResetIntervalInMs(params *FindConfigDynamicSnitchResetIntervalInMsParams) (*FindConfigDynamicSnitchResetIntervalInMsOK, error) + + FindConfigDynamicSnitchUpdateIntervalInMs(params *FindConfigDynamicSnitchUpdateIntervalInMsParams) (*FindConfigDynamicSnitchUpdateIntervalInMsOK, error) + + FindConfigEnableCache(params *FindConfigEnableCacheParams) (*FindConfigEnableCacheOK, error) + + FindConfigEnableCommitlog(params *FindConfigEnableCommitlogParams) (*FindConfigEnableCommitlogOK, error) + + FindConfigEnableDangerousDirectImportOfCassandraCounters(params *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) (*FindConfigEnableDangerousDirectImportOfCassandraCountersOK, error) + + FindConfigEnableDeprecatedPartitioners(params *FindConfigEnableDeprecatedPartitionersParams) (*FindConfigEnableDeprecatedPartitionersOK, error) + + FindConfigEnableInMemoryDataStore(params *FindConfigEnableInMemoryDataStoreParams) (*FindConfigEnableInMemoryDataStoreOK, error) + + FindConfigEnableKeyspaceColumnFamilyMetrics(params *FindConfigEnableKeyspaceColumnFamilyMetricsParams) (*FindConfigEnableKeyspaceColumnFamilyMetricsOK, error) + + FindConfigEnableShardAwareDrivers(params *FindConfigEnableShardAwareDriversParams) (*FindConfigEnableShardAwareDriversOK, error) + + FindConfigEnableSstableDataIntegrityCheck(params *FindConfigEnableSstableDataIntegrityCheckParams) (*FindConfigEnableSstableDataIntegrityCheckOK, error) + + FindConfigEnableSstablesMcFormat(params *FindConfigEnableSstablesMcFormatParams) (*FindConfigEnableSstablesMcFormatOK, error) + + FindConfigEndpointSnitch(params *FindConfigEndpointSnitchParams) (*FindConfigEndpointSnitchOK, error) + + FindConfigExperimental(params *FindConfigExperimentalParams) (*FindConfigExperimentalOK, error) + + FindConfigFdInitialValueMs(params *FindConfigFdInitialValueMsParams) (*FindConfigFdInitialValueMsOK, error) + + FindConfigFdMaxIntervalMs(params *FindConfigFdMaxIntervalMsParams) (*FindConfigFdMaxIntervalMsOK, error) + + FindConfigFileCacheSizeInMb(params *FindConfigFileCacheSizeInMbParams) (*FindConfigFileCacheSizeInMbOK, error) + + FindConfigHintedHandoffEnabled(params *FindConfigHintedHandoffEnabledParams) (*FindConfigHintedHandoffEnabledOK, error) + + FindConfigHintedHandoffThrottleInKb(params *FindConfigHintedHandoffThrottleInKbParams) (*FindConfigHintedHandoffThrottleInKbOK, error) + + FindConfigHintsDirectory(params *FindConfigHintsDirectoryParams) (*FindConfigHintsDirectoryOK, error) + + FindConfigInMemoryCompactionLimitInMb(params *FindConfigInMemoryCompactionLimitInMbParams) (*FindConfigInMemoryCompactionLimitInMbOK, error) + + FindConfigIncrementalBackups(params 
*FindConfigIncrementalBackupsParams) (*FindConfigIncrementalBackupsOK, error) + + FindConfigIndexSummaryCapacityInMb(params *FindConfigIndexSummaryCapacityInMbParams) (*FindConfigIndexSummaryCapacityInMbOK, error) + + FindConfigIndexSummaryResizeIntervalInMinutes(params *FindConfigIndexSummaryResizeIntervalInMinutesParams) (*FindConfigIndexSummaryResizeIntervalInMinutesOK, error) + + FindConfigInitialToken(params *FindConfigInitialTokenParams) (*FindConfigInitialTokenOK, error) + + FindConfigInterDcStreamThroughputOutboundMegabitsPerSec(params *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) (*FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK, error) + + FindConfigInterDcTCPNodelay(params *FindConfigInterDcTCPNodelayParams) (*FindConfigInterDcTCPNodelayOK, error) + + FindConfigInternodeAuthenticator(params *FindConfigInternodeAuthenticatorParams) (*FindConfigInternodeAuthenticatorOK, error) + + FindConfigInternodeCompression(params *FindConfigInternodeCompressionParams) (*FindConfigInternodeCompressionOK, error) + + FindConfigInternodeRecvBuffSizeInBytes(params *FindConfigInternodeRecvBuffSizeInBytesParams) (*FindConfigInternodeRecvBuffSizeInBytesOK, error) + + FindConfigInternodeSendBuffSizeInBytes(params *FindConfigInternodeSendBuffSizeInBytesParams) (*FindConfigInternodeSendBuffSizeInBytesOK, error) + + FindConfigJoinRing(params *FindConfigJoinRingParams) (*FindConfigJoinRingOK, error) + + FindConfigKeyCacheKeysToSave(params *FindConfigKeyCacheKeysToSaveParams) (*FindConfigKeyCacheKeysToSaveOK, error) + + FindConfigKeyCacheSavePeriod(params *FindConfigKeyCacheSavePeriodParams) (*FindConfigKeyCacheSavePeriodOK, error) + + FindConfigKeyCacheSizeInMb(params *FindConfigKeyCacheSizeInMbParams) (*FindConfigKeyCacheSizeInMbOK, error) + + FindConfigLargeMemoryAllocationWarningThreshold(params *FindConfigLargeMemoryAllocationWarningThresholdParams) (*FindConfigLargeMemoryAllocationWarningThresholdOK, error) + + FindConfigListenAddress(params *FindConfigListenAddressParams) (*FindConfigListenAddressOK, error) + + FindConfigListenInterface(params *FindConfigListenInterfaceParams) (*FindConfigListenInterfaceOK, error) + + FindConfigListenOnBroadcastAddress(params *FindConfigListenOnBroadcastAddressParams) (*FindConfigListenOnBroadcastAddressOK, error) + + FindConfigLoadBalance(params *FindConfigLoadBalanceParams) (*FindConfigLoadBalanceOK, error) + + FindConfigLoadRingState(params *FindConfigLoadRingStateParams) (*FindConfigLoadRingStateOK, error) + + FindConfigLogToStdout(params *FindConfigLogToStdoutParams) (*FindConfigLogToStdoutOK, error) + + FindConfigLogToSyslog(params *FindConfigLogToSyslogParams) (*FindConfigLogToSyslogOK, error) + + FindConfigLoggerLogLevel(params *FindConfigLoggerLogLevelParams) (*FindConfigLoggerLogLevelOK, error) + + FindConfigLsaReclamationStep(params *FindConfigLsaReclamationStepParams) (*FindConfigLsaReclamationStepOK, error) + + FindConfigMaxHintWindowInMs(params *FindConfigMaxHintWindowInMsParams) (*FindConfigMaxHintWindowInMsOK, error) + + FindConfigMaxHintsDeliveryThreads(params *FindConfigMaxHintsDeliveryThreadsParams) (*FindConfigMaxHintsDeliveryThreadsOK, error) + + FindConfigMemoryAllocator(params *FindConfigMemoryAllocatorParams) (*FindConfigMemoryAllocatorOK, error) + + FindConfigMemtableAllocationType(params *FindConfigMemtableAllocationTypeParams) (*FindConfigMemtableAllocationTypeOK, error) + + FindConfigMemtableCleanupThreshold(params *FindConfigMemtableCleanupThresholdParams) (*FindConfigMemtableCleanupThresholdOK, error) + + 
FindConfigMemtableFlushQueueSize(params *FindConfigMemtableFlushQueueSizeParams) (*FindConfigMemtableFlushQueueSizeOK, error) + + FindConfigMemtableFlushStaticShares(params *FindConfigMemtableFlushStaticSharesParams) (*FindConfigMemtableFlushStaticSharesOK, error) + + FindConfigMemtableFlushWriters(params *FindConfigMemtableFlushWritersParams) (*FindConfigMemtableFlushWritersOK, error) + + FindConfigMemtableHeapSpaceInMb(params *FindConfigMemtableHeapSpaceInMbParams) (*FindConfigMemtableHeapSpaceInMbOK, error) + + FindConfigMemtableOffheapSpaceInMb(params *FindConfigMemtableOffheapSpaceInMbParams) (*FindConfigMemtableOffheapSpaceInMbOK, error) + + FindConfigMemtableTotalSpaceInMb(params *FindConfigMemtableTotalSpaceInMbParams) (*FindConfigMemtableTotalSpaceInMbOK, error) + + FindConfigMurmur3PartitionerIgnoreMsbBits(params *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) (*FindConfigMurmur3PartitionerIgnoreMsbBitsOK, error) + + FindConfigNativeTransportMaxFrameSizeInMb(params *FindConfigNativeTransportMaxFrameSizeInMbParams) (*FindConfigNativeTransportMaxFrameSizeInMbOK, error) + + FindConfigNativeTransportMaxThreads(params *FindConfigNativeTransportMaxThreadsParams) (*FindConfigNativeTransportMaxThreadsOK, error) + + FindConfigNativeTransportPort(params *FindConfigNativeTransportPortParams) (*FindConfigNativeTransportPortOK, error) + + FindConfigNativeTransportPortSsl(params *FindConfigNativeTransportPortSslParams) (*FindConfigNativeTransportPortSslOK, error) + + FindConfigNumTokens(params *FindConfigNumTokensParams) (*FindConfigNumTokensOK, error) + + FindConfigOverrideDecommission(params *FindConfigOverrideDecommissionParams) (*FindConfigOverrideDecommissionOK, error) + + FindConfigPartitioner(params *FindConfigPartitionerParams) (*FindConfigPartitionerOK, error) + + FindConfigPermissionsCacheMaxEntries(params *FindConfigPermissionsCacheMaxEntriesParams) (*FindConfigPermissionsCacheMaxEntriesOK, error) + + FindConfigPermissionsUpdateIntervalInMs(params *FindConfigPermissionsUpdateIntervalInMsParams) (*FindConfigPermissionsUpdateIntervalInMsOK, error) + + FindConfigPermissionsValidityInMs(params *FindConfigPermissionsValidityInMsParams) (*FindConfigPermissionsValidityInMsOK, error) + + FindConfigPhiConvictThreshold(params *FindConfigPhiConvictThresholdParams) (*FindConfigPhiConvictThresholdOK, error) + + FindConfigPreheatKernelPageCache(params *FindConfigPreheatKernelPageCacheParams) (*FindConfigPreheatKernelPageCacheOK, error) + + FindConfigPrometheusAddress(params *FindConfigPrometheusAddressParams) (*FindConfigPrometheusAddressOK, error) + + FindConfigPrometheusPort(params *FindConfigPrometheusPortParams) (*FindConfigPrometheusPortOK, error) + + FindConfigPrometheusPrefix(params *FindConfigPrometheusPrefixParams) (*FindConfigPrometheusPrefixOK, error) + + FindConfigRangeRequestTimeoutInMs(params *FindConfigRangeRequestTimeoutInMsParams) (*FindConfigRangeRequestTimeoutInMsOK, error) + + FindConfigReadRequestTimeoutInMs(params *FindConfigReadRequestTimeoutInMsParams) (*FindConfigReadRequestTimeoutInMsOK, error) + + FindConfigReduceCacheCapacityTo(params *FindConfigReduceCacheCapacityToParams) (*FindConfigReduceCacheCapacityToOK, error) + + FindConfigReduceCacheSizesAt(params *FindConfigReduceCacheSizesAtParams) (*FindConfigReduceCacheSizesAtOK, error) + + FindConfigReplaceAddress(params *FindConfigReplaceAddressParams) (*FindConfigReplaceAddressOK, error) + + FindConfigReplaceAddressFirstBoot(params *FindConfigReplaceAddressFirstBootParams) (*FindConfigReplaceAddressFirstBootOK, error) 
+ + FindConfigReplaceNode(params *FindConfigReplaceNodeParams) (*FindConfigReplaceNodeOK, error) + + FindConfigReplaceToken(params *FindConfigReplaceTokenParams) (*FindConfigReplaceTokenOK, error) + + FindConfigRequestScheduler(params *FindConfigRequestSchedulerParams) (*FindConfigRequestSchedulerOK, error) + + FindConfigRequestSchedulerID(params *FindConfigRequestSchedulerIDParams) (*FindConfigRequestSchedulerIDOK, error) + + FindConfigRequestSchedulerOptions(params *FindConfigRequestSchedulerOptionsParams) (*FindConfigRequestSchedulerOptionsOK, error) + + FindConfigRequestTimeoutInMs(params *FindConfigRequestTimeoutInMsParams) (*FindConfigRequestTimeoutInMsOK, error) + + FindConfigRingDelayMs(params *FindConfigRingDelayMsParams) (*FindConfigRingDelayMsOK, error) + + FindConfigRoleManager(params *FindConfigRoleManagerParams) (*FindConfigRoleManagerOK, error) + + FindConfigRowCacheKeysToSave(params *FindConfigRowCacheKeysToSaveParams) (*FindConfigRowCacheKeysToSaveOK, error) + + FindConfigRowCacheSavePeriod(params *FindConfigRowCacheSavePeriodParams) (*FindConfigRowCacheSavePeriodOK, error) + + FindConfigRowCacheSizeInMb(params *FindConfigRowCacheSizeInMbParams) (*FindConfigRowCacheSizeInMbOK, error) + + FindConfigRPCAddress(params *FindConfigRPCAddressParams) (*FindConfigRPCAddressOK, error) + + FindConfigRPCInterface(params *FindConfigRPCInterfaceParams) (*FindConfigRPCInterfaceOK, error) + + FindConfigRPCKeepalive(params *FindConfigRPCKeepaliveParams) (*FindConfigRPCKeepaliveOK, error) + + FindConfigRPCMaxThreads(params *FindConfigRPCMaxThreadsParams) (*FindConfigRPCMaxThreadsOK, error) + + FindConfigRPCMinThreads(params *FindConfigRPCMinThreadsParams) (*FindConfigRPCMinThreadsOK, error) + + FindConfigRPCPort(params *FindConfigRPCPortParams) (*FindConfigRPCPortOK, error) + + FindConfigRPCRecvBuffSizeInBytes(params *FindConfigRPCRecvBuffSizeInBytesParams) (*FindConfigRPCRecvBuffSizeInBytesOK, error) + + FindConfigRPCSendBuffSizeInBytes(params *FindConfigRPCSendBuffSizeInBytesParams) (*FindConfigRPCSendBuffSizeInBytesOK, error) + + FindConfigRPCServerType(params *FindConfigRPCServerTypeParams) (*FindConfigRPCServerTypeOK, error) + + FindConfigSavedCachesDirectory(params *FindConfigSavedCachesDirectoryParams) (*FindConfigSavedCachesDirectoryOK, error) + + FindConfigSeedProvider(params *FindConfigSeedProviderParams) (*FindConfigSeedProviderOK, error) + + FindConfigServerEncryptionOptions(params *FindConfigServerEncryptionOptionsParams) (*FindConfigServerEncryptionOptionsOK, error) + + FindConfigShadowRoundMs(params *FindConfigShadowRoundMsParams) (*FindConfigShadowRoundMsOK, error) + + FindConfigShutdownAnnounceInMs(params *FindConfigShutdownAnnounceInMsParams) (*FindConfigShutdownAnnounceInMsOK, error) + + FindConfigSkipWaitForGossipToSettle(params *FindConfigSkipWaitForGossipToSettleParams) (*FindConfigSkipWaitForGossipToSettleOK, error) + + FindConfigSnapshotBeforeCompaction(params *FindConfigSnapshotBeforeCompactionParams) (*FindConfigSnapshotBeforeCompactionOK, error) + + FindConfigSslStoragePort(params *FindConfigSslStoragePortParams) (*FindConfigSslStoragePortOK, error) + + FindConfigSstablePreemptiveOpenIntervalInMb(params *FindConfigSstablePreemptiveOpenIntervalInMbParams) (*FindConfigSstablePreemptiveOpenIntervalInMbOK, error) + + FindConfigSstableSummaryRatio(params *FindConfigSstableSummaryRatioParams) (*FindConfigSstableSummaryRatioOK, error) + + FindConfigStartNativeTransport(params *FindConfigStartNativeTransportParams) (*FindConfigStartNativeTransportOK, error) + + 
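All of these getters hang off the one Client type shown above, so wiring the generated config service takes only a transport and a formats registry. A hedged sketch, not part of the vendored code: the host, port, and base path are illustrative (Scylla's REST API conventionally listens on 10000), the NewFindConfigClusterNameParams constructor follows the per-operation convention used throughout this file, and the OK response is assumed to carry the usual go-swagger Payload field:

package main

import (
	"fmt"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	// Build a go-openapi runtime transport against a single node's REST API.
	t := httptransport.New("localhost:10000", "/", []string{"http"})
	c := config.New(t, strfmt.Default)

	res, err := c.FindConfigClusterName(config.NewFindConfigClusterNameParams())
	if err != nil {
		fmt.Println("config call failed:", err)
		return
	}
	fmt.Println("cluster name:", res.Payload)
}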
FindConfigStartRPC(params *FindConfigStartRPCParams) (*FindConfigStartRPCOK, error) + + FindConfigStoragePort(params *FindConfigStoragePortParams) (*FindConfigStoragePortOK, error) + + FindConfigStreamThroughputOutboundMegabitsPerSec(params *FindConfigStreamThroughputOutboundMegabitsPerSecParams) (*FindConfigStreamThroughputOutboundMegabitsPerSecOK, error) + + FindConfigStreamingSocketTimeoutInMs(params *FindConfigStreamingSocketTimeoutInMsParams) (*FindConfigStreamingSocketTimeoutInMsOK, error) + + FindConfigThriftFramedTransportSizeInMb(params *FindConfigThriftFramedTransportSizeInMbParams) (*FindConfigThriftFramedTransportSizeInMbOK, error) + + FindConfigThriftMaxMessageLengthInMb(params *FindConfigThriftMaxMessageLengthInMbParams) (*FindConfigThriftMaxMessageLengthInMbOK, error) + + FindConfigTombstoneFailureThreshold(params *FindConfigTombstoneFailureThresholdParams) (*FindConfigTombstoneFailureThresholdOK, error) + + FindConfigTombstoneWarnThreshold(params *FindConfigTombstoneWarnThresholdParams) (*FindConfigTombstoneWarnThresholdOK, error) + + FindConfigTrickleFsync(params *FindConfigTrickleFsyncParams) (*FindConfigTrickleFsyncOK, error) + + FindConfigTrickleFsyncIntervalInKb(params *FindConfigTrickleFsyncIntervalInKbParams) (*FindConfigTrickleFsyncIntervalInKbOK, error) + + FindConfigTruncateRequestTimeoutInMs(params *FindConfigTruncateRequestTimeoutInMsParams) (*FindConfigTruncateRequestTimeoutInMsOK, error) + + FindConfigUUIDSstableIdentifiersEnabled(params *FindConfigUUIDSstableIdentifiersEnabledParams) (*FindConfigUUIDSstableIdentifiersEnabledOK, error) + + FindConfigViewBuilding(params *FindConfigViewBuildingParams) (*FindConfigViewBuildingOK, error) + + FindConfigViewHintsDirectory(params *FindConfigViewHintsDirectoryParams) (*FindConfigViewHintsDirectoryOK, error) + + FindConfigVirtualDirtySoftLimit(params *FindConfigVirtualDirtySoftLimitParams) (*FindConfigVirtualDirtySoftLimitOK, error) + + FindConfigVolatileSystemKeyspaceForTesting(params *FindConfigVolatileSystemKeyspaceForTestingParams) (*FindConfigVolatileSystemKeyspaceForTestingOK, error) + + FindConfigWriteRequestTimeoutInMs(params *FindConfigWriteRequestTimeoutInMsParams) (*FindConfigWriteRequestTimeoutInMsOK, error) + + SetTransport(transport runtime.ClientTransport) +} + +/* +FindConfigAbortOnLsaBadAlloc Abort when allocation in LSA region fails +*/ +func (a *Client) FindConfigAbortOnLsaBadAlloc(params *FindConfigAbortOnLsaBadAllocParams) (*FindConfigAbortOnLsaBadAllocOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAbortOnLsaBadAllocParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_abort_on_lsa_bad_alloc", + Method: "GET", + PathPattern: "/config/abort_on_lsa_bad_alloc", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAbortOnLsaBadAllocReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAbortOnLsaBadAllocOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAbortOnLsaBadAllocDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAlternatorAddress The listen address for Alternator client 
connections +*/ +func (a *Client) FindConfigAlternatorAddress(params *FindConfigAlternatorAddressParams) (*FindConfigAlternatorAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAlternatorAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_alternator_address", + Method: "GET", + PathPattern: "/config/alternator_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAlternatorAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAlternatorAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAlternatorAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAlternatorEnforceAuthorization Whether alternator requires authorization +*/ +func (a *Client) FindConfigAlternatorEnforceAuthorization(params *FindConfigAlternatorEnforceAuthorizationParams) (*FindConfigAlternatorEnforceAuthorizationOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAlternatorEnforceAuthorizationParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_alternator_enforce_authorization", + Method: "GET", + PathPattern: "/config/alternator_enforce_authorization", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAlternatorEnforceAuthorizationReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAlternatorEnforceAuthorizationOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAlternatorEnforceAuthorizationDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAlternatorHTTPSPort The listen port for Alternator HTTPS client connections +*/ +func (a *Client) FindConfigAlternatorHTTPSPort(params *FindConfigAlternatorHTTPSPortParams) (*FindConfigAlternatorHTTPSPortOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAlternatorHTTPSPortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_alternator_https_port", + Method: "GET", + PathPattern: "/config/alternator_https_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAlternatorHTTPSPortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAlternatorHTTPSPortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAlternatorHTTPSPortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default 
response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAlternatorPort The listen port for Alternator client connections +*/ +func (a *Client) FindConfigAlternatorPort(params *FindConfigAlternatorPortParams) (*FindConfigAlternatorPortOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAlternatorPortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_alternator_port", + Method: "GET", + PathPattern: "/config/alternator_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAlternatorPortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAlternatorPortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAlternatorPortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAPIAddress Http Rest API address +*/ +func (a *Client) FindConfigAPIAddress(params *FindConfigAPIAddressParams) (*FindConfigAPIAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAPIAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_api_address", + Method: "GET", + PathPattern: "/config/api_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAPIAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAPIAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAPIAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAPIDocDir The API definition file directory +*/ +func (a *Client) FindConfigAPIDocDir(params *FindConfigAPIDocDirParams) (*FindConfigAPIDocDirOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAPIDocDirParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_api_doc_dir", + Method: "GET", + PathPattern: "/config/api_doc_dir", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAPIDocDirReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAPIDocDirOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAPIDocDirDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAPIPort Http Rest API port +*/ +func (a *Client) FindConfigAPIPort(params *FindConfigAPIPortParams) (*FindConfigAPIPortOK, error) { + // TODO: Validate 
the params before sending + if params == nil { + params = NewFindConfigAPIPortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_api_port", + Method: "GET", + PathPattern: "/config/api_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAPIPortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAPIPortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAPIPortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAPIUIDir The directory location of the API GUI +*/ +func (a *Client) FindConfigAPIUIDir(params *FindConfigAPIUIDirParams) (*FindConfigAPIUIDirOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAPIUIDirParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_api_ui_dir", + Method: "GET", + PathPattern: "/config/api_ui_dir", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAPIUIDirReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAPIUIDirOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAPIUIDirDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigAuthenticator The authentication backend, used to identify users. The available authenticators are: + + org.apache.cassandra.auth.AllowAllAuthenticator : Disables authentication; no checks are performed. + org.apache.cassandra.auth.PasswordAuthenticator : Authenticates users with user names and hashed passwords stored in the system_auth.credentials table. If you keep the default system_auth replication factor of 1 and the node holding the lone replica goes down, you will not be able to log into the cluster because the system_auth keyspace was not replicated.
+ +Related information: Internal authentication +*/ +func (a *Client) FindConfigAuthenticator(params *FindConfigAuthenticatorParams) (*FindConfigAuthenticatorOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAuthenticatorParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_authenticator", + Method: "GET", + PathPattern: "/config/authenticator", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAuthenticatorReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAuthenticatorOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAuthenticatorDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigAuthorizer The authorization backend. It implements IAuthorizer, which limits access and provides permissions. The available authorizers are: + + AllowAllAuthorizer : Disables authorization; allows any action to any user. + CassandraAuthorizer : Stores permissions in the system_auth.permissions table. If you keep the default system_auth replication factor of 1 and the node holding the lone replica goes down, you will not be able to log into the cluster because the system_auth keyspace was not replicated. + +Related information: Object permissions +*/ +func (a *Client) FindConfigAuthorizer(params *FindConfigAuthorizerParams) (*FindConfigAuthorizerOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAuthorizerParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_authorizer", + Method: "GET", + PathPattern: "/config/authorizer", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAuthorizerReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAuthorizerOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAuthorizerDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAutoAdjustFlushQuota If true, auto-adjust memtable shares for flush processes +*/ +func (a *Client) FindConfigAutoAdjustFlushQuota(params *FindConfigAutoAdjustFlushQuotaParams) (*FindConfigAutoAdjustFlushQuotaOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAutoAdjustFlushQuotaParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_auto_adjust_flush_quota", + Method: "GET", + PathPattern: "/config/auto_adjust_flush_quota", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAutoAdjustFlushQuotaReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok :=
result.(*FindConfigAutoAdjustFlushQuotaOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAutoAdjustFlushQuotaDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigAutoBootstrap This setting has been removed from default configuration. It makes new (non-seed) nodes automatically migrate the right data to themselves. Do not set this to false unless you really know what you are doing. + +Related information: Initializing a multiple node cluster (single data center) and Initializing a multiple node cluster (multiple data centers). +*/ +func (a *Client) FindConfigAutoBootstrap(params *FindConfigAutoBootstrapParams) (*FindConfigAutoBootstrapOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAutoBootstrapParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_auto_bootstrap", + Method: "GET", + PathPattern: "/config/auto_bootstrap", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAutoBootstrapReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAutoBootstrapOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAutoBootstrapDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigAutoSnapshot Enables or disables taking a snapshot of the data before keyspace truncation or table drops. To prevent data loss, using the default setting is strongly advised. If set to false, you will lose data on truncation or drop. +*/ +func (a *Client) FindConfigAutoSnapshot(params *FindConfigAutoSnapshotParams) (*FindConfigAutoSnapshotOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigAutoSnapshotParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_auto_snapshot", + Method: "GET", + PathPattern: "/config/auto_snapshot", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigAutoSnapshotReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigAutoSnapshotOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigAutoSnapshotDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigBackgroundWriterSchedulingQuota Maximum CPU usage ratio (between 0 and 1) for the compaction process. Not intended to be set in normal operations. Setting it to 1 or higher disables it; the recommended operational setting is 0.5.
+*/ +func (a *Client) FindConfigBackgroundWriterSchedulingQuota(params *FindConfigBackgroundWriterSchedulingQuotaParams) (*FindConfigBackgroundWriterSchedulingQuotaOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigBackgroundWriterSchedulingQuotaParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_background_writer_scheduling_quota", + Method: "GET", + PathPattern: "/config/background_writer_scheduling_quota", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigBackgroundWriterSchedulingQuotaReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigBackgroundWriterSchedulingQuotaOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigBackgroundWriterSchedulingQuotaDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigBatchSizeFailThresholdInKb Fail any multiple-partition batch exceeding this value. 50kb (10x warn threshold) by default. +*/ +func (a *Client) FindConfigBatchSizeFailThresholdInKb(params *FindConfigBatchSizeFailThresholdInKbParams) (*FindConfigBatchSizeFailThresholdInKbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigBatchSizeFailThresholdInKbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_batch_size_fail_threshold_in_kb", + Method: "GET", + PathPattern: "/config/batch_size_fail_threshold_in_kb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigBatchSizeFailThresholdInKbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigBatchSizeFailThresholdInKbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigBatchSizeFailThresholdInKbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigBatchSizeWarnThresholdInKb Log WARN on any batch size exceeding this value in kilobytes. Caution should be taken on increasing the size of this threshold as it can lead to node instability. 
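Every getter above follows the same shape: default the params if nil, hand them to transport.Submit, and type-assert the OK response. A minimal usage sketch (editor's illustration, not part of this diff) of wiring the client to a node's API follows; the import path of the generated package and the Payload field name follow common go-swagger conventions and are assumptions here:

package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	// Assumed import path for the generated package shown in this diff.
	config "github.com/scylladb/scylladb-swagger-go-client/scylladb/gen/v2/client/config"
)

func main() {
	// Point the go-openapi runtime at a node's HTTP API (placeholder host:port).
	transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
	client := config.New(transport, strfmt.Default) // standard go-swagger constructor, assumed

	ok, err := client.FindConfigClusterName(config.NewFindConfigClusterNameParams())
	if err != nil {
		log.Fatalf("reading cluster_name: %v", err)
	}
	fmt.Println("cluster_name:", ok.Payload) // Payload holds the decoded JSON body
}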
+*/ +func (a *Client) FindConfigBatchSizeWarnThresholdInKb(params *FindConfigBatchSizeWarnThresholdInKbParams) (*FindConfigBatchSizeWarnThresholdInKbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigBatchSizeWarnThresholdInKbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_batch_size_warn_threshold_in_kb", + Method: "GET", + PathPattern: "/config/batch_size_warn_threshold_in_kb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigBatchSizeWarnThresholdInKbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigBatchSizeWarnThresholdInKbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigBatchSizeWarnThresholdInKbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigBatchlogReplayThrottleInKb Total maximum throttle. Throttling is reduced proportionally to the number of nodes in the cluster. +*/ +func (a *Client) FindConfigBatchlogReplayThrottleInKb(params *FindConfigBatchlogReplayThrottleInKbParams) (*FindConfigBatchlogReplayThrottleInKbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigBatchlogReplayThrottleInKbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_batchlog_replay_throttle_in_kb", + Method: "GET", + PathPattern: "/config/batchlog_replay_throttle_in_kb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigBatchlogReplayThrottleInKbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigBatchlogReplayThrottleInKbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigBatchlogReplayThrottleInKbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigBroadcastAddress The IP address a node tells other nodes in the cluster to contact it by. It allows public and private address to be different. For example, use the broadcast_address parameter in topologies where not all nodes have access to other nodes by their private IP addresses. + +If your Scylla cluster is deployed across multiple Amazon EC2 regions and you use the EC2MultiRegionSnitch , set the broadcast_address to public IP address of the node and the listen_address to the private IP. 
+*/ +func (a *Client) FindConfigBroadcastAddress(params *FindConfigBroadcastAddressParams) (*FindConfigBroadcastAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigBroadcastAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_broadcast_address", + Method: "GET", + PathPattern: "/config/broadcast_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigBroadcastAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigBroadcastAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigBroadcastAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigBroadcastRPCAddress RPC address to broadcast to drivers and other Scylla nodes. This cannot be set to 0.0.0.0. If blank, it is set to the value of the rpc_address or rpc_interface. If rpc_address or rpc_interface is set to 0.0.0.0, this property must be set. +*/ +func (a *Client) FindConfigBroadcastRPCAddress(params *FindConfigBroadcastRPCAddressParams) (*FindConfigBroadcastRPCAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigBroadcastRPCAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_broadcast_rpc_address", + Method: "GET", + PathPattern: "/config/broadcast_rpc_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigBroadcastRPCAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigBroadcastRPCAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigBroadcastRPCAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCacheHitRateReadBalancing This boolean controls whether the replicas for a read query will be chosen based on cache hit ratio +*/ +func (a *Client) FindConfigCacheHitRateReadBalancing(params *FindConfigCacheHitRateReadBalancingParams) (*FindConfigCacheHitRateReadBalancingOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCacheHitRateReadBalancingParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_cache_hit_rate_read_balancing", + Method: "GET", + PathPattern: "/config/cache_hit_rate_read_balancing", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCacheHitRateReadBalancingReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCacheHitRateReadBalancingOK) + if ok { + return success, nil + } + // unexpected success response
+ unexpectedSuccess := result.(*FindConfigCacheHitRateReadBalancingDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCasContentionTimeoutInMs The time that the coordinator continues to retry a CAS (compare and set) operation that contends with other proposals for the same row. +*/ +func (a *Client) FindConfigCasContentionTimeoutInMs(params *FindConfigCasContentionTimeoutInMsParams) (*FindConfigCasContentionTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCasContentionTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_cas_contention_timeout_in_ms", + Method: "GET", + PathPattern: "/config/cas_contention_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCasContentionTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCasContentionTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCasContentionTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigClientEncryptionOptions Enable or disable client-to-node encryption. You must also generate keys and provide the appropriate key and certificate. No custom encryption options are currently enabled. The available options are: + + enabled : (Default: false ) To enable, set to true. + certificate: (Default: conf/scylla.crt) The location of a PEM-encoded x509 certificate used to identify and encrypt the client/server communication. + keyfile: (Default: conf/scylla.key) PEM Key file associated with certificate. + +truststore : (Default: ) Location of the truststore containing the trusted certificate for authenticating remote servers. + +The advanced settings are: + + priority_string : GnuTLS priority string controlling TLS algorithms used/allowed. + require_client_auth : (Default: false ) Enables or disables certificate authentication. 
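When client_encryption_options is enabled on the server, the same generated client can talk TLS by swapping the transport; the SetTransport method in the interface above exists for exactly that kind of swap. A hedged sketch using go-openapi's TLS helpers (the CA path and the config.New constructor are assumptions, reusing the hypothetical config package from the earlier example):

func buildTLSConfigClient(addr, caFile string) (*config.Client, error) {
	// TLSClient builds an *http.Client that trusts the given CA bundle.
	httpClient, err := httptransport.TLSClient(httptransport.TLSClientOptions{
		CA: caFile, // PEM file with the cluster CA (assumed path)
	})
	if err != nil {
		return nil, err
	}
	t := httptransport.NewWithClient(addr, "/", []string{"https"}, httpClient)
	return config.New(t, strfmt.Default), nil
}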
+ +Related information: Client-to-node encryption +*/ +func (a *Client) FindConfigClientEncryptionOptions(params *FindConfigClientEncryptionOptionsParams) (*FindConfigClientEncryptionOptionsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigClientEncryptionOptionsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_client_encryption_options", + Method: "GET", + PathPattern: "/config/client_encryption_options", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigClientEncryptionOptionsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigClientEncryptionOptionsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigClientEncryptionOptionsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigClusterName The name of the cluster; used to prevent machines in one logical cluster from joining another. All nodes participating in a cluster must have the same value. +*/ +func (a *Client) FindConfigClusterName(params *FindConfigClusterNameParams) (*FindConfigClusterNameOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigClusterNameParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_cluster_name", + Method: "GET", + PathPattern: "/config/cluster_name", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigClusterNameReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigClusterNameOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigClusterNameDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigColumnIndexSizeInKb Granularity of the index of rows within a partition. For huge rows, decrease this setting to improve seek time. If you use key cache, be careful not to make this setting too large because key cache will be overwhelmed. If you're unsure of the size of the rows, it's best to use the default setting. 
+*/ +func (a *Client) FindConfigColumnIndexSizeInKb(params *FindConfigColumnIndexSizeInKbParams) (*FindConfigColumnIndexSizeInKbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigColumnIndexSizeInKbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_column_index_size_in_kb", + Method: "GET", + PathPattern: "/config/column_index_size_in_kb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigColumnIndexSizeInKbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigColumnIndexSizeInKbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigColumnIndexSizeInKbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigCommitFailurePolicy Policy for commit disk failures: + +die Shut down gossip and Thrift and kill the JVM, so the node can be replaced. +stop Shut down gossip and Thrift, leaving the node effectively dead, but can be inspected using JMX. +stop_commit Shut down the commit log, letting writes collect but continuing to service reads (as in pre-2.0.5 Cassandra). +ignore Ignore fatal errors and let the batches fail. +*/ +func (a *Client) FindConfigCommitFailurePolicy(params *FindConfigCommitFailurePolicyParams) (*FindConfigCommitFailurePolicyOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitFailurePolicyParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commit_failure_policy", + Method: "GET", + PathPattern: "/config/commit_failure_policy", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitFailurePolicyReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitFailurePolicyOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitFailurePolicyDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCommitlogDirectory The directory where the commit log is stored. For optimal write performance, it is recommended the commit log be on a separate disk partition (ideally, a separate physical device) from the data file directories. 
+*/ +func (a *Client) FindConfigCommitlogDirectory(params *FindConfigCommitlogDirectoryParams) (*FindConfigCommitlogDirectoryOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogDirectoryParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_directory", + Method: "GET", + PathPattern: "/config/commitlog_directory", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogDirectoryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogDirectoryOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogDirectoryDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCommitlogReuseSegments Whether or not to re-use commitlog segments when finished instead of deleting them. Can improve commitlog latency on some file systems. +*/ +func (a *Client) FindConfigCommitlogReuseSegments(params *FindConfigCommitlogReuseSegmentsParams) (*FindConfigCommitlogReuseSegmentsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogReuseSegmentsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_reuse_segments", + Method: "GET", + PathPattern: "/config/commitlog_reuse_segments", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogReuseSegmentsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogReuseSegmentsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogReuseSegmentsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigCommitlogSegmentSizeInMb Sets the size of the individual commitlog file segments. A commitlog segment may be archived, deleted, or recycled after all its data has been flushed to SSTables. This amount of data can potentially include commitlog segments from every table in the system. The default size is usually suitable for most commitlog archiving, but if you want a finer granularity, 8 or 16 MB is reasonable. See Commit log archive configuration. 
+ +Related information: Commit log archive configuration +*/ +func (a *Client) FindConfigCommitlogSegmentSizeInMb(params *FindConfigCommitlogSegmentSizeInMbParams) (*FindConfigCommitlogSegmentSizeInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogSegmentSizeInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_segment_size_in_mb", + Method: "GET", + PathPattern: "/config/commitlog_segment_size_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogSegmentSizeInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogSegmentSizeInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogSegmentSizeInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigCommitlogSync The method that Scylla uses to acknowledge writes: + + periodic : Used with commitlog_sync_period_in_ms (Default: 10000 - 10 seconds ) to control how often the commit log is synchronized to disk. Periodic syncs are acknowledged immediately. + batch : Used with commitlog_sync_batch_window_in_ms (Default: disabled) to control how long Scylla waits for other writes before performing a sync. When using this method, writes are not acknowledged until fsynced to disk. + +Related information: Durability +*/ +func (a *Client) FindConfigCommitlogSync(params *FindConfigCommitlogSyncParams) (*FindConfigCommitlogSyncOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogSyncParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_sync", + Method: "GET", + PathPattern: "/config/commitlog_sync", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogSyncReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogSyncOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogSyncDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCommitlogSyncBatchWindowInMs Controls how long the system waits for other writes before performing a sync in "batch" mode.
+*/ +func (a *Client) FindConfigCommitlogSyncBatchWindowInMs(params *FindConfigCommitlogSyncBatchWindowInMsParams) (*FindConfigCommitlogSyncBatchWindowInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogSyncBatchWindowInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_sync_batch_window_in_ms", + Method: "GET", + PathPattern: "/config/commitlog_sync_batch_window_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogSyncBatchWindowInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogSyncBatchWindowInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogSyncBatchWindowInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCommitlogSyncPeriodInMs Controls how often the commit log is synchronized to disk in "periodic" mode. +*/ +func (a *Client) FindConfigCommitlogSyncPeriodInMs(params *FindConfigCommitlogSyncPeriodInMsParams) (*FindConfigCommitlogSyncPeriodInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogSyncPeriodInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_sync_period_in_ms", + Method: "GET", + PathPattern: "/config/commitlog_sync_period_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogSyncPeriodInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogSyncPeriodInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogSyncPeriodInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigCommitlogTotalSpaceInMb Total space used for commitlogs. If the used space goes above this value, Scylla rounds up to the next nearest segment multiple and flushes memtables to disk for the oldest commitlog segments, removing those log segments. This reduces the amount of data to replay on startup, and prevents infrequently-updated tables from indefinitely keeping commitlog segments. A small total commitlog space tends to cause more flush activity on less-active tables.
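Each params struct carries the Context and HTTPClient fields that feed transport.Submit above, so a single request can be bounded without touching the shared transport. A sketch, reusing the assumed config package from the earlier example:

import (
	"context"
	"net/http"
	"time"
)

func commitlogSyncWithTimeout(client *config.Client) (*config.FindConfigCommitlogSyncOK, error) {
	// Bound this one call to two seconds; the context is threaded
	// through params.Context into transport.Submit.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	params := config.NewFindConfigCommitlogSyncParams()
	params.Context = ctx
	params.HTTPClient = &http.Client{Timeout: 5 * time.Second} // optional per-call client

	return client.FindConfigCommitlogSync(params)
}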
+ +Related information: Configuring memtable throughput +*/ +func (a *Client) FindConfigCommitlogTotalSpaceInMb(params *FindConfigCommitlogTotalSpaceInMbParams) (*FindConfigCommitlogTotalSpaceInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogTotalSpaceInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_total_space_in_mb", + Method: "GET", + PathPattern: "/config/commitlog_total_space_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogTotalSpaceInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogTotalSpaceInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogTotalSpaceInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCommitlogUseoDsync Whether or not to use O_DSYNC mode for commitlog segments IO. Can improve commitlog latency on some file systems. +*/ +func (a *Client) FindConfigCommitlogUseoDsync(params *FindConfigCommitlogUseoDsyncParams) (*FindConfigCommitlogUseODsyncOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCommitlogUseoDsyncParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_commitlog_use_o_dsync", + Method: "GET", + PathPattern: "/config/commitlog_use_o_dsync", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCommitlogUseoDsyncReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCommitlogUseODsyncOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCommitlogUseoDsyncDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCompactionEnforceMinThreshold If set to true, enforce the min_threshold option for compactions strictly. 
If false (default), Scylla may decide to compact even if below min_threshold +*/ +func (a *Client) FindConfigCompactionEnforceMinThreshold(params *FindConfigCompactionEnforceMinThresholdParams) (*FindConfigCompactionEnforceMinThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionEnforceMinThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_enforce_min_threshold", + Method: "GET", + PathPattern: "/config/compaction_enforce_min_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCompactionEnforceMinThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionEnforceMinThresholdOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionEnforceMinThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCompactionLargeCellWarningThresholdMb Log a warning when writing cells larger than this value +*/ +func (a *Client) FindConfigCompactionLargeCellWarningThresholdMb(params *FindConfigCompactionLargeCellWarningThresholdMbParams) (*FindConfigCompactionLargeCellWarningThresholdMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionLargeCellWarningThresholdMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_large_cell_warning_threshold_mb", + Method: "GET", + PathPattern: "/config/compaction_large_cell_warning_threshold_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCompactionLargeCellWarningThresholdMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionLargeCellWarningThresholdMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionLargeCellWarningThresholdMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCompactionLargePartitionWarningThresholdMb Log a warning when writing partitions larger than this value +*/ +func (a *Client) FindConfigCompactionLargePartitionWarningThresholdMb(params *FindConfigCompactionLargePartitionWarningThresholdMbParams) (*FindConfigCompactionLargePartitionWarningThresholdMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionLargePartitionWarningThresholdMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_large_partition_warning_threshold_mb", + Method: "GET", + PathPattern: "/config/compaction_large_partition_warning_threshold_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: 
&FindConfigCompactionLargePartitionWarningThresholdMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionLargePartitionWarningThresholdMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionLargePartitionWarningThresholdMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCompactionLargeRowWarningThresholdMb Log a warning when writing rows larger than this value +*/ +func (a *Client) FindConfigCompactionLargeRowWarningThresholdMb(params *FindConfigCompactionLargeRowWarningThresholdMbParams) (*FindConfigCompactionLargeRowWarningThresholdMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionLargeRowWarningThresholdMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_large_row_warning_threshold_mb", + Method: "GET", + PathPattern: "/config/compaction_large_row_warning_threshold_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCompactionLargeRowWarningThresholdMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionLargeRowWarningThresholdMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionLargeRowWarningThresholdMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCompactionPreheatKeyCache When set to true , cached row keys are tracked during compaction, and re-cached to their new positions in the compacted SSTable. If you have extremely large key caches for tables, set the value to false ; see Global row and key caches properties. 
+*/ +func (a *Client) FindConfigCompactionPreheatKeyCache(params *FindConfigCompactionPreheatKeyCacheParams) (*FindConfigCompactionPreheatKeyCacheOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionPreheatKeyCacheParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_preheat_key_cache", + Method: "GET", + PathPattern: "/config/compaction_preheat_key_cache", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCompactionPreheatKeyCacheReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionPreheatKeyCacheOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionPreheatKeyCacheDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCompactionRowsCountWarningThreshold Log a warning when writing a number of rows larger than this value +*/ +func (a *Client) FindConfigCompactionRowsCountWarningThreshold(params *FindConfigCompactionRowsCountWarningThresholdParams) (*FindConfigCompactionRowsCountWarningThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionRowsCountWarningThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_rows_count_warning_threshold", + Method: "GET", + PathPattern: "/config/compaction_rows_count_warning_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCompactionRowsCountWarningThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionRowsCountWarningThresholdOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionRowsCountWarningThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCompactionStaticShares If set to higher than 0, ignore the controller's output and set the compaction shares statically. Do not set this unless you know what you are doing and suspect a problem in the controller. 
This option will be retired when the controller reaches more maturity. +*/ +func (a *Client) FindConfigCompactionStaticShares(params *FindConfigCompactionStaticSharesParams) (*FindConfigCompactionStaticSharesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionStaticSharesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_static_shares", + Method: "GET", + PathPattern: "/config/compaction_static_shares", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCompactionStaticSharesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionStaticSharesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionStaticSharesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigCompactionThroughputMbPerSec Throttles compaction to the specified total throughput across the entire system. The faster you insert data, the faster you need to compact in order to keep the SSTable count down. The recommended value is 16 to 32 times the rate of write throughput (in MB/second). Setting the value to 0 disables compaction throttling. + +Related information: Configuring compaction +*/ +func (a *Client) FindConfigCompactionThroughputMbPerSec(params *FindConfigCompactionThroughputMbPerSecParams) (*FindConfigCompactionThroughputMbPerSecOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCompactionThroughputMbPerSecParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_compaction_throughput_mb_per_sec", + Method: "GET", + PathPattern: "/config/compaction_throughput_mb_per_sec", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCompactionThroughputMbPerSecReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCompactionThroughputMbPerSecOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCompactionThroughputMbPerSecDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigConcurrentCompactors Sets the number of concurrent compaction processes allowed to run simultaneously on a node, not including validation compactions for anti-entropy repair. Simultaneous compactions help preserve read performance in a mixed read-write workload by mitigating the tendency of small SSTables to accumulate during a single long-running compaction. If compactions run too slowly or too fast, change compaction_throughput_mb_per_sec first.
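On failure these methods surface an error rather than the *...Default value: either the decoded error response or, for status codes the spec never declared, a *runtime.APIError (the same wrapper used above for the "unexpected success" branch). A hedged sketch of caller-side handling:

import (
	"errors"
	"fmt"

	"github.com/go-openapi/runtime"
)

func describeConfigError(err error) string {
	// runtime.APIError is go-openapi's wrapper for responses the spec
	// did not declare; Code carries the HTTP status.
	var apiErr *runtime.APIError
	if errors.As(err, &apiErr) {
		return fmt.Sprintf("operation %s failed with status %d", apiErr.OperationName, apiErr.Code)
	}
	// Otherwise this is typically the generated *...Default type, whose
	// Error() string includes the decoded payload (assumed behavior).
	return err.Error()
}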
+*/ +func (a *Client) FindConfigConcurrentCompactors(params *FindConfigConcurrentCompactorsParams) (*FindConfigConcurrentCompactorsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigConcurrentCompactorsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_concurrent_compactors", + Method: "GET", + PathPattern: "/config/concurrent_compactors", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigConcurrentCompactorsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigConcurrentCompactorsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigConcurrentCompactorsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigConcurrentCounterWrites Counter writes read the current values before incrementing and writing them back. The recommended value is (16 × number_of_drives) . +*/ +func (a *Client) FindConfigConcurrentCounterWrites(params *FindConfigConcurrentCounterWritesParams) (*FindConfigConcurrentCounterWritesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigConcurrentCounterWritesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_concurrent_counter_writes", + Method: "GET", + PathPattern: "/config/concurrent_counter_writes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigConcurrentCounterWritesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigConcurrentCounterWritesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigConcurrentCounterWritesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigConcurrentReads For workloads with more data than can fit in memory, the bottleneck is reads fetching data from disk. Setting to (16 × number_of_drives) allows operations to queue low enough in the stack so that the OS and drives can reorder them. 
+*/ +func (a *Client) FindConfigConcurrentReads(params *FindConfigConcurrentReadsParams) (*FindConfigConcurrentReadsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigConcurrentReadsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_concurrent_reads", + Method: "GET", + PathPattern: "/config/concurrent_reads", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigConcurrentReadsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigConcurrentReadsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigConcurrentReadsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigConcurrentWrites Writes in Cassandra are rarely I/O bound, so the ideal number of concurrent writes depends on the number of CPU cores in your system. The recommended value is (8 x number_of_cpu_cores). +*/ +func (a *Client) FindConfigConcurrentWrites(params *FindConfigConcurrentWritesParams) (*FindConfigConcurrentWritesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigConcurrentWritesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_concurrent_writes", + Method: "GET", + PathPattern: "/config/concurrent_writes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigConcurrentWritesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigConcurrentWritesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigConcurrentWritesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigConsistentClusterManagement Return true if node uses RAFT for cluster management and DDL. 
+*/ +func (a *Client) FindConfigConsistentClusterManagement(params *FindConfigConsistentClusterManagementParams) (*FindConfigConsistentClusterManagementOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigConsistentClusterManagementParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_consistent_cluster_management", + Method: "GET", + PathPattern: "/config/consistent_cluster_management", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigConsistentClusterManagementReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigConsistentClusterManagementOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigConsistentClusterManagementDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigConsistentRangemovement When set to true, range movements will be consistent. It means: 1) it will refuse to bootstrap a new node if other bootstrapping/leaving/moving nodes are detected. 2) data will be streamed to a new node only from the node which is no longer responsible for the token range. Same as -Dcassandra.consistent.rangemovement in Cassandra +*/ +func (a *Client) FindConfigConsistentRangemovement(params *FindConfigConsistentRangemovementParams) (*FindConfigConsistentRangemovementOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigConsistentRangemovementParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_consistent_rangemovement", + Method: "GET", + PathPattern: "/config/consistent_rangemovement", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigConsistentRangemovementReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigConsistentRangemovementOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigConsistentRangemovementDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCounterCacheKeysToSave Number of keys from the counter cache to save. When disabled, all keys are saved.
+*/ +func (a *Client) FindConfigCounterCacheKeysToSave(params *FindConfigCounterCacheKeysToSaveParams) (*FindConfigCounterCacheKeysToSaveOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCounterCacheKeysToSaveParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_counter_cache_keys_to_save", + Method: "GET", + PathPattern: "/config/counter_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCounterCacheKeysToSaveReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCounterCacheKeysToSaveOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCounterCacheKeysToSaveDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCounterCacheSavePeriod Duration after which Cassandra should save the counter cache (keys only). Caches are saved to saved_caches_directory. +*/ +func (a *Client) FindConfigCounterCacheSavePeriod(params *FindConfigCounterCacheSavePeriodParams) (*FindConfigCounterCacheSavePeriodOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCounterCacheSavePeriodParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_counter_cache_save_period", + Method: "GET", + PathPattern: "/config/counter_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCounterCacheSavePeriodReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCounterCacheSavePeriodOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCounterCacheSavePeriodDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCounterCacheSizeInMb When no value is specified, a minimum of 2.5% of the heap or 50MB is used. If you perform counter deletes and rely on low gc_grace_seconds, you should disable the counter cache.
To disable, set to 0 +*/ +func (a *Client) FindConfigCounterCacheSizeInMb(params *FindConfigCounterCacheSizeInMbParams) (*FindConfigCounterCacheSizeInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCounterCacheSizeInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_counter_cache_size_in_mb", + Method: "GET", + PathPattern: "/config/counter_cache_size_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCounterCacheSizeInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCounterCacheSizeInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCounterCacheSizeInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCounterWriteRequestTimeoutInMs The time that the coordinator waits for counter writes to complete. +*/ +func (a *Client) FindConfigCounterWriteRequestTimeoutInMs(params *FindConfigCounterWriteRequestTimeoutInMsParams) (*FindConfigCounterWriteRequestTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCounterWriteRequestTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_counter_write_request_timeout_in_ms", + Method: "GET", + PathPattern: "/config/counter_write_request_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCounterWriteRequestTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCounterWriteRequestTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCounterWriteRequestTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigCPUScheduler Enable cpu scheduling +*/ +func (a *Client) FindConfigCPUScheduler(params *FindConfigCPUSchedulerParams) (*FindConfigCPUSchedulerOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCPUSchedulerParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_cpu_scheduler", + Method: "GET", + PathPattern: "/config/cpu_scheduler", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCPUSchedulerReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCPUSchedulerOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCPUSchedulerDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", 
unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigCrossNodeTimeout Enable or disable operation timeout information exchange between nodes (to accurately measure request timeouts). If disabled Cassandra assumes the request was forwarded to the replica instantly by the coordinator. + +CAUTION: +Before enabling this property make sure NTP (network time protocol) is installed and the times are synchronized between the nodes. +*/ +func (a *Client) FindConfigCrossNodeTimeout(params *FindConfigCrossNodeTimeoutParams) (*FindConfigCrossNodeTimeoutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigCrossNodeTimeoutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_cross_node_timeout", + Method: "GET", + PathPattern: "/config/cross_node_timeout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigCrossNodeTimeoutReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigCrossNodeTimeoutOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigCrossNodeTimeoutDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigDataFileDirectories The directory location where table data (SSTables) is stored +*/ +func (a *Client) FindConfigDataFileDirectories(params *FindConfigDataFileDirectoriesParams) (*FindConfigDataFileDirectoriesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDataFileDirectoriesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_data_file_directories", + Method: "GET", + PathPattern: "/config/data_file_directories", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDataFileDirectoriesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigDataFileDirectoriesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDataFileDirectoriesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigDefaultLogLevel Default Log level +*/ +func (a *Client) FindConfigDefaultLogLevel(params *FindConfigDefaultLogLevelParams) (*FindConfigDefaultLogLevelOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDefaultLogLevelParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_default_log_level", + Method: "GET", + PathPattern: "/config/default_log_level", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDefaultLogLevelReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := 
result.(*FindConfigDefaultLogLevelOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDefaultLogLevelDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigDefragmentMemoryOnIdle When set to true, will defragment memory when the cpu is idle. This reduces the amount of work Scylla performs when processing client requests. +*/ +func (a *Client) FindConfigDefragmentMemoryOnIdle(params *FindConfigDefragmentMemoryOnIdleParams) (*FindConfigDefragmentMemoryOnIdleOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDefragmentMemoryOnIdleParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_defragment_memory_on_idle", + Method: "GET", + PathPattern: "/config/defragment_memory_on_idle", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDefragmentMemoryOnIdleReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigDefragmentMemoryOnIdleOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDefragmentMemoryOnIdleDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigDeveloperMode Relax environment checks. Setting to true can reduce performance and reliability significantly. +*/ +func (a *Client) FindConfigDeveloperMode(params *FindConfigDeveloperModeParams) (*FindConfigDeveloperModeOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDeveloperModeParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_developer_mode", + Method: "GET", + PathPattern: "/config/developer_mode", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDeveloperModeReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigDeveloperModeOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDeveloperModeDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigDiskFailurePolicy Sets how Scylla responds to disk failure. Recommended settings are stop or best_effort. + + die Shut down gossip and Thrift and kill the JVM for any file system errors or single SSTable errors, so the node can be replaced. + stop_paranoid Shut down gossip and Thrift even for single SSTable errors. + stop Shut down gossip and Thrift, leaving the node effectively dead, but available for inspection using JMX. + best_effort Stop using the failed disk and respond to requests based on the remaining available SSTables. This means you will see obsolete data at consistency level of ONE.
+ ignore Ignores fatal errors and lets the requests fail; all file system errors are logged but otherwise ignored. Scylla acts as in versions prior to Cassandra 1.2. + +Related information: Handling Disk Failures In Cassandra 1.2 blog and Recovering from a single disk failure using JBOD. +*/ +func (a *Client) FindConfigDiskFailurePolicy(params *FindConfigDiskFailurePolicyParams) (*FindConfigDiskFailurePolicyOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDiskFailurePolicyParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_disk_failure_policy", + Method: "GET", + PathPattern: "/config/disk_failure_policy", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDiskFailurePolicyReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigDiskFailurePolicyOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDiskFailurePolicyDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigDynamicSnitchBadnessThreshold Sets the performance threshold for dynamically routing requests away from a poorly performing node. A value of 0.2 means Cassandra continues to prefer the static snitch values until the node response time is 20% worse than the best performing node. Until the threshold is reached, incoming client requests are statically routed to the closest replica (as determined by the snitch). Having requests consistently routed to a given replica can help keep a working set of data hot when read repair is less than 1. +*/ +func (a *Client) FindConfigDynamicSnitchBadnessThreshold(params *FindConfigDynamicSnitchBadnessThresholdParams) (*FindConfigDynamicSnitchBadnessThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDynamicSnitchBadnessThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_dynamic_snitch_badness_threshold", + Method: "GET", + PathPattern: "/config/dynamic_snitch_badness_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDynamicSnitchBadnessThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigDynamicSnitchBadnessThresholdOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDynamicSnitchBadnessThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigDynamicSnitchResetIntervalInMs Time interval in milliseconds to reset all node scores, which allows a bad node to recover. 
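+
+A hedged sketch of bounding this call with a caller-supplied context, using
+the exported params fields that Submit forwards through the ClientOperation
+(cfg and ctx are assumed to come from elsewhere):
+
+	p := NewFindConfigDynamicSnitchResetIntervalInMsParams()
+	p.Context = ctx // e.g. from context.WithTimeout
+	ok, err := cfg.FindConfigDynamicSnitchResetIntervalInMs(p)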
+*/ +func (a *Client) FindConfigDynamicSnitchResetIntervalInMs(params *FindConfigDynamicSnitchResetIntervalInMsParams) (*FindConfigDynamicSnitchResetIntervalInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDynamicSnitchResetIntervalInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_dynamic_snitch_reset_interval_in_ms", + Method: "GET", + PathPattern: "/config/dynamic_snitch_reset_interval_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDynamicSnitchResetIntervalInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigDynamicSnitchResetIntervalInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDynamicSnitchResetIntervalInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigDynamicSnitchUpdateIntervalInMs The time interval for how often the snitch calculates node scores. Because score calculation is CPU intensive, be careful when reducing this interval. +*/ +func (a *Client) FindConfigDynamicSnitchUpdateIntervalInMs(params *FindConfigDynamicSnitchUpdateIntervalInMsParams) (*FindConfigDynamicSnitchUpdateIntervalInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigDynamicSnitchUpdateIntervalInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_dynamic_snitch_update_interval_in_ms", + Method: "GET", + PathPattern: "/config/dynamic_snitch_update_interval_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigDynamicSnitchUpdateIntervalInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigDynamicSnitchUpdateIntervalInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigDynamicSnitchUpdateIntervalInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableCache Enable cache +*/ +func (a *Client) FindConfigEnableCache(params *FindConfigEnableCacheParams) (*FindConfigEnableCacheOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableCacheParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_cache", + Method: "GET", + PathPattern: "/config/enable_cache", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableCacheReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableCacheOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := 
result.(*FindConfigEnableCacheDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableCommitlog Enable commitlog +*/ +func (a *Client) FindConfigEnableCommitlog(params *FindConfigEnableCommitlogParams) (*FindConfigEnableCommitlogOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableCommitlogParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_commitlog", + Method: "GET", + PathPattern: "/config/enable_commitlog", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableCommitlogReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableCommitlogOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEnableCommitlogDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableDangerousDirectImportOfCassandraCounters Only turn this option on if you want to import tables from Cassandra containing counters, and you are SURE that no counters in that table were created in a version earlier than Cassandra 2.1. It is not enough to have since upgraded to a newer version of Cassandra. If you EVER used a version earlier than 2.1 in the cluster where these SSTables come from, DO NOT TURN ON THIS OPTION! You will corrupt your data. You have been warned. +*/ +func (a *Client) FindConfigEnableDangerousDirectImportOfCassandraCounters(params *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) (*FindConfigEnableDangerousDirectImportOfCassandraCountersOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableDangerousDirectImportOfCassandraCountersParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_dangerous_direct_import_of_cassandra_counters", + Method: "GET", + PathPattern: "/config/enable_dangerous_direct_import_of_cassandra_counters", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableDangerousDirectImportOfCassandraCountersReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableDangerousDirectImportOfCassandraCountersOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEnableDangerousDirectImportOfCassandraCountersDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableDeprecatedPartitioners Enable the byteordered and murmurs partitioners. These partitioners are deprecated and will be removed in a future version.
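+
+Error-handling sketch (cfg assumed as above). Transport failures are
+returned as-is; in the usual go-swagger pattern a non-2xx response is
+decoded into the operation's Default type, which also serves as the error
+value:
+
+	_, err := cfg.FindConfigEnableDeprecatedPartitioners(nil)
+	var d *FindConfigEnableDeprecatedPartitionersDefault
+	if errors.As(err, &d) {
+		log.Printf("config API error, status %d", d.Code())
+	}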
+*/ +func (a *Client) FindConfigEnableDeprecatedPartitioners(params *FindConfigEnableDeprecatedPartitionersParams) (*FindConfigEnableDeprecatedPartitionersOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableDeprecatedPartitionersParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_deprecated_partitioners", + Method: "GET", + PathPattern: "/config/enable_deprecated_partitioners", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableDeprecatedPartitionersReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableDeprecatedPartitionersOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEnableDeprecatedPartitionersDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableInMemoryDataStore Enable in memory mode (system tables are always persisted) +*/ +func (a *Client) FindConfigEnableInMemoryDataStore(params *FindConfigEnableInMemoryDataStoreParams) (*FindConfigEnableInMemoryDataStoreOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableInMemoryDataStoreParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_in_memory_data_store", + Method: "GET", + PathPattern: "/config/enable_in_memory_data_store", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableInMemoryDataStoreReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableInMemoryDataStoreOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEnableInMemoryDataStoreDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableKeyspaceColumnFamilyMetrics Enable per keyspace and per column family metrics reporting +*/ +func (a *Client) FindConfigEnableKeyspaceColumnFamilyMetrics(params *FindConfigEnableKeyspaceColumnFamilyMetricsParams) (*FindConfigEnableKeyspaceColumnFamilyMetricsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableKeyspaceColumnFamilyMetricsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_keyspace_column_family_metrics", + Method: "GET", + PathPattern: "/config/enable_keyspace_column_family_metrics", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableKeyspaceColumnFamilyMetricsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableKeyspaceColumnFamilyMetricsOK) + if ok { + return success, nil + } + // unexpected success 
response + unexpectedSuccess := result.(*FindConfigEnableKeyspaceColumnFamilyMetricsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableShardAwareDrivers Enable native transport drivers to use connection-per-shard for better performance +*/ +func (a *Client) FindConfigEnableShardAwareDrivers(params *FindConfigEnableShardAwareDriversParams) (*FindConfigEnableShardAwareDriversOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableShardAwareDriversParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_shard_aware_drivers", + Method: "GET", + PathPattern: "/config/enable_shard_aware_drivers", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableShardAwareDriversReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableShardAwareDriversOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEnableShardAwareDriversDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableSstableDataIntegrityCheck Enable interposer which checks for integrity of every sstable write. Performance is affected to some extent as a result. Useful to help debug problems that may arise at other layers. +*/ +func (a *Client) FindConfigEnableSstableDataIntegrityCheck(params *FindConfigEnableSstableDataIntegrityCheckParams) (*FindConfigEnableSstableDataIntegrityCheckOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableSstableDataIntegrityCheckParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_sstable_data_integrity_check", + Method: "GET", + PathPattern: "/config/enable_sstable_data_integrity_check", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableSstableDataIntegrityCheckReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableSstableDataIntegrityCheckOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEnableSstableDataIntegrityCheckDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigEnableSstablesMcFormat Enable SSTables 'mc' format to be used as the default file format +*/ +func (a *Client) FindConfigEnableSstablesMcFormat(params *FindConfigEnableSstablesMcFormatParams) (*FindConfigEnableSstablesMcFormatOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEnableSstablesMcFormatParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_enable_sstables_mc_format", + Method: "GET", + PathPattern:
"/config/enable_sstables_mc_format", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEnableSstablesMcFormatReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEnableSstablesMcFormatOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEnableSstablesMcFormatDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigEndpointSnitch Set to a class that implements the IEndpointSnitch. Scylla uses snitches for locating nodes and routing requests. + + SimpleSnitch: Use for single-data center deployments or single-zone in public clouds. Does not recognize data center or rack information. It treats strategy order as proximity, which can improve cache locality when disabling read repair. + + GossipingPropertyFileSnitch: Recommended for production. The rack and data center for the local node are defined in the cassandra-rackdc.properties file and propagated to other nodes via gossip. To allow migration from the PropertyFileSnitch, it uses the cassandra-topology.properties file if it is present. + + Ec2Snitch: For EC2 deployments in a single region. Loads region and availability zone information from the EC2 API. The region is treated as the data center and the availability zone as the rack. Uses only private IPs. Subsequently it does not work across multiple regions. + + Ec2MultiRegionSnitch: Uses public IPs as the broadcast_address to allow cross-region connectivity. This means you must also set seed addresses to the public IP and open the storage_port or ssl_storage_port on the public IP firewall. For intra-region traffic, Scylla switches to the private IP after establishing a connection. + + GoogleCloudSnitch: For deployments on Google Cloud Platform across one or more regions. The region is treated as a datacenter and the availability zone is treated as a rack within the datacenter. The communication should occur over private IPs within the same logical network. + + RackInferringSnitch: Proximity is determined by rack and data center, which are assumed to correspond to the 3rd and 2nd octet of each node's IP address, respectively. This snitch is best used as an example for writing a custom snitch class (unless this happens to match your deployment conventions). 
+ +Related information: Snitches +*/ +func (a *Client) FindConfigEndpointSnitch(params *FindConfigEndpointSnitchParams) (*FindConfigEndpointSnitchOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigEndpointSnitchParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_endpoint_snitch", + Method: "GET", + PathPattern: "/config/endpoint_snitch", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigEndpointSnitchReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigEndpointSnitchOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigEndpointSnitchDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigExperimental Set to true to unlock experimental features. +*/ +func (a *Client) FindConfigExperimental(params *FindConfigExperimentalParams) (*FindConfigExperimentalOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigExperimentalParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_experimental", + Method: "GET", + PathPattern: "/config/experimental", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigExperimentalReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigExperimentalOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigExperimentalDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigFdInitialValueMs The initial failure_detector interval time in milliseconds. +*/ +func (a *Client) FindConfigFdInitialValueMs(params *FindConfigFdInitialValueMsParams) (*FindConfigFdInitialValueMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigFdInitialValueMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_fd_initial_value_ms", + Method: "GET", + PathPattern: "/config/fd_initial_value_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigFdInitialValueMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigFdInitialValueMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigFdInitialValueMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigFdMaxIntervalMs The maximum failure_detector interval time in milliseconds. Interval larger than the maximum will be ignored. 
Larger clusters may need to increase the default. +*/ +func (a *Client) FindConfigFdMaxIntervalMs(params *FindConfigFdMaxIntervalMsParams) (*FindConfigFdMaxIntervalMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigFdMaxIntervalMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_fd_max_interval_ms", + Method: "GET", + PathPattern: "/config/fd_max_interval_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigFdMaxIntervalMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigFdMaxIntervalMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigFdMaxIntervalMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigFileCacheSizeInMb Total memory to use for SSTable-reading buffers. +*/ +func (a *Client) FindConfigFileCacheSizeInMb(params *FindConfigFileCacheSizeInMbParams) (*FindConfigFileCacheSizeInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigFileCacheSizeInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_file_cache_size_in_mb", + Method: "GET", + PathPattern: "/config/file_cache_size_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigFileCacheSizeInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigFileCacheSizeInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigFileCacheSizeInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigHintedHandoffEnabled Enable or disable hinted handoff. To enable per data center, add a data center list. For example: hinted_handoff_enabled: DC1,DC2. A hint indicates that the write needs to be replayed to an unavailable node.
Related information: About hinted handoff writes +*/ +func (a *Client) FindConfigHintedHandoffEnabled(params *FindConfigHintedHandoffEnabledParams) (*FindConfigHintedHandoffEnabledOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigHintedHandoffEnabledParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_hinted_handoff_enabled", + Method: "GET", + PathPattern: "/config/hinted_handoff_enabled", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigHintedHandoffEnabledReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigHintedHandoffEnabledOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigHintedHandoffEnabledDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigHintedHandoffThrottleInKb Maximum throttle per delivery thread in kilobytes per second. This rate reduces proportionally to the number of nodes in the cluster. For example, if there are two nodes in the cluster, each delivery thread will use the maximum rate. If there are three, each node will throttle to half of the maximum, since the two nodes are expected to deliver hints simultaneously. +*/ +func (a *Client) FindConfigHintedHandoffThrottleInKb(params *FindConfigHintedHandoffThrottleInKbParams) (*FindConfigHintedHandoffThrottleInKbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigHintedHandoffThrottleInKbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_hinted_handoff_throttle_in_kb", + Method: "GET", + PathPattern: "/config/hinted_handoff_throttle_in_kb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigHintedHandoffThrottleInKbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigHintedHandoffThrottleInKbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigHintedHandoffThrottleInKbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigHintsDirectory The directory where hints files are stored if hinted handoff is enabled. 
+*/ +func (a *Client) FindConfigHintsDirectory(params *FindConfigHintsDirectoryParams) (*FindConfigHintsDirectoryOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigHintsDirectoryParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_hints_directory", + Method: "GET", + PathPattern: "/config/hints_directory", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigHintsDirectoryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigHintsDirectoryOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigHintsDirectoryDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigInMemoryCompactionLimitInMb Size limit for rows being compacted in memory. Larger rows spill to disk and use a slower two-pass compaction process. When this occurs, a message is logged specifying the row key. The recommended value is 5 to 10 percent of the available Java heap size. +*/ +func (a *Client) FindConfigInMemoryCompactionLimitInMb(params *FindConfigInMemoryCompactionLimitInMbParams) (*FindConfigInMemoryCompactionLimitInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInMemoryCompactionLimitInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_in_memory_compaction_limit_in_mb", + Method: "GET", + PathPattern: "/config/in_memory_compaction_limit_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInMemoryCompactionLimitInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInMemoryCompactionLimitInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInMemoryCompactionLimitInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigIncrementalBackups Backs up data updated since the last snapshot was taken. When enabled, Scylla creates a hard link to each SSTable flushed or streamed locally in a backups/ subdirectory of the keyspace data. Removing these links is the operator's responsibility. 
+ +Related information: Enabling incremental backups +*/ +func (a *Client) FindConfigIncrementalBackups(params *FindConfigIncrementalBackupsParams) (*FindConfigIncrementalBackupsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigIncrementalBackupsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_incremental_backups", + Method: "GET", + PathPattern: "/config/incremental_backups", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigIncrementalBackupsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigIncrementalBackupsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigIncrementalBackupsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigIndexSummaryCapacityInMb Fixed memory pool size in MB for SSTable index summaries. If the memory usage of all index summaries exceeds this limit, any SSTables with low read rates shrink their index summaries to meet this limit. This is a best-effort process. In extreme conditions, Cassandra may need to use more than this amount of memory. +*/ +func (a *Client) FindConfigIndexSummaryCapacityInMb(params *FindConfigIndexSummaryCapacityInMbParams) (*FindConfigIndexSummaryCapacityInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigIndexSummaryCapacityInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_index_summary_capacity_in_mb", + Method: "GET", + PathPattern: "/config/index_summary_capacity_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigIndexSummaryCapacityInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigIndexSummaryCapacityInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigIndexSummaryCapacityInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigIndexSummaryResizeIntervalInMinutes How frequently index summaries should be re-sampled. This is done periodically to redistribute memory from the fixed-size pool to SSTables proportional to their recent read rates. To disable, set to -1. This leaves existing index summaries at their current sampling level.
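+
+Submit also forwards params.HTTPClient, so a per-call HTTP client can be
+injected (a sketch; cfg assumed as above):
+
+	p := NewFindConfigIndexSummaryResizeIntervalInMinutesParams()
+	p.HTTPClient = &http.Client{Timeout: 5 * time.Second}
+	ok, err := cfg.FindConfigIndexSummaryResizeIntervalInMinutes(p)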
+*/ +func (a *Client) FindConfigIndexSummaryResizeIntervalInMinutes(params *FindConfigIndexSummaryResizeIntervalInMinutesParams) (*FindConfigIndexSummaryResizeIntervalInMinutesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigIndexSummaryResizeIntervalInMinutesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_index_summary_resize_interval_in_minutes", + Method: "GET", + PathPattern: "/config/index_summary_resize_interval_in_minutes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigIndexSummaryResizeIntervalInMinutesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigIndexSummaryResizeIntervalInMinutesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigIndexSummaryResizeIntervalInMinutesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigInitialToken Used in the single-node-per-token architecture, where a node owns exactly one contiguous range in the ring space. Setting this property overrides num_tokens. + +If you are not using vnodes, or have num_tokens set to 1 or left unspecified (#num_tokens), you should always specify this parameter when setting up a production cluster for the first time and when adding capacity. For more information, see this parameter in the Cassandra 1.1 Node and Cluster Configuration documentation. +This parameter can be used with num_tokens (vnodes) in special cases such as Restoring from a snapshot. +*/ +func (a *Client) FindConfigInitialToken(params *FindConfigInitialTokenParams) (*FindConfigInitialTokenOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInitialTokenParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_initial_token", + Method: "GET", + PathPattern: "/config/initial_token", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInitialTokenReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInitialTokenOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInitialTokenDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigInterDcStreamThroughputOutboundMegabitsPerSec Throttles all streaming file transfer between the data centers. This setting allows throttling streaming throughput between data centers in addition to throttling all network stream traffic as configured with stream_throughput_outbound_megabits_per_sec.
+*/ +func (a *Client) FindConfigInterDcStreamThroughputOutboundMegabitsPerSec(params *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) (*FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_inter_dc_stream_throughput_outbound_megabits_per_sec", + Method: "GET", + PathPattern: "/config/inter_dc_stream_throughput_outbound_megabits_per_sec", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInterDcStreamThroughputOutboundMegabitsPerSecReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigInterDcTCPNodelay Enable or disable tcp_nodelay for inter-data center communication. When disabled, larger but fewer network packets are sent. This reduces overhead from the TCP protocol itself. However, if cross data-center responses are blocked, it will increase latency. +*/ +func (a *Client) FindConfigInterDcTCPNodelay(params *FindConfigInterDcTCPNodelayParams) (*FindConfigInterDcTCPNodelayOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInterDcTCPNodelayParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_inter_dc_tcp_nodelay", + Method: "GET", + PathPattern: "/config/inter_dc_tcp_nodelay", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInterDcTCPNodelayReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInterDcTCPNodelayOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInterDcTCPNodelayDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigInternodeAuthenticator Internode authentication backend. It implements org.apache.cassandra.auth.AllowAllInternodeAuthenticator to allow or disallow connections from peer nodes.
+*/ +func (a *Client) FindConfigInternodeAuthenticator(params *FindConfigInternodeAuthenticatorParams) (*FindConfigInternodeAuthenticatorOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInternodeAuthenticatorParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_internode_authenticator", + Method: "GET", + PathPattern: "/config/internode_authenticator", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInternodeAuthenticatorReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInternodeAuthenticatorOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInternodeAuthenticatorDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigInternodeCompression Controls whether traffic between nodes is compressed. The valid values are: + +all: All traffic is compressed. +dc: Traffic between data centers is compressed. +none: No compression. +*/ +func (a *Client) FindConfigInternodeCompression(params *FindConfigInternodeCompressionParams) (*FindConfigInternodeCompressionOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInternodeCompressionParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_internode_compression", + Method: "GET", + PathPattern: "/config/internode_compression", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInternodeCompressionReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInternodeCompressionOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInternodeCompressionDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigInternodeRecvBuffSizeInBytes Sets the receiving socket buffer size in bytes for inter-node calls.
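+
+An illustrative sketch (not generated code): reading the receive-side buffer size together with its send-side counterpart, assuming a *Client value "cfg" and the usual go-swagger Payload field on the OK responses (assumed imports: "fmt", "log"):
+
+	recv, err := cfg.FindConfigInternodeRecvBuffSizeInBytes(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	send, err := cfg.FindConfigInternodeSendBuffSizeInBytes(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("internode buffers: recv=%v send=%v bytes\n", recv.Payload, send.Payload)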
+*/ +func (a *Client) FindConfigInternodeRecvBuffSizeInBytes(params *FindConfigInternodeRecvBuffSizeInBytesParams) (*FindConfigInternodeRecvBuffSizeInBytesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInternodeRecvBuffSizeInBytesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_internode_recv_buff_size_in_bytes", + Method: "GET", + PathPattern: "/config/internode_recv_buff_size_in_bytes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInternodeRecvBuffSizeInBytesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInternodeRecvBuffSizeInBytesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInternodeRecvBuffSizeInBytesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigInternodeSendBuffSizeInBytes Sets the sending socket buffer size in bytes for inter-node calls. + +When setting this parameter and internode_recv_buff_size_in_bytes, the buffer size is limited by net.core.wmem_max. When unset, buffer size is defined by net.ipv4.tcp_wmem. See man tcp and: + + /proc/sys/net/core/wmem_max + /proc/sys/net/core/rmem_max + /proc/sys/net/ipv4/tcp_wmem + /proc/sys/net/ipv4/tcp_rmem +*/ +func (a *Client) FindConfigInternodeSendBuffSizeInBytes(params *FindConfigInternodeSendBuffSizeInBytesParams) (*FindConfigInternodeSendBuffSizeInBytesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigInternodeSendBuffSizeInBytesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_internode_send_buff_size_in_bytes", + Method: "GET", + PathPattern: "/config/internode_send_buff_size_in_bytes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigInternodeSendBuffSizeInBytesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigInternodeSendBuffSizeInBytesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigInternodeSendBuffSizeInBytesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigJoinRing When set to true, a node will join the token ring. When set to false, a node will not join the token ring. Users can use nodetool join to initiate ring joining later. Same as -Dcassandra.join_ring in Cassandra.
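+
+Because the generated operation forwards params.Context and params.HTTPClient to the transport (see the body below), a caller can bound the request with a deadline. A sketch, assuming "cfg" is a *Client, a go-swagger Payload field on the OK response, and imports of "context", "net/http", "time", "fmt", and "log":
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	params := NewFindConfigJoinRingParams()
+	params.Context = ctx
+	params.HTTPClient = &http.Client{}
+	resp, err := cfg.FindConfigJoinRing(params)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("join_ring:", resp.Payload)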
+*/ +func (a *Client) FindConfigJoinRing(params *FindConfigJoinRingParams) (*FindConfigJoinRingOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigJoinRingParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_join_ring", + Method: "GET", + PathPattern: "/config/join_ring", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigJoinRingReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigJoinRingOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigJoinRingDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigKeyCacheKeysToSave Number of keys from the key cache to save. (0: all) +*/ +func (a *Client) FindConfigKeyCacheKeysToSave(params *FindConfigKeyCacheKeysToSaveParams) (*FindConfigKeyCacheKeysToSaveOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigKeyCacheKeysToSaveParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_key_cache_keys_to_save", + Method: "GET", + PathPattern: "/config/key_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigKeyCacheKeysToSaveReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigKeyCacheKeysToSaveOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigKeyCacheKeysToSaveDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigKeyCacheSavePeriod Duration in seconds that keys are saved in cache. Caches are saved to saved_caches_directory. Saved caches greatly improve cold-start speeds and have relatively little effect on I/O.
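+
+A sketch of converting the returned value into a time.Duration, assuming "cfg" is a *Client and the OK response carries an integer-typed Payload holding seconds (assumed imports: "fmt", "log", "time"):
+
+	resp, err := cfg.FindConfigKeyCacheSavePeriod(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	period := time.Duration(resp.Payload) * time.Second // Payload assumed to be whole seconds
+	fmt.Println("key cache saved every", period)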
+*/ +func (a *Client) FindConfigKeyCacheSavePeriod(params *FindConfigKeyCacheSavePeriodParams) (*FindConfigKeyCacheSavePeriodOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigKeyCacheSavePeriodParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_key_cache_save_period", + Method: "GET", + PathPattern: "/config/key_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigKeyCacheSavePeriodReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigKeyCacheSavePeriodOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigKeyCacheSavePeriodDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigKeyCacheSizeInMb A global cache setting for tables. It is the maximum size of the key cache in memory. To disable set to 0. + +Related information: nodetool setcachecapacity. +*/ +func (a *Client) FindConfigKeyCacheSizeInMb(params *FindConfigKeyCacheSizeInMbParams) (*FindConfigKeyCacheSizeInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigKeyCacheSizeInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_key_cache_size_in_mb", + Method: "GET", + PathPattern: "/config/key_cache_size_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigKeyCacheSizeInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigKeyCacheSizeInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigKeyCacheSizeInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigLargeMemoryAllocationWarningThreshold Warn about memory allocations above this size; set to zero to disable +*/ +func (a *Client) FindConfigLargeMemoryAllocationWarningThreshold(params *FindConfigLargeMemoryAllocationWarningThresholdParams) (*FindConfigLargeMemoryAllocationWarningThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigLargeMemoryAllocationWarningThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_large_memory_allocation_warning_threshold", + Method: "GET", + PathPattern: "/config/large_memory_allocation_warning_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigLargeMemoryAllocationWarningThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigLargeMemoryAllocationWarningThresholdOK) + if ok { + return success, nil + } + // unexpected success response + 
unexpectedSuccess := result.(*FindConfigLargeMemoryAllocationWarningThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigListenAddress The IP address or hostname that Scylla binds to for connecting to other Scylla nodes. Set this parameter or listen_interface, not both. You must change the default setting for multiple nodes to communicate: + +Generally set to empty. If the node is properly configured (host name, name resolution, and so on), Scylla uses InetAddress.getLocalHost() to get the local address from the system. +For a single node cluster, you can use the default setting (localhost). +If Scylla can't find the correct address, you must specify the IP address or host name. +Never specify 0.0.0.0; it is always wrong. +*/ +func (a *Client) FindConfigListenAddress(params *FindConfigListenAddressParams) (*FindConfigListenAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigListenAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_listen_address", + Method: "GET", + PathPattern: "/config/listen_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigListenAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigListenAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigListenAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigListenInterface The interface that Scylla binds to for connecting to other Scylla nodes. Interfaces must correspond to a single address; IP aliasing is not supported. See listen_address. +*/ +func (a *Client) FindConfigListenInterface(params *FindConfigListenInterfaceParams) (*FindConfigListenInterfaceOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigListenInterfaceParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_listen_interface", + Method: "GET", + PathPattern: "/config/listen_interface", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigListenInterfaceReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigListenInterfaceOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigListenInterfaceDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigListenOnBroadcastAddress When using multiple physical network interfaces, set this to true to listen on broadcast_address in addition to the listen_address, allowing nodes to communicate on both interfaces.
Ignore this property if the network configuration automatically routes between the public and private networks, such as on EC2. +*/ +func (a *Client) FindConfigListenOnBroadcastAddress(params *FindConfigListenOnBroadcastAddressParams) (*FindConfigListenOnBroadcastAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigListenOnBroadcastAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_listen_on_broadcast_address", + Method: "GET", + PathPattern: "/config/listen_on_broadcast_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigListenOnBroadcastAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigListenOnBroadcastAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigListenOnBroadcastAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigLoadBalance CQL request load balancing: 'none' or 'round-robin' +*/ +func (a *Client) FindConfigLoadBalance(params *FindConfigLoadBalanceParams) (*FindConfigLoadBalanceOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigLoadBalanceParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_load_balance", + Method: "GET", + PathPattern: "/config/load_balance", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigLoadBalanceReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigLoadBalanceOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigLoadBalanceDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigLoadRingState When set to true, loads tokens and host_ids previously saved. Same as -Dcassandra.load_ring_state in Cassandra.
+*/ +func (a *Client) FindConfigLoadRingState(params *FindConfigLoadRingStateParams) (*FindConfigLoadRingStateOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigLoadRingStateParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_load_ring_state", + Method: "GET", + PathPattern: "/config/load_ring_state", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigLoadRingStateReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigLoadRingStateOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigLoadRingStateDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigLogToStdout Log to stdout +*/ +func (a *Client) FindConfigLogToStdout(params *FindConfigLogToStdoutParams) (*FindConfigLogToStdoutOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigLogToStdoutParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_log_to_stdout", + Method: "GET", + PathPattern: "/config/log_to_stdout", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigLogToStdoutReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigLogToStdoutOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigLogToStdoutDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigLogToSyslog Log to syslog +*/ +func (a *Client) FindConfigLogToSyslog(params *FindConfigLogToSyslogParams) (*FindConfigLogToSyslogOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigLogToSyslogParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_log_to_syslog", + Method: "GET", + PathPattern: "/config/log_to_syslog", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigLogToSyslogReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigLogToSyslogOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigLogToSyslogDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigLoggerLogLevel Logger log levels +*/ +func (a *Client) FindConfigLoggerLogLevel(params *FindConfigLoggerLogLevelParams) (*FindConfigLoggerLogLevelOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigLoggerLogLevelParams() + } + + result, err := 
a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_logger_log_level", + Method: "GET", + PathPattern: "/config/logger_log_level", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigLoggerLogLevelReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigLoggerLogLevelOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigLoggerLogLevelDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigLsaReclamationStep Minimum number of segments to reclaim in a single step +*/ +func (a *Client) FindConfigLsaReclamationStep(params *FindConfigLsaReclamationStepParams) (*FindConfigLsaReclamationStepOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigLsaReclamationStepParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_lsa_reclamation_step", + Method: "GET", + PathPattern: "/config/lsa_reclamation_step", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigLsaReclamationStepReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigLsaReclamationStepOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigLsaReclamationStepDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigMaxHintWindowInMs Maximum amount of time that hints are generated for an unresponsive node. After this interval, new hints are no longer generated until the node is back up and responsive. If the node goes down again, a new interval begins. This setting can prevent a sudden demand for resources when a node is brought back online and the rest of the cluster attempts to replay a large volume of hinted writes.
+ +Related information: Failure detection and recovery +*/ +func (a *Client) FindConfigMaxHintWindowInMs(params *FindConfigMaxHintWindowInMsParams) (*FindConfigMaxHintWindowInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMaxHintWindowInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_max_hint_window_in_ms", + Method: "GET", + PathPattern: "/config/max_hint_window_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMaxHintWindowInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMaxHintWindowInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMaxHintWindowInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMaxHintsDeliveryThreads Number of threads with which to deliver hints. In multiple data-center deployments, consider increasing this number because cross data-center handoff is generally slower. +*/ +func (a *Client) FindConfigMaxHintsDeliveryThreads(params *FindConfigMaxHintsDeliveryThreadsParams) (*FindConfigMaxHintsDeliveryThreadsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMaxHintsDeliveryThreadsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_max_hints_delivery_threads", + Method: "GET", + PathPattern: "/config/max_hints_delivery_threads", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMaxHintsDeliveryThreadsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMaxHintsDeliveryThreadsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMaxHintsDeliveryThreadsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigMemoryAllocator The off-heap memory allocator. In addition to caches, this property affects storage engine meta data. Supported values: + NativeAllocator + JEMallocAllocator + +Experiments show that jemalloc saves some memory compared to the native allocator because it is more fragmentation resistant. To use, install jemalloc as a library and modify cassandra-env.sh (instructions in file). 
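+
+A sketch of checking which allocator is configured, assuming "cfg" is a *Client and the OK response carries a string-typed Payload (assumed imports: "fmt", "log"):
+
+	resp, err := cfg.FindConfigMemoryAllocator(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if resp.Payload == "JEMallocAllocator" { // value name from the list above
+		fmt.Println("jemalloc is configured as the off-heap allocator")
+	}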
+*/ +func (a *Client) FindConfigMemoryAllocator(params *FindConfigMemoryAllocatorParams) (*FindConfigMemoryAllocatorOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemoryAllocatorParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memory_allocator", + Method: "GET", + PathPattern: "/config/memory_allocator", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemoryAllocatorReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemoryAllocatorOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemoryAllocatorDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigMemtableAllocationType Specify the way Cassandra allocates and manages memtable memory. See Off-heap memtables in Cassandra 2.1. Options are: + +heap_buffers On heap NIO (non-blocking I/O) buffers. +offheap_buffers Off heap (direct) NIO buffers. +offheap_objects Native memory, eliminating NIO buffer heap overhead. +*/ +func (a *Client) FindConfigMemtableAllocationType(params *FindConfigMemtableAllocationTypeParams) (*FindConfigMemtableAllocationTypeOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableAllocationTypeParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_allocation_type", + Method: "GET", + PathPattern: "/config/memtable_allocation_type", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableAllocationTypeReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableAllocationTypeOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableAllocationTypeDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMemtableCleanupThreshold Ratio of occupied non-flushing memtable size to total permitted size for triggering a flush of the largest memtable. Larger values mean larger flushes and less compaction, but also less concurrent flush activity, which can make it difficult to keep your disks saturated under heavy write load. 
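+
+The flush trigger point can be estimated as the total permitted memtable memory multiplied by this threshold. A sketch (illustrative only), assuming "cfg" is a *Client, an integer-typed Payload for the space setting, and a float-typed Payload for the threshold (assumed imports: "fmt", "log"):
+
+	space, err := cfg.FindConfigMemtableTotalSpaceInMb(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	thresh, err := cfg.FindConfigMemtableCleanupThreshold(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	triggerMb := float64(space.Payload) * thresh.Payload
+	fmt.Printf("largest memtable flushes near %.0f MB of occupied memtable memory\n", triggerMb)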
+*/ +func (a *Client) FindConfigMemtableCleanupThreshold(params *FindConfigMemtableCleanupThresholdParams) (*FindConfigMemtableCleanupThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableCleanupThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_cleanup_threshold", + Method: "GET", + PathPattern: "/config/memtable_cleanup_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableCleanupThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableCleanupThresholdOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableCleanupThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigMemtableFlushQueueSize The number of full memtables to allow pending flush (memtables waiting for a write thread). At a minimum, set to the maximum number of indexes created on a single table. + +Related information: Flushing data from the memtable +*/ +func (a *Client) FindConfigMemtableFlushQueueSize(params *FindConfigMemtableFlushQueueSizeParams) (*FindConfigMemtableFlushQueueSizeOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableFlushQueueSizeParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_flush_queue_size", + Method: "GET", + PathPattern: "/config/memtable_flush_queue_size", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableFlushQueueSizeReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableFlushQueueSizeOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableFlushQueueSizeDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMemtableFlushStaticShares If set to higher than 0, ignore the controller's output and set the memtable shares statically. Do not set this unless you know what you are doing and suspect a problem in the controller. 
This option will be retired when the controller reaches more maturity. +*/ +func (a *Client) FindConfigMemtableFlushStaticShares(params *FindConfigMemtableFlushStaticSharesParams) (*FindConfigMemtableFlushStaticSharesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableFlushStaticSharesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_flush_static_shares", + Method: "GET", + PathPattern: "/config/memtable_flush_static_shares", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableFlushStaticSharesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableFlushStaticSharesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableFlushStaticSharesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMemtableFlushWriters Sets the number of memtable flush writer threads. These threads are blocked by disk I/O, and each one holds a memtable in memory while blocked. If you have a large Java heap size and many data directories, you can increase the value for better flush performance. +*/ +func (a *Client) FindConfigMemtableFlushWriters(params *FindConfigMemtableFlushWritersParams) (*FindConfigMemtableFlushWritersOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableFlushWritersParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_flush_writers", + Method: "GET", + PathPattern: "/config/memtable_flush_writers", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableFlushWritersReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableFlushWritersOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableFlushWritersDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMemtableHeapSpaceInMb Total permitted memory to use for memtables. Triggers a flush based on memtable_cleanup_threshold. Cassandra stops accepting writes when the limit is exceeded until a flush completes. If unset, the default is used.
+*/ +func (a *Client) FindConfigMemtableHeapSpaceInMb(params *FindConfigMemtableHeapSpaceInMbParams) (*FindConfigMemtableHeapSpaceInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableHeapSpaceInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_heap_space_in_mb", + Method: "GET", + PathPattern: "/config/memtable_heap_space_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableHeapSpaceInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableHeapSpaceInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableHeapSpaceInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMemtableOffheapSpaceInMb See memtable_heap_space_in_mb +*/ +func (a *Client) FindConfigMemtableOffheapSpaceInMb(params *FindConfigMemtableOffheapSpaceInMbParams) (*FindConfigMemtableOffheapSpaceInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableOffheapSpaceInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_offheap_space_in_mb", + Method: "GET", + PathPattern: "/config/memtable_offheap_space_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableOffheapSpaceInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableOffheapSpaceInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableOffheapSpaceInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMemtableTotalSpaceInMb Specifies the total memory used for all memtables on a node. This replaces the per-table storage settings memtable_operations_in_millions and memtable_throughput_in_mb. 
+*/ +func (a *Client) FindConfigMemtableTotalSpaceInMb(params *FindConfigMemtableTotalSpaceInMbParams) (*FindConfigMemtableTotalSpaceInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMemtableTotalSpaceInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_memtable_total_space_in_mb", + Method: "GET", + PathPattern: "/config/memtable_total_space_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMemtableTotalSpaceInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMemtableTotalSpaceInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMemtableTotalSpaceInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigMurmur3PartitionerIgnoreMsbBits Number of most significant token bits to ignore in murmur3 partitioner; increase for very large clusters +*/ +func (a *Client) FindConfigMurmur3PartitionerIgnoreMsbBits(params *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) (*FindConfigMurmur3PartitionerIgnoreMsbBitsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigMurmur3PartitionerIgnoreMsbBitsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_murmur3_partitioner_ignore_msb_bits", + Method: "GET", + PathPattern: "/config/murmur3_partitioner_ignore_msb_bits", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigMurmur3PartitionerIgnoreMsbBitsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigMurmur3PartitionerIgnoreMsbBitsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigMurmur3PartitionerIgnoreMsbBitsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigNativeTransportMaxFrameSizeInMb The maximum allowed frame size. Frames (requests) larger than this are rejected as invalid.
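+
+Both failure paths surface as an error: transport failures are returned as-is, and the "unexpected success" path below wraps the Default response in a *runtime.APIError. A sketch of distinguishing them, assuming "cfg" is a *Client and "errors", "fmt", and "log" are imported:
+
+	resp, err := cfg.FindConfigNativeTransportMaxFrameSizeInMb(nil)
+	if err != nil {
+		var apiErr *runtime.APIError
+		if errors.As(err, &apiErr) {
+			log.Fatalf("server answered with status %d", apiErr.Code)
+		}
+		log.Fatal(err) // transport-level failure
+	}
+	fmt.Println("max CQL frame size (MB):", resp.Payload)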
+*/ +func (a *Client) FindConfigNativeTransportMaxFrameSizeInMb(params *FindConfigNativeTransportMaxFrameSizeInMbParams) (*FindConfigNativeTransportMaxFrameSizeInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigNativeTransportMaxFrameSizeInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_native_transport_max_frame_size_in_mb", + Method: "GET", + PathPattern: "/config/native_transport_max_frame_size_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigNativeTransportMaxFrameSizeInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigNativeTransportMaxFrameSizeInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigNativeTransportMaxFrameSizeInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigNativeTransportMaxThreads The maximum number of threads handling requests. The meaning is the same as rpc_max_threads. + +Default is different (128 versus unlimited). +No corresponding native_transport_min_threads. +Idle threads are stopped after 30 seconds. +*/ +func (a *Client) FindConfigNativeTransportMaxThreads(params *FindConfigNativeTransportMaxThreadsParams) (*FindConfigNativeTransportMaxThreadsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigNativeTransportMaxThreadsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_native_transport_max_threads", + Method: "GET", + PathPattern: "/config/native_transport_max_threads", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigNativeTransportMaxThreadsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigNativeTransportMaxThreadsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigNativeTransportMaxThreadsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigNativeTransportPort Port on which the CQL native transport listens for clients.
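+
+A sketch of turning the fetched port into a dialable address, assuming "cfg" is a *Client and "net", "fmt", and "log" are imported (fmt.Sprint avoids assuming the exact Payload type):
+
+	resp, err := cfg.FindConfigNativeTransportPort(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	addr := net.JoinHostPort("10.0.0.1", fmt.Sprint(resp.Payload)) // example node IP
+	fmt.Println("CQL endpoint:", addr)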
+*/ +func (a *Client) FindConfigNativeTransportPort(params *FindConfigNativeTransportPortParams) (*FindConfigNativeTransportPortOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigNativeTransportPortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_native_transport_port", + Method: "GET", + PathPattern: "/config/native_transport_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigNativeTransportPortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigNativeTransportPortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigNativeTransportPortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigNativeTransportPortSsl Port on which the CQL TLS native transport listens for clients. Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption for native_transport_port. Setting native_transport_port_ssl to a different value from native_transport_port will use encryption for native_transport_port_ssl while keeping native_transport_port unencrypted. +*/ +func (a *Client) FindConfigNativeTransportPortSsl(params *FindConfigNativeTransportPortSslParams) (*FindConfigNativeTransportPortSslOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigNativeTransportPortSslParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_native_transport_port_ssl", + Method: "GET", + PathPattern: "/config/native_transport_port_ssl", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigNativeTransportPortSslReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigNativeTransportPortSslOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigNativeTransportPortSslDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigNumTokens Defines the number of tokens randomly assigned to this node on the ring when using virtual nodes (vnodes). The more tokens, relative to other nodes, the larger the proportion of data that the node stores. Generally all nodes should have the same number of tokens assuming equal hardware capability. The recommended value is 256. If unspecified (#num_tokens), Scylla uses 1 (equivalent to #num_tokens : 1) for legacy compatibility and uses the initial_token setting. + +If not using vnodes, comment out #num_tokens : 256 or set num_tokens : 1 and use initial_token. If you already have an existing cluster with one token per node and wish to migrate to vnodes, see Enabling virtual nodes on an existing production cluster. +Note: If using DataStax Enterprise, the default setting of this property depends on the type of node and type of install.
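+
+A sketch of cross-checking num_tokens against initial_token per the note above, assuming "cfg" is a *Client, an integer-typed Payload for num_tokens, and "fmt" and "log" imports:
+
+	tokens, err := cfg.FindConfigNumTokens(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if tokens.Payload <= 1 { // single-token node: initial_token should be set
+		initial, err := cfg.FindConfigInitialToken(nil)
+		if err != nil {
+			log.Fatal(err)
+		}
+		fmt.Println("single-token node, initial_token:", initial.Payload)
+	}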
+*/ +func (a *Client) FindConfigNumTokens(params *FindConfigNumTokensParams) (*FindConfigNumTokensOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigNumTokensParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_num_tokens", + Method: "GET", + PathPattern: "/config/num_tokens", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigNumTokensReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigNumTokensOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigNumTokensDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigOverrideDecommission Set true to force a decommissioned node to join the cluster +*/ +func (a *Client) FindConfigOverrideDecommission(params *FindConfigOverrideDecommissionParams) (*FindConfigOverrideDecommissionOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigOverrideDecommissionParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_override_decommission", + Method: "GET", + PathPattern: "/config/override_decommission", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigOverrideDecommissionReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigOverrideDecommissionOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigOverrideDecommissionDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigPartitioner Distributes rows (by partition key) across all nodes in the cluster. Any IPartitioner may be used, including your own as long as it is in the class path. For new clusters use the default partitioner. 
+ +Scylla provides the following partitioners for backwards compatibility: + + RandomPartitioner + ByteOrderedPartitioner + OrderPreservingPartitioner (deprecated) + +Related information: Partitioners +*/ +func (a *Client) FindConfigPartitioner(params *FindConfigPartitionerParams) (*FindConfigPartitionerOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPartitionerParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_partitioner", + Method: "GET", + PathPattern: "/config/partitioner", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPartitionerReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPartitionerOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPartitionerDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigPermissionsCacheMaxEntries Maximum cached permission entries. Must have a non-zero value if permissions caching is enabled (see the permissions_validity_in_ms description). +*/ +func (a *Client) FindConfigPermissionsCacheMaxEntries(params *FindConfigPermissionsCacheMaxEntriesParams) (*FindConfigPermissionsCacheMaxEntriesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPermissionsCacheMaxEntriesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_permissions_cache_max_entries", + Method: "GET", + PathPattern: "/config/permissions_cache_max_entries", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPermissionsCacheMaxEntriesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPermissionsCacheMaxEntriesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPermissionsCacheMaxEntriesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigPermissionsUpdateIntervalInMs Refresh interval for permissions cache (if enabled). After this interval, cache entries become eligible for refresh. An async reload is scheduled every permissions_update_interval_in_ms time period and the old value is returned until it completes. If permissions_validity_in_ms has a non-zero value, then this property must also have a non-zero value. It's recommended to set this value at least 3 times smaller than permissions_validity_in_ms.
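+
+A sketch of checking the recommended 3x relationship between this refresh interval and permissions_validity_in_ms, assuming "cfg" is a *Client, integer-typed Payloads, and "fmt" and "log" imports:
+
+	validity, err := cfg.FindConfigPermissionsValidityInMs(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	refresh, err := cfg.FindConfigPermissionsUpdateIntervalInMs(nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if refresh.Payload*3 > validity.Payload {
+		fmt.Println("warning: refresh interval exceeds a third of the validity window")
+	}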
+*/ +func (a *Client) FindConfigPermissionsUpdateIntervalInMs(params *FindConfigPermissionsUpdateIntervalInMsParams) (*FindConfigPermissionsUpdateIntervalInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPermissionsUpdateIntervalInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_permissions_update_interval_in_ms", + Method: "GET", + PathPattern: "/config/permissions_update_interval_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPermissionsUpdateIntervalInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPermissionsUpdateIntervalInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPermissionsUpdateIntervalInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigPermissionsValidityInMs How long permissions in cache remain valid. Depending on the authorizer, such as CassandraAuthorizer, fetching permissions can be resource intensive. Permissions caching is disabled when this property is set to 0 or when AllowAllAuthorizer is used. A cached value is considered valid as long as it is not older than permissions_validity_in_ms and it has been read at least once during that time frame. If either condition does not hold, the cached value is evicted from the cache. + +Related information: Object permissions +*/ +func (a *Client) FindConfigPermissionsValidityInMs(params *FindConfigPermissionsValidityInMsParams) (*FindConfigPermissionsValidityInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPermissionsValidityInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_permissions_validity_in_ms", + Method: "GET", + PathPattern: "/config/permissions_validity_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPermissionsValidityInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPermissionsValidityInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPermissionsValidityInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigPhiConvictThreshold Adjusts the sensitivity of the failure detector on an exponential scale. Generally this setting never needs adjusting.
+ +Related information: Failure detection and recovery +*/ +func (a *Client) FindConfigPhiConvictThreshold(params *FindConfigPhiConvictThresholdParams) (*FindConfigPhiConvictThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPhiConvictThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_phi_convict_threshold", + Method: "GET", + PathPattern: "/config/phi_convict_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPhiConvictThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPhiConvictThresholdOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPhiConvictThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigPreheatKernelPageCache Enable or disable kernel page cache preheating from contents of the key cache after compaction. When enabled, it preheats only the first page (4 KB) of each row to optimize for sequential access. It can be harmful for fat rows; see CASSANDRA-4937 for more details. +*/ +func (a *Client) FindConfigPreheatKernelPageCache(params *FindConfigPreheatKernelPageCacheParams) (*FindConfigPreheatKernelPageCacheOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPreheatKernelPageCacheParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_preheat_kernel_page_cache", + Method: "GET", + PathPattern: "/config/preheat_kernel_page_cache", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPreheatKernelPageCacheReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPreheatKernelPageCacheOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPreheatKernelPageCacheDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigPrometheusAddress Prometheus listening address +*/ +func (a *Client) FindConfigPrometheusAddress(params *FindConfigPrometheusAddressParams) (*FindConfigPrometheusAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPrometheusAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_prometheus_address", + Method: "GET", + PathPattern: "/config/prometheus_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPrometheusAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPrometheusAddressOK) + if ok { + return success, nil + } + // unexpected success response +
unexpectedSuccess := result.(*FindConfigPrometheusAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigPrometheusPort Prometheus port, set to zero to disable +*/ +func (a *Client) FindConfigPrometheusPort(params *FindConfigPrometheusPortParams) (*FindConfigPrometheusPortOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPrometheusPortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_prometheus_port", + Method: "GET", + PathPattern: "/config/prometheus_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPrometheusPortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPrometheusPortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPrometheusPortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigPrometheusPrefix Set the prefix of the exported Prometheus metrics. Changing this will break Scylla's dashboard compatibility; do not change it unless you know what you are doing. +*/ +func (a *Client) FindConfigPrometheusPrefix(params *FindConfigPrometheusPrefixParams) (*FindConfigPrometheusPrefixOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigPrometheusPrefixParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_prometheus_prefix", + Method: "GET", + PathPattern: "/config/prometheus_prefix", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigPrometheusPrefixReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigPrometheusPrefixOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigPrometheusPrefixDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRangeRequestTimeoutInMs The time in milliseconds that the coordinator waits for sequential or index scans to complete.
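+
+A minimal usage sketch (illustrative only, not generated code): it assumes a *Client value c wired to a node's API transport, the usual imports, and that the decoded value is exposed on the OK response's Payload field, as is typical for go-swagger responses. Passing nil params is safe because the method body below substitutes defaults:
+
+	res, err := c.FindConfigRangeRequestTimeoutInMs(nil) // nil params are defaulted internally
+	if err != nil {
+		return err // transport failure or the API's default (error) response
+	}
+	fmt.Println(res.Payload) // the configured timeout in milliseconds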
+*/ +func (a *Client) FindConfigRangeRequestTimeoutInMs(params *FindConfigRangeRequestTimeoutInMsParams) (*FindConfigRangeRequestTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRangeRequestTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_range_request_timeout_in_ms", + Method: "GET", + PathPattern: "/config/range_request_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRangeRequestTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRangeRequestTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRangeRequestTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigReadRequestTimeoutInMs The time that the coordinator waits for read operations to complete +*/ +func (a *Client) FindConfigReadRequestTimeoutInMs(params *FindConfigReadRequestTimeoutInMsParams) (*FindConfigReadRequestTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigReadRequestTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_read_request_timeout_in_ms", + Method: "GET", + PathPattern: "/config/read_request_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigReadRequestTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigReadRequestTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigReadRequestTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigReduceCacheCapacityTo Sets the size percentage to which maximum cache capacity is reduced when Java heap usage reaches the threshold defined by reduce_cache_sizes_at. Together with flush_largest_memtables_at, these properties constitute an emergency measure for preventing sudden out-of-memory (OOM) errors. 
+*/ +func (a *Client) FindConfigReduceCacheCapacityTo(params *FindConfigReduceCacheCapacityToParams) (*FindConfigReduceCacheCapacityToOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigReduceCacheCapacityToParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_reduce_cache_capacity_to", + Method: "GET", + PathPattern: "/config/reduce_cache_capacity_to", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigReduceCacheCapacityToReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigReduceCacheCapacityToOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigReduceCacheCapacityToDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigReduceCacheSizesAt When Java heap usage (after a full concurrent mark sweep (CMS) garbage collection) exceeds this percentage, Cassandra reduces the cache capacity to the fraction of the current size as specified by reduce_cache_capacity_to. To disable, set the value to 1.0. +*/ +func (a *Client) FindConfigReduceCacheSizesAt(params *FindConfigReduceCacheSizesAtParams) (*FindConfigReduceCacheSizesAtOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigReduceCacheSizesAtParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_reduce_cache_sizes_at", + Method: "GET", + PathPattern: "/config/reduce_cache_sizes_at", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigReduceCacheSizesAtReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigReduceCacheSizesAtOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigReduceCacheSizesAtDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigReplaceAddress The listen_address or broadcast_address of the dead node to replace. Same as -Dcassandra.replace_address. 
+*/ +func (a *Client) FindConfigReplaceAddress(params *FindConfigReplaceAddressParams) (*FindConfigReplaceAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigReplaceAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_replace_address", + Method: "GET", + PathPattern: "/config/replace_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigReplaceAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigReplaceAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigReplaceAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigReplaceAddressFirstBoot Like the replace_address option, but ignored if the node has already been bootstrapped successfully. Same as -Dcassandra.replace_address_first_boot. +*/ +func (a *Client) FindConfigReplaceAddressFirstBoot(params *FindConfigReplaceAddressFirstBootParams) (*FindConfigReplaceAddressFirstBootOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigReplaceAddressFirstBootParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_replace_address_first_boot", + Method: "GET", + PathPattern: "/config/replace_address_first_boot", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigReplaceAddressFirstBootReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigReplaceAddressFirstBootOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigReplaceAddressFirstBootDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigReplaceNode The UUID of the node to replace. Same as -Dcassandra.replace_node in cassandra.
+*/ +func (a *Client) FindConfigReplaceNode(params *FindConfigReplaceNodeParams) (*FindConfigReplaceNodeOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigReplaceNodeParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_replace_node", + Method: "GET", + PathPattern: "/config/replace_node", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigReplaceNodeReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigReplaceNodeOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigReplaceNodeDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigReplaceToken The tokens of the node to replace. Same as -Dcassandra.replace_token in cassandra. +*/ +func (a *Client) FindConfigReplaceToken(params *FindConfigReplaceTokenParams) (*FindConfigReplaceTokenOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigReplaceTokenParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_replace_token", + Method: "GET", + PathPattern: "/config/replace_token", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigReplaceTokenReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigReplaceTokenOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigReplaceTokenDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigRequestScheduler Defines a scheduler to handle incoming client requests according to a defined policy. This scheduler is useful for throttling client requests in single clusters containing multiple keyspaces. This parameter is specifically for requests from the client and does not affect inter-node communication. Valid values are: + +org.apache.cassandra.scheduler.NoScheduler No scheduling takes place. +org.apache.cassandra.scheduler.RoundRobinScheduler Round robin of client requests to a node with a separate queue for each request_scheduler_id property. +A Java class that implements the RequestScheduler interface. 
+*/ +func (a *Client) FindConfigRequestScheduler(params *FindConfigRequestSchedulerParams) (*FindConfigRequestSchedulerOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRequestSchedulerParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_request_scheduler", + Method: "GET", + PathPattern: "/config/request_scheduler", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRequestSchedulerReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRequestSchedulerOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRequestSchedulerDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRequestSchedulerID An identifier on which to perform request scheduling. Currently the only valid value is keyspace. See weights. +*/ +func (a *Client) FindConfigRequestSchedulerID(params *FindConfigRequestSchedulerIDParams) (*FindConfigRequestSchedulerIDOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRequestSchedulerIDParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_request_scheduler_id", + Method: "GET", + PathPattern: "/config/request_scheduler_id", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRequestSchedulerIDReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRequestSchedulerIDOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRequestSchedulerIDDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigRequestSchedulerOptions Contains a list of properties that define configuration options for request_scheduler: + +throttle_limit: The number of in-flight requests per client. Requests beyond this limit are queued up until running requests complete. Recommended value is ((concurrent_reads + concurrent_writes) × 2) +default_weight: (Default: 1) How many requests are handled during each turn of the RoundRobin. +weights: (Default: Keyspace: 1) Takes a list of keyspaces. It sets how many requests are handled during each turn of the RoundRobin, based on the request_scheduler_id.
+*/ +func (a *Client) FindConfigRequestSchedulerOptions(params *FindConfigRequestSchedulerOptionsParams) (*FindConfigRequestSchedulerOptionsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRequestSchedulerOptionsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_request_scheduler_options", + Method: "GET", + PathPattern: "/config/request_scheduler_options", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRequestSchedulerOptionsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRequestSchedulerOptionsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRequestSchedulerOptionsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigRequestTimeoutInMs The default timeout for other, miscellaneous operations. + +Related information: About hinted handoff writes +*/ +func (a *Client) FindConfigRequestTimeoutInMs(params *FindConfigRequestTimeoutInMsParams) (*FindConfigRequestTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRequestTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_request_timeout_in_ms", + Method: "GET", + PathPattern: "/config/request_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRequestTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRequestTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRequestTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRingDelayMs Time a node waits to hear from other nodes before joining the ring in milliseconds. Same as -Dcassandra.ring_delay_ms in cassandra. 
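+
+Because the generated Params struct exports Context and HTTPClient (both consumed by the method body below), a deadline can be attached without extra helpers. A sketch under those assumptions, with the context and time imports elided:
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	p := NewFindConfigRingDelayMsParams()
+	p.Context = ctx // honored when the operation is submitted
+	res, err := c.FindConfigRingDelayMs(p)
+	// on success, res.Payload is assumed to hold the delay in milliseconds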
+*/ +func (a *Client) FindConfigRingDelayMs(params *FindConfigRingDelayMsParams) (*FindConfigRingDelayMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRingDelayMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_ring_delay_ms", + Method: "GET", + PathPattern: "/config/ring_delay_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRingDelayMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRingDelayMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRingDelayMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigRoleManager The role-management backend, used to maintain grants and memberships between roles. + +The available role-managers are: + + CassandraRoleManager : Stores role data in the system_auth keyspace. +*/ +func (a *Client) FindConfigRoleManager(params *FindConfigRoleManagerParams) (*FindConfigRoleManagerOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRoleManagerParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_role_manager", + Method: "GET", + PathPattern: "/config/role_manager", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRoleManagerReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRoleManagerOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRoleManagerDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRowCacheKeysToSave Number of keys from the row cache to save.
+*/ +func (a *Client) FindConfigRowCacheKeysToSave(params *FindConfigRowCacheKeysToSaveParams) (*FindConfigRowCacheKeysToSaveOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRowCacheKeysToSaveParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_row_cache_keys_to_save", + Method: "GET", + PathPattern: "/config/row_cache_keys_to_save", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRowCacheKeysToSaveReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRowCacheKeysToSaveOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRowCacheKeysToSaveDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRowCacheSavePeriod Duration in seconds that rows are saved in cache. Caches are saved to saved_caches_directory. +*/ +func (a *Client) FindConfigRowCacheSavePeriod(params *FindConfigRowCacheSavePeriodParams) (*FindConfigRowCacheSavePeriodOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRowCacheSavePeriodParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_row_cache_save_period", + Method: "GET", + PathPattern: "/config/row_cache_save_period", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRowCacheSavePeriodReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRowCacheSavePeriodOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRowCacheSavePeriodDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRowCacheSizeInMb Maximum size of the row cache in memory. Row cache can save more time than key_cache_size_in_mb, but is space-intensive because it contains the entire row. Use the row cache only for hot rows or static rows. If you reduce the size, you may not get your hottest keys loaded on startup.
+*/ +func (a *Client) FindConfigRowCacheSizeInMb(params *FindConfigRowCacheSizeInMbParams) (*FindConfigRowCacheSizeInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRowCacheSizeInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_row_cache_size_in_mb", + Method: "GET", + PathPattern: "/config/row_cache_size_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRowCacheSizeInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRowCacheSizeInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRowCacheSizeInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigRPCAddress The listen address for client connections (Thrift RPC service and native transport). Valid values are: + + unset: Resolves the address using the hostname configuration of the node. If left unset, the hostname must resolve to the IP address of this node using /etc/hostname, /etc/hosts, or DNS. + 0.0.0.0 : Listens on all configured interfaces, but you must set the broadcast_rpc_address to a value other than 0.0.0.0. + IP address + hostname + +Related information: Network +*/ +func (a *Client) FindConfigRPCAddress(params *FindConfigRPCAddressParams) (*FindConfigRPCAddressOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCAddressParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_address", + Method: "GET", + PathPattern: "/config/rpc_address", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCAddressReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCAddressOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCAddressDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRPCInterface The listen address for client connections. Interfaces must correspond to a single address; IP aliasing is not supported. See rpc_address.
+*/ +func (a *Client) FindConfigRPCInterface(params *FindConfigRPCInterfaceParams) (*FindConfigRPCInterfaceOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCInterfaceParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_interface", + Method: "GET", + PathPattern: "/config/rpc_interface", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCInterfaceReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCInterfaceOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCInterfaceDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRPCKeepalive Enable or disable keepalive on client connections (RPC or native). +*/ +func (a *Client) FindConfigRPCKeepalive(params *FindConfigRPCKeepaliveParams) (*FindConfigRPCKeepaliveOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCKeepaliveParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_keepalive", + Method: "GET", + PathPattern: "/config/rpc_keepalive", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCKeepaliveReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCKeepaliveOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCKeepaliveDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRPCMaxThreads Regardless of your choice of RPC server (rpc_server_type), the maximum number of requests in the RPC thread pool dictates how many concurrent requests are possible. However, if you are using the parameter sync in the rpc_server_type, it also dictates the number of clients that can be connected. For a large number of client connections, this could cause excessive memory usage for the thread stack. Connection pooling on the client side is highly recommended. Setting a maximum thread pool size acts as a safeguard against misbehaved clients. If the maximum is reached, Cassandra blocks additional connections until a client disconnects.
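+
+Failures surface either as a transport error or as the API's default response wrapped by runtime.NewAPIError, as the method body below shows. A sketch of telling the two apart, assuming go-openapi's *runtime.APIError type and the standard errors and log packages:
+
+	res, err := c.FindConfigRPCMaxThreads(nil)
+	if err != nil {
+		var apiErr *runtime.APIError
+		if errors.As(err, &apiErr) {
+			log.Printf("node answered with status %d", apiErr.Code) // non-2xx default response
+		}
+		return err
+	}
+	_ = res.Payload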
+*/ +func (a *Client) FindConfigRPCMaxThreads(params *FindConfigRPCMaxThreadsParams) (*FindConfigRPCMaxThreadsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCMaxThreadsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_max_threads", + Method: "GET", + PathPattern: "/config/rpc_max_threads", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCMaxThreadsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCMaxThreadsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCMaxThreadsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRPCMinThreads Sets the minimum thread pool size for remote procedure calls. +*/ +func (a *Client) FindConfigRPCMinThreads(params *FindConfigRPCMinThreadsParams) (*FindConfigRPCMinThreadsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCMinThreadsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_min_threads", + Method: "GET", + PathPattern: "/config/rpc_min_threads", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCMinThreadsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCMinThreadsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCMinThreadsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRPCPort Thrift port for client connections. +*/ +func (a *Client) FindConfigRPCPort(params *FindConfigRPCPortParams) (*FindConfigRPCPortOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCPortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_port", + Method: "GET", + PathPattern: "/config/rpc_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCPortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCPortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCPortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRPCRecvBuffSizeInBytes Sets the receiving socket buffer size for remote procedure calls. 
+*/ +func (a *Client) FindConfigRPCRecvBuffSizeInBytes(params *FindConfigRPCRecvBuffSizeInBytesParams) (*FindConfigRPCRecvBuffSizeInBytesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCRecvBuffSizeInBytesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_recv_buff_size_in_bytes", + Method: "GET", + PathPattern: "/config/rpc_recv_buff_size_in_bytes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCRecvBuffSizeInBytesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCRecvBuffSizeInBytesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCRecvBuffSizeInBytesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigRPCSendBuffSizeInBytes Sets the sending socket buffer size in bytes for remote procedure calls. +*/ +func (a *Client) FindConfigRPCSendBuffSizeInBytes(params *FindConfigRPCSendBuffSizeInBytesParams) (*FindConfigRPCSendBuffSizeInBytesOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCSendBuffSizeInBytesParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_send_buff_size_in_bytes", + Method: "GET", + PathPattern: "/config/rpc_send_buff_size_in_bytes", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCSendBuffSizeInBytesReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCSendBuffSizeInBytesOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCSendBuffSizeInBytesDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigRPCServerType Cassandra provides three options for the RPC server. On Windows, sync is about 30% slower than hsha. On Linux, sync and hsha performance is about the same, but hsha uses less memory. + +sync : (Default) One thread per Thrift connection. For a very large number of clients, memory is the limiting factor. On a 64-bit JVM, 180KB is the minimum stack size per thread and corresponds to your use of virtual memory. Physical memory may be limited depending on use of stack space. +hsha : Half synchronous, half asynchronous. All Thrift clients are handled asynchronously using a small number of threads that does not vary with the number of clients and thus scales well to many clients. The RPC requests are synchronous (one thread per active request). + + Note: When selecting this option, you must change the default value (unlimited) of rpc_max_threads. + +Your own RPC server: You must provide a fully-qualified class name of an o.a.c.t.TServerFactory that can create a server instance.
+*/ +func (a *Client) FindConfigRPCServerType(params *FindConfigRPCServerTypeParams) (*FindConfigRPCServerTypeOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigRPCServerTypeParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_rpc_server_type", + Method: "GET", + PathPattern: "/config/rpc_server_type", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigRPCServerTypeReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigRPCServerTypeOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigRPCServerTypeDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigSavedCachesDirectory The directory location where table key and row caches are stored. +*/ +func (a *Client) FindConfigSavedCachesDirectory(params *FindConfigSavedCachesDirectoryParams) (*FindConfigSavedCachesDirectoryOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigSavedCachesDirectoryParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_saved_caches_directory", + Method: "GET", + PathPattern: "/config/saved_caches_directory", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigSavedCachesDirectoryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigSavedCachesDirectoryOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigSavedCachesDirectoryDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigSeedProvider The addresses of hosts deemed contact points. Scylla nodes use the -seeds list to find each other and learn the topology of the ring. + + class_name (Default: org.apache.cassandra.locator.SimpleSeedProvider) + The class within Scylla that handles the seed logic. It can be customized, but this is typically not required. + - seeds (Default: 127.0.0.1) A comma-delimited list of IP addresses used by gossip for bootstrapping new nodes joining a cluster. When running multiple nodes, you must change the list from the default value. In multiple data-center clusters, the seed list should include at least one node from each data center (replication group). More than a single seed node per data center is recommended for fault tolerance. Otherwise, gossip has to communicate with another data center when bootstrapping a node. Making every node a seed node is not recommended because of increased maintenance and reduced gossip performance. Gossip optimization is not critical, but it is recommended to use a small seed list (approximately three nodes per data center). + +Related information: Initializing a multiple node cluster (single data center) and Initializing a multiple node cluster (multiple data centers). 
+*/ +func (a *Client) FindConfigSeedProvider(params *FindConfigSeedProviderParams) (*FindConfigSeedProviderOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigSeedProviderParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_seed_provider", + Method: "GET", + PathPattern: "/config/seed_provider", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigSeedProviderReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigSeedProviderOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigSeedProviderDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigServerEncryptionOptions Enable or disable inter-node encryption. You must also generate keys and provide the appropriate key and trust store locations and passwords. No custom encryption options are currently enabled. The available options are: + +internode_encryption : (Default: none ) Enable or disable encryption of inter-node communication using the TLS_RSA_WITH_AES_128_CBC_SHA cipher suite for authentication, key exchange, and encryption of data transfers. The available inter-node options are: + + all : Encrypt all inter-node communications. + none : No encryption. + dc : Encrypt the traffic between the data centers (server only). + rack : Encrypt the traffic between the racks (server only). + +certificate : (Default: conf/scylla.crt) The location of a PEM-encoded x509 certificate used to identify and encrypt the internode communication. +keyfile : (Default: conf/scylla.key) PEM Key file associated with certificate. +truststore : (Default: ) Location of the truststore containing the trusted certificate for authenticating remote servers. + +The advanced settings are: + + priority_string : GnuTLS priority string controlling TLS algorithms used/allowed. + require_client_auth : (Default: false ) Enables or disables certificate authentication.
+ +Related information: Node-to-node encryption +*/ +func (a *Client) FindConfigServerEncryptionOptions(params *FindConfigServerEncryptionOptionsParams) (*FindConfigServerEncryptionOptionsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigServerEncryptionOptionsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_server_encryption_options", + Method: "GET", + PathPattern: "/config/server_encryption_options", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigServerEncryptionOptionsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigServerEncryptionOptionsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigServerEncryptionOptionsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigShadowRoundMs The maximum gossip shadow round time. Can be used to reduce the gossip feature check time during node boot up. +*/ +func (a *Client) FindConfigShadowRoundMs(params *FindConfigShadowRoundMsParams) (*FindConfigShadowRoundMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigShadowRoundMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_shadow_round_ms", + Method: "GET", + PathPattern: "/config/shadow_round_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigShadowRoundMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigShadowRoundMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigShadowRoundMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigShutdownAnnounceInMs Time a node waits after sending gossip shutdown message in milliseconds. Same as -Dcassandra.shutdown_announce_in_ms in cassandra. 
+*/ +func (a *Client) FindConfigShutdownAnnounceInMs(params *FindConfigShutdownAnnounceInMsParams) (*FindConfigShutdownAnnounceInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigShutdownAnnounceInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_shutdown_announce_in_ms", + Method: "GET", + PathPattern: "/config/shutdown_announce_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigShutdownAnnounceInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigShutdownAnnounceInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigShutdownAnnounceInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigSkipWaitForGossipToSettle An integer to configure the wait for gossip to settle. -1: wait normally, 0: do not wait at all, n: wait for at most n polls. Same as -Dcassandra.skip_wait_for_gossip_to_settle in cassandra. +*/ +func (a *Client) FindConfigSkipWaitForGossipToSettle(params *FindConfigSkipWaitForGossipToSettleParams) (*FindConfigSkipWaitForGossipToSettleOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigSkipWaitForGossipToSettleParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_skip_wait_for_gossip_to_settle", + Method: "GET", + PathPattern: "/config/skip_wait_for_gossip_to_settle", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigSkipWaitForGossipToSettleReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigSkipWaitForGossipToSettleOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigSkipWaitForGossipToSettleDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigSnapshotBeforeCompaction Enable or disable taking a snapshot before each compaction. This option is useful to back up data when there is a data format change. Be careful using this option because Cassandra does not clean up older snapshots automatically. 
+ +Related information: Configuring compaction +*/ +func (a *Client) FindConfigSnapshotBeforeCompaction(params *FindConfigSnapshotBeforeCompactionParams) (*FindConfigSnapshotBeforeCompactionOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigSnapshotBeforeCompactionParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_snapshot_before_compaction", + Method: "GET", + PathPattern: "/config/snapshot_before_compaction", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigSnapshotBeforeCompactionReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigSnapshotBeforeCompactionOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigSnapshotBeforeCompactionDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigSslStoragePort The SSL port for encrypted communication. Unused unless enabled in encryption_options. +*/ +func (a *Client) FindConfigSslStoragePort(params *FindConfigSslStoragePortParams) (*FindConfigSslStoragePortOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigSslStoragePortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_ssl_storage_port", + Method: "GET", + PathPattern: "/config/ssl_storage_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigSslStoragePortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigSslStoragePortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigSslStoragePortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigSstablePreemptiveOpenIntervalInMb When compacting, the replacement SSTables are opened before they are completely written and used in place of the prior SSTables for any range already written. This setting helps to smoothly transfer reads between the SSTables by reducing page cache churn and keeps hot rows hot.
+*/ +func (a *Client) FindConfigSstablePreemptiveOpenIntervalInMb(params *FindConfigSstablePreemptiveOpenIntervalInMbParams) (*FindConfigSstablePreemptiveOpenIntervalInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigSstablePreemptiveOpenIntervalInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_sstable_preemptive_open_interval_in_mb", + Method: "GET", + PathPattern: "/config/sstable_preemptive_open_interval_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigSstablePreemptiveOpenIntervalInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigSstablePreemptiveOpenIntervalInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigSstablePreemptiveOpenIntervalInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigSstableSummaryRatio Enforces that 1 byte of summary is written for every N (2000 by default) bytes written to data file. Value must be between 0 and 1. +*/ +func (a *Client) FindConfigSstableSummaryRatio(params *FindConfigSstableSummaryRatioParams) (*FindConfigSstableSummaryRatioOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigSstableSummaryRatioParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_sstable_summary_ratio", + Method: "GET", + PathPattern: "/config/sstable_summary_ratio", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigSstableSummaryRatioReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigSstableSummaryRatioOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigSstableSummaryRatioDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigStartNativeTransport Enable or disable the native transport server. Uses the same address as the rpc_address, but the port is different from the rpc_port. See native_transport_port. 
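+
+The exported HTTPClient field on the generated Params lets a caller substitute its own *http.Client, for example one with a hard client-side timeout; a sketch under that assumption:
+
+	p := NewFindConfigStartNativeTransportParams()
+	p.HTTPClient = &http.Client{Timeout: 2 * time.Second} // used in place of the transport's default client
+	res, err := c.FindConfigStartNativeTransport(p)
+	// res.Payload is assumed to report whether the native transport is enabled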
+*/ +func (a *Client) FindConfigStartNativeTransport(params *FindConfigStartNativeTransportParams) (*FindConfigStartNativeTransportOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigStartNativeTransportParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_start_native_transport", + Method: "GET", + PathPattern: "/config/start_native_transport", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigStartNativeTransportReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigStartNativeTransportOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigStartNativeTransportDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigStartRPC Starts the Thrift RPC server +*/ +func (a *Client) FindConfigStartRPC(params *FindConfigStartRPCParams) (*FindConfigStartRPCOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigStartRPCParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_start_rpc", + Method: "GET", + PathPattern: "/config/start_rpc", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigStartRPCReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigStartRPCOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigStartRPCDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigStoragePort The port for inter-node communication. +*/ +func (a *Client) FindConfigStoragePort(params *FindConfigStoragePortParams) (*FindConfigStoragePortOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigStoragePortParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_storage_port", + Method: "GET", + PathPattern: "/config/storage_port", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigStoragePortReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigStoragePortOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigStoragePortDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigStreamThroughputOutboundMegabitsPerSec Throttles all outbound streaming file transfers on a node to the specified throughput. 
Cassandra does mostly sequential I/O when streaming data during bootstrap or repair, which can lead to saturating the network connection and degrading client (RPC) performance. +*/ +func (a *Client) FindConfigStreamThroughputOutboundMegabitsPerSec(params *FindConfigStreamThroughputOutboundMegabitsPerSecParams) (*FindConfigStreamThroughputOutboundMegabitsPerSecOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigStreamThroughputOutboundMegabitsPerSecParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_stream_throughput_outbound_megabits_per_sec", + Method: "GET", + PathPattern: "/config/stream_throughput_outbound_megabits_per_sec", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigStreamThroughputOutboundMegabitsPerSecReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigStreamThroughputOutboundMegabitsPerSecOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigStreamThroughputOutboundMegabitsPerSecDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigStreamingSocketTimeoutInMs Enable or disable socket timeout for streaming operations. When a timeout occurs during streaming, streaming is retried from the start of the current file. Avoid setting this value too low, as it can result in a significant amount of data re-streaming. +*/ +func (a *Client) FindConfigStreamingSocketTimeoutInMs(params *FindConfigStreamingSocketTimeoutInMsParams) (*FindConfigStreamingSocketTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigStreamingSocketTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_streaming_socket_timeout_in_ms", + Method: "GET", + PathPattern: "/config/streaming_socket_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigStreamingSocketTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigStreamingSocketTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigStreamingSocketTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigThriftFramedTransportSizeInMb Frame size (maximum field length) for Thrift. The frame is the row or part of the row the application is inserting. 
+*/ +func (a *Client) FindConfigThriftFramedTransportSizeInMb(params *FindConfigThriftFramedTransportSizeInMbParams) (*FindConfigThriftFramedTransportSizeInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigThriftFramedTransportSizeInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_thrift_framed_transport_size_in_mb", + Method: "GET", + PathPattern: "/config/thrift_framed_transport_size_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigThriftFramedTransportSizeInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigThriftFramedTransportSizeInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigThriftFramedTransportSizeInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigThriftMaxMessageLengthInMb The maximum length of a Thrift message in megabytes, including all fields and internal Thrift overhead (1 byte of overhead for each frame). Message length is usually used in conjunction with batches. A frame length greater than or equal to 24 accommodates a batch with four inserts, each of which is 24 bytes. The required message length is greater than or equal to 24+24+24+24+4 (number of frames). +*/ +func (a *Client) FindConfigThriftMaxMessageLengthInMb(params *FindConfigThriftMaxMessageLengthInMbParams) (*FindConfigThriftMaxMessageLengthInMbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigThriftMaxMessageLengthInMbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_thrift_max_message_length_in_mb", + Method: "GET", + PathPattern: "/config/thrift_max_message_length_in_mb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigThriftMaxMessageLengthInMbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigThriftMaxMessageLengthInMbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigThriftMaxMessageLengthInMbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigTombstoneFailureThreshold The maximum number of tombstones a query can scan before aborting. 
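+
+Editorial sketch of the error path, assuming a *Client named c and the "log"
+package: a non-2xx reply surfaces as a *FindConfigTombstoneFailureThresholdDefault
+error, so the status code and error model are recoverable via type assertion:
+
+	resp, err := c.FindConfigTombstoneFailureThreshold(nil) // nil params are defaulted by the client method
+	if err != nil {
+		if d, ok := err.(*FindConfigTombstoneFailureThresholdDefault); ok {
+			log.Printf("agent returned HTTP %d: %s", d.Code(), d.GetPayload().Message)
+		}
+		return err
+	}
+	threshold := resp.GetPayload()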
+*/ +func (a *Client) FindConfigTombstoneFailureThreshold(params *FindConfigTombstoneFailureThresholdParams) (*FindConfigTombstoneFailureThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigTombstoneFailureThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_tombstone_failure_threshold", + Method: "GET", + PathPattern: "/config/tombstone_failure_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigTombstoneFailureThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigTombstoneFailureThresholdOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigTombstoneFailureThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigTombstoneWarnThreshold The maximum number of tombstones a query can scan before warning. +*/ +func (a *Client) FindConfigTombstoneWarnThreshold(params *FindConfigTombstoneWarnThresholdParams) (*FindConfigTombstoneWarnThresholdOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigTombstoneWarnThresholdParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_tombstone_warn_threshold", + Method: "GET", + PathPattern: "/config/tombstone_warn_threshold", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigTombstoneWarnThresholdReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigTombstoneWarnThresholdOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigTombstoneWarnThresholdDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigTrickleFsync When doing sequential writing, enabling this option tells fsync to force the operating system to flush the dirty buffers at a set interval (trickle_fsync_interval_in_kb). Enable this parameter to avoid sudden dirty buffer flushing from impacting read latencies. Recommended for use on SSDs, but not on HDDs.
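+
+Editorial sketch: the generated params carry a per-request context, so this
+lookup can be bounded or cancelled like any other call. Assumes a *Client c;
+the WithContext constructor follows the uniform generated params pattern:
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	resp, err := c.FindConfigTrickleFsync(NewFindConfigTrickleFsyncParamsWithContext(ctx))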
+*/ +func (a *Client) FindConfigTrickleFsync(params *FindConfigTrickleFsyncParams) (*FindConfigTrickleFsyncOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigTrickleFsyncParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_trickle_fsync", + Method: "GET", + PathPattern: "/config/trickle_fsync", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigTrickleFsyncReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigTrickleFsyncOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigTrickleFsyncDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigTrickleFsyncIntervalInKb Sets the size of the fsync in kilobytes. +*/ +func (a *Client) FindConfigTrickleFsyncIntervalInKb(params *FindConfigTrickleFsyncIntervalInKbParams) (*FindConfigTrickleFsyncIntervalInKbOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigTrickleFsyncIntervalInKbParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_trickle_fsync_interval_in_kb", + Method: "GET", + PathPattern: "/config/trickle_fsync_interval_in_kb", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigTrickleFsyncIntervalInKbReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigTrickleFsyncIntervalInKbOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigTrickleFsyncIntervalInKbDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigTruncateRequestTimeoutInMs The time that the coordinator waits for truncates (remove all data from a table) to complete. The long default value allows for a snapshot to be taken before removing the data. If auto_snapshot is disabled (not recommended), you can reduce this time. 
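+
+Editorial note: this is the server-side truncate timeout; the client-side HTTP
+timeout on the params object is separate. A sketch that allows a generous
+request deadline while fetching it (assumes a *Client c; the WithTimeout
+constructor follows the uniform generated params pattern):
+
+	params := NewFindConfigTruncateRequestTimeoutInMsParamsWithTimeout(30 * time.Second)
+	resp, err := c.FindConfigTruncateRequestTimeoutInMs(params)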
+*/ +func (a *Client) FindConfigTruncateRequestTimeoutInMs(params *FindConfigTruncateRequestTimeoutInMsParams) (*FindConfigTruncateRequestTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigTruncateRequestTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_truncate_request_timeout_in_ms", + Method: "GET", + PathPattern: "/config/truncate_request_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigTruncateRequestTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigTruncateRequestTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigTruncateRequestTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigUUIDSstableIdentifiersEnabled Return true if the node is using the UUID sstable format, e.g. me-3g7k_098r_4wtqo2asamoc1i8h9n-big-CompressionInfo.db +*/ +func (a *Client) FindConfigUUIDSstableIdentifiersEnabled(params *FindConfigUUIDSstableIdentifiersEnabledParams) (*FindConfigUUIDSstableIdentifiersEnabledOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigUUIDSstableIdentifiersEnabledParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_uuid_sstable_identifiers_enabled", + Method: "GET", + PathPattern: "/config/uuid_sstable_identifiers_enabled", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigUUIDSstableIdentifiersEnabledReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigUUIDSstableIdentifiersEnabledOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigUUIDSstableIdentifiersEnabledDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigViewBuilding Enable view building; should only be set to false when the node is experiencing issues due to view building. +*/ +func (a *Client) FindConfigViewBuilding(params *FindConfigViewBuildingParams) (*FindConfigViewBuildingOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigViewBuildingParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_view_building", + Method: "GET", + PathPattern: "/config/view_building", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigViewBuildingReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigViewBuildingOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := 
result.(*FindConfigViewBuildingDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigViewHintsDirectory The directory where materialized-view updates are stored while a view replica is unreachable. +*/ +func (a *Client) FindConfigViewHintsDirectory(params *FindConfigViewHintsDirectoryParams) (*FindConfigViewHintsDirectoryOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigViewHintsDirectoryParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_view_hints_directory", + Method: "GET", + PathPattern: "/config/view_hints_directory", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigViewHintsDirectoryReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigViewHintsDirectoryOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigViewHintsDirectoryDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigVirtualDirtySoftLimit Soft limit of virtual dirty memory expressed as a portion of the hard limit +*/ +func (a *Client) FindConfigVirtualDirtySoftLimit(params *FindConfigVirtualDirtySoftLimitParams) (*FindConfigVirtualDirtySoftLimitOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigVirtualDirtySoftLimitParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_virtual_dirty_soft_limit", + Method: "GET", + PathPattern: "/config/virtual_dirty_soft_limit", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigVirtualDirtySoftLimitReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigVirtualDirtySoftLimitOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigVirtualDirtySoftLimitDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* +FindConfigVolatileSystemKeyspaceForTesting Don't persist system keyspace - testing only! 
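+
+Editorial sketch: a per-request *http.Client can be injected, which is handy in
+the test-only scenarios this flag exists for. Assumes a *Client c; the
+WithHTTPClient constructor follows the uniform generated params pattern:
+
+	hc := &http.Client{Timeout: 5 * time.Second}
+	params := NewFindConfigVolatileSystemKeyspaceForTestingParamsWithHTTPClient(hc)
+	volatile, err := c.FindConfigVolatileSystemKeyspaceForTesting(params)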
+*/ +func (a *Client) FindConfigVolatileSystemKeyspaceForTesting(params *FindConfigVolatileSystemKeyspaceForTestingParams) (*FindConfigVolatileSystemKeyspaceForTestingOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigVolatileSystemKeyspaceForTestingParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_volatile_system_keyspace_for_testing", + Method: "GET", + PathPattern: "/config/volatile_system_keyspace_for_testing", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigVolatileSystemKeyspaceForTestingReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigVolatileSystemKeyspaceForTestingOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigVolatileSystemKeyspaceForTestingDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +/* + FindConfigWriteRequestTimeoutInMs The time in milliseconds that the coordinator waits for write operations to complete. + +Related information: About hinted handoff writes +*/ +func (a *Client) FindConfigWriteRequestTimeoutInMs(params *FindConfigWriteRequestTimeoutInMsParams) (*FindConfigWriteRequestTimeoutInMsOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewFindConfigWriteRequestTimeoutInMsParams() + } + + result, err := a.transport.Submit(&runtime.ClientOperation{ + ID: "find_config_write_request_timeout_in_ms", + Method: "GET", + PathPattern: "/config/write_request_timeout_in_ms", + ProducesMediaTypes: []string{"application/json"}, + ConsumesMediaTypes: []string{"application/json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &FindConfigWriteRequestTimeoutInMsReader{formats: a.formats}, + Context: params.Context, + Client: params.HTTPClient, + }) + if err != nil { + return nil, err + } + success, ok := result.(*FindConfigWriteRequestTimeoutInMsOK) + if ok { + return success, nil + } + // unexpected success response + unexpectedSuccess := result.(*FindConfigWriteRequestTimeoutInMsDefault) + return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code()) +} + +// SetTransport changes the transport on the client +func (a *Client) SetTransport(transport runtime.ClientTransport) { + a.transport = transport +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_abort_on_lsa_bad_alloc_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_abort_on_lsa_bad_alloc_parameters.go new file mode 100644 index 00000000000..77ca653a461 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_abort_on_lsa_bad_alloc_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAbortOnLsaBadAllocParams creates a new FindConfigAbortOnLsaBadAllocParams object +// with the default values initialized. +func NewFindConfigAbortOnLsaBadAllocParams() *FindConfigAbortOnLsaBadAllocParams { + + return &FindConfigAbortOnLsaBadAllocParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAbortOnLsaBadAllocParamsWithTimeout creates a new FindConfigAbortOnLsaBadAllocParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAbortOnLsaBadAllocParamsWithTimeout(timeout time.Duration) *FindConfigAbortOnLsaBadAllocParams { + + return &FindConfigAbortOnLsaBadAllocParams{ + + timeout: timeout, + } +} + +// NewFindConfigAbortOnLsaBadAllocParamsWithContext creates a new FindConfigAbortOnLsaBadAllocParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAbortOnLsaBadAllocParamsWithContext(ctx context.Context) *FindConfigAbortOnLsaBadAllocParams { + + return &FindConfigAbortOnLsaBadAllocParams{ + + Context: ctx, + } +} + +// NewFindConfigAbortOnLsaBadAllocParamsWithHTTPClient creates a new FindConfigAbortOnLsaBadAllocParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAbortOnLsaBadAllocParamsWithHTTPClient(client *http.Client) *FindConfigAbortOnLsaBadAllocParams { + + return &FindConfigAbortOnLsaBadAllocParams{ + HTTPClient: client, + } +} + +/* +FindConfigAbortOnLsaBadAllocParams contains all the parameters to send to the API endpoint +for the find config abort on lsa bad alloc operation typically these are written to a http.Request +*/ +type FindConfigAbortOnLsaBadAllocParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config abort on lsa bad alloc params +func (o *FindConfigAbortOnLsaBadAllocParams) WithTimeout(timeout time.Duration) *FindConfigAbortOnLsaBadAllocParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config abort on lsa bad alloc params +func (o *FindConfigAbortOnLsaBadAllocParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config abort on lsa bad alloc params +func (o *FindConfigAbortOnLsaBadAllocParams) WithContext(ctx context.Context) *FindConfigAbortOnLsaBadAllocParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config abort on lsa bad alloc params +func (o *FindConfigAbortOnLsaBadAllocParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config abort on lsa bad alloc params +func (o *FindConfigAbortOnLsaBadAllocParams) WithHTTPClient(client *http.Client) *FindConfigAbortOnLsaBadAllocParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config abort on lsa bad alloc params +func (o *FindConfigAbortOnLsaBadAllocParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAbortOnLsaBadAllocParams) WriteToRequest(r runtime.ClientRequest, 
reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_abort_on_lsa_bad_alloc_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_abort_on_lsa_bad_alloc_responses.go new file mode 100644 index 00000000000..9f6330fbdd5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_abort_on_lsa_bad_alloc_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAbortOnLsaBadAllocReader is a Reader for the FindConfigAbortOnLsaBadAlloc structure. +type FindConfigAbortOnLsaBadAllocReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAbortOnLsaBadAllocReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAbortOnLsaBadAllocOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAbortOnLsaBadAllocDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAbortOnLsaBadAllocOK creates a FindConfigAbortOnLsaBadAllocOK with default headers values +func NewFindConfigAbortOnLsaBadAllocOK() *FindConfigAbortOnLsaBadAllocOK { + return &FindConfigAbortOnLsaBadAllocOK{} +} + +/* +FindConfigAbortOnLsaBadAllocOK handles this case with default header values. + +Config value +*/ +type FindConfigAbortOnLsaBadAllocOK struct { + Payload bool +} + +func (o *FindConfigAbortOnLsaBadAllocOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigAbortOnLsaBadAllocOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAbortOnLsaBadAllocDefault creates a FindConfigAbortOnLsaBadAllocDefault with default headers values +func NewFindConfigAbortOnLsaBadAllocDefault(code int) *FindConfigAbortOnLsaBadAllocDefault { + return &FindConfigAbortOnLsaBadAllocDefault{ + _statusCode: code, + } +} + +/* +FindConfigAbortOnLsaBadAllocDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAbortOnLsaBadAllocDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config abort on lsa bad alloc default response +func (o *FindConfigAbortOnLsaBadAllocDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAbortOnLsaBadAllocDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAbortOnLsaBadAllocDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAbortOnLsaBadAllocDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_address_parameters.go new file mode 100644 index 00000000000..3f1278acd6d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAlternatorAddressParams creates a new FindConfigAlternatorAddressParams object +// with the default values initialized. 
+func NewFindConfigAlternatorAddressParams() *FindConfigAlternatorAddressParams { + + return &FindConfigAlternatorAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAlternatorAddressParamsWithTimeout creates a new FindConfigAlternatorAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAlternatorAddressParamsWithTimeout(timeout time.Duration) *FindConfigAlternatorAddressParams { + + return &FindConfigAlternatorAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigAlternatorAddressParamsWithContext creates a new FindConfigAlternatorAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAlternatorAddressParamsWithContext(ctx context.Context) *FindConfigAlternatorAddressParams { + + return &FindConfigAlternatorAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigAlternatorAddressParamsWithHTTPClient creates a new FindConfigAlternatorAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAlternatorAddressParamsWithHTTPClient(client *http.Client) *FindConfigAlternatorAddressParams { + + return &FindConfigAlternatorAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigAlternatorAddressParams contains all the parameters to send to the API endpoint +for the find config alternator address operation typically these are written to a http.Request +*/ +type FindConfigAlternatorAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config alternator address params +func (o *FindConfigAlternatorAddressParams) WithTimeout(timeout time.Duration) *FindConfigAlternatorAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config alternator address params +func (o *FindConfigAlternatorAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config alternator address params +func (o *FindConfigAlternatorAddressParams) WithContext(ctx context.Context) *FindConfigAlternatorAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config alternator address params +func (o *FindConfigAlternatorAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config alternator address params +func (o *FindConfigAlternatorAddressParams) WithHTTPClient(client *http.Client) *FindConfigAlternatorAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config alternator address params +func (o *FindConfigAlternatorAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAlternatorAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_address_responses.go new file mode 100644 index 00000000000..d61b6dbd7f8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAlternatorAddressReader is a Reader for the FindConfigAlternatorAddress structure. +type FindConfigAlternatorAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAlternatorAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAlternatorAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAlternatorAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAlternatorAddressOK creates a FindConfigAlternatorAddressOK with default headers values +func NewFindConfigAlternatorAddressOK() *FindConfigAlternatorAddressOK { + return &FindConfigAlternatorAddressOK{} +} + +/* +FindConfigAlternatorAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigAlternatorAddressOK struct { + Payload string +} + +func (o *FindConfigAlternatorAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigAlternatorAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAlternatorAddressDefault creates a FindConfigAlternatorAddressDefault with default headers values +func NewFindConfigAlternatorAddressDefault(code int) *FindConfigAlternatorAddressDefault { + return &FindConfigAlternatorAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigAlternatorAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAlternatorAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config alternator address default response +func (o *FindConfigAlternatorAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAlternatorAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAlternatorAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAlternatorAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_enforce_authorization_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_enforce_authorization_parameters.go new file mode 100644 index 00000000000..f82d5919e85 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_enforce_authorization_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAlternatorEnforceAuthorizationParams creates a new FindConfigAlternatorEnforceAuthorizationParams object +// with the default values initialized. 
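+//
+// Editorial sketch of the builder-style setters this constructor pairs with
+// (assumes a config client c, a context ctx, and the matching generated
+// client method):
+//
+//	params := NewFindConfigAlternatorEnforceAuthorizationParams().
+//		WithTimeout(10 * time.Second).
+//		WithContext(ctx)
+//	enforced, err := c.FindConfigAlternatorEnforceAuthorization(params)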
+func NewFindConfigAlternatorEnforceAuthorizationParams() *FindConfigAlternatorEnforceAuthorizationParams { + + return &FindConfigAlternatorEnforceAuthorizationParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAlternatorEnforceAuthorizationParamsWithTimeout creates a new FindConfigAlternatorEnforceAuthorizationParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAlternatorEnforceAuthorizationParamsWithTimeout(timeout time.Duration) *FindConfigAlternatorEnforceAuthorizationParams { + + return &FindConfigAlternatorEnforceAuthorizationParams{ + + timeout: timeout, + } +} + +// NewFindConfigAlternatorEnforceAuthorizationParamsWithContext creates a new FindConfigAlternatorEnforceAuthorizationParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAlternatorEnforceAuthorizationParamsWithContext(ctx context.Context) *FindConfigAlternatorEnforceAuthorizationParams { + + return &FindConfigAlternatorEnforceAuthorizationParams{ + + Context: ctx, + } +} + +// NewFindConfigAlternatorEnforceAuthorizationParamsWithHTTPClient creates a new FindConfigAlternatorEnforceAuthorizationParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAlternatorEnforceAuthorizationParamsWithHTTPClient(client *http.Client) *FindConfigAlternatorEnforceAuthorizationParams { + + return &FindConfigAlternatorEnforceAuthorizationParams{ + HTTPClient: client, + } +} + +/* +FindConfigAlternatorEnforceAuthorizationParams contains all the parameters to send to the API endpoint +for the find config alternator enforce authorization operation typically these are written to a http.Request +*/ +type FindConfigAlternatorEnforceAuthorizationParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config alternator enforce authorization params +func (o *FindConfigAlternatorEnforceAuthorizationParams) WithTimeout(timeout time.Duration) *FindConfigAlternatorEnforceAuthorizationParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config alternator enforce authorization params +func (o *FindConfigAlternatorEnforceAuthorizationParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config alternator enforce authorization params +func (o *FindConfigAlternatorEnforceAuthorizationParams) WithContext(ctx context.Context) *FindConfigAlternatorEnforceAuthorizationParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config alternator enforce authorization params +func (o *FindConfigAlternatorEnforceAuthorizationParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config alternator enforce authorization params +func (o *FindConfigAlternatorEnforceAuthorizationParams) WithHTTPClient(client *http.Client) *FindConfigAlternatorEnforceAuthorizationParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config alternator enforce authorization params +func (o *FindConfigAlternatorEnforceAuthorizationParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAlternatorEnforceAuthorizationParams) WriteToRequest(r 
runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_enforce_authorization_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_enforce_authorization_responses.go new file mode 100644 index 00000000000..e484c42e2ec --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_enforce_authorization_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAlternatorEnforceAuthorizationReader is a Reader for the FindConfigAlternatorEnforceAuthorization structure. +type FindConfigAlternatorEnforceAuthorizationReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAlternatorEnforceAuthorizationReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAlternatorEnforceAuthorizationOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAlternatorEnforceAuthorizationDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAlternatorEnforceAuthorizationOK creates a FindConfigAlternatorEnforceAuthorizationOK with default headers values +func NewFindConfigAlternatorEnforceAuthorizationOK() *FindConfigAlternatorEnforceAuthorizationOK { + return &FindConfigAlternatorEnforceAuthorizationOK{} +} + +/* +FindConfigAlternatorEnforceAuthorizationOK handles this case with default header values. + +Config value +*/ +type FindConfigAlternatorEnforceAuthorizationOK struct { + Payload bool +} + +func (o *FindConfigAlternatorEnforceAuthorizationOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigAlternatorEnforceAuthorizationOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAlternatorEnforceAuthorizationDefault creates a FindConfigAlternatorEnforceAuthorizationDefault with default headers values +func NewFindConfigAlternatorEnforceAuthorizationDefault(code int) *FindConfigAlternatorEnforceAuthorizationDefault { + return &FindConfigAlternatorEnforceAuthorizationDefault{ + _statusCode: code, + } +} + +/* +FindConfigAlternatorEnforceAuthorizationDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAlternatorEnforceAuthorizationDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config alternator enforce authorization default response +func (o *FindConfigAlternatorEnforceAuthorizationDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAlternatorEnforceAuthorizationDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAlternatorEnforceAuthorizationDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAlternatorEnforceAuthorizationDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_https_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_https_port_parameters.go new file mode 100644 index 00000000000..66b3525730f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_https_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAlternatorHTTPSPortParams creates a new FindConfigAlternatorHTTPSPortParams object +// with the default values initialized. 
+func NewFindConfigAlternatorHTTPSPortParams() *FindConfigAlternatorHTTPSPortParams { + + return &FindConfigAlternatorHTTPSPortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAlternatorHTTPSPortParamsWithTimeout creates a new FindConfigAlternatorHTTPSPortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAlternatorHTTPSPortParamsWithTimeout(timeout time.Duration) *FindConfigAlternatorHTTPSPortParams { + + return &FindConfigAlternatorHTTPSPortParams{ + + timeout: timeout, + } +} + +// NewFindConfigAlternatorHTTPSPortParamsWithContext creates a new FindConfigAlternatorHTTPSPortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAlternatorHTTPSPortParamsWithContext(ctx context.Context) *FindConfigAlternatorHTTPSPortParams { + + return &FindConfigAlternatorHTTPSPortParams{ + + Context: ctx, + } +} + +// NewFindConfigAlternatorHTTPSPortParamsWithHTTPClient creates a new FindConfigAlternatorHTTPSPortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAlternatorHTTPSPortParamsWithHTTPClient(client *http.Client) *FindConfigAlternatorHTTPSPortParams { + + return &FindConfigAlternatorHTTPSPortParams{ + HTTPClient: client, + } +} + +/* +FindConfigAlternatorHTTPSPortParams contains all the parameters to send to the API endpoint +for the find config alternator https port operation typically these are written to a http.Request +*/ +type FindConfigAlternatorHTTPSPortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config alternator https port params +func (o *FindConfigAlternatorHTTPSPortParams) WithTimeout(timeout time.Duration) *FindConfigAlternatorHTTPSPortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config alternator https port params +func (o *FindConfigAlternatorHTTPSPortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config alternator https port params +func (o *FindConfigAlternatorHTTPSPortParams) WithContext(ctx context.Context) *FindConfigAlternatorHTTPSPortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config alternator https port params +func (o *FindConfigAlternatorHTTPSPortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config alternator https port params +func (o *FindConfigAlternatorHTTPSPortParams) WithHTTPClient(client *http.Client) *FindConfigAlternatorHTTPSPortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config alternator https port params +func (o *FindConfigAlternatorHTTPSPortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAlternatorHTTPSPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_https_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_https_port_responses.go new file mode 100644 index 00000000000..7019a6455b3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_https_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAlternatorHTTPSPortReader is a Reader for the FindConfigAlternatorHTTPSPort structure. +type FindConfigAlternatorHTTPSPortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAlternatorHTTPSPortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAlternatorHTTPSPortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAlternatorHTTPSPortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAlternatorHTTPSPortOK creates a FindConfigAlternatorHTTPSPortOK with default headers values +func NewFindConfigAlternatorHTTPSPortOK() *FindConfigAlternatorHTTPSPortOK { + return &FindConfigAlternatorHTTPSPortOK{} +} + +/* +FindConfigAlternatorHTTPSPortOK handles this case with default header values. + +Config value +*/ +type FindConfigAlternatorHTTPSPortOK struct { + Payload int64 +} + +func (o *FindConfigAlternatorHTTPSPortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigAlternatorHTTPSPortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAlternatorHTTPSPortDefault creates a FindConfigAlternatorHTTPSPortDefault with default headers values +func NewFindConfigAlternatorHTTPSPortDefault(code int) *FindConfigAlternatorHTTPSPortDefault { + return &FindConfigAlternatorHTTPSPortDefault{ + _statusCode: code, + } +} + +/* +FindConfigAlternatorHTTPSPortDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAlternatorHTTPSPortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config alternator https port default response +func (o *FindConfigAlternatorHTTPSPortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAlternatorHTTPSPortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAlternatorHTTPSPortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAlternatorHTTPSPortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_port_parameters.go new file mode 100644 index 00000000000..5cf22903deb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAlternatorPortParams creates a new FindConfigAlternatorPortParams object +// with the default values initialized. 
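+//
+// Editorial sketch (assumes a config client c): the matching client call
+// returns the port as the OK payload, an int64 per
+// find_config_alternator_port_responses.go:
+//
+//	resp, err := c.FindConfigAlternatorPort(NewFindConfigAlternatorPortParams())
+//	if err != nil {
+//		return err
+//	}
+//	port := resp.GetPayload()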
+func NewFindConfigAlternatorPortParams() *FindConfigAlternatorPortParams { + + return &FindConfigAlternatorPortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAlternatorPortParamsWithTimeout creates a new FindConfigAlternatorPortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAlternatorPortParamsWithTimeout(timeout time.Duration) *FindConfigAlternatorPortParams { + + return &FindConfigAlternatorPortParams{ + + timeout: timeout, + } +} + +// NewFindConfigAlternatorPortParamsWithContext creates a new FindConfigAlternatorPortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAlternatorPortParamsWithContext(ctx context.Context) *FindConfigAlternatorPortParams { + + return &FindConfigAlternatorPortParams{ + + Context: ctx, + } +} + +// NewFindConfigAlternatorPortParamsWithHTTPClient creates a new FindConfigAlternatorPortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAlternatorPortParamsWithHTTPClient(client *http.Client) *FindConfigAlternatorPortParams { + + return &FindConfigAlternatorPortParams{ + HTTPClient: client, + } +} + +/* +FindConfigAlternatorPortParams contains all the parameters to send to the API endpoint +for the find config alternator port operation typically these are written to a http.Request +*/ +type FindConfigAlternatorPortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config alternator port params +func (o *FindConfigAlternatorPortParams) WithTimeout(timeout time.Duration) *FindConfigAlternatorPortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config alternator port params +func (o *FindConfigAlternatorPortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config alternator port params +func (o *FindConfigAlternatorPortParams) WithContext(ctx context.Context) *FindConfigAlternatorPortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config alternator port params +func (o *FindConfigAlternatorPortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config alternator port params +func (o *FindConfigAlternatorPortParams) WithHTTPClient(client *http.Client) *FindConfigAlternatorPortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config alternator port params +func (o *FindConfigAlternatorPortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAlternatorPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_port_responses.go new file mode 100644 index 00000000000..2484fcbb42f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_alternator_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAlternatorPortReader is a Reader for the FindConfigAlternatorPort structure. +type FindConfigAlternatorPortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAlternatorPortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAlternatorPortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAlternatorPortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAlternatorPortOK creates a FindConfigAlternatorPortOK with default headers values +func NewFindConfigAlternatorPortOK() *FindConfigAlternatorPortOK { + return &FindConfigAlternatorPortOK{} +} + +/* +FindConfigAlternatorPortOK handles this case with default header values. + +Config value +*/ +type FindConfigAlternatorPortOK struct { + Payload int64 +} + +func (o *FindConfigAlternatorPortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigAlternatorPortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAlternatorPortDefault creates a FindConfigAlternatorPortDefault with default headers values +func NewFindConfigAlternatorPortDefault(code int) *FindConfigAlternatorPortDefault { + return &FindConfigAlternatorPortDefault{ + _statusCode: code, + } +} + +/* +FindConfigAlternatorPortDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAlternatorPortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config alternator port default response +func (o *FindConfigAlternatorPortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAlternatorPortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAlternatorPortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAlternatorPortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_address_parameters.go new file mode 100644 index 00000000000..b9cf9d7971f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAPIAddressParams creates a new FindConfigAPIAddressParams object +// with the default values initialized. 
+func NewFindConfigAPIAddressParams() *FindConfigAPIAddressParams { + + return &FindConfigAPIAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAPIAddressParamsWithTimeout creates a new FindConfigAPIAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAPIAddressParamsWithTimeout(timeout time.Duration) *FindConfigAPIAddressParams { + + return &FindConfigAPIAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigAPIAddressParamsWithContext creates a new FindConfigAPIAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAPIAddressParamsWithContext(ctx context.Context) *FindConfigAPIAddressParams { + + return &FindConfigAPIAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigAPIAddressParamsWithHTTPClient creates a new FindConfigAPIAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAPIAddressParamsWithHTTPClient(client *http.Client) *FindConfigAPIAddressParams { + + return &FindConfigAPIAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigAPIAddressParams contains all the parameters to send to the API endpoint +for the find config api address operation typically these are written to a http.Request +*/ +type FindConfigAPIAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config api address params +func (o *FindConfigAPIAddressParams) WithTimeout(timeout time.Duration) *FindConfigAPIAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config api address params +func (o *FindConfigAPIAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config api address params +func (o *FindConfigAPIAddressParams) WithContext(ctx context.Context) *FindConfigAPIAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config api address params +func (o *FindConfigAPIAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config api address params +func (o *FindConfigAPIAddressParams) WithHTTPClient(client *http.Client) *FindConfigAPIAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config api address params +func (o *FindConfigAPIAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAPIAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_address_responses.go new file mode 100644 index 00000000000..bc9c0db62b2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAPIAddressReader is a Reader for the FindConfigAPIAddress structure. +type FindConfigAPIAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAPIAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAPIAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAPIAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAPIAddressOK creates a FindConfigAPIAddressOK with default headers values +func NewFindConfigAPIAddressOK() *FindConfigAPIAddressOK { + return &FindConfigAPIAddressOK{} +} + +/* +FindConfigAPIAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigAPIAddressOK struct { + Payload string +} + +func (o *FindConfigAPIAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigAPIAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAPIAddressDefault creates a FindConfigAPIAddressDefault with default headers values +func NewFindConfigAPIAddressDefault(code int) *FindConfigAPIAddressDefault { + return &FindConfigAPIAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigAPIAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAPIAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config api address default response +func (o *FindConfigAPIAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAPIAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAPIAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAPIAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_doc_dir_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_doc_dir_parameters.go new file mode 100644 index 00000000000..d0780ab6298 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_doc_dir_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAPIDocDirParams creates a new FindConfigAPIDocDirParams object +// with the default values initialized. 
+func NewFindConfigAPIDocDirParams() *FindConfigAPIDocDirParams { + + return &FindConfigAPIDocDirParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAPIDocDirParamsWithTimeout creates a new FindConfigAPIDocDirParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAPIDocDirParamsWithTimeout(timeout time.Duration) *FindConfigAPIDocDirParams { + + return &FindConfigAPIDocDirParams{ + + timeout: timeout, + } +} + +// NewFindConfigAPIDocDirParamsWithContext creates a new FindConfigAPIDocDirParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAPIDocDirParamsWithContext(ctx context.Context) *FindConfigAPIDocDirParams { + + return &FindConfigAPIDocDirParams{ + + Context: ctx, + } +} + +// NewFindConfigAPIDocDirParamsWithHTTPClient creates a new FindConfigAPIDocDirParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAPIDocDirParamsWithHTTPClient(client *http.Client) *FindConfigAPIDocDirParams { + + return &FindConfigAPIDocDirParams{ + HTTPClient: client, + } +} + +/* +FindConfigAPIDocDirParams contains all the parameters to send to the API endpoint +for the find config api doc dir operation typically these are written to a http.Request +*/ +type FindConfigAPIDocDirParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config api doc dir params +func (o *FindConfigAPIDocDirParams) WithTimeout(timeout time.Duration) *FindConfigAPIDocDirParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config api doc dir params +func (o *FindConfigAPIDocDirParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config api doc dir params +func (o *FindConfigAPIDocDirParams) WithContext(ctx context.Context) *FindConfigAPIDocDirParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config api doc dir params +func (o *FindConfigAPIDocDirParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config api doc dir params +func (o *FindConfigAPIDocDirParams) WithHTTPClient(client *http.Client) *FindConfigAPIDocDirParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config api doc dir params +func (o *FindConfigAPIDocDirParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAPIDocDirParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_doc_dir_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_doc_dir_responses.go new file mode 100644 index 00000000000..e8e69ff3132 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_doc_dir_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAPIDocDirReader is a Reader for the FindConfigAPIDocDir structure. +type FindConfigAPIDocDirReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAPIDocDirReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAPIDocDirOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAPIDocDirDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAPIDocDirOK creates a FindConfigAPIDocDirOK with default headers values +func NewFindConfigAPIDocDirOK() *FindConfigAPIDocDirOK { + return &FindConfigAPIDocDirOK{} +} + +/* +FindConfigAPIDocDirOK handles this case with default header values. + +Config value +*/ +type FindConfigAPIDocDirOK struct { + Payload string +} + +func (o *FindConfigAPIDocDirOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigAPIDocDirOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAPIDocDirDefault creates a FindConfigAPIDocDirDefault with default headers values +func NewFindConfigAPIDocDirDefault(code int) *FindConfigAPIDocDirDefault { + return &FindConfigAPIDocDirDefault{ + _statusCode: code, + } +} + +/* +FindConfigAPIDocDirDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigAPIDocDirDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config api doc dir default response +func (o *FindConfigAPIDocDirDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAPIDocDirDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAPIDocDirDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAPIDocDirDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_port_parameters.go new file mode 100644 index 00000000000..d336f256432 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAPIPortParams creates a new FindConfigAPIPortParams object +// with the default values initialized. +func NewFindConfigAPIPortParams() *FindConfigAPIPortParams { + + return &FindConfigAPIPortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAPIPortParamsWithTimeout creates a new FindConfigAPIPortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAPIPortParamsWithTimeout(timeout time.Duration) *FindConfigAPIPortParams { + + return &FindConfigAPIPortParams{ + + timeout: timeout, + } +} + +// NewFindConfigAPIPortParamsWithContext creates a new FindConfigAPIPortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAPIPortParamsWithContext(ctx context.Context) *FindConfigAPIPortParams { + + return &FindConfigAPIPortParams{ + + Context: ctx, + } +} + +// NewFindConfigAPIPortParamsWithHTTPClient creates a new FindConfigAPIPortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAPIPortParamsWithHTTPClient(client *http.Client) *FindConfigAPIPortParams { + + return &FindConfigAPIPortParams{ + HTTPClient: client, + } +} + +/* +FindConfigAPIPortParams contains all the parameters to send to the API endpoint +for the find config api port operation typically these are written to a http.Request +*/ +type FindConfigAPIPortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config api port params +func (o *FindConfigAPIPortParams) WithTimeout(timeout time.Duration) *FindConfigAPIPortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config api port params +func (o *FindConfigAPIPortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config api port params +func (o *FindConfigAPIPortParams) WithContext(ctx context.Context) *FindConfigAPIPortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config api port params +func (o *FindConfigAPIPortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config api port params +func (o *FindConfigAPIPortParams) WithHTTPClient(client *http.Client) *FindConfigAPIPortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config api port params +func (o *FindConfigAPIPortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAPIPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_port_responses.go new file mode 100644 index 00000000000..f71b9522196 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAPIPortReader is a Reader for the FindConfigAPIPort structure. +type FindConfigAPIPortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAPIPortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAPIPortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAPIPortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAPIPortOK creates a FindConfigAPIPortOK with default headers values +func NewFindConfigAPIPortOK() *FindConfigAPIPortOK { + return &FindConfigAPIPortOK{} +} + +/* +FindConfigAPIPortOK handles this case with default header values. + +Config value +*/ +type FindConfigAPIPortOK struct { + Payload int64 +} + +func (o *FindConfigAPIPortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigAPIPortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAPIPortDefault creates a FindConfigAPIPortDefault with default headers values +func NewFindConfigAPIPortDefault(code int) *FindConfigAPIPortDefault { + return &FindConfigAPIPortDefault{ + _statusCode: code, + } +} + +/* +FindConfigAPIPortDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAPIPortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config api port default response +func (o *FindConfigAPIPortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAPIPortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAPIPortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAPIPortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_ui_dir_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_ui_dir_parameters.go new file mode 100644 index 00000000000..5fbb417c3a9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_ui_dir_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAPIUIDirParams creates a new FindConfigAPIUIDirParams object +// with the default values initialized. 
+func NewFindConfigAPIUIDirParams() *FindConfigAPIUIDirParams { + + return &FindConfigAPIUIDirParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAPIUIDirParamsWithTimeout creates a new FindConfigAPIUIDirParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAPIUIDirParamsWithTimeout(timeout time.Duration) *FindConfigAPIUIDirParams { + + return &FindConfigAPIUIDirParams{ + + timeout: timeout, + } +} + +// NewFindConfigAPIUIDirParamsWithContext creates a new FindConfigAPIUIDirParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAPIUIDirParamsWithContext(ctx context.Context) *FindConfigAPIUIDirParams { + + return &FindConfigAPIUIDirParams{ + + Context: ctx, + } +} + +// NewFindConfigAPIUIDirParamsWithHTTPClient creates a new FindConfigAPIUIDirParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAPIUIDirParamsWithHTTPClient(client *http.Client) *FindConfigAPIUIDirParams { + + return &FindConfigAPIUIDirParams{ + HTTPClient: client, + } +} + +/* +FindConfigAPIUIDirParams contains all the parameters to send to the API endpoint +for the find config api ui dir operation typically these are written to a http.Request +*/ +type FindConfigAPIUIDirParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config api ui dir params +func (o *FindConfigAPIUIDirParams) WithTimeout(timeout time.Duration) *FindConfigAPIUIDirParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config api ui dir params +func (o *FindConfigAPIUIDirParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config api ui dir params +func (o *FindConfigAPIUIDirParams) WithContext(ctx context.Context) *FindConfigAPIUIDirParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config api ui dir params +func (o *FindConfigAPIUIDirParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config api ui dir params +func (o *FindConfigAPIUIDirParams) WithHTTPClient(client *http.Client) *FindConfigAPIUIDirParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config api ui dir params +func (o *FindConfigAPIUIDirParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAPIUIDirParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_ui_dir_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_ui_dir_responses.go new file mode 100644 index 00000000000..2fb22ac96ac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_api_ui_dir_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAPIUIDirReader is a Reader for the FindConfigAPIUIDir structure. +type FindConfigAPIUIDirReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAPIUIDirReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAPIUIDirOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAPIUIDirDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAPIUIDirOK creates a FindConfigAPIUIDirOK with default headers values +func NewFindConfigAPIUIDirOK() *FindConfigAPIUIDirOK { + return &FindConfigAPIUIDirOK{} +} + +/* +FindConfigAPIUIDirOK handles this case with default header values. + +Config value +*/ +type FindConfigAPIUIDirOK struct { + Payload string +} + +func (o *FindConfigAPIUIDirOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigAPIUIDirOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAPIUIDirDefault creates a FindConfigAPIUIDirDefault with default headers values +func NewFindConfigAPIUIDirDefault(code int) *FindConfigAPIUIDirDefault { + return &FindConfigAPIUIDirDefault{ + _statusCode: code, + } +} + +/* +FindConfigAPIUIDirDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigAPIUIDirDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config api ui dir default response +func (o *FindConfigAPIUIDirDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAPIUIDirDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAPIUIDirDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAPIUIDirDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authenticator_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authenticator_parameters.go new file mode 100644 index 00000000000..9778189004a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authenticator_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAuthenticatorParams creates a new FindConfigAuthenticatorParams object +// with the default values initialized. +func NewFindConfigAuthenticatorParams() *FindConfigAuthenticatorParams { + + return &FindConfigAuthenticatorParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAuthenticatorParamsWithTimeout creates a new FindConfigAuthenticatorParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAuthenticatorParamsWithTimeout(timeout time.Duration) *FindConfigAuthenticatorParams { + + return &FindConfigAuthenticatorParams{ + + timeout: timeout, + } +} + +// NewFindConfigAuthenticatorParamsWithContext creates a new FindConfigAuthenticatorParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAuthenticatorParamsWithContext(ctx context.Context) *FindConfigAuthenticatorParams { + + return &FindConfigAuthenticatorParams{ + + Context: ctx, + } +} + +// NewFindConfigAuthenticatorParamsWithHTTPClient creates a new FindConfigAuthenticatorParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAuthenticatorParamsWithHTTPClient(client *http.Client) *FindConfigAuthenticatorParams { + + return &FindConfigAuthenticatorParams{ + HTTPClient: client, + } +} + +/* +FindConfigAuthenticatorParams contains all the parameters to send to the API endpoint +for the find config authenticator operation typically these are written to a http.Request +*/ +type FindConfigAuthenticatorParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config authenticator params +func (o *FindConfigAuthenticatorParams) WithTimeout(timeout time.Duration) *FindConfigAuthenticatorParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config authenticator params +func (o *FindConfigAuthenticatorParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config authenticator params +func (o *FindConfigAuthenticatorParams) WithContext(ctx context.Context) *FindConfigAuthenticatorParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config authenticator params +func (o *FindConfigAuthenticatorParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config authenticator params +func (o *FindConfigAuthenticatorParams) WithHTTPClient(client *http.Client) *FindConfigAuthenticatorParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config authenticator params +func (o *FindConfigAuthenticatorParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAuthenticatorParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authenticator_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authenticator_responses.go new file mode 100644 index 00000000000..f638bb7206f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authenticator_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAuthenticatorReader is a Reader for the FindConfigAuthenticator structure. +type FindConfigAuthenticatorReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAuthenticatorReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAuthenticatorOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAuthenticatorDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAuthenticatorOK creates a FindConfigAuthenticatorOK with default headers values +func NewFindConfigAuthenticatorOK() *FindConfigAuthenticatorOK { + return &FindConfigAuthenticatorOK{} +} + +/* +FindConfigAuthenticatorOK handles this case with default header values. + +Config value +*/ +type FindConfigAuthenticatorOK struct { + Payload string +} + +func (o *FindConfigAuthenticatorOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigAuthenticatorOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAuthenticatorDefault creates a FindConfigAuthenticatorDefault with default headers values +func NewFindConfigAuthenticatorDefault(code int) *FindConfigAuthenticatorDefault { + return &FindConfigAuthenticatorDefault{ + _statusCode: code, + } +} + +/* +FindConfigAuthenticatorDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAuthenticatorDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config authenticator default response +func (o *FindConfigAuthenticatorDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAuthenticatorDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAuthenticatorDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAuthenticatorDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authorizer_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authorizer_parameters.go new file mode 100644 index 00000000000..0c48493df8e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authorizer_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAuthorizerParams creates a new FindConfigAuthorizerParams object +// with the default values initialized. 
+func NewFindConfigAuthorizerParams() *FindConfigAuthorizerParams { + + return &FindConfigAuthorizerParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAuthorizerParamsWithTimeout creates a new FindConfigAuthorizerParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAuthorizerParamsWithTimeout(timeout time.Duration) *FindConfigAuthorizerParams { + + return &FindConfigAuthorizerParams{ + + timeout: timeout, + } +} + +// NewFindConfigAuthorizerParamsWithContext creates a new FindConfigAuthorizerParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAuthorizerParamsWithContext(ctx context.Context) *FindConfigAuthorizerParams { + + return &FindConfigAuthorizerParams{ + + Context: ctx, + } +} + +// NewFindConfigAuthorizerParamsWithHTTPClient creates a new FindConfigAuthorizerParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAuthorizerParamsWithHTTPClient(client *http.Client) *FindConfigAuthorizerParams { + + return &FindConfigAuthorizerParams{ + HTTPClient: client, + } +} + +/* +FindConfigAuthorizerParams contains all the parameters to send to the API endpoint +for the find config authorizer operation typically these are written to a http.Request +*/ +type FindConfigAuthorizerParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config authorizer params +func (o *FindConfigAuthorizerParams) WithTimeout(timeout time.Duration) *FindConfigAuthorizerParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config authorizer params +func (o *FindConfigAuthorizerParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config authorizer params +func (o *FindConfigAuthorizerParams) WithContext(ctx context.Context) *FindConfigAuthorizerParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config authorizer params +func (o *FindConfigAuthorizerParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config authorizer params +func (o *FindConfigAuthorizerParams) WithHTTPClient(client *http.Client) *FindConfigAuthorizerParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config authorizer params +func (o *FindConfigAuthorizerParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAuthorizerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authorizer_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authorizer_responses.go new file mode 100644 index 00000000000..5683020ad3a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_authorizer_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAuthorizerReader is a Reader for the FindConfigAuthorizer structure. +type FindConfigAuthorizerReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAuthorizerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAuthorizerOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAuthorizerDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAuthorizerOK creates a FindConfigAuthorizerOK with default headers values +func NewFindConfigAuthorizerOK() *FindConfigAuthorizerOK { + return &FindConfigAuthorizerOK{} +} + +/* +FindConfigAuthorizerOK handles this case with default header values. + +Config value +*/ +type FindConfigAuthorizerOK struct { + Payload string +} + +func (o *FindConfigAuthorizerOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigAuthorizerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAuthorizerDefault creates a FindConfigAuthorizerDefault with default headers values +func NewFindConfigAuthorizerDefault(code int) *FindConfigAuthorizerDefault { + return &FindConfigAuthorizerDefault{ + _statusCode: code, + } +} + +/* +FindConfigAuthorizerDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAuthorizerDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config authorizer default response +func (o *FindConfigAuthorizerDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAuthorizerDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAuthorizerDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAuthorizerDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_adjust_flush_quota_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_adjust_flush_quota_parameters.go new file mode 100644 index 00000000000..9f3a02f91e0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_adjust_flush_quota_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAutoAdjustFlushQuotaParams creates a new FindConfigAutoAdjustFlushQuotaParams object +// with the default values initialized. 
+func NewFindConfigAutoAdjustFlushQuotaParams() *FindConfigAutoAdjustFlushQuotaParams { + + return &FindConfigAutoAdjustFlushQuotaParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAutoAdjustFlushQuotaParamsWithTimeout creates a new FindConfigAutoAdjustFlushQuotaParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAutoAdjustFlushQuotaParamsWithTimeout(timeout time.Duration) *FindConfigAutoAdjustFlushQuotaParams { + + return &FindConfigAutoAdjustFlushQuotaParams{ + + timeout: timeout, + } +} + +// NewFindConfigAutoAdjustFlushQuotaParamsWithContext creates a new FindConfigAutoAdjustFlushQuotaParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAutoAdjustFlushQuotaParamsWithContext(ctx context.Context) *FindConfigAutoAdjustFlushQuotaParams { + + return &FindConfigAutoAdjustFlushQuotaParams{ + + Context: ctx, + } +} + +// NewFindConfigAutoAdjustFlushQuotaParamsWithHTTPClient creates a new FindConfigAutoAdjustFlushQuotaParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAutoAdjustFlushQuotaParamsWithHTTPClient(client *http.Client) *FindConfigAutoAdjustFlushQuotaParams { + + return &FindConfigAutoAdjustFlushQuotaParams{ + HTTPClient: client, + } +} + +/* +FindConfigAutoAdjustFlushQuotaParams contains all the parameters to send to the API endpoint +for the find config auto adjust flush quota operation typically these are written to a http.Request +*/ +type FindConfigAutoAdjustFlushQuotaParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config auto adjust flush quota params +func (o *FindConfigAutoAdjustFlushQuotaParams) WithTimeout(timeout time.Duration) *FindConfigAutoAdjustFlushQuotaParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config auto adjust flush quota params +func (o *FindConfigAutoAdjustFlushQuotaParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config auto adjust flush quota params +func (o *FindConfigAutoAdjustFlushQuotaParams) WithContext(ctx context.Context) *FindConfigAutoAdjustFlushQuotaParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config auto adjust flush quota params +func (o *FindConfigAutoAdjustFlushQuotaParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config auto adjust flush quota params +func (o *FindConfigAutoAdjustFlushQuotaParams) WithHTTPClient(client *http.Client) *FindConfigAutoAdjustFlushQuotaParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config auto adjust flush quota params +func (o *FindConfigAutoAdjustFlushQuotaParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAutoAdjustFlushQuotaParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_adjust_flush_quota_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_adjust_flush_quota_responses.go new file mode 100644 index 00000000000..502efa5c7b9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_adjust_flush_quota_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAutoAdjustFlushQuotaReader is a Reader for the FindConfigAutoAdjustFlushQuota structure. +type FindConfigAutoAdjustFlushQuotaReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAutoAdjustFlushQuotaReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAutoAdjustFlushQuotaOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAutoAdjustFlushQuotaDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAutoAdjustFlushQuotaOK creates a FindConfigAutoAdjustFlushQuotaOK with default headers values +func NewFindConfigAutoAdjustFlushQuotaOK() *FindConfigAutoAdjustFlushQuotaOK { + return &FindConfigAutoAdjustFlushQuotaOK{} +} + +/* +FindConfigAutoAdjustFlushQuotaOK handles this case with default header values. + +Config value +*/ +type FindConfigAutoAdjustFlushQuotaOK struct { + Payload bool +} + +func (o *FindConfigAutoAdjustFlushQuotaOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigAutoAdjustFlushQuotaOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAutoAdjustFlushQuotaDefault creates a FindConfigAutoAdjustFlushQuotaDefault with default headers values +func NewFindConfigAutoAdjustFlushQuotaDefault(code int) *FindConfigAutoAdjustFlushQuotaDefault { + return &FindConfigAutoAdjustFlushQuotaDefault{ + _statusCode: code, + } +} + +/* +FindConfigAutoAdjustFlushQuotaDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAutoAdjustFlushQuotaDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config auto adjust flush quota default response +func (o *FindConfigAutoAdjustFlushQuotaDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAutoAdjustFlushQuotaDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAutoAdjustFlushQuotaDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAutoAdjustFlushQuotaDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_bootstrap_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_bootstrap_parameters.go new file mode 100644 index 00000000000..9d73bd2fbf7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_bootstrap_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAutoBootstrapParams creates a new FindConfigAutoBootstrapParams object +// with the default values initialized. 
+func NewFindConfigAutoBootstrapParams() *FindConfigAutoBootstrapParams { + + return &FindConfigAutoBootstrapParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAutoBootstrapParamsWithTimeout creates a new FindConfigAutoBootstrapParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAutoBootstrapParamsWithTimeout(timeout time.Duration) *FindConfigAutoBootstrapParams { + + return &FindConfigAutoBootstrapParams{ + + timeout: timeout, + } +} + +// NewFindConfigAutoBootstrapParamsWithContext creates a new FindConfigAutoBootstrapParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAutoBootstrapParamsWithContext(ctx context.Context) *FindConfigAutoBootstrapParams { + + return &FindConfigAutoBootstrapParams{ + + Context: ctx, + } +} + +// NewFindConfigAutoBootstrapParamsWithHTTPClient creates a new FindConfigAutoBootstrapParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAutoBootstrapParamsWithHTTPClient(client *http.Client) *FindConfigAutoBootstrapParams { + + return &FindConfigAutoBootstrapParams{ + HTTPClient: client, + } +} + +/* +FindConfigAutoBootstrapParams contains all the parameters to send to the API endpoint +for the find config auto bootstrap operation typically these are written to a http.Request +*/ +type FindConfigAutoBootstrapParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config auto bootstrap params +func (o *FindConfigAutoBootstrapParams) WithTimeout(timeout time.Duration) *FindConfigAutoBootstrapParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config auto bootstrap params +func (o *FindConfigAutoBootstrapParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config auto bootstrap params +func (o *FindConfigAutoBootstrapParams) WithContext(ctx context.Context) *FindConfigAutoBootstrapParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config auto bootstrap params +func (o *FindConfigAutoBootstrapParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config auto bootstrap params +func (o *FindConfigAutoBootstrapParams) WithHTTPClient(client *http.Client) *FindConfigAutoBootstrapParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config auto bootstrap params +func (o *FindConfigAutoBootstrapParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAutoBootstrapParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_bootstrap_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_bootstrap_responses.go new file mode 100644 index 00000000000..2132becae3a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_bootstrap_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAutoBootstrapReader is a Reader for the FindConfigAutoBootstrap structure. +type FindConfigAutoBootstrapReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAutoBootstrapReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAutoBootstrapOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAutoBootstrapDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAutoBootstrapOK creates a FindConfigAutoBootstrapOK with default headers values +func NewFindConfigAutoBootstrapOK() *FindConfigAutoBootstrapOK { + return &FindConfigAutoBootstrapOK{} +} + +/* +FindConfigAutoBootstrapOK handles this case with default header values. + +Config value +*/ +type FindConfigAutoBootstrapOK struct { + Payload bool +} + +func (o *FindConfigAutoBootstrapOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigAutoBootstrapOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAutoBootstrapDefault creates a FindConfigAutoBootstrapDefault with default headers values +func NewFindConfigAutoBootstrapDefault(code int) *FindConfigAutoBootstrapDefault { + return &FindConfigAutoBootstrapDefault{ + _statusCode: code, + } +} + +/* +FindConfigAutoBootstrapDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAutoBootstrapDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config auto bootstrap default response +func (o *FindConfigAutoBootstrapDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAutoBootstrapDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAutoBootstrapDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAutoBootstrapDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_snapshot_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_snapshot_parameters.go new file mode 100644 index 00000000000..6ce6dd83ae8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_snapshot_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigAutoSnapshotParams creates a new FindConfigAutoSnapshotParams object +// with the default values initialized. 
+func NewFindConfigAutoSnapshotParams() *FindConfigAutoSnapshotParams { + + return &FindConfigAutoSnapshotParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigAutoSnapshotParamsWithTimeout creates a new FindConfigAutoSnapshotParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigAutoSnapshotParamsWithTimeout(timeout time.Duration) *FindConfigAutoSnapshotParams { + + return &FindConfigAutoSnapshotParams{ + + timeout: timeout, + } +} + +// NewFindConfigAutoSnapshotParamsWithContext creates a new FindConfigAutoSnapshotParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigAutoSnapshotParamsWithContext(ctx context.Context) *FindConfigAutoSnapshotParams { + + return &FindConfigAutoSnapshotParams{ + + Context: ctx, + } +} + +// NewFindConfigAutoSnapshotParamsWithHTTPClient creates a new FindConfigAutoSnapshotParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigAutoSnapshotParamsWithHTTPClient(client *http.Client) *FindConfigAutoSnapshotParams { + + return &FindConfigAutoSnapshotParams{ + HTTPClient: client, + } +} + +/* +FindConfigAutoSnapshotParams contains all the parameters to send to the API endpoint +for the find config auto snapshot operation typically these are written to a http.Request +*/ +type FindConfigAutoSnapshotParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config auto snapshot params +func (o *FindConfigAutoSnapshotParams) WithTimeout(timeout time.Duration) *FindConfigAutoSnapshotParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config auto snapshot params +func (o *FindConfigAutoSnapshotParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config auto snapshot params +func (o *FindConfigAutoSnapshotParams) WithContext(ctx context.Context) *FindConfigAutoSnapshotParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config auto snapshot params +func (o *FindConfigAutoSnapshotParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config auto snapshot params +func (o *FindConfigAutoSnapshotParams) WithHTTPClient(client *http.Client) *FindConfigAutoSnapshotParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config auto snapshot params +func (o *FindConfigAutoSnapshotParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigAutoSnapshotParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_snapshot_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_snapshot_responses.go new file mode 100644 index 00000000000..2ee4f0d0b60 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_auto_snapshot_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigAutoSnapshotReader is a Reader for the FindConfigAutoSnapshot structure. +type FindConfigAutoSnapshotReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigAutoSnapshotReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigAutoSnapshotOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigAutoSnapshotDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigAutoSnapshotOK creates a FindConfigAutoSnapshotOK with default headers values +func NewFindConfigAutoSnapshotOK() *FindConfigAutoSnapshotOK { + return &FindConfigAutoSnapshotOK{} +} + +/* +FindConfigAutoSnapshotOK handles this case with default header values. + +Config value +*/ +type FindConfigAutoSnapshotOK struct { + Payload bool +} + +func (o *FindConfigAutoSnapshotOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigAutoSnapshotOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigAutoSnapshotDefault creates a FindConfigAutoSnapshotDefault with default headers values +func NewFindConfigAutoSnapshotDefault(code int) *FindConfigAutoSnapshotDefault { + return &FindConfigAutoSnapshotDefault{ + _statusCode: code, + } +} + +/* +FindConfigAutoSnapshotDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigAutoSnapshotDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config auto snapshot default response +func (o *FindConfigAutoSnapshotDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigAutoSnapshotDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigAutoSnapshotDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigAutoSnapshotDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_background_writer_scheduling_quota_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_background_writer_scheduling_quota_parameters.go new file mode 100644 index 00000000000..03c7d8747d0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_background_writer_scheduling_quota_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigBackgroundWriterSchedulingQuotaParams creates a new FindConfigBackgroundWriterSchedulingQuotaParams object +// with the default values initialized. 
+func NewFindConfigBackgroundWriterSchedulingQuotaParams() *FindConfigBackgroundWriterSchedulingQuotaParams { + + return &FindConfigBackgroundWriterSchedulingQuotaParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigBackgroundWriterSchedulingQuotaParamsWithTimeout creates a new FindConfigBackgroundWriterSchedulingQuotaParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigBackgroundWriterSchedulingQuotaParamsWithTimeout(timeout time.Duration) *FindConfigBackgroundWriterSchedulingQuotaParams { + + return &FindConfigBackgroundWriterSchedulingQuotaParams{ + + timeout: timeout, + } +} + +// NewFindConfigBackgroundWriterSchedulingQuotaParamsWithContext creates a new FindConfigBackgroundWriterSchedulingQuotaParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigBackgroundWriterSchedulingQuotaParamsWithContext(ctx context.Context) *FindConfigBackgroundWriterSchedulingQuotaParams { + + return &FindConfigBackgroundWriterSchedulingQuotaParams{ + + Context: ctx, + } +} + +// NewFindConfigBackgroundWriterSchedulingQuotaParamsWithHTTPClient creates a new FindConfigBackgroundWriterSchedulingQuotaParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigBackgroundWriterSchedulingQuotaParamsWithHTTPClient(client *http.Client) *FindConfigBackgroundWriterSchedulingQuotaParams { + + return &FindConfigBackgroundWriterSchedulingQuotaParams{ + HTTPClient: client, + } +} + +/* +FindConfigBackgroundWriterSchedulingQuotaParams contains all the parameters to send to the API endpoint +for the find config background writer scheduling quota operation typically these are written to a http.Request +*/ +type FindConfigBackgroundWriterSchedulingQuotaParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config background writer scheduling quota params +func (o *FindConfigBackgroundWriterSchedulingQuotaParams) WithTimeout(timeout time.Duration) *FindConfigBackgroundWriterSchedulingQuotaParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config background writer scheduling quota params +func (o *FindConfigBackgroundWriterSchedulingQuotaParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config background writer scheduling quota params +func (o *FindConfigBackgroundWriterSchedulingQuotaParams) WithContext(ctx context.Context) *FindConfigBackgroundWriterSchedulingQuotaParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config background writer scheduling quota params +func (o *FindConfigBackgroundWriterSchedulingQuotaParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config background writer scheduling quota params +func (o *FindConfigBackgroundWriterSchedulingQuotaParams) WithHTTPClient(client *http.Client) *FindConfigBackgroundWriterSchedulingQuotaParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config background writer scheduling quota params +func (o *FindConfigBackgroundWriterSchedulingQuotaParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*FindConfigBackgroundWriterSchedulingQuotaParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_background_writer_scheduling_quota_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_background_writer_scheduling_quota_responses.go new file mode 100644 index 00000000000..86f06165658 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_background_writer_scheduling_quota_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigBackgroundWriterSchedulingQuotaReader is a Reader for the FindConfigBackgroundWriterSchedulingQuota structure. +type FindConfigBackgroundWriterSchedulingQuotaReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigBackgroundWriterSchedulingQuotaReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigBackgroundWriterSchedulingQuotaOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigBackgroundWriterSchedulingQuotaDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigBackgroundWriterSchedulingQuotaOK creates a FindConfigBackgroundWriterSchedulingQuotaOK with default headers values +func NewFindConfigBackgroundWriterSchedulingQuotaOK() *FindConfigBackgroundWriterSchedulingQuotaOK { + return &FindConfigBackgroundWriterSchedulingQuotaOK{} +} + +/* +FindConfigBackgroundWriterSchedulingQuotaOK handles this case with default header values. + +Config value +*/ +type FindConfigBackgroundWriterSchedulingQuotaOK struct { + Payload float64 +} + +func (o *FindConfigBackgroundWriterSchedulingQuotaOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigBackgroundWriterSchedulingQuotaOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigBackgroundWriterSchedulingQuotaDefault creates a FindConfigBackgroundWriterSchedulingQuotaDefault with default headers values +func NewFindConfigBackgroundWriterSchedulingQuotaDefault(code int) *FindConfigBackgroundWriterSchedulingQuotaDefault { + return &FindConfigBackgroundWriterSchedulingQuotaDefault{ + _statusCode: code, + } +} + +/* +FindConfigBackgroundWriterSchedulingQuotaDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigBackgroundWriterSchedulingQuotaDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config background writer scheduling quota default response +func (o *FindConfigBackgroundWriterSchedulingQuotaDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigBackgroundWriterSchedulingQuotaDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigBackgroundWriterSchedulingQuotaDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigBackgroundWriterSchedulingQuotaDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_fail_threshold_in_kb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_fail_threshold_in_kb_parameters.go new file mode 100644 index 00000000000..d0ebb4df6df --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_fail_threshold_in_kb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigBatchSizeFailThresholdInKbParams creates a new FindConfigBatchSizeFailThresholdInKbParams object +// with the default values initialized. 
+func NewFindConfigBatchSizeFailThresholdInKbParams() *FindConfigBatchSizeFailThresholdInKbParams { + + return &FindConfigBatchSizeFailThresholdInKbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigBatchSizeFailThresholdInKbParamsWithTimeout creates a new FindConfigBatchSizeFailThresholdInKbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigBatchSizeFailThresholdInKbParamsWithTimeout(timeout time.Duration) *FindConfigBatchSizeFailThresholdInKbParams { + + return &FindConfigBatchSizeFailThresholdInKbParams{ + + timeout: timeout, + } +} + +// NewFindConfigBatchSizeFailThresholdInKbParamsWithContext creates a new FindConfigBatchSizeFailThresholdInKbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigBatchSizeFailThresholdInKbParamsWithContext(ctx context.Context) *FindConfigBatchSizeFailThresholdInKbParams { + + return &FindConfigBatchSizeFailThresholdInKbParams{ + + Context: ctx, + } +} + +// NewFindConfigBatchSizeFailThresholdInKbParamsWithHTTPClient creates a new FindConfigBatchSizeFailThresholdInKbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigBatchSizeFailThresholdInKbParamsWithHTTPClient(client *http.Client) *FindConfigBatchSizeFailThresholdInKbParams { + + return &FindConfigBatchSizeFailThresholdInKbParams{ + HTTPClient: client, + } +} + +/* +FindConfigBatchSizeFailThresholdInKbParams contains all the parameters to send to the API endpoint +for the find config batch size fail threshold in kb operation typically these are written to a http.Request +*/ +type FindConfigBatchSizeFailThresholdInKbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config batch size fail threshold in kb params +func (o *FindConfigBatchSizeFailThresholdInKbParams) WithTimeout(timeout time.Duration) *FindConfigBatchSizeFailThresholdInKbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config batch size fail threshold in kb params +func (o *FindConfigBatchSizeFailThresholdInKbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config batch size fail threshold in kb params +func (o *FindConfigBatchSizeFailThresholdInKbParams) WithContext(ctx context.Context) *FindConfigBatchSizeFailThresholdInKbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config batch size fail threshold in kb params +func (o *FindConfigBatchSizeFailThresholdInKbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config batch size fail threshold in kb params +func (o *FindConfigBatchSizeFailThresholdInKbParams) WithHTTPClient(client *http.Client) *FindConfigBatchSizeFailThresholdInKbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config batch size fail threshold in kb params +func (o *FindConfigBatchSizeFailThresholdInKbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigBatchSizeFailThresholdInKbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + 
if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_fail_threshold_in_kb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_fail_threshold_in_kb_responses.go new file mode 100644 index 00000000000..797b3dcffd2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_fail_threshold_in_kb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigBatchSizeFailThresholdInKbReader is a Reader for the FindConfigBatchSizeFailThresholdInKb structure. +type FindConfigBatchSizeFailThresholdInKbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigBatchSizeFailThresholdInKbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigBatchSizeFailThresholdInKbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigBatchSizeFailThresholdInKbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigBatchSizeFailThresholdInKbOK creates a FindConfigBatchSizeFailThresholdInKbOK with default headers values +func NewFindConfigBatchSizeFailThresholdInKbOK() *FindConfigBatchSizeFailThresholdInKbOK { + return &FindConfigBatchSizeFailThresholdInKbOK{} +} + +/* +FindConfigBatchSizeFailThresholdInKbOK handles this case with default header values. + +Config value +*/ +type FindConfigBatchSizeFailThresholdInKbOK struct { + Payload int64 +} + +func (o *FindConfigBatchSizeFailThresholdInKbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigBatchSizeFailThresholdInKbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigBatchSizeFailThresholdInKbDefault creates a FindConfigBatchSizeFailThresholdInKbDefault with default headers values +func NewFindConfigBatchSizeFailThresholdInKbDefault(code int) *FindConfigBatchSizeFailThresholdInKbDefault { + return &FindConfigBatchSizeFailThresholdInKbDefault{ + _statusCode: code, + } +} + +/* +FindConfigBatchSizeFailThresholdInKbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigBatchSizeFailThresholdInKbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config batch size fail threshold in kb default response +func (o *FindConfigBatchSizeFailThresholdInKbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigBatchSizeFailThresholdInKbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigBatchSizeFailThresholdInKbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigBatchSizeFailThresholdInKbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_warn_threshold_in_kb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_warn_threshold_in_kb_parameters.go new file mode 100644 index 00000000000..51a108bae64 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_warn_threshold_in_kb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigBatchSizeWarnThresholdInKbParams creates a new FindConfigBatchSizeWarnThresholdInKbParams object +// with the default values initialized. 
+func NewFindConfigBatchSizeWarnThresholdInKbParams() *FindConfigBatchSizeWarnThresholdInKbParams { + + return &FindConfigBatchSizeWarnThresholdInKbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigBatchSizeWarnThresholdInKbParamsWithTimeout creates a new FindConfigBatchSizeWarnThresholdInKbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigBatchSizeWarnThresholdInKbParamsWithTimeout(timeout time.Duration) *FindConfigBatchSizeWarnThresholdInKbParams { + + return &FindConfigBatchSizeWarnThresholdInKbParams{ + + timeout: timeout, + } +} + +// NewFindConfigBatchSizeWarnThresholdInKbParamsWithContext creates a new FindConfigBatchSizeWarnThresholdInKbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigBatchSizeWarnThresholdInKbParamsWithContext(ctx context.Context) *FindConfigBatchSizeWarnThresholdInKbParams { + + return &FindConfigBatchSizeWarnThresholdInKbParams{ + + Context: ctx, + } +} + +// NewFindConfigBatchSizeWarnThresholdInKbParamsWithHTTPClient creates a new FindConfigBatchSizeWarnThresholdInKbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigBatchSizeWarnThresholdInKbParamsWithHTTPClient(client *http.Client) *FindConfigBatchSizeWarnThresholdInKbParams { + + return &FindConfigBatchSizeWarnThresholdInKbParams{ + HTTPClient: client, + } +} + +/* +FindConfigBatchSizeWarnThresholdInKbParams contains all the parameters to send to the API endpoint +for the find config batch size warn threshold in kb operation typically these are written to a http.Request +*/ +type FindConfigBatchSizeWarnThresholdInKbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config batch size warn threshold in kb params +func (o *FindConfigBatchSizeWarnThresholdInKbParams) WithTimeout(timeout time.Duration) *FindConfigBatchSizeWarnThresholdInKbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config batch size warn threshold in kb params +func (o *FindConfigBatchSizeWarnThresholdInKbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config batch size warn threshold in kb params +func (o *FindConfigBatchSizeWarnThresholdInKbParams) WithContext(ctx context.Context) *FindConfigBatchSizeWarnThresholdInKbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config batch size warn threshold in kb params +func (o *FindConfigBatchSizeWarnThresholdInKbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config batch size warn threshold in kb params +func (o *FindConfigBatchSizeWarnThresholdInKbParams) WithHTTPClient(client *http.Client) *FindConfigBatchSizeWarnThresholdInKbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config batch size warn threshold in kb params +func (o *FindConfigBatchSizeWarnThresholdInKbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigBatchSizeWarnThresholdInKbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + 
if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_warn_threshold_in_kb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_warn_threshold_in_kb_responses.go new file mode 100644 index 00000000000..5eabac32fa3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batch_size_warn_threshold_in_kb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigBatchSizeWarnThresholdInKbReader is a Reader for the FindConfigBatchSizeWarnThresholdInKb structure. +type FindConfigBatchSizeWarnThresholdInKbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigBatchSizeWarnThresholdInKbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigBatchSizeWarnThresholdInKbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigBatchSizeWarnThresholdInKbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigBatchSizeWarnThresholdInKbOK creates a FindConfigBatchSizeWarnThresholdInKbOK with default headers values +func NewFindConfigBatchSizeWarnThresholdInKbOK() *FindConfigBatchSizeWarnThresholdInKbOK { + return &FindConfigBatchSizeWarnThresholdInKbOK{} +} + +/* +FindConfigBatchSizeWarnThresholdInKbOK handles this case with default header values. + +Config value +*/ +type FindConfigBatchSizeWarnThresholdInKbOK struct { + Payload int64 +} + +func (o *FindConfigBatchSizeWarnThresholdInKbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigBatchSizeWarnThresholdInKbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigBatchSizeWarnThresholdInKbDefault creates a FindConfigBatchSizeWarnThresholdInKbDefault with default headers values +func NewFindConfigBatchSizeWarnThresholdInKbDefault(code int) *FindConfigBatchSizeWarnThresholdInKbDefault { + return &FindConfigBatchSizeWarnThresholdInKbDefault{ + _statusCode: code, + } +} + +/* +FindConfigBatchSizeWarnThresholdInKbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigBatchSizeWarnThresholdInKbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config batch size warn threshold in kb default response +func (o *FindConfigBatchSizeWarnThresholdInKbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigBatchSizeWarnThresholdInKbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigBatchSizeWarnThresholdInKbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigBatchSizeWarnThresholdInKbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batchlog_replay_throttle_in_kb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batchlog_replay_throttle_in_kb_parameters.go new file mode 100644 index 00000000000..881be05bf1c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batchlog_replay_throttle_in_kb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigBatchlogReplayThrottleInKbParams creates a new FindConfigBatchlogReplayThrottleInKbParams object +// with the default values initialized. 
+func NewFindConfigBatchlogReplayThrottleInKbParams() *FindConfigBatchlogReplayThrottleInKbParams { + + return &FindConfigBatchlogReplayThrottleInKbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigBatchlogReplayThrottleInKbParamsWithTimeout creates a new FindConfigBatchlogReplayThrottleInKbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigBatchlogReplayThrottleInKbParamsWithTimeout(timeout time.Duration) *FindConfigBatchlogReplayThrottleInKbParams { + + return &FindConfigBatchlogReplayThrottleInKbParams{ + + timeout: timeout, + } +} + +// NewFindConfigBatchlogReplayThrottleInKbParamsWithContext creates a new FindConfigBatchlogReplayThrottleInKbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigBatchlogReplayThrottleInKbParamsWithContext(ctx context.Context) *FindConfigBatchlogReplayThrottleInKbParams { + + return &FindConfigBatchlogReplayThrottleInKbParams{ + + Context: ctx, + } +} + +// NewFindConfigBatchlogReplayThrottleInKbParamsWithHTTPClient creates a new FindConfigBatchlogReplayThrottleInKbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigBatchlogReplayThrottleInKbParamsWithHTTPClient(client *http.Client) *FindConfigBatchlogReplayThrottleInKbParams { + + return &FindConfigBatchlogReplayThrottleInKbParams{ + HTTPClient: client, + } +} + +/* +FindConfigBatchlogReplayThrottleInKbParams contains all the parameters to send to the API endpoint +for the find config batchlog replay throttle in kb operation typically these are written to a http.Request +*/ +type FindConfigBatchlogReplayThrottleInKbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config batchlog replay throttle in kb params +func (o *FindConfigBatchlogReplayThrottleInKbParams) WithTimeout(timeout time.Duration) *FindConfigBatchlogReplayThrottleInKbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config batchlog replay throttle in kb params +func (o *FindConfigBatchlogReplayThrottleInKbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config batchlog replay throttle in kb params +func (o *FindConfigBatchlogReplayThrottleInKbParams) WithContext(ctx context.Context) *FindConfigBatchlogReplayThrottleInKbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config batchlog replay throttle in kb params +func (o *FindConfigBatchlogReplayThrottleInKbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config batchlog replay throttle in kb params +func (o *FindConfigBatchlogReplayThrottleInKbParams) WithHTTPClient(client *http.Client) *FindConfigBatchlogReplayThrottleInKbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config batchlog replay throttle in kb params +func (o *FindConfigBatchlogReplayThrottleInKbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigBatchlogReplayThrottleInKbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if 
len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batchlog_replay_throttle_in_kb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batchlog_replay_throttle_in_kb_responses.go new file mode 100644 index 00000000000..6c39f3944a5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_batchlog_replay_throttle_in_kb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigBatchlogReplayThrottleInKbReader is a Reader for the FindConfigBatchlogReplayThrottleInKb structure. +type FindConfigBatchlogReplayThrottleInKbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigBatchlogReplayThrottleInKbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigBatchlogReplayThrottleInKbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigBatchlogReplayThrottleInKbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigBatchlogReplayThrottleInKbOK creates a FindConfigBatchlogReplayThrottleInKbOK with default headers values +func NewFindConfigBatchlogReplayThrottleInKbOK() *FindConfigBatchlogReplayThrottleInKbOK { + return &FindConfigBatchlogReplayThrottleInKbOK{} +} + +/* +FindConfigBatchlogReplayThrottleInKbOK handles this case with default header values. + +Config value +*/ +type FindConfigBatchlogReplayThrottleInKbOK struct { + Payload int64 +} + +func (o *FindConfigBatchlogReplayThrottleInKbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigBatchlogReplayThrottleInKbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigBatchlogReplayThrottleInKbDefault creates a FindConfigBatchlogReplayThrottleInKbDefault with default headers values +func NewFindConfigBatchlogReplayThrottleInKbDefault(code int) *FindConfigBatchlogReplayThrottleInKbDefault { + return &FindConfigBatchlogReplayThrottleInKbDefault{ + _statusCode: code, + } +} + +/* +FindConfigBatchlogReplayThrottleInKbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigBatchlogReplayThrottleInKbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config batchlog replay throttle in kb default response +func (o *FindConfigBatchlogReplayThrottleInKbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigBatchlogReplayThrottleInKbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigBatchlogReplayThrottleInKbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigBatchlogReplayThrottleInKbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_address_parameters.go new file mode 100644 index 00000000000..d9b6584905a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigBroadcastAddressParams creates a new FindConfigBroadcastAddressParams object +// with the default values initialized. 
+func NewFindConfigBroadcastAddressParams() *FindConfigBroadcastAddressParams { + + return &FindConfigBroadcastAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigBroadcastAddressParamsWithTimeout creates a new FindConfigBroadcastAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigBroadcastAddressParamsWithTimeout(timeout time.Duration) *FindConfigBroadcastAddressParams { + + return &FindConfigBroadcastAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigBroadcastAddressParamsWithContext creates a new FindConfigBroadcastAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigBroadcastAddressParamsWithContext(ctx context.Context) *FindConfigBroadcastAddressParams { + + return &FindConfigBroadcastAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigBroadcastAddressParamsWithHTTPClient creates a new FindConfigBroadcastAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigBroadcastAddressParamsWithHTTPClient(client *http.Client) *FindConfigBroadcastAddressParams { + + return &FindConfigBroadcastAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigBroadcastAddressParams contains all the parameters to send to the API endpoint +for the find config broadcast address operation typically these are written to a http.Request +*/ +type FindConfigBroadcastAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config broadcast address params +func (o *FindConfigBroadcastAddressParams) WithTimeout(timeout time.Duration) *FindConfigBroadcastAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config broadcast address params +func (o *FindConfigBroadcastAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config broadcast address params +func (o *FindConfigBroadcastAddressParams) WithContext(ctx context.Context) *FindConfigBroadcastAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config broadcast address params +func (o *FindConfigBroadcastAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config broadcast address params +func (o *FindConfigBroadcastAddressParams) WithHTTPClient(client *http.Client) *FindConfigBroadcastAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config broadcast address params +func (o *FindConfigBroadcastAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigBroadcastAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_address_responses.go new file mode 100644 index 00000000000..6796c1b8e16 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigBroadcastAddressReader is a Reader for the FindConfigBroadcastAddress structure. +type FindConfigBroadcastAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigBroadcastAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigBroadcastAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigBroadcastAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigBroadcastAddressOK creates a FindConfigBroadcastAddressOK with default headers values +func NewFindConfigBroadcastAddressOK() *FindConfigBroadcastAddressOK { + return &FindConfigBroadcastAddressOK{} +} + +/* +FindConfigBroadcastAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigBroadcastAddressOK struct { + Payload string +} + +func (o *FindConfigBroadcastAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigBroadcastAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigBroadcastAddressDefault creates a FindConfigBroadcastAddressDefault with default headers values +func NewFindConfigBroadcastAddressDefault(code int) *FindConfigBroadcastAddressDefault { + return &FindConfigBroadcastAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigBroadcastAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigBroadcastAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config broadcast address default response +func (o *FindConfigBroadcastAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigBroadcastAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigBroadcastAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigBroadcastAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_rpc_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_rpc_address_parameters.go new file mode 100644 index 00000000000..d677b903d01 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_rpc_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigBroadcastRPCAddressParams creates a new FindConfigBroadcastRPCAddressParams object +// with the default values initialized. 
+func NewFindConfigBroadcastRPCAddressParams() *FindConfigBroadcastRPCAddressParams { + + return &FindConfigBroadcastRPCAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigBroadcastRPCAddressParamsWithTimeout creates a new FindConfigBroadcastRPCAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigBroadcastRPCAddressParamsWithTimeout(timeout time.Duration) *FindConfigBroadcastRPCAddressParams { + + return &FindConfigBroadcastRPCAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigBroadcastRPCAddressParamsWithContext creates a new FindConfigBroadcastRPCAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigBroadcastRPCAddressParamsWithContext(ctx context.Context) *FindConfigBroadcastRPCAddressParams { + + return &FindConfigBroadcastRPCAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigBroadcastRPCAddressParamsWithHTTPClient creates a new FindConfigBroadcastRPCAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigBroadcastRPCAddressParamsWithHTTPClient(client *http.Client) *FindConfigBroadcastRPCAddressParams { + + return &FindConfigBroadcastRPCAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigBroadcastRPCAddressParams contains all the parameters to send to the API endpoint +for the find config broadcast rpc address operation typically these are written to a http.Request +*/ +type FindConfigBroadcastRPCAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config broadcast rpc address params +func (o *FindConfigBroadcastRPCAddressParams) WithTimeout(timeout time.Duration) *FindConfigBroadcastRPCAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config broadcast rpc address params +func (o *FindConfigBroadcastRPCAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config broadcast rpc address params +func (o *FindConfigBroadcastRPCAddressParams) WithContext(ctx context.Context) *FindConfigBroadcastRPCAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config broadcast rpc address params +func (o *FindConfigBroadcastRPCAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config broadcast rpc address params +func (o *FindConfigBroadcastRPCAddressParams) WithHTTPClient(client *http.Client) *FindConfigBroadcastRPCAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config broadcast rpc address params +func (o *FindConfigBroadcastRPCAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigBroadcastRPCAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_rpc_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_rpc_address_responses.go new file mode 100644 index 00000000000..bf1fce08ba0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_broadcast_rpc_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigBroadcastRPCAddressReader is a Reader for the FindConfigBroadcastRPCAddress structure. +type FindConfigBroadcastRPCAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigBroadcastRPCAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigBroadcastRPCAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigBroadcastRPCAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigBroadcastRPCAddressOK creates a FindConfigBroadcastRPCAddressOK with default headers values +func NewFindConfigBroadcastRPCAddressOK() *FindConfigBroadcastRPCAddressOK { + return &FindConfigBroadcastRPCAddressOK{} +} + +/* +FindConfigBroadcastRPCAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigBroadcastRPCAddressOK struct { + Payload string +} + +func (o *FindConfigBroadcastRPCAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigBroadcastRPCAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigBroadcastRPCAddressDefault creates a FindConfigBroadcastRPCAddressDefault with default headers values +func NewFindConfigBroadcastRPCAddressDefault(code int) *FindConfigBroadcastRPCAddressDefault { + return &FindConfigBroadcastRPCAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigBroadcastRPCAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigBroadcastRPCAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config broadcast rpc address default response +func (o *FindConfigBroadcastRPCAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigBroadcastRPCAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigBroadcastRPCAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigBroadcastRPCAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cache_hit_rate_read_balancing_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cache_hit_rate_read_balancing_parameters.go new file mode 100644 index 00000000000..4eb96427af4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cache_hit_rate_read_balancing_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCacheHitRateReadBalancingParams creates a new FindConfigCacheHitRateReadBalancingParams object +// with the default values initialized. 
+func NewFindConfigCacheHitRateReadBalancingParams() *FindConfigCacheHitRateReadBalancingParams { + + return &FindConfigCacheHitRateReadBalancingParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCacheHitRateReadBalancingParamsWithTimeout creates a new FindConfigCacheHitRateReadBalancingParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCacheHitRateReadBalancingParamsWithTimeout(timeout time.Duration) *FindConfigCacheHitRateReadBalancingParams { + + return &FindConfigCacheHitRateReadBalancingParams{ + + timeout: timeout, + } +} + +// NewFindConfigCacheHitRateReadBalancingParamsWithContext creates a new FindConfigCacheHitRateReadBalancingParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCacheHitRateReadBalancingParamsWithContext(ctx context.Context) *FindConfigCacheHitRateReadBalancingParams { + + return &FindConfigCacheHitRateReadBalancingParams{ + + Context: ctx, + } +} + +// NewFindConfigCacheHitRateReadBalancingParamsWithHTTPClient creates a new FindConfigCacheHitRateReadBalancingParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCacheHitRateReadBalancingParamsWithHTTPClient(client *http.Client) *FindConfigCacheHitRateReadBalancingParams { + + return &FindConfigCacheHitRateReadBalancingParams{ + HTTPClient: client, + } +} + +/* +FindConfigCacheHitRateReadBalancingParams contains all the parameters to send to the API endpoint +for the find config cache hit rate read balancing operation typically these are written to a http.Request +*/ +type FindConfigCacheHitRateReadBalancingParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config cache hit rate read balancing params +func (o *FindConfigCacheHitRateReadBalancingParams) WithTimeout(timeout time.Duration) *FindConfigCacheHitRateReadBalancingParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config cache hit rate read balancing params +func (o *FindConfigCacheHitRateReadBalancingParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config cache hit rate read balancing params +func (o *FindConfigCacheHitRateReadBalancingParams) WithContext(ctx context.Context) *FindConfigCacheHitRateReadBalancingParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config cache hit rate read balancing params +func (o *FindConfigCacheHitRateReadBalancingParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config cache hit rate read balancing params +func (o *FindConfigCacheHitRateReadBalancingParams) WithHTTPClient(client *http.Client) *FindConfigCacheHitRateReadBalancingParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config cache hit rate read balancing params +func (o *FindConfigCacheHitRateReadBalancingParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCacheHitRateReadBalancingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cache_hit_rate_read_balancing_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cache_hit_rate_read_balancing_responses.go new file mode 100644 index 00000000000..2110ef146c6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cache_hit_rate_read_balancing_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCacheHitRateReadBalancingReader is a Reader for the FindConfigCacheHitRateReadBalancing structure. +type FindConfigCacheHitRateReadBalancingReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCacheHitRateReadBalancingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCacheHitRateReadBalancingOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCacheHitRateReadBalancingDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCacheHitRateReadBalancingOK creates a FindConfigCacheHitRateReadBalancingOK with default headers values +func NewFindConfigCacheHitRateReadBalancingOK() *FindConfigCacheHitRateReadBalancingOK { + return &FindConfigCacheHitRateReadBalancingOK{} +} + +/* +FindConfigCacheHitRateReadBalancingOK handles this case with default header values. + +Config value +*/ +type FindConfigCacheHitRateReadBalancingOK struct { + Payload bool +} + +func (o *FindConfigCacheHitRateReadBalancingOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigCacheHitRateReadBalancingOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCacheHitRateReadBalancingDefault creates a FindConfigCacheHitRateReadBalancingDefault with default headers values +func NewFindConfigCacheHitRateReadBalancingDefault(code int) *FindConfigCacheHitRateReadBalancingDefault { + return &FindConfigCacheHitRateReadBalancingDefault{ + _statusCode: code, + } +} + +/* +FindConfigCacheHitRateReadBalancingDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCacheHitRateReadBalancingDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config cache hit rate read balancing default response +func (o *FindConfigCacheHitRateReadBalancingDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCacheHitRateReadBalancingDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCacheHitRateReadBalancingDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCacheHitRateReadBalancingDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cas_contention_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cas_contention_timeout_in_ms_parameters.go new file mode 100644 index 00000000000..da89c7dae80 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cas_contention_timeout_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCasContentionTimeoutInMsParams creates a new FindConfigCasContentionTimeoutInMsParams object +// with the default values initialized. 
+func NewFindConfigCasContentionTimeoutInMsParams() *FindConfigCasContentionTimeoutInMsParams { + + return &FindConfigCasContentionTimeoutInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCasContentionTimeoutInMsParamsWithTimeout creates a new FindConfigCasContentionTimeoutInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCasContentionTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigCasContentionTimeoutInMsParams { + + return &FindConfigCasContentionTimeoutInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigCasContentionTimeoutInMsParamsWithContext creates a new FindConfigCasContentionTimeoutInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCasContentionTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigCasContentionTimeoutInMsParams { + + return &FindConfigCasContentionTimeoutInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigCasContentionTimeoutInMsParamsWithHTTPClient creates a new FindConfigCasContentionTimeoutInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCasContentionTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigCasContentionTimeoutInMsParams { + + return &FindConfigCasContentionTimeoutInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigCasContentionTimeoutInMsParams contains all the parameters to send to the API endpoint +for the find config cas contention timeout in ms operation typically these are written to a http.Request +*/ +type FindConfigCasContentionTimeoutInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config cas contention timeout in ms params +func (o *FindConfigCasContentionTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigCasContentionTimeoutInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config cas contention timeout in ms params +func (o *FindConfigCasContentionTimeoutInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config cas contention timeout in ms params +func (o *FindConfigCasContentionTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigCasContentionTimeoutInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config cas contention timeout in ms params +func (o *FindConfigCasContentionTimeoutInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config cas contention timeout in ms params +func (o *FindConfigCasContentionTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigCasContentionTimeoutInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config cas contention timeout in ms params +func (o *FindConfigCasContentionTimeoutInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCasContentionTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cas_contention_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cas_contention_timeout_in_ms_responses.go new file mode 100644 index 00000000000..bda6e4b0b49 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cas_contention_timeout_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCasContentionTimeoutInMsReader is a Reader for the FindConfigCasContentionTimeoutInMs structure. +type FindConfigCasContentionTimeoutInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCasContentionTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCasContentionTimeoutInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCasContentionTimeoutInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCasContentionTimeoutInMsOK creates a FindConfigCasContentionTimeoutInMsOK with default headers values +func NewFindConfigCasContentionTimeoutInMsOK() *FindConfigCasContentionTimeoutInMsOK { + return &FindConfigCasContentionTimeoutInMsOK{} +} + +/* +FindConfigCasContentionTimeoutInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigCasContentionTimeoutInMsOK struct { + Payload int64 +} + +func (o *FindConfigCasContentionTimeoutInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCasContentionTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCasContentionTimeoutInMsDefault creates a FindConfigCasContentionTimeoutInMsDefault with default headers values +func NewFindConfigCasContentionTimeoutInMsDefault(code int) *FindConfigCasContentionTimeoutInMsDefault { + return &FindConfigCasContentionTimeoutInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigCasContentionTimeoutInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCasContentionTimeoutInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config cas contention timeout in ms default response +func (o *FindConfigCasContentionTimeoutInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCasContentionTimeoutInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCasContentionTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCasContentionTimeoutInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_client_encryption_options_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_client_encryption_options_parameters.go new file mode 100644 index 00000000000..fdc092c6993 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_client_encryption_options_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigClientEncryptionOptionsParams creates a new FindConfigClientEncryptionOptionsParams object +// with the default values initialized. 
+func NewFindConfigClientEncryptionOptionsParams() *FindConfigClientEncryptionOptionsParams { + + return &FindConfigClientEncryptionOptionsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigClientEncryptionOptionsParamsWithTimeout creates a new FindConfigClientEncryptionOptionsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigClientEncryptionOptionsParamsWithTimeout(timeout time.Duration) *FindConfigClientEncryptionOptionsParams { + + return &FindConfigClientEncryptionOptionsParams{ + + timeout: timeout, + } +} + +// NewFindConfigClientEncryptionOptionsParamsWithContext creates a new FindConfigClientEncryptionOptionsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigClientEncryptionOptionsParamsWithContext(ctx context.Context) *FindConfigClientEncryptionOptionsParams { + + return &FindConfigClientEncryptionOptionsParams{ + + Context: ctx, + } +} + +// NewFindConfigClientEncryptionOptionsParamsWithHTTPClient creates a new FindConfigClientEncryptionOptionsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigClientEncryptionOptionsParamsWithHTTPClient(client *http.Client) *FindConfigClientEncryptionOptionsParams { + + return &FindConfigClientEncryptionOptionsParams{ + HTTPClient: client, + } +} + +/* +FindConfigClientEncryptionOptionsParams contains all the parameters to send to the API endpoint +for the find config client encryption options operation typically these are written to a http.Request +*/ +type FindConfigClientEncryptionOptionsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config client encryption options params +func (o *FindConfigClientEncryptionOptionsParams) WithTimeout(timeout time.Duration) *FindConfigClientEncryptionOptionsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config client encryption options params +func (o *FindConfigClientEncryptionOptionsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config client encryption options params +func (o *FindConfigClientEncryptionOptionsParams) WithContext(ctx context.Context) *FindConfigClientEncryptionOptionsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config client encryption options params +func (o *FindConfigClientEncryptionOptionsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config client encryption options params +func (o *FindConfigClientEncryptionOptionsParams) WithHTTPClient(client *http.Client) *FindConfigClientEncryptionOptionsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config client encryption options params +func (o *FindConfigClientEncryptionOptionsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigClientEncryptionOptionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_client_encryption_options_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_client_encryption_options_responses.go new file mode 100644 index 00000000000..3c30d10e5ad --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_client_encryption_options_responses.go @@ -0,0 +1,116 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigClientEncryptionOptionsReader is a Reader for the FindConfigClientEncryptionOptions structure. +type FindConfigClientEncryptionOptionsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigClientEncryptionOptionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigClientEncryptionOptionsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigClientEncryptionOptionsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigClientEncryptionOptionsOK creates a FindConfigClientEncryptionOptionsOK with default headers values +func NewFindConfigClientEncryptionOptionsOK() *FindConfigClientEncryptionOptionsOK { + return &FindConfigClientEncryptionOptionsOK{} +} + +/* +FindConfigClientEncryptionOptionsOK handles this case with default header values. + +Config value +*/ +type FindConfigClientEncryptionOptionsOK struct { + Payload *models.ClientEncryptionOptions +} + +func (o *FindConfigClientEncryptionOptionsOK) GetPayload() *models.ClientEncryptionOptions { + return o.Payload +} + +func (o *FindConfigClientEncryptionOptionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ClientEncryptionOptions) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigClientEncryptionOptionsDefault creates a FindConfigClientEncryptionOptionsDefault with default headers values +func NewFindConfigClientEncryptionOptionsDefault(code int) *FindConfigClientEncryptionOptionsDefault { + return &FindConfigClientEncryptionOptionsDefault{ + _statusCode: code, + } +} + +/* +FindConfigClientEncryptionOptionsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigClientEncryptionOptionsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config client encryption options default response +func (o *FindConfigClientEncryptionOptionsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigClientEncryptionOptionsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigClientEncryptionOptionsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigClientEncryptionOptionsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cluster_name_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cluster_name_parameters.go new file mode 100644 index 00000000000..e1715f6e5f3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cluster_name_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigClusterNameParams creates a new FindConfigClusterNameParams object +// with the default values initialized. 
+func NewFindConfigClusterNameParams() *FindConfigClusterNameParams { + + return &FindConfigClusterNameParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigClusterNameParamsWithTimeout creates a new FindConfigClusterNameParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigClusterNameParamsWithTimeout(timeout time.Duration) *FindConfigClusterNameParams { + + return &FindConfigClusterNameParams{ + + timeout: timeout, + } +} + +// NewFindConfigClusterNameParamsWithContext creates a new FindConfigClusterNameParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigClusterNameParamsWithContext(ctx context.Context) *FindConfigClusterNameParams { + + return &FindConfigClusterNameParams{ + + Context: ctx, + } +} + +// NewFindConfigClusterNameParamsWithHTTPClient creates a new FindConfigClusterNameParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigClusterNameParamsWithHTTPClient(client *http.Client) *FindConfigClusterNameParams { + + return &FindConfigClusterNameParams{ + HTTPClient: client, + } +} + +/* +FindConfigClusterNameParams contains all the parameters to send to the API endpoint +for the find config cluster name operation typically these are written to a http.Request +*/ +type FindConfigClusterNameParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config cluster name params +func (o *FindConfigClusterNameParams) WithTimeout(timeout time.Duration) *FindConfigClusterNameParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config cluster name params +func (o *FindConfigClusterNameParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config cluster name params +func (o *FindConfigClusterNameParams) WithContext(ctx context.Context) *FindConfigClusterNameParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config cluster name params +func (o *FindConfigClusterNameParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config cluster name params +func (o *FindConfigClusterNameParams) WithHTTPClient(client *http.Client) *FindConfigClusterNameParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config cluster name params +func (o *FindConfigClusterNameParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigClusterNameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cluster_name_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cluster_name_responses.go new file mode 100644 index 00000000000..13686eb5fdc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cluster_name_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigClusterNameReader is a Reader for the FindConfigClusterName structure. +type FindConfigClusterNameReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigClusterNameReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigClusterNameOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigClusterNameDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigClusterNameOK creates a FindConfigClusterNameOK with default headers values +func NewFindConfigClusterNameOK() *FindConfigClusterNameOK { + return &FindConfigClusterNameOK{} +} + +/* +FindConfigClusterNameOK handles this case with default header values. + +Config value +*/ +type FindConfigClusterNameOK struct { + Payload string +} + +func (o *FindConfigClusterNameOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigClusterNameOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigClusterNameDefault creates a FindConfigClusterNameDefault with default headers values +func NewFindConfigClusterNameDefault(code int) *FindConfigClusterNameDefault { + return &FindConfigClusterNameDefault{ + _statusCode: code, + } +} + +/* +FindConfigClusterNameDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigClusterNameDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config cluster name default response +func (o *FindConfigClusterNameDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigClusterNameDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigClusterNameDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigClusterNameDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_column_index_size_in_kb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_column_index_size_in_kb_parameters.go new file mode 100644 index 00000000000..6594af387d8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_column_index_size_in_kb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigColumnIndexSizeInKbParams creates a new FindConfigColumnIndexSizeInKbParams object +// with the default values initialized. 
+func NewFindConfigColumnIndexSizeInKbParams() *FindConfigColumnIndexSizeInKbParams { + + return &FindConfigColumnIndexSizeInKbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigColumnIndexSizeInKbParamsWithTimeout creates a new FindConfigColumnIndexSizeInKbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigColumnIndexSizeInKbParamsWithTimeout(timeout time.Duration) *FindConfigColumnIndexSizeInKbParams { + + return &FindConfigColumnIndexSizeInKbParams{ + + timeout: timeout, + } +} + +// NewFindConfigColumnIndexSizeInKbParamsWithContext creates a new FindConfigColumnIndexSizeInKbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigColumnIndexSizeInKbParamsWithContext(ctx context.Context) *FindConfigColumnIndexSizeInKbParams { + + return &FindConfigColumnIndexSizeInKbParams{ + + Context: ctx, + } +} + +// NewFindConfigColumnIndexSizeInKbParamsWithHTTPClient creates a new FindConfigColumnIndexSizeInKbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigColumnIndexSizeInKbParamsWithHTTPClient(client *http.Client) *FindConfigColumnIndexSizeInKbParams { + + return &FindConfigColumnIndexSizeInKbParams{ + HTTPClient: client, + } +} + +/* +FindConfigColumnIndexSizeInKbParams contains all the parameters to send to the API endpoint +for the find config column index size in kb operation typically these are written to a http.Request +*/ +type FindConfigColumnIndexSizeInKbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config column index size in kb params +func (o *FindConfigColumnIndexSizeInKbParams) WithTimeout(timeout time.Duration) *FindConfigColumnIndexSizeInKbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config column index size in kb params +func (o *FindConfigColumnIndexSizeInKbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config column index size in kb params +func (o *FindConfigColumnIndexSizeInKbParams) WithContext(ctx context.Context) *FindConfigColumnIndexSizeInKbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config column index size in kb params +func (o *FindConfigColumnIndexSizeInKbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config column index size in kb params +func (o *FindConfigColumnIndexSizeInKbParams) WithHTTPClient(client *http.Client) *FindConfigColumnIndexSizeInKbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config column index size in kb params +func (o *FindConfigColumnIndexSizeInKbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigColumnIndexSizeInKbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_column_index_size_in_kb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_column_index_size_in_kb_responses.go new file mode 100644 index 00000000000..d74569dc82a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_column_index_size_in_kb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigColumnIndexSizeInKbReader is a Reader for the FindConfigColumnIndexSizeInKb structure. +type FindConfigColumnIndexSizeInKbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigColumnIndexSizeInKbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigColumnIndexSizeInKbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigColumnIndexSizeInKbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigColumnIndexSizeInKbOK creates a FindConfigColumnIndexSizeInKbOK with default headers values +func NewFindConfigColumnIndexSizeInKbOK() *FindConfigColumnIndexSizeInKbOK { + return &FindConfigColumnIndexSizeInKbOK{} +} + +/* +FindConfigColumnIndexSizeInKbOK handles this case with default header values. + +Config value +*/ +type FindConfigColumnIndexSizeInKbOK struct { + Payload int64 +} + +func (o *FindConfigColumnIndexSizeInKbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigColumnIndexSizeInKbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigColumnIndexSizeInKbDefault creates a FindConfigColumnIndexSizeInKbDefault with default headers values +func NewFindConfigColumnIndexSizeInKbDefault(code int) *FindConfigColumnIndexSizeInKbDefault { + return &FindConfigColumnIndexSizeInKbDefault{ + _statusCode: code, + } +} + +/* +FindConfigColumnIndexSizeInKbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigColumnIndexSizeInKbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config column index size in kb default response +func (o *FindConfigColumnIndexSizeInKbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigColumnIndexSizeInKbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigColumnIndexSizeInKbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigColumnIndexSizeInKbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commit_failure_policy_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commit_failure_policy_parameters.go new file mode 100644 index 00000000000..4ab4bd75b0b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commit_failure_policy_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitFailurePolicyParams creates a new FindConfigCommitFailurePolicyParams object +// with the default values initialized. 
+func NewFindConfigCommitFailurePolicyParams() *FindConfigCommitFailurePolicyParams { + + return &FindConfigCommitFailurePolicyParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitFailurePolicyParamsWithTimeout creates a new FindConfigCommitFailurePolicyParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitFailurePolicyParamsWithTimeout(timeout time.Duration) *FindConfigCommitFailurePolicyParams { + + return &FindConfigCommitFailurePolicyParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitFailurePolicyParamsWithContext creates a new FindConfigCommitFailurePolicyParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitFailurePolicyParamsWithContext(ctx context.Context) *FindConfigCommitFailurePolicyParams { + + return &FindConfigCommitFailurePolicyParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitFailurePolicyParamsWithHTTPClient creates a new FindConfigCommitFailurePolicyParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitFailurePolicyParamsWithHTTPClient(client *http.Client) *FindConfigCommitFailurePolicyParams { + + return &FindConfigCommitFailurePolicyParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitFailurePolicyParams contains all the parameters to send to the API endpoint +for the find config commit failure policy operation typically these are written to a http.Request +*/ +type FindConfigCommitFailurePolicyParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commit failure policy params +func (o *FindConfigCommitFailurePolicyParams) WithTimeout(timeout time.Duration) *FindConfigCommitFailurePolicyParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commit failure policy params +func (o *FindConfigCommitFailurePolicyParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commit failure policy params +func (o *FindConfigCommitFailurePolicyParams) WithContext(ctx context.Context) *FindConfigCommitFailurePolicyParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commit failure policy params +func (o *FindConfigCommitFailurePolicyParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commit failure policy params +func (o *FindConfigCommitFailurePolicyParams) WithHTTPClient(client *http.Client) *FindConfigCommitFailurePolicyParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commit failure policy params +func (o *FindConfigCommitFailurePolicyParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitFailurePolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commit_failure_policy_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commit_failure_policy_responses.go new file mode 100644 index 00000000000..bd575c174d2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commit_failure_policy_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitFailurePolicyReader is a Reader for the FindConfigCommitFailurePolicy structure. +type FindConfigCommitFailurePolicyReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCommitFailurePolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitFailurePolicyOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitFailurePolicyDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitFailurePolicyOK creates a FindConfigCommitFailurePolicyOK with default headers values +func NewFindConfigCommitFailurePolicyOK() *FindConfigCommitFailurePolicyOK { + return &FindConfigCommitFailurePolicyOK{} +} + +/* +FindConfigCommitFailurePolicyOK handles this case with default header values. + +Config value +*/ +type FindConfigCommitFailurePolicyOK struct { + Payload string +} + +func (o *FindConfigCommitFailurePolicyOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigCommitFailurePolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitFailurePolicyDefault creates a FindConfigCommitFailurePolicyDefault with default headers values +func NewFindConfigCommitFailurePolicyDefault(code int) *FindConfigCommitFailurePolicyDefault { + return &FindConfigCommitFailurePolicyDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitFailurePolicyDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCommitFailurePolicyDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config commit failure policy default response +func (o *FindConfigCommitFailurePolicyDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCommitFailurePolicyDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCommitFailurePolicyDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCommitFailurePolicyDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_directory_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_directory_parameters.go new file mode 100644 index 00000000000..7dcbd7b1f94 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_directory_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitlogDirectoryParams creates a new FindConfigCommitlogDirectoryParams object +// with the default values initialized. 
+func NewFindConfigCommitlogDirectoryParams() *FindConfigCommitlogDirectoryParams { + + return &FindConfigCommitlogDirectoryParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitlogDirectoryParamsWithTimeout creates a new FindConfigCommitlogDirectoryParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitlogDirectoryParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogDirectoryParams { + + return &FindConfigCommitlogDirectoryParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitlogDirectoryParamsWithContext creates a new FindConfigCommitlogDirectoryParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitlogDirectoryParamsWithContext(ctx context.Context) *FindConfigCommitlogDirectoryParams { + + return &FindConfigCommitlogDirectoryParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitlogDirectoryParamsWithHTTPClient creates a new FindConfigCommitlogDirectoryParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitlogDirectoryParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogDirectoryParams { + + return &FindConfigCommitlogDirectoryParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitlogDirectoryParams contains all the parameters to send to the API endpoint +for the find config commitlog directory operation typically these are written to a http.Request +*/ +type FindConfigCommitlogDirectoryParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commitlog directory params +func (o *FindConfigCommitlogDirectoryParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogDirectoryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commitlog directory params +func (o *FindConfigCommitlogDirectoryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commitlog directory params +func (o *FindConfigCommitlogDirectoryParams) WithContext(ctx context.Context) *FindConfigCommitlogDirectoryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commitlog directory params +func (o *FindConfigCommitlogDirectoryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commitlog directory params +func (o *FindConfigCommitlogDirectoryParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogDirectoryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commitlog directory params +func (o *FindConfigCommitlogDirectoryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitlogDirectoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_directory_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_directory_responses.go new file mode 100644 index 00000000000..ddaecbae8e4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_directory_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitlogDirectoryReader is a Reader for the FindConfigCommitlogDirectory structure. +type FindConfigCommitlogDirectoryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCommitlogDirectoryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitlogDirectoryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitlogDirectoryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitlogDirectoryOK creates a FindConfigCommitlogDirectoryOK with default headers values +func NewFindConfigCommitlogDirectoryOK() *FindConfigCommitlogDirectoryOK { + return &FindConfigCommitlogDirectoryOK{} +} + +/* +FindConfigCommitlogDirectoryOK handles this case with default header values. + +Config value +*/ +type FindConfigCommitlogDirectoryOK struct { + Payload string +} + +func (o *FindConfigCommitlogDirectoryOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigCommitlogDirectoryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitlogDirectoryDefault creates a FindConfigCommitlogDirectoryDefault with default headers values +func NewFindConfigCommitlogDirectoryDefault(code int) *FindConfigCommitlogDirectoryDefault { + return &FindConfigCommitlogDirectoryDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitlogDirectoryDefault handles this case with default header values. 
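+
+Illustrative note (not swagger-generated): ReadResponse returns this type as
+an error for non-2xx codes, so callers usually recover it with a type
+assertion before inspecting the status and payload, e.g.
+
+	if def, ok := err.(*FindConfigCommitlogDirectoryDefault); ok {
+		fmt.Printf("HTTP %d: %s\n", def.Code(), def.GetPayload().Message)
+	}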
+ +unexpected error +*/ +type FindConfigCommitlogDirectoryDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config commitlog directory default response +func (o *FindConfigCommitlogDirectoryDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCommitlogDirectoryDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCommitlogDirectoryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCommitlogDirectoryDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_reuse_segments_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_reuse_segments_parameters.go new file mode 100644 index 00000000000..9452947ea1d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_reuse_segments_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitlogReuseSegmentsParams creates a new FindConfigCommitlogReuseSegmentsParams object +// with the default values initialized. 
+func NewFindConfigCommitlogReuseSegmentsParams() *FindConfigCommitlogReuseSegmentsParams { + + return &FindConfigCommitlogReuseSegmentsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitlogReuseSegmentsParamsWithTimeout creates a new FindConfigCommitlogReuseSegmentsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitlogReuseSegmentsParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogReuseSegmentsParams { + + return &FindConfigCommitlogReuseSegmentsParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitlogReuseSegmentsParamsWithContext creates a new FindConfigCommitlogReuseSegmentsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitlogReuseSegmentsParamsWithContext(ctx context.Context) *FindConfigCommitlogReuseSegmentsParams { + + return &FindConfigCommitlogReuseSegmentsParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitlogReuseSegmentsParamsWithHTTPClient creates a new FindConfigCommitlogReuseSegmentsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitlogReuseSegmentsParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogReuseSegmentsParams { + + return &FindConfigCommitlogReuseSegmentsParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitlogReuseSegmentsParams contains all the parameters to send to the API endpoint +for the find config commitlog reuse segments operation typically these are written to a http.Request +*/ +type FindConfigCommitlogReuseSegmentsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commitlog reuse segments params +func (o *FindConfigCommitlogReuseSegmentsParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogReuseSegmentsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commitlog reuse segments params +func (o *FindConfigCommitlogReuseSegmentsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commitlog reuse segments params +func (o *FindConfigCommitlogReuseSegmentsParams) WithContext(ctx context.Context) *FindConfigCommitlogReuseSegmentsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commitlog reuse segments params +func (o *FindConfigCommitlogReuseSegmentsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commitlog reuse segments params +func (o *FindConfigCommitlogReuseSegmentsParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogReuseSegmentsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commitlog reuse segments params +func (o *FindConfigCommitlogReuseSegmentsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitlogReuseSegmentsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_reuse_segments_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_reuse_segments_responses.go new file mode 100644 index 00000000000..e1b6320bedd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_reuse_segments_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitlogReuseSegmentsReader is a Reader for the FindConfigCommitlogReuseSegments structure. +type FindConfigCommitlogReuseSegmentsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCommitlogReuseSegmentsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitlogReuseSegmentsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitlogReuseSegmentsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitlogReuseSegmentsOK creates a FindConfigCommitlogReuseSegmentsOK with default headers values +func NewFindConfigCommitlogReuseSegmentsOK() *FindConfigCommitlogReuseSegmentsOK { + return &FindConfigCommitlogReuseSegmentsOK{} +} + +/* +FindConfigCommitlogReuseSegmentsOK handles this case with default header values. + +Config value +*/ +type FindConfigCommitlogReuseSegmentsOK struct { + Payload bool +} + +func (o *FindConfigCommitlogReuseSegmentsOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigCommitlogReuseSegmentsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitlogReuseSegmentsDefault creates a FindConfigCommitlogReuseSegmentsDefault with default headers values +func NewFindConfigCommitlogReuseSegmentsDefault(code int) *FindConfigCommitlogReuseSegmentsDefault { + return &FindConfigCommitlogReuseSegmentsDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitlogReuseSegmentsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCommitlogReuseSegmentsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config commitlog reuse segments default response +func (o *FindConfigCommitlogReuseSegmentsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCommitlogReuseSegmentsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCommitlogReuseSegmentsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCommitlogReuseSegmentsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_segment_size_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_segment_size_in_mb_parameters.go new file mode 100644 index 00000000000..4791c73caea --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_segment_size_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitlogSegmentSizeInMbParams creates a new FindConfigCommitlogSegmentSizeInMbParams object +// with the default values initialized. 
+func NewFindConfigCommitlogSegmentSizeInMbParams() *FindConfigCommitlogSegmentSizeInMbParams { + + return &FindConfigCommitlogSegmentSizeInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitlogSegmentSizeInMbParamsWithTimeout creates a new FindConfigCommitlogSegmentSizeInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitlogSegmentSizeInMbParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogSegmentSizeInMbParams { + + return &FindConfigCommitlogSegmentSizeInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitlogSegmentSizeInMbParamsWithContext creates a new FindConfigCommitlogSegmentSizeInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitlogSegmentSizeInMbParamsWithContext(ctx context.Context) *FindConfigCommitlogSegmentSizeInMbParams { + + return &FindConfigCommitlogSegmentSizeInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitlogSegmentSizeInMbParamsWithHTTPClient creates a new FindConfigCommitlogSegmentSizeInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitlogSegmentSizeInMbParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogSegmentSizeInMbParams { + + return &FindConfigCommitlogSegmentSizeInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitlogSegmentSizeInMbParams contains all the parameters to send to the API endpoint +for the find config commitlog segment size in mb operation typically these are written to a http.Request +*/ +type FindConfigCommitlogSegmentSizeInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commitlog segment size in mb params +func (o *FindConfigCommitlogSegmentSizeInMbParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogSegmentSizeInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commitlog segment size in mb params +func (o *FindConfigCommitlogSegmentSizeInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commitlog segment size in mb params +func (o *FindConfigCommitlogSegmentSizeInMbParams) WithContext(ctx context.Context) *FindConfigCommitlogSegmentSizeInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commitlog segment size in mb params +func (o *FindConfigCommitlogSegmentSizeInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commitlog segment size in mb params +func (o *FindConfigCommitlogSegmentSizeInMbParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogSegmentSizeInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commitlog segment size in mb params +func (o *FindConfigCommitlogSegmentSizeInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitlogSegmentSizeInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_segment_size_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_segment_size_in_mb_responses.go new file mode 100644 index 00000000000..0677b63e60d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_segment_size_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitlogSegmentSizeInMbReader is a Reader for the FindConfigCommitlogSegmentSizeInMb structure. +type FindConfigCommitlogSegmentSizeInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCommitlogSegmentSizeInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitlogSegmentSizeInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitlogSegmentSizeInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitlogSegmentSizeInMbOK creates a FindConfigCommitlogSegmentSizeInMbOK with default headers values +func NewFindConfigCommitlogSegmentSizeInMbOK() *FindConfigCommitlogSegmentSizeInMbOK { + return &FindConfigCommitlogSegmentSizeInMbOK{} +} + +/* +FindConfigCommitlogSegmentSizeInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigCommitlogSegmentSizeInMbOK struct { + Payload int64 +} + +func (o *FindConfigCommitlogSegmentSizeInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCommitlogSegmentSizeInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitlogSegmentSizeInMbDefault creates a FindConfigCommitlogSegmentSizeInMbDefault with default headers values +func NewFindConfigCommitlogSegmentSizeInMbDefault(code int) *FindConfigCommitlogSegmentSizeInMbDefault { + return &FindConfigCommitlogSegmentSizeInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitlogSegmentSizeInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCommitlogSegmentSizeInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config commitlog segment size in mb default response +func (o *FindConfigCommitlogSegmentSizeInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCommitlogSegmentSizeInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCommitlogSegmentSizeInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCommitlogSegmentSizeInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_batch_window_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_batch_window_in_ms_parameters.go new file mode 100644 index 00000000000..4bb3705b8ac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_batch_window_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitlogSyncBatchWindowInMsParams creates a new FindConfigCommitlogSyncBatchWindowInMsParams object +// with the default values initialized. 
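+//
+// A minimal usage sketch (illustrative, not swagger-generated): the
+// *WithHTTPClient constructor below is the hook for injecting a
+// preconfigured client, for example one with a custom timeout or TLS setup:
+//
+//	hc := &http.Client{Timeout: 30 * time.Second}
+//	params := NewFindConfigCommitlogSyncBatchWindowInMsParamsWithHTTPClient(hc)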
+func NewFindConfigCommitlogSyncBatchWindowInMsParams() *FindConfigCommitlogSyncBatchWindowInMsParams { + + return &FindConfigCommitlogSyncBatchWindowInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitlogSyncBatchWindowInMsParamsWithTimeout creates a new FindConfigCommitlogSyncBatchWindowInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitlogSyncBatchWindowInMsParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogSyncBatchWindowInMsParams { + + return &FindConfigCommitlogSyncBatchWindowInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitlogSyncBatchWindowInMsParamsWithContext creates a new FindConfigCommitlogSyncBatchWindowInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitlogSyncBatchWindowInMsParamsWithContext(ctx context.Context) *FindConfigCommitlogSyncBatchWindowInMsParams { + + return &FindConfigCommitlogSyncBatchWindowInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitlogSyncBatchWindowInMsParamsWithHTTPClient creates a new FindConfigCommitlogSyncBatchWindowInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitlogSyncBatchWindowInMsParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogSyncBatchWindowInMsParams { + + return &FindConfigCommitlogSyncBatchWindowInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitlogSyncBatchWindowInMsParams contains all the parameters to send to the API endpoint +for the find config commitlog sync batch window in ms operation typically these are written to a http.Request +*/ +type FindConfigCommitlogSyncBatchWindowInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commitlog sync batch window in ms params +func (o *FindConfigCommitlogSyncBatchWindowInMsParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogSyncBatchWindowInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commitlog sync batch window in ms params +func (o *FindConfigCommitlogSyncBatchWindowInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commitlog sync batch window in ms params +func (o *FindConfigCommitlogSyncBatchWindowInMsParams) WithContext(ctx context.Context) *FindConfigCommitlogSyncBatchWindowInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commitlog sync batch window in ms params +func (o *FindConfigCommitlogSyncBatchWindowInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commitlog sync batch window in ms params +func (o *FindConfigCommitlogSyncBatchWindowInMsParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogSyncBatchWindowInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commitlog sync batch window in ms params +func (o *FindConfigCommitlogSyncBatchWindowInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitlogSyncBatchWindowInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_batch_window_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_batch_window_in_ms_responses.go new file mode 100644 index 00000000000..a3257d683f5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_batch_window_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitlogSyncBatchWindowInMsReader is a Reader for the FindConfigCommitlogSyncBatchWindowInMs structure. +type FindConfigCommitlogSyncBatchWindowInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCommitlogSyncBatchWindowInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitlogSyncBatchWindowInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitlogSyncBatchWindowInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitlogSyncBatchWindowInMsOK creates a FindConfigCommitlogSyncBatchWindowInMsOK with default headers values +func NewFindConfigCommitlogSyncBatchWindowInMsOK() *FindConfigCommitlogSyncBatchWindowInMsOK { + return &FindConfigCommitlogSyncBatchWindowInMsOK{} +} + +/* +FindConfigCommitlogSyncBatchWindowInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigCommitlogSyncBatchWindowInMsOK struct { + Payload int64 +} + +func (o *FindConfigCommitlogSyncBatchWindowInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCommitlogSyncBatchWindowInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitlogSyncBatchWindowInMsDefault creates a FindConfigCommitlogSyncBatchWindowInMsDefault with default headers values +func NewFindConfigCommitlogSyncBatchWindowInMsDefault(code int) *FindConfigCommitlogSyncBatchWindowInMsDefault { + return &FindConfigCommitlogSyncBatchWindowInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitlogSyncBatchWindowInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCommitlogSyncBatchWindowInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config commitlog sync batch window in ms default response +func (o *FindConfigCommitlogSyncBatchWindowInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCommitlogSyncBatchWindowInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCommitlogSyncBatchWindowInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCommitlogSyncBatchWindowInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_parameters.go new file mode 100644 index 00000000000..8c5ba7d1eef --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitlogSyncParams creates a new FindConfigCommitlogSyncParams object +// with the default values initialized. 
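+//
+// A minimal usage sketch (illustrative, not swagger-generated): the
+// *WithContext constructor below ties the request lifetime to a
+// caller-supplied context, which is the usual way to propagate cancellation:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	params := NewFindConfigCommitlogSyncParamsWithContext(ctx)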
+func NewFindConfigCommitlogSyncParams() *FindConfigCommitlogSyncParams { + + return &FindConfigCommitlogSyncParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitlogSyncParamsWithTimeout creates a new FindConfigCommitlogSyncParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitlogSyncParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogSyncParams { + + return &FindConfigCommitlogSyncParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitlogSyncParamsWithContext creates a new FindConfigCommitlogSyncParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitlogSyncParamsWithContext(ctx context.Context) *FindConfigCommitlogSyncParams { + + return &FindConfigCommitlogSyncParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitlogSyncParamsWithHTTPClient creates a new FindConfigCommitlogSyncParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitlogSyncParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogSyncParams { + + return &FindConfigCommitlogSyncParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitlogSyncParams contains all the parameters to send to the API endpoint +for the find config commitlog sync operation typically these are written to a http.Request +*/ +type FindConfigCommitlogSyncParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commitlog sync params +func (o *FindConfigCommitlogSyncParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogSyncParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commitlog sync params +func (o *FindConfigCommitlogSyncParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commitlog sync params +func (o *FindConfigCommitlogSyncParams) WithContext(ctx context.Context) *FindConfigCommitlogSyncParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commitlog sync params +func (o *FindConfigCommitlogSyncParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commitlog sync params +func (o *FindConfigCommitlogSyncParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogSyncParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commitlog sync params +func (o *FindConfigCommitlogSyncParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitlogSyncParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_period_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_period_in_ms_parameters.go new file mode 100644 index 00000000000..f7dd5c2ddfb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_period_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitlogSyncPeriodInMsParams creates a new FindConfigCommitlogSyncPeriodInMsParams object +// with the default values initialized. +func NewFindConfigCommitlogSyncPeriodInMsParams() *FindConfigCommitlogSyncPeriodInMsParams { + + return &FindConfigCommitlogSyncPeriodInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitlogSyncPeriodInMsParamsWithTimeout creates a new FindConfigCommitlogSyncPeriodInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitlogSyncPeriodInMsParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogSyncPeriodInMsParams { + + return &FindConfigCommitlogSyncPeriodInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitlogSyncPeriodInMsParamsWithContext creates a new FindConfigCommitlogSyncPeriodInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitlogSyncPeriodInMsParamsWithContext(ctx context.Context) *FindConfigCommitlogSyncPeriodInMsParams { + + return &FindConfigCommitlogSyncPeriodInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitlogSyncPeriodInMsParamsWithHTTPClient creates a new FindConfigCommitlogSyncPeriodInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitlogSyncPeriodInMsParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogSyncPeriodInMsParams { + + return &FindConfigCommitlogSyncPeriodInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitlogSyncPeriodInMsParams contains all the parameters to send to the API endpoint +for the find config commitlog sync period in ms operation typically these are written to a http.Request +*/ +type FindConfigCommitlogSyncPeriodInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commitlog sync period in ms params +func (o *FindConfigCommitlogSyncPeriodInMsParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogSyncPeriodInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commitlog sync period in ms params +func (o *FindConfigCommitlogSyncPeriodInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commitlog sync period in ms params +func (o *FindConfigCommitlogSyncPeriodInMsParams) WithContext(ctx context.Context) *FindConfigCommitlogSyncPeriodInMsParams { + 
o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commitlog sync period in ms params +func (o *FindConfigCommitlogSyncPeriodInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commitlog sync period in ms params +func (o *FindConfigCommitlogSyncPeriodInMsParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogSyncPeriodInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commitlog sync period in ms params +func (o *FindConfigCommitlogSyncPeriodInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitlogSyncPeriodInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_period_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_period_in_ms_responses.go new file mode 100644 index 00000000000..7da56e40d68 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_period_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitlogSyncPeriodInMsReader is a Reader for the FindConfigCommitlogSyncPeriodInMs structure. +type FindConfigCommitlogSyncPeriodInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCommitlogSyncPeriodInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitlogSyncPeriodInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitlogSyncPeriodInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitlogSyncPeriodInMsOK creates a FindConfigCommitlogSyncPeriodInMsOK with default headers values +func NewFindConfigCommitlogSyncPeriodInMsOK() *FindConfigCommitlogSyncPeriodInMsOK { + return &FindConfigCommitlogSyncPeriodInMsOK{} +} + +/* +FindConfigCommitlogSyncPeriodInMsOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigCommitlogSyncPeriodInMsOK struct { + Payload int64 +} + +func (o *FindConfigCommitlogSyncPeriodInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCommitlogSyncPeriodInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitlogSyncPeriodInMsDefault creates a FindConfigCommitlogSyncPeriodInMsDefault with default headers values +func NewFindConfigCommitlogSyncPeriodInMsDefault(code int) *FindConfigCommitlogSyncPeriodInMsDefault { + return &FindConfigCommitlogSyncPeriodInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitlogSyncPeriodInMsDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigCommitlogSyncPeriodInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config commitlog sync period in ms default response +func (o *FindConfigCommitlogSyncPeriodInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCommitlogSyncPeriodInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCommitlogSyncPeriodInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCommitlogSyncPeriodInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_responses.go new file mode 100644 index 00000000000..479391bce32 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_sync_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitlogSyncReader is a Reader for the FindConfigCommitlogSync structure. +type FindConfigCommitlogSyncReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
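+//
+// Illustrative note (not swagger-generated): the switch below routes a 200 to
+// the typed OK result and any other status to the Default result; unexpected
+// 2xx codes are still returned as a success value, while non-2xx codes
+// surface the Default value as an error.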
+func (o *FindConfigCommitlogSyncReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitlogSyncOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitlogSyncDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitlogSyncOK creates a FindConfigCommitlogSyncOK with default headers values +func NewFindConfigCommitlogSyncOK() *FindConfigCommitlogSyncOK { + return &FindConfigCommitlogSyncOK{} +} + +/* +FindConfigCommitlogSyncOK handles this case with default header values. + +Config value +*/ +type FindConfigCommitlogSyncOK struct { + Payload string +} + +func (o *FindConfigCommitlogSyncOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigCommitlogSyncOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitlogSyncDefault creates a FindConfigCommitlogSyncDefault with default headers values +func NewFindConfigCommitlogSyncDefault(code int) *FindConfigCommitlogSyncDefault { + return &FindConfigCommitlogSyncDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitlogSyncDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigCommitlogSyncDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config commitlog sync default response +func (o *FindConfigCommitlogSyncDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCommitlogSyncDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCommitlogSyncDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCommitlogSyncDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_total_space_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_total_space_in_mb_parameters.go new file mode 100644 index 00000000000..ba72738fd9c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_total_space_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCommitlogTotalSpaceInMbParams creates a new FindConfigCommitlogTotalSpaceInMbParams object +// with the default values initialized. +func NewFindConfigCommitlogTotalSpaceInMbParams() *FindConfigCommitlogTotalSpaceInMbParams { + + return &FindConfigCommitlogTotalSpaceInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCommitlogTotalSpaceInMbParamsWithTimeout creates a new FindConfigCommitlogTotalSpaceInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCommitlogTotalSpaceInMbParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogTotalSpaceInMbParams { + + return &FindConfigCommitlogTotalSpaceInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigCommitlogTotalSpaceInMbParamsWithContext creates a new FindConfigCommitlogTotalSpaceInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCommitlogTotalSpaceInMbParamsWithContext(ctx context.Context) *FindConfigCommitlogTotalSpaceInMbParams { + + return &FindConfigCommitlogTotalSpaceInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigCommitlogTotalSpaceInMbParamsWithHTTPClient creates a new FindConfigCommitlogTotalSpaceInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCommitlogTotalSpaceInMbParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogTotalSpaceInMbParams { + + return &FindConfigCommitlogTotalSpaceInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigCommitlogTotalSpaceInMbParams contains all the parameters to send to the API endpoint +for the find config commitlog total space in mb operation typically these are written to a http.Request +*/ +type FindConfigCommitlogTotalSpaceInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config commitlog total space in mb params +func (o *FindConfigCommitlogTotalSpaceInMbParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogTotalSpaceInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config commitlog total space in mb params +func (o *FindConfigCommitlogTotalSpaceInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config commitlog total space in mb params +func (o *FindConfigCommitlogTotalSpaceInMbParams) WithContext(ctx context.Context) *FindConfigCommitlogTotalSpaceInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config commitlog total space in mb params +func (o *FindConfigCommitlogTotalSpaceInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config commitlog total space in mb params +func (o *FindConfigCommitlogTotalSpaceInMbParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogTotalSpaceInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config commitlog total space in mb params +func (o *FindConfigCommitlogTotalSpaceInMbParams) SetHTTPClient(client 
*http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCommitlogTotalSpaceInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_total_space_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_total_space_in_mb_responses.go new file mode 100644 index 00000000000..e2bd77c0f67 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_total_space_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCommitlogTotalSpaceInMbReader is a Reader for the FindConfigCommitlogTotalSpaceInMb structure. +type FindConfigCommitlogTotalSpaceInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCommitlogTotalSpaceInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCommitlogTotalSpaceInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCommitlogTotalSpaceInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCommitlogTotalSpaceInMbOK creates a FindConfigCommitlogTotalSpaceInMbOK with default headers values +func NewFindConfigCommitlogTotalSpaceInMbOK() *FindConfigCommitlogTotalSpaceInMbOK { + return &FindConfigCommitlogTotalSpaceInMbOK{} +} + +/* +FindConfigCommitlogTotalSpaceInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigCommitlogTotalSpaceInMbOK struct { + Payload int64 +} + +func (o *FindConfigCommitlogTotalSpaceInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCommitlogTotalSpaceInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCommitlogTotalSpaceInMbDefault creates a FindConfigCommitlogTotalSpaceInMbDefault with default headers values +func NewFindConfigCommitlogTotalSpaceInMbDefault(code int) *FindConfigCommitlogTotalSpaceInMbDefault { + return &FindConfigCommitlogTotalSpaceInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigCommitlogTotalSpaceInMbDefault handles this case with default header values. 
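+
+Illustrative note (not swagger-generated): when this value is surfaced as an
+error, Error() below renders it as, for example (message hypothetical),
+
+	agent [HTTP 500] internal server error
+
+with any trailing period trimmed from the payload message.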
+
+unexpected error
+*/
+type FindConfigCommitlogTotalSpaceInMbDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config commitlog total space in mb default response
+func (o *FindConfigCommitlogTotalSpaceInMbDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCommitlogTotalSpaceInMbDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCommitlogTotalSpaceInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCommitlogTotalSpaceInMbDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_useo_dsync_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_useo_dsync_parameters.go
new file mode 100644
index 00000000000..fef5437af8d
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_useo_dsync_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCommitlogUseoDsyncParams creates a new FindConfigCommitlogUseoDsyncParams object
+// with the default values initialized.
+func NewFindConfigCommitlogUseoDsyncParams() *FindConfigCommitlogUseoDsyncParams {
+
+	return &FindConfigCommitlogUseoDsyncParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCommitlogUseoDsyncParamsWithTimeout creates a new FindConfigCommitlogUseoDsyncParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCommitlogUseoDsyncParamsWithTimeout(timeout time.Duration) *FindConfigCommitlogUseoDsyncParams {
+
+	return &FindConfigCommitlogUseoDsyncParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCommitlogUseoDsyncParamsWithContext creates a new FindConfigCommitlogUseoDsyncParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCommitlogUseoDsyncParamsWithContext(ctx context.Context) *FindConfigCommitlogUseoDsyncParams {
+
+	return &FindConfigCommitlogUseoDsyncParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCommitlogUseoDsyncParamsWithHTTPClient creates a new FindConfigCommitlogUseoDsyncParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCommitlogUseoDsyncParamsWithHTTPClient(client *http.Client) *FindConfigCommitlogUseoDsyncParams {
+
+	return &FindConfigCommitlogUseoDsyncParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCommitlogUseoDsyncParams contains all the parameters to send to the API endpoint
+for the find config commitlog use o dsync operation typically these are written to a http.Request
+*/
+type FindConfigCommitlogUseoDsyncParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config commitlog use o dsync params
+func (o *FindConfigCommitlogUseoDsyncParams) WithTimeout(timeout time.Duration) *FindConfigCommitlogUseoDsyncParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config commitlog use o dsync params
+func (o *FindConfigCommitlogUseoDsyncParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config commitlog use o dsync params
+func (o *FindConfigCommitlogUseoDsyncParams) WithContext(ctx context.Context) *FindConfigCommitlogUseoDsyncParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config commitlog use o dsync params
+func (o *FindConfigCommitlogUseoDsyncParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config commitlog use o dsync params
+func (o *FindConfigCommitlogUseoDsyncParams) WithHTTPClient(client *http.Client) *FindConfigCommitlogUseoDsyncParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config commitlog use o dsync params
+func (o *FindConfigCommitlogUseoDsyncParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCommitlogUseoDsyncParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_useo_dsync_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_useo_dsync_responses.go
new file mode 100644
index 00000000000..4d4ba360d53
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_commitlog_useo_dsync_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCommitlogUseoDsyncReader is a Reader for the FindConfigCommitlogUseoDsync structure.
+type FindConfigCommitlogUseoDsyncReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCommitlogUseoDsyncReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCommitlogUseODsyncOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCommitlogUseoDsyncDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCommitlogUseODsyncOK creates a FindConfigCommitlogUseODsyncOK with default headers values
+func NewFindConfigCommitlogUseODsyncOK() *FindConfigCommitlogUseODsyncOK {
+	return &FindConfigCommitlogUseODsyncOK{}
+}
+
+/*
+FindConfigCommitlogUseODsyncOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCommitlogUseODsyncOK struct {
+	Payload bool
+}
+
+func (o *FindConfigCommitlogUseODsyncOK) GetPayload() bool {
+	return o.Payload
+}
+
+func (o *FindConfigCommitlogUseODsyncOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCommitlogUseoDsyncDefault creates a FindConfigCommitlogUseoDsyncDefault with default headers values
+func NewFindConfigCommitlogUseoDsyncDefault(code int) *FindConfigCommitlogUseoDsyncDefault {
+	return &FindConfigCommitlogUseoDsyncDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCommitlogUseoDsyncDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCommitlogUseoDsyncDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config commitlog use o dsync default response
+func (o *FindConfigCommitlogUseoDsyncDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCommitlogUseoDsyncDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCommitlogUseoDsyncDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCommitlogUseoDsyncDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
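Every params type added in this batch follows the same fluent builder shape: a bare constructor that seeds cr.DefaultTimeout, plus WithTimeout/WithContext/WithHTTPClient variants. A minimal sketch of building one, using only identifiers introduced in this diff (the import path is the vendored package path above); it assumes nothing beyond the module building within this vendor tree:

```go
package main

import (
	"context"
	"net/http"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	// Bound the overall call with a context deadline; the value passed to
	// WithTimeout is applied separately via r.SetTimeout in WriteToRequest.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	params := config.NewFindConfigCommitlogUseoDsyncParamsWithContext(ctx).
		WithTimeout(5 * time.Second).
		WithHTTPClient(&http.Client{})

	_ = params // handed to the generated client's operation method
}
```

The two timeouts are independent: the context cancels the call as a whole, while the per-request timeout is what WriteToRequest writes onto the swagger request.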
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_enforce_min_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_enforce_min_threshold_parameters.go
new file mode 100644
index 00000000000..1f7fcff5444
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_enforce_min_threshold_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCompactionEnforceMinThresholdParams creates a new FindConfigCompactionEnforceMinThresholdParams object
+// with the default values initialized.
+func NewFindConfigCompactionEnforceMinThresholdParams() *FindConfigCompactionEnforceMinThresholdParams {
+
+	return &FindConfigCompactionEnforceMinThresholdParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCompactionEnforceMinThresholdParamsWithTimeout creates a new FindConfigCompactionEnforceMinThresholdParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCompactionEnforceMinThresholdParamsWithTimeout(timeout time.Duration) *FindConfigCompactionEnforceMinThresholdParams {
+
+	return &FindConfigCompactionEnforceMinThresholdParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCompactionEnforceMinThresholdParamsWithContext creates a new FindConfigCompactionEnforceMinThresholdParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCompactionEnforceMinThresholdParamsWithContext(ctx context.Context) *FindConfigCompactionEnforceMinThresholdParams {
+
+	return &FindConfigCompactionEnforceMinThresholdParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCompactionEnforceMinThresholdParamsWithHTTPClient creates a new FindConfigCompactionEnforceMinThresholdParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCompactionEnforceMinThresholdParamsWithHTTPClient(client *http.Client) *FindConfigCompactionEnforceMinThresholdParams {
+
+	return &FindConfigCompactionEnforceMinThresholdParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCompactionEnforceMinThresholdParams contains all the parameters to send to the API endpoint
+for the find config compaction enforce min threshold operation typically these are written to a http.Request
+*/
+type FindConfigCompactionEnforceMinThresholdParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config compaction enforce min threshold params
+func (o *FindConfigCompactionEnforceMinThresholdParams) WithTimeout(timeout time.Duration) *FindConfigCompactionEnforceMinThresholdParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config compaction enforce min threshold params
+func (o *FindConfigCompactionEnforceMinThresholdParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config compaction enforce min threshold params
+func (o *FindConfigCompactionEnforceMinThresholdParams) WithContext(ctx context.Context) *FindConfigCompactionEnforceMinThresholdParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config compaction enforce min threshold params
+func (o *FindConfigCompactionEnforceMinThresholdParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config compaction enforce min threshold params
+func (o *FindConfigCompactionEnforceMinThresholdParams) WithHTTPClient(client *http.Client) *FindConfigCompactionEnforceMinThresholdParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config compaction enforce min threshold params
+func (o *FindConfigCompactionEnforceMinThresholdParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCompactionEnforceMinThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_enforce_min_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_enforce_min_threshold_responses.go
new file mode 100644
index 00000000000..5795744adde
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_enforce_min_threshold_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCompactionEnforceMinThresholdReader is a Reader for the FindConfigCompactionEnforceMinThreshold structure.
+type FindConfigCompactionEnforceMinThresholdReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCompactionEnforceMinThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCompactionEnforceMinThresholdOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCompactionEnforceMinThresholdDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCompactionEnforceMinThresholdOK creates a FindConfigCompactionEnforceMinThresholdOK with default headers values
+func NewFindConfigCompactionEnforceMinThresholdOK() *FindConfigCompactionEnforceMinThresholdOK {
+	return &FindConfigCompactionEnforceMinThresholdOK{}
+}
+
+/*
+FindConfigCompactionEnforceMinThresholdOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCompactionEnforceMinThresholdOK struct {
+	Payload bool
+}
+
+func (o *FindConfigCompactionEnforceMinThresholdOK) GetPayload() bool {
+	return o.Payload
+}
+
+func (o *FindConfigCompactionEnforceMinThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCompactionEnforceMinThresholdDefault creates a FindConfigCompactionEnforceMinThresholdDefault with default headers values
+func NewFindConfigCompactionEnforceMinThresholdDefault(code int) *FindConfigCompactionEnforceMinThresholdDefault {
+	return &FindConfigCompactionEnforceMinThresholdDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCompactionEnforceMinThresholdDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCompactionEnforceMinThresholdDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config compaction enforce min threshold default response
+func (o *FindConfigCompactionEnforceMinThresholdDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCompactionEnforceMinThresholdDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCompactionEnforceMinThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCompactionEnforceMinThresholdDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_cell_warning_threshold_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_cell_warning_threshold_mb_parameters.go
new file mode 100644
index 00000000000..71dab2dc2b1
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_cell_warning_threshold_mb_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCompactionLargeCellWarningThresholdMbParams creates a new FindConfigCompactionLargeCellWarningThresholdMbParams object
+// with the default values initialized.
+func NewFindConfigCompactionLargeCellWarningThresholdMbParams() *FindConfigCompactionLargeCellWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargeCellWarningThresholdMbParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCompactionLargeCellWarningThresholdMbParamsWithTimeout creates a new FindConfigCompactionLargeCellWarningThresholdMbParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCompactionLargeCellWarningThresholdMbParamsWithTimeout(timeout time.Duration) *FindConfigCompactionLargeCellWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargeCellWarningThresholdMbParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCompactionLargeCellWarningThresholdMbParamsWithContext creates a new FindConfigCompactionLargeCellWarningThresholdMbParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCompactionLargeCellWarningThresholdMbParamsWithContext(ctx context.Context) *FindConfigCompactionLargeCellWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargeCellWarningThresholdMbParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCompactionLargeCellWarningThresholdMbParamsWithHTTPClient creates a new FindConfigCompactionLargeCellWarningThresholdMbParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCompactionLargeCellWarningThresholdMbParamsWithHTTPClient(client *http.Client) *FindConfigCompactionLargeCellWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargeCellWarningThresholdMbParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCompactionLargeCellWarningThresholdMbParams contains all the parameters to send to the API endpoint
+for the find config compaction large cell warning threshold mb operation typically these are written to a http.Request
+*/
+type FindConfigCompactionLargeCellWarningThresholdMbParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config compaction large cell warning threshold mb params
+func (o *FindConfigCompactionLargeCellWarningThresholdMbParams) WithTimeout(timeout time.Duration) *FindConfigCompactionLargeCellWarningThresholdMbParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config compaction large cell warning threshold mb params
+func (o *FindConfigCompactionLargeCellWarningThresholdMbParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config compaction large cell warning threshold mb params
+func (o *FindConfigCompactionLargeCellWarningThresholdMbParams) WithContext(ctx context.Context) *FindConfigCompactionLargeCellWarningThresholdMbParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config compaction large cell warning threshold mb params
+func (o *FindConfigCompactionLargeCellWarningThresholdMbParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config compaction large cell warning threshold mb params
+func (o *FindConfigCompactionLargeCellWarningThresholdMbParams) WithHTTPClient(client *http.Client) *FindConfigCompactionLargeCellWarningThresholdMbParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config compaction large cell warning threshold mb params
+func (o *FindConfigCompactionLargeCellWarningThresholdMbParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCompactionLargeCellWarningThresholdMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_cell_warning_threshold_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_cell_warning_threshold_mb_responses.go
new file mode 100644
index 00000000000..00d531e8b7d
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_cell_warning_threshold_mb_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCompactionLargeCellWarningThresholdMbReader is a Reader for the FindConfigCompactionLargeCellWarningThresholdMb structure.
+type FindConfigCompactionLargeCellWarningThresholdMbReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCompactionLargeCellWarningThresholdMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCompactionLargeCellWarningThresholdMbOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCompactionLargeCellWarningThresholdMbDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCompactionLargeCellWarningThresholdMbOK creates a FindConfigCompactionLargeCellWarningThresholdMbOK with default headers values
+func NewFindConfigCompactionLargeCellWarningThresholdMbOK() *FindConfigCompactionLargeCellWarningThresholdMbOK {
+	return &FindConfigCompactionLargeCellWarningThresholdMbOK{}
+}
+
+/*
+FindConfigCompactionLargeCellWarningThresholdMbOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCompactionLargeCellWarningThresholdMbOK struct {
+	Payload int64
+}
+
+func (o *FindConfigCompactionLargeCellWarningThresholdMbOK) GetPayload() int64 {
+	return o.Payload
+}
+
+func (o *FindConfigCompactionLargeCellWarningThresholdMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCompactionLargeCellWarningThresholdMbDefault creates a FindConfigCompactionLargeCellWarningThresholdMbDefault with default headers values
+func NewFindConfigCompactionLargeCellWarningThresholdMbDefault(code int) *FindConfigCompactionLargeCellWarningThresholdMbDefault {
+	return &FindConfigCompactionLargeCellWarningThresholdMbDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCompactionLargeCellWarningThresholdMbDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCompactionLargeCellWarningThresholdMbDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config compaction large cell warning threshold mb default response
+func (o *FindConfigCompactionLargeCellWarningThresholdMbDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCompactionLargeCellWarningThresholdMbDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCompactionLargeCellWarningThresholdMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCompactionLargeCellWarningThresholdMbDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_partition_warning_threshold_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_partition_warning_threshold_mb_parameters.go
new file mode 100644
index 00000000000..56ba39a7362
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_partition_warning_threshold_mb_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCompactionLargePartitionWarningThresholdMbParams creates a new FindConfigCompactionLargePartitionWarningThresholdMbParams object
+// with the default values initialized.
+func NewFindConfigCompactionLargePartitionWarningThresholdMbParams() *FindConfigCompactionLargePartitionWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargePartitionWarningThresholdMbParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCompactionLargePartitionWarningThresholdMbParamsWithTimeout creates a new FindConfigCompactionLargePartitionWarningThresholdMbParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCompactionLargePartitionWarningThresholdMbParamsWithTimeout(timeout time.Duration) *FindConfigCompactionLargePartitionWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargePartitionWarningThresholdMbParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCompactionLargePartitionWarningThresholdMbParamsWithContext creates a new FindConfigCompactionLargePartitionWarningThresholdMbParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCompactionLargePartitionWarningThresholdMbParamsWithContext(ctx context.Context) *FindConfigCompactionLargePartitionWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargePartitionWarningThresholdMbParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCompactionLargePartitionWarningThresholdMbParamsWithHTTPClient creates a new FindConfigCompactionLargePartitionWarningThresholdMbParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCompactionLargePartitionWarningThresholdMbParamsWithHTTPClient(client *http.Client) *FindConfigCompactionLargePartitionWarningThresholdMbParams {
+
+	return &FindConfigCompactionLargePartitionWarningThresholdMbParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCompactionLargePartitionWarningThresholdMbParams contains all the parameters to send to the API endpoint
+for the find config compaction large partition warning threshold mb operation typically these are written to a http.Request
+*/
+type FindConfigCompactionLargePartitionWarningThresholdMbParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config compaction large partition warning threshold mb params
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbParams) WithTimeout(timeout time.Duration) *FindConfigCompactionLargePartitionWarningThresholdMbParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config compaction large partition warning threshold mb params
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config compaction large partition warning threshold mb params
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbParams) WithContext(ctx context.Context) *FindConfigCompactionLargePartitionWarningThresholdMbParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config compaction large partition warning threshold mb params
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config compaction large partition warning threshold mb params
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbParams) WithHTTPClient(client *http.Client) *FindConfigCompactionLargePartitionWarningThresholdMbParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config compaction large partition warning threshold mb params
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_partition_warning_threshold_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_partition_warning_threshold_mb_responses.go
new file mode 100644
index 00000000000..73762fc4cea
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_partition_warning_threshold_mb_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCompactionLargePartitionWarningThresholdMbReader is a Reader for the FindConfigCompactionLargePartitionWarningThresholdMb structure.
+type FindConfigCompactionLargePartitionWarningThresholdMbReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCompactionLargePartitionWarningThresholdMbOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCompactionLargePartitionWarningThresholdMbDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCompactionLargePartitionWarningThresholdMbOK creates a FindConfigCompactionLargePartitionWarningThresholdMbOK with default headers values
+func NewFindConfigCompactionLargePartitionWarningThresholdMbOK() *FindConfigCompactionLargePartitionWarningThresholdMbOK {
+	return &FindConfigCompactionLargePartitionWarningThresholdMbOK{}
+}
+
+/*
+FindConfigCompactionLargePartitionWarningThresholdMbOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCompactionLargePartitionWarningThresholdMbOK struct {
+	Payload int64
+}
+
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbOK) GetPayload() int64 {
+	return o.Payload
+}
+
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCompactionLargePartitionWarningThresholdMbDefault creates a FindConfigCompactionLargePartitionWarningThresholdMbDefault with default headers values
+func NewFindConfigCompactionLargePartitionWarningThresholdMbDefault(code int) *FindConfigCompactionLargePartitionWarningThresholdMbDefault {
+	return &FindConfigCompactionLargePartitionWarningThresholdMbDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCompactionLargePartitionWarningThresholdMbDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCompactionLargePartitionWarningThresholdMbDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config compaction large partition warning threshold mb default response
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCompactionLargePartitionWarningThresholdMbDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
+func NewFindConfigCompactionLargeRowWarningThresholdMbParams() *FindConfigCompactionLargeRowWarningThresholdMbParams { + + return &FindConfigCompactionLargeRowWarningThresholdMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCompactionLargeRowWarningThresholdMbParamsWithTimeout creates a new FindConfigCompactionLargeRowWarningThresholdMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCompactionLargeRowWarningThresholdMbParamsWithTimeout(timeout time.Duration) *FindConfigCompactionLargeRowWarningThresholdMbParams { + + return &FindConfigCompactionLargeRowWarningThresholdMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigCompactionLargeRowWarningThresholdMbParamsWithContext creates a new FindConfigCompactionLargeRowWarningThresholdMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCompactionLargeRowWarningThresholdMbParamsWithContext(ctx context.Context) *FindConfigCompactionLargeRowWarningThresholdMbParams { + + return &FindConfigCompactionLargeRowWarningThresholdMbParams{ + + Context: ctx, + } +} + +// NewFindConfigCompactionLargeRowWarningThresholdMbParamsWithHTTPClient creates a new FindConfigCompactionLargeRowWarningThresholdMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCompactionLargeRowWarningThresholdMbParamsWithHTTPClient(client *http.Client) *FindConfigCompactionLargeRowWarningThresholdMbParams { + + return &FindConfigCompactionLargeRowWarningThresholdMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigCompactionLargeRowWarningThresholdMbParams contains all the parameters to send to the API endpoint +for the find config compaction large row warning threshold mb operation typically these are written to a http.Request +*/ +type FindConfigCompactionLargeRowWarningThresholdMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config compaction large row warning threshold mb params +func (o *FindConfigCompactionLargeRowWarningThresholdMbParams) WithTimeout(timeout time.Duration) *FindConfigCompactionLargeRowWarningThresholdMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config compaction large row warning threshold mb params +func (o *FindConfigCompactionLargeRowWarningThresholdMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config compaction large row warning threshold mb params +func (o *FindConfigCompactionLargeRowWarningThresholdMbParams) WithContext(ctx context.Context) *FindConfigCompactionLargeRowWarningThresholdMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config compaction large row warning threshold mb params +func (o *FindConfigCompactionLargeRowWarningThresholdMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config compaction large row warning threshold mb params +func (o *FindConfigCompactionLargeRowWarningThresholdMbParams) WithHTTPClient(client *http.Client) *FindConfigCompactionLargeRowWarningThresholdMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config compaction large row warning threshold mb params +func (o 
*FindConfigCompactionLargeRowWarningThresholdMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCompactionLargeRowWarningThresholdMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_row_warning_threshold_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_row_warning_threshold_mb_responses.go new file mode 100644 index 00000000000..e9b19f384c1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_large_row_warning_threshold_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCompactionLargeRowWarningThresholdMbReader is a Reader for the FindConfigCompactionLargeRowWarningThresholdMb structure. +type FindConfigCompactionLargeRowWarningThresholdMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCompactionLargeRowWarningThresholdMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCompactionLargeRowWarningThresholdMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCompactionLargeRowWarningThresholdMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCompactionLargeRowWarningThresholdMbOK creates a FindConfigCompactionLargeRowWarningThresholdMbOK with default headers values +func NewFindConfigCompactionLargeRowWarningThresholdMbOK() *FindConfigCompactionLargeRowWarningThresholdMbOK { + return &FindConfigCompactionLargeRowWarningThresholdMbOK{} +} + +/* +FindConfigCompactionLargeRowWarningThresholdMbOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigCompactionLargeRowWarningThresholdMbOK struct { + Payload int64 +} + +func (o *FindConfigCompactionLargeRowWarningThresholdMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCompactionLargeRowWarningThresholdMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCompactionLargeRowWarningThresholdMbDefault creates a FindConfigCompactionLargeRowWarningThresholdMbDefault with default headers values +func NewFindConfigCompactionLargeRowWarningThresholdMbDefault(code int) *FindConfigCompactionLargeRowWarningThresholdMbDefault { + return &FindConfigCompactionLargeRowWarningThresholdMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigCompactionLargeRowWarningThresholdMbDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigCompactionLargeRowWarningThresholdMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config compaction large row warning threshold mb default response +func (o *FindConfigCompactionLargeRowWarningThresholdMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCompactionLargeRowWarningThresholdMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCompactionLargeRowWarningThresholdMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCompactionLargeRowWarningThresholdMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_preheat_key_cache_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_preheat_key_cache_parameters.go new file mode 100644 index 00000000000..0d35dad2d8c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_preheat_key_cache_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCompactionPreheatKeyCacheParams creates a new FindConfigCompactionPreheatKeyCacheParams object +// with the default values initialized. 
+func NewFindConfigCompactionPreheatKeyCacheParams() *FindConfigCompactionPreheatKeyCacheParams { + + return &FindConfigCompactionPreheatKeyCacheParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCompactionPreheatKeyCacheParamsWithTimeout creates a new FindConfigCompactionPreheatKeyCacheParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCompactionPreheatKeyCacheParamsWithTimeout(timeout time.Duration) *FindConfigCompactionPreheatKeyCacheParams { + + return &FindConfigCompactionPreheatKeyCacheParams{ + + timeout: timeout, + } +} + +// NewFindConfigCompactionPreheatKeyCacheParamsWithContext creates a new FindConfigCompactionPreheatKeyCacheParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCompactionPreheatKeyCacheParamsWithContext(ctx context.Context) *FindConfigCompactionPreheatKeyCacheParams { + + return &FindConfigCompactionPreheatKeyCacheParams{ + + Context: ctx, + } +} + +// NewFindConfigCompactionPreheatKeyCacheParamsWithHTTPClient creates a new FindConfigCompactionPreheatKeyCacheParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCompactionPreheatKeyCacheParamsWithHTTPClient(client *http.Client) *FindConfigCompactionPreheatKeyCacheParams { + + return &FindConfigCompactionPreheatKeyCacheParams{ + HTTPClient: client, + } +} + +/* +FindConfigCompactionPreheatKeyCacheParams contains all the parameters to send to the API endpoint +for the find config compaction preheat key cache operation typically these are written to a http.Request +*/ +type FindConfigCompactionPreheatKeyCacheParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config compaction preheat key cache params +func (o *FindConfigCompactionPreheatKeyCacheParams) WithTimeout(timeout time.Duration) *FindConfigCompactionPreheatKeyCacheParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config compaction preheat key cache params +func (o *FindConfigCompactionPreheatKeyCacheParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config compaction preheat key cache params +func (o *FindConfigCompactionPreheatKeyCacheParams) WithContext(ctx context.Context) *FindConfigCompactionPreheatKeyCacheParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config compaction preheat key cache params +func (o *FindConfigCompactionPreheatKeyCacheParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config compaction preheat key cache params +func (o *FindConfigCompactionPreheatKeyCacheParams) WithHTTPClient(client *http.Client) *FindConfigCompactionPreheatKeyCacheParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config compaction preheat key cache params +func (o *FindConfigCompactionPreheatKeyCacheParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCompactionPreheatKeyCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_preheat_key_cache_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_preheat_key_cache_responses.go new file mode 100644 index 00000000000..4dc09c40a0e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_preheat_key_cache_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCompactionPreheatKeyCacheReader is a Reader for the FindConfigCompactionPreheatKeyCache structure. +type FindConfigCompactionPreheatKeyCacheReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCompactionPreheatKeyCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCompactionPreheatKeyCacheOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCompactionPreheatKeyCacheDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCompactionPreheatKeyCacheOK creates a FindConfigCompactionPreheatKeyCacheOK with default headers values +func NewFindConfigCompactionPreheatKeyCacheOK() *FindConfigCompactionPreheatKeyCacheOK { + return &FindConfigCompactionPreheatKeyCacheOK{} +} + +/* +FindConfigCompactionPreheatKeyCacheOK handles this case with default header values. + +Config value +*/ +type FindConfigCompactionPreheatKeyCacheOK struct { + Payload bool +} + +func (o *FindConfigCompactionPreheatKeyCacheOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigCompactionPreheatKeyCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCompactionPreheatKeyCacheDefault creates a FindConfigCompactionPreheatKeyCacheDefault with default headers values +func NewFindConfigCompactionPreheatKeyCacheDefault(code int) *FindConfigCompactionPreheatKeyCacheDefault { + return &FindConfigCompactionPreheatKeyCacheDefault{ + _statusCode: code, + } +} + +/* +FindConfigCompactionPreheatKeyCacheDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCompactionPreheatKeyCacheDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config compaction preheat key cache default response +func (o *FindConfigCompactionPreheatKeyCacheDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCompactionPreheatKeyCacheDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCompactionPreheatKeyCacheDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCompactionPreheatKeyCacheDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_rows_count_warning_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_rows_count_warning_threshold_parameters.go new file mode 100644 index 00000000000..b9eb40ba155 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_rows_count_warning_threshold_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCompactionRowsCountWarningThresholdParams creates a new FindConfigCompactionRowsCountWarningThresholdParams object +// with the default values initialized. 
+func NewFindConfigCompactionRowsCountWarningThresholdParams() *FindConfigCompactionRowsCountWarningThresholdParams { + + return &FindConfigCompactionRowsCountWarningThresholdParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCompactionRowsCountWarningThresholdParamsWithTimeout creates a new FindConfigCompactionRowsCountWarningThresholdParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCompactionRowsCountWarningThresholdParamsWithTimeout(timeout time.Duration) *FindConfigCompactionRowsCountWarningThresholdParams { + + return &FindConfigCompactionRowsCountWarningThresholdParams{ + + timeout: timeout, + } +} + +// NewFindConfigCompactionRowsCountWarningThresholdParamsWithContext creates a new FindConfigCompactionRowsCountWarningThresholdParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCompactionRowsCountWarningThresholdParamsWithContext(ctx context.Context) *FindConfigCompactionRowsCountWarningThresholdParams { + + return &FindConfigCompactionRowsCountWarningThresholdParams{ + + Context: ctx, + } +} + +// NewFindConfigCompactionRowsCountWarningThresholdParamsWithHTTPClient creates a new FindConfigCompactionRowsCountWarningThresholdParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCompactionRowsCountWarningThresholdParamsWithHTTPClient(client *http.Client) *FindConfigCompactionRowsCountWarningThresholdParams { + + return &FindConfigCompactionRowsCountWarningThresholdParams{ + HTTPClient: client, + } +} + +/* +FindConfigCompactionRowsCountWarningThresholdParams contains all the parameters to send to the API endpoint +for the find config compaction rows count warning threshold operation typically these are written to a http.Request +*/ +type FindConfigCompactionRowsCountWarningThresholdParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config compaction rows count warning threshold params +func (o *FindConfigCompactionRowsCountWarningThresholdParams) WithTimeout(timeout time.Duration) *FindConfigCompactionRowsCountWarningThresholdParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config compaction rows count warning threshold params +func (o *FindConfigCompactionRowsCountWarningThresholdParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config compaction rows count warning threshold params +func (o *FindConfigCompactionRowsCountWarningThresholdParams) WithContext(ctx context.Context) *FindConfigCompactionRowsCountWarningThresholdParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config compaction rows count warning threshold params +func (o *FindConfigCompactionRowsCountWarningThresholdParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config compaction rows count warning threshold params +func (o *FindConfigCompactionRowsCountWarningThresholdParams) WithHTTPClient(client *http.Client) *FindConfigCompactionRowsCountWarningThresholdParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config compaction rows count warning threshold params +func (o *FindConfigCompactionRowsCountWarningThresholdParams) SetHTTPClient(client 
*http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCompactionRowsCountWarningThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_rows_count_warning_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_rows_count_warning_threshold_responses.go new file mode 100644 index 00000000000..463234bfa1d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_rows_count_warning_threshold_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCompactionRowsCountWarningThresholdReader is a Reader for the FindConfigCompactionRowsCountWarningThreshold structure. +type FindConfigCompactionRowsCountWarningThresholdReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCompactionRowsCountWarningThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCompactionRowsCountWarningThresholdOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCompactionRowsCountWarningThresholdDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCompactionRowsCountWarningThresholdOK creates a FindConfigCompactionRowsCountWarningThresholdOK with default headers values +func NewFindConfigCompactionRowsCountWarningThresholdOK() *FindConfigCompactionRowsCountWarningThresholdOK { + return &FindConfigCompactionRowsCountWarningThresholdOK{} +} + +/* +FindConfigCompactionRowsCountWarningThresholdOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigCompactionRowsCountWarningThresholdOK struct { + Payload int64 +} + +func (o *FindConfigCompactionRowsCountWarningThresholdOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCompactionRowsCountWarningThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCompactionRowsCountWarningThresholdDefault creates a FindConfigCompactionRowsCountWarningThresholdDefault with default headers values +func NewFindConfigCompactionRowsCountWarningThresholdDefault(code int) *FindConfigCompactionRowsCountWarningThresholdDefault { + return &FindConfigCompactionRowsCountWarningThresholdDefault{ + _statusCode: code, + } +} + +/* +FindConfigCompactionRowsCountWarningThresholdDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigCompactionRowsCountWarningThresholdDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config compaction rows count warning threshold default response +func (o *FindConfigCompactionRowsCountWarningThresholdDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCompactionRowsCountWarningThresholdDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCompactionRowsCountWarningThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCompactionRowsCountWarningThresholdDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_static_shares_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_static_shares_parameters.go new file mode 100644 index 00000000000..0df61e228ea --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_static_shares_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCompactionStaticSharesParams creates a new FindConfigCompactionStaticSharesParams object +// with the default values initialized. 
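// Illustrative sketch (not generated code): the fluent construction pattern shared by
// every FindConfig* params type in this diff. It uses only the constructors and With*
// setters that appear in the surrounding generated files; the wrapping function name
// is hypothetical, and the context/http.Client values are caller-supplied.
func exampleBuildStaticSharesParams(ctx context.Context) *FindConfigCompactionStaticSharesParams {
	// Seed the params with the caller's context, then layer on a per-request
	// timeout and a custom HTTP client via the chainable setters.
	return NewFindConfigCompactionStaticSharesParamsWithContext(ctx).
		WithTimeout(5 * time.Second).
		WithHTTPClient(&http.Client{Timeout: 15 * time.Second})
}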
+func NewFindConfigCompactionStaticSharesParams() *FindConfigCompactionStaticSharesParams { + + return &FindConfigCompactionStaticSharesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCompactionStaticSharesParamsWithTimeout creates a new FindConfigCompactionStaticSharesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCompactionStaticSharesParamsWithTimeout(timeout time.Duration) *FindConfigCompactionStaticSharesParams { + + return &FindConfigCompactionStaticSharesParams{ + + timeout: timeout, + } +} + +// NewFindConfigCompactionStaticSharesParamsWithContext creates a new FindConfigCompactionStaticSharesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCompactionStaticSharesParamsWithContext(ctx context.Context) *FindConfigCompactionStaticSharesParams { + + return &FindConfigCompactionStaticSharesParams{ + + Context: ctx, + } +} + +// NewFindConfigCompactionStaticSharesParamsWithHTTPClient creates a new FindConfigCompactionStaticSharesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCompactionStaticSharesParamsWithHTTPClient(client *http.Client) *FindConfigCompactionStaticSharesParams { + + return &FindConfigCompactionStaticSharesParams{ + HTTPClient: client, + } +} + +/* +FindConfigCompactionStaticSharesParams contains all the parameters to send to the API endpoint +for the find config compaction static shares operation typically these are written to a http.Request +*/ +type FindConfigCompactionStaticSharesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config compaction static shares params +func (o *FindConfigCompactionStaticSharesParams) WithTimeout(timeout time.Duration) *FindConfigCompactionStaticSharesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config compaction static shares params +func (o *FindConfigCompactionStaticSharesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config compaction static shares params +func (o *FindConfigCompactionStaticSharesParams) WithContext(ctx context.Context) *FindConfigCompactionStaticSharesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config compaction static shares params +func (o *FindConfigCompactionStaticSharesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config compaction static shares params +func (o *FindConfigCompactionStaticSharesParams) WithHTTPClient(client *http.Client) *FindConfigCompactionStaticSharesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config compaction static shares params +func (o *FindConfigCompactionStaticSharesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCompactionStaticSharesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_static_shares_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_static_shares_responses.go new file mode 100644 index 00000000000..fdbaa0a265c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_static_shares_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCompactionStaticSharesReader is a Reader for the FindConfigCompactionStaticShares structure. +type FindConfigCompactionStaticSharesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCompactionStaticSharesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCompactionStaticSharesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCompactionStaticSharesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCompactionStaticSharesOK creates a FindConfigCompactionStaticSharesOK with default headers values +func NewFindConfigCompactionStaticSharesOK() *FindConfigCompactionStaticSharesOK { + return &FindConfigCompactionStaticSharesOK{} +} + +/* +FindConfigCompactionStaticSharesOK handles this case with default header values. + +Config value +*/ +type FindConfigCompactionStaticSharesOK struct { + Payload float64 +} + +func (o *FindConfigCompactionStaticSharesOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigCompactionStaticSharesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCompactionStaticSharesDefault creates a FindConfigCompactionStaticSharesDefault with default headers values +func NewFindConfigCompactionStaticSharesDefault(code int) *FindConfigCompactionStaticSharesDefault { + return &FindConfigCompactionStaticSharesDefault{ + _statusCode: code, + } +} + +/* +FindConfigCompactionStaticSharesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCompactionStaticSharesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config compaction static shares default response +func (o *FindConfigCompactionStaticSharesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCompactionStaticSharesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCompactionStaticSharesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCompactionStaticSharesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_throughput_mb_per_sec_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_throughput_mb_per_sec_parameters.go new file mode 100644 index 00000000000..7d1e75f40e9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_throughput_mb_per_sec_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigCompactionThroughputMbPerSecParams creates a new FindConfigCompactionThroughputMbPerSecParams object +// with the default values initialized. 
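// Illustrative sketch (hypothetical function, not generated): these params structs
// carry no endpoint-specific fields, so the zero-argument constructor only seeds the
// timeout with cr.DefaultTimeout; the two calls below therefore build identical values.
func exampleDefaultThroughputParams() *FindConfigCompactionThroughputMbPerSecParams {
	a := NewFindConfigCompactionThroughputMbPerSecParams() // timeout defaults to cr.DefaultTimeout
	b := NewFindConfigCompactionThroughputMbPerSecParamsWithTimeout(cr.DefaultTimeout) // explicit equivalent
	_ = b
	return a
}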
+func NewFindConfigCompactionThroughputMbPerSecParams() *FindConfigCompactionThroughputMbPerSecParams { + + return &FindConfigCompactionThroughputMbPerSecParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigCompactionThroughputMbPerSecParamsWithTimeout creates a new FindConfigCompactionThroughputMbPerSecParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigCompactionThroughputMbPerSecParamsWithTimeout(timeout time.Duration) *FindConfigCompactionThroughputMbPerSecParams { + + return &FindConfigCompactionThroughputMbPerSecParams{ + + timeout: timeout, + } +} + +// NewFindConfigCompactionThroughputMbPerSecParamsWithContext creates a new FindConfigCompactionThroughputMbPerSecParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigCompactionThroughputMbPerSecParamsWithContext(ctx context.Context) *FindConfigCompactionThroughputMbPerSecParams { + + return &FindConfigCompactionThroughputMbPerSecParams{ + + Context: ctx, + } +} + +// NewFindConfigCompactionThroughputMbPerSecParamsWithHTTPClient creates a new FindConfigCompactionThroughputMbPerSecParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigCompactionThroughputMbPerSecParamsWithHTTPClient(client *http.Client) *FindConfigCompactionThroughputMbPerSecParams { + + return &FindConfigCompactionThroughputMbPerSecParams{ + HTTPClient: client, + } +} + +/* +FindConfigCompactionThroughputMbPerSecParams contains all the parameters to send to the API endpoint +for the find config compaction throughput mb per sec operation typically these are written to a http.Request +*/ +type FindConfigCompactionThroughputMbPerSecParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config compaction throughput mb per sec params +func (o *FindConfigCompactionThroughputMbPerSecParams) WithTimeout(timeout time.Duration) *FindConfigCompactionThroughputMbPerSecParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config compaction throughput mb per sec params +func (o *FindConfigCompactionThroughputMbPerSecParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config compaction throughput mb per sec params +func (o *FindConfigCompactionThroughputMbPerSecParams) WithContext(ctx context.Context) *FindConfigCompactionThroughputMbPerSecParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config compaction throughput mb per sec params +func (o *FindConfigCompactionThroughputMbPerSecParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config compaction throughput mb per sec params +func (o *FindConfigCompactionThroughputMbPerSecParams) WithHTTPClient(client *http.Client) *FindConfigCompactionThroughputMbPerSecParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config compaction throughput mb per sec params +func (o *FindConfigCompactionThroughputMbPerSecParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigCompactionThroughputMbPerSecParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_throughput_mb_per_sec_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_throughput_mb_per_sec_responses.go new file mode 100644 index 00000000000..530018abfb5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_compaction_throughput_mb_per_sec_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigCompactionThroughputMbPerSecReader is a Reader for the FindConfigCompactionThroughputMbPerSec structure. +type FindConfigCompactionThroughputMbPerSecReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigCompactionThroughputMbPerSecReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigCompactionThroughputMbPerSecOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigCompactionThroughputMbPerSecDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigCompactionThroughputMbPerSecOK creates a FindConfigCompactionThroughputMbPerSecOK with default headers values +func NewFindConfigCompactionThroughputMbPerSecOK() *FindConfigCompactionThroughputMbPerSecOK { + return &FindConfigCompactionThroughputMbPerSecOK{} +} + +/* +FindConfigCompactionThroughputMbPerSecOK handles this case with default header values. + +Config value +*/ +type FindConfigCompactionThroughputMbPerSecOK struct { + Payload int64 +} + +func (o *FindConfigCompactionThroughputMbPerSecOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigCompactionThroughputMbPerSecOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigCompactionThroughputMbPerSecDefault creates a FindConfigCompactionThroughputMbPerSecDefault with default headers values +func NewFindConfigCompactionThroughputMbPerSecDefault(code int) *FindConfigCompactionThroughputMbPerSecDefault { + return &FindConfigCompactionThroughputMbPerSecDefault{ + _statusCode: code, + } +} + +/* +FindConfigCompactionThroughputMbPerSecDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigCompactionThroughputMbPerSecDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config compaction throughput mb per sec default response +func (o *FindConfigCompactionThroughputMbPerSecDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigCompactionThroughputMbPerSecDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigCompactionThroughputMbPerSecDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigCompactionThroughputMbPerSecDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_compactors_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_compactors_parameters.go new file mode 100644 index 00000000000..69eca25deea --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_compactors_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigConcurrentCompactorsParams creates a new FindConfigConcurrentCompactorsParams object +// with the default values initialized. 
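// Illustrative sketch (hypothetical caller code): the *Default type declared in the
// matching _responses.go file implements error via its Error() method, so a caller can
// recover the HTTP status and server message from a failed call. stderrors below is
// assumed to be the standard library "errors" package (the go-openapi/errors import
// in this file has no As function).
func exampleClassifyCompactorsError(err error) (status int, message string, matched bool) {
	var d *FindConfigConcurrentCompactorsDefault
	if stderrors.As(err, &d) {
		if p := d.GetPayload(); p != nil {
			message = p.Message
		}
		return d.Code(), message, true
	}
	return 0, "", false
}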
+func NewFindConfigConcurrentCompactorsParams() *FindConfigConcurrentCompactorsParams { + + return &FindConfigConcurrentCompactorsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigConcurrentCompactorsParamsWithTimeout creates a new FindConfigConcurrentCompactorsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigConcurrentCompactorsParamsWithTimeout(timeout time.Duration) *FindConfigConcurrentCompactorsParams { + + return &FindConfigConcurrentCompactorsParams{ + + timeout: timeout, + } +} + +// NewFindConfigConcurrentCompactorsParamsWithContext creates a new FindConfigConcurrentCompactorsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigConcurrentCompactorsParamsWithContext(ctx context.Context) *FindConfigConcurrentCompactorsParams { + + return &FindConfigConcurrentCompactorsParams{ + + Context: ctx, + } +} + +// NewFindConfigConcurrentCompactorsParamsWithHTTPClient creates a new FindConfigConcurrentCompactorsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigConcurrentCompactorsParamsWithHTTPClient(client *http.Client) *FindConfigConcurrentCompactorsParams { + + return &FindConfigConcurrentCompactorsParams{ + HTTPClient: client, + } +} + +/* +FindConfigConcurrentCompactorsParams contains all the parameters to send to the API endpoint +for the find config concurrent compactors operation typically these are written to a http.Request +*/ +type FindConfigConcurrentCompactorsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config concurrent compactors params +func (o *FindConfigConcurrentCompactorsParams) WithTimeout(timeout time.Duration) *FindConfigConcurrentCompactorsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config concurrent compactors params +func (o *FindConfigConcurrentCompactorsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config concurrent compactors params +func (o *FindConfigConcurrentCompactorsParams) WithContext(ctx context.Context) *FindConfigConcurrentCompactorsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config concurrent compactors params +func (o *FindConfigConcurrentCompactorsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config concurrent compactors params +func (o *FindConfigConcurrentCompactorsParams) WithHTTPClient(client *http.Client) *FindConfigConcurrentCompactorsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config concurrent compactors params +func (o *FindConfigConcurrentCompactorsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigConcurrentCompactorsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_compactors_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_compactors_responses.go new file mode 100644 index 00000000000..5d2bd20e8d8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_compactors_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigConcurrentCompactorsReader is a Reader for the FindConfigConcurrentCompactors structure. +type FindConfigConcurrentCompactorsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigConcurrentCompactorsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigConcurrentCompactorsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigConcurrentCompactorsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigConcurrentCompactorsOK creates a FindConfigConcurrentCompactorsOK with default headers values +func NewFindConfigConcurrentCompactorsOK() *FindConfigConcurrentCompactorsOK { + return &FindConfigConcurrentCompactorsOK{} +} + +/* +FindConfigConcurrentCompactorsOK handles this case with default header values. + +Config value +*/ +type FindConfigConcurrentCompactorsOK struct { + Payload int64 +} + +func (o *FindConfigConcurrentCompactorsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigConcurrentCompactorsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigConcurrentCompactorsDefault creates a FindConfigConcurrentCompactorsDefault with default headers values +func NewFindConfigConcurrentCompactorsDefault(code int) *FindConfigConcurrentCompactorsDefault { + return &FindConfigConcurrentCompactorsDefault{ + _statusCode: code, + } +} + +/* +FindConfigConcurrentCompactorsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigConcurrentCompactorsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config concurrent compactors default response +func (o *FindConfigConcurrentCompactorsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigConcurrentCompactorsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigConcurrentCompactorsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigConcurrentCompactorsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_counter_writes_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_counter_writes_parameters.go new file mode 100644 index 00000000000..e841322bc9c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_counter_writes_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigConcurrentCounterWritesParams creates a new FindConfigConcurrentCounterWritesParams object +// with the default values initialized. 
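// Illustrative sketch (hypothetical): ReadResponse in the matching _responses.go file
// returns the *Default value with a nil error for any 2xx status the spec did not
// model (the response.Code()/100 == 2 branch), so a defensive caller can check for it
// even on the success path.
func exampleIsUnmodeled2xx(res interface{}) bool {
	d, ok := res.(*FindConfigConcurrentCounterWritesDefault)
	return ok && d.Code()/100 == 2
}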
+func NewFindConfigConcurrentCounterWritesParams() *FindConfigConcurrentCounterWritesParams { + + return &FindConfigConcurrentCounterWritesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigConcurrentCounterWritesParamsWithTimeout creates a new FindConfigConcurrentCounterWritesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigConcurrentCounterWritesParamsWithTimeout(timeout time.Duration) *FindConfigConcurrentCounterWritesParams { + + return &FindConfigConcurrentCounterWritesParams{ + + timeout: timeout, + } +} + +// NewFindConfigConcurrentCounterWritesParamsWithContext creates a new FindConfigConcurrentCounterWritesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigConcurrentCounterWritesParamsWithContext(ctx context.Context) *FindConfigConcurrentCounterWritesParams { + + return &FindConfigConcurrentCounterWritesParams{ + + Context: ctx, + } +} + +// NewFindConfigConcurrentCounterWritesParamsWithHTTPClient creates a new FindConfigConcurrentCounterWritesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigConcurrentCounterWritesParamsWithHTTPClient(client *http.Client) *FindConfigConcurrentCounterWritesParams { + + return &FindConfigConcurrentCounterWritesParams{ + HTTPClient: client, + } +} + +/* +FindConfigConcurrentCounterWritesParams contains all the parameters to send to the API endpoint +for the find config concurrent counter writes operation typically these are written to a http.Request +*/ +type FindConfigConcurrentCounterWritesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config concurrent counter writes params +func (o *FindConfigConcurrentCounterWritesParams) WithTimeout(timeout time.Duration) *FindConfigConcurrentCounterWritesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config concurrent counter writes params +func (o *FindConfigConcurrentCounterWritesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config concurrent counter writes params +func (o *FindConfigConcurrentCounterWritesParams) WithContext(ctx context.Context) *FindConfigConcurrentCounterWritesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config concurrent counter writes params +func (o *FindConfigConcurrentCounterWritesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config concurrent counter writes params +func (o *FindConfigConcurrentCounterWritesParams) WithHTTPClient(client *http.Client) *FindConfigConcurrentCounterWritesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config concurrent counter writes params +func (o *FindConfigConcurrentCounterWritesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigConcurrentCounterWritesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_counter_writes_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_counter_writes_responses.go new file mode 100644 index 00000000000..a48b78d9dd0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_counter_writes_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigConcurrentCounterWritesReader is a Reader for the FindConfigConcurrentCounterWrites structure. +type FindConfigConcurrentCounterWritesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigConcurrentCounterWritesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigConcurrentCounterWritesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigConcurrentCounterWritesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigConcurrentCounterWritesOK creates a FindConfigConcurrentCounterWritesOK with default headers values +func NewFindConfigConcurrentCounterWritesOK() *FindConfigConcurrentCounterWritesOK { + return &FindConfigConcurrentCounterWritesOK{} +} + +/* +FindConfigConcurrentCounterWritesOK handles this case with default header values. + +Config value +*/ +type FindConfigConcurrentCounterWritesOK struct { + Payload int64 +} + +func (o *FindConfigConcurrentCounterWritesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigConcurrentCounterWritesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigConcurrentCounterWritesDefault creates a FindConfigConcurrentCounterWritesDefault with default headers values +func NewFindConfigConcurrentCounterWritesDefault(code int) *FindConfigConcurrentCounterWritesDefault { + return &FindConfigConcurrentCounterWritesDefault{ + _statusCode: code, + } +} + +/* +FindConfigConcurrentCounterWritesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigConcurrentCounterWritesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config concurrent counter writes default response +func (o *FindConfigConcurrentCounterWritesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigConcurrentCounterWritesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigConcurrentCounterWritesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigConcurrentCounterWritesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_reads_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_reads_parameters.go new file mode 100644 index 00000000000..e7506404b64 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_reads_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigConcurrentReadsParams creates a new FindConfigConcurrentReadsParams object +// with the default values initialized. 
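// Illustrative sketch (hypothetical): a caller that drives the Reader directly, rather
// than through a generated client, type-switches on the interface{} result. res and
// err are assumed to come from (&FindConfigConcurrentReadsReader{}).ReadResponse, and
// fmt is assumed to be imported.
func exampleConcurrentReadsValue(res interface{}, err error) (int64, error) {
	if err != nil {
		return 0, err // non-2xx responses arrive here as *FindConfigConcurrentReadsDefault
	}
	switch v := res.(type) {
	case *FindConfigConcurrentReadsOK:
		return v.GetPayload(), nil
	default:
		return 0, fmt.Errorf("unexpected response type %T", v)
	}
}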
+func NewFindConfigConcurrentReadsParams() *FindConfigConcurrentReadsParams { + + return &FindConfigConcurrentReadsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigConcurrentReadsParamsWithTimeout creates a new FindConfigConcurrentReadsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigConcurrentReadsParamsWithTimeout(timeout time.Duration) *FindConfigConcurrentReadsParams { + + return &FindConfigConcurrentReadsParams{ + + timeout: timeout, + } +} + +// NewFindConfigConcurrentReadsParamsWithContext creates a new FindConfigConcurrentReadsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigConcurrentReadsParamsWithContext(ctx context.Context) *FindConfigConcurrentReadsParams { + + return &FindConfigConcurrentReadsParams{ + + Context: ctx, + } +} + +// NewFindConfigConcurrentReadsParamsWithHTTPClient creates a new FindConfigConcurrentReadsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigConcurrentReadsParamsWithHTTPClient(client *http.Client) *FindConfigConcurrentReadsParams { + + return &FindConfigConcurrentReadsParams{ + HTTPClient: client, + } +} + +/* +FindConfigConcurrentReadsParams contains all the parameters to send to the API endpoint +for the find config concurrent reads operation typically these are written to a http.Request +*/ +type FindConfigConcurrentReadsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config concurrent reads params +func (o *FindConfigConcurrentReadsParams) WithTimeout(timeout time.Duration) *FindConfigConcurrentReadsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config concurrent reads params +func (o *FindConfigConcurrentReadsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config concurrent reads params +func (o *FindConfigConcurrentReadsParams) WithContext(ctx context.Context) *FindConfigConcurrentReadsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config concurrent reads params +func (o *FindConfigConcurrentReadsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config concurrent reads params +func (o *FindConfigConcurrentReadsParams) WithHTTPClient(client *http.Client) *FindConfigConcurrentReadsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config concurrent reads params +func (o *FindConfigConcurrentReadsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigConcurrentReadsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_reads_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_reads_responses.go new file mode 100644 index 00000000000..f8dae54cbbc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_reads_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigConcurrentReadsReader is a Reader for the FindConfigConcurrentReads structure. +type FindConfigConcurrentReadsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigConcurrentReadsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigConcurrentReadsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigConcurrentReadsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigConcurrentReadsOK creates a FindConfigConcurrentReadsOK with default headers values +func NewFindConfigConcurrentReadsOK() *FindConfigConcurrentReadsOK { + return &FindConfigConcurrentReadsOK{} +} + +/* +FindConfigConcurrentReadsOK handles this case with default header values. + +Config value +*/ +type FindConfigConcurrentReadsOK struct { + Payload int64 +} + +func (o *FindConfigConcurrentReadsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigConcurrentReadsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigConcurrentReadsDefault creates a FindConfigConcurrentReadsDefault with default headers values +func NewFindConfigConcurrentReadsDefault(code int) *FindConfigConcurrentReadsDefault { + return &FindConfigConcurrentReadsDefault{ + _statusCode: code, + } +} + +/* +FindConfigConcurrentReadsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigConcurrentReadsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config concurrent reads default response +func (o *FindConfigConcurrentReadsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigConcurrentReadsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigConcurrentReadsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigConcurrentReadsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_writes_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_writes_parameters.go new file mode 100644 index 00000000000..2e2208b4677 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_writes_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigConcurrentWritesParams creates a new FindConfigConcurrentWritesParams object +// with the default values initialized. 
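// Illustrative sketch (hypothetical): what the generated error text looks like. The
// Error() method defined later in this diff reads o.Payload.Message directly, so
// Message is a plain string field on models.ErrorModel and can be populated literally.
func exampleWritesDefaultErrorText() string {
	d := NewFindConfigConcurrentWritesDefault(500)
	d.Payload = &models.ErrorModel{Message: "too many writes."}
	return d.Error() // "agent [HTTP 500] too many writes" -- the trailing period is trimmed
}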
+func NewFindConfigConcurrentWritesParams() *FindConfigConcurrentWritesParams { + + return &FindConfigConcurrentWritesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigConcurrentWritesParamsWithTimeout creates a new FindConfigConcurrentWritesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigConcurrentWritesParamsWithTimeout(timeout time.Duration) *FindConfigConcurrentWritesParams { + + return &FindConfigConcurrentWritesParams{ + + timeout: timeout, + } +} + +// NewFindConfigConcurrentWritesParamsWithContext creates a new FindConfigConcurrentWritesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigConcurrentWritesParamsWithContext(ctx context.Context) *FindConfigConcurrentWritesParams { + + return &FindConfigConcurrentWritesParams{ + + Context: ctx, + } +} + +// NewFindConfigConcurrentWritesParamsWithHTTPClient creates a new FindConfigConcurrentWritesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigConcurrentWritesParamsWithHTTPClient(client *http.Client) *FindConfigConcurrentWritesParams { + + return &FindConfigConcurrentWritesParams{ + HTTPClient: client, + } +} + +/* +FindConfigConcurrentWritesParams contains all the parameters to send to the API endpoint +for the find config concurrent writes operation typically these are written to a http.Request +*/ +type FindConfigConcurrentWritesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config concurrent writes params +func (o *FindConfigConcurrentWritesParams) WithTimeout(timeout time.Duration) *FindConfigConcurrentWritesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config concurrent writes params +func (o *FindConfigConcurrentWritesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config concurrent writes params +func (o *FindConfigConcurrentWritesParams) WithContext(ctx context.Context) *FindConfigConcurrentWritesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config concurrent writes params +func (o *FindConfigConcurrentWritesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config concurrent writes params +func (o *FindConfigConcurrentWritesParams) WithHTTPClient(client *http.Client) *FindConfigConcurrentWritesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config concurrent writes params +func (o *FindConfigConcurrentWritesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigConcurrentWritesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_writes_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_writes_responses.go new file mode 100644 index 00000000000..071fefb5ad5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_concurrent_writes_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigConcurrentWritesReader is a Reader for the FindConfigConcurrentWrites structure. +type FindConfigConcurrentWritesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigConcurrentWritesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigConcurrentWritesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigConcurrentWritesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigConcurrentWritesOK creates a FindConfigConcurrentWritesOK with default headers values +func NewFindConfigConcurrentWritesOK() *FindConfigConcurrentWritesOK { + return &FindConfigConcurrentWritesOK{} +} + +/* +FindConfigConcurrentWritesOK handles this case with default header values. + +Config value +*/ +type FindConfigConcurrentWritesOK struct { + Payload int64 +} + +func (o *FindConfigConcurrentWritesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigConcurrentWritesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigConcurrentWritesDefault creates a FindConfigConcurrentWritesDefault with default headers values +func NewFindConfigConcurrentWritesDefault(code int) *FindConfigConcurrentWritesDefault { + return &FindConfigConcurrentWritesDefault{ + _statusCode: code, + } +} + +/* +FindConfigConcurrentWritesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigConcurrentWritesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config concurrent writes default response +func (o *FindConfigConcurrentWritesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigConcurrentWritesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigConcurrentWritesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigConcurrentWritesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_cluster_management_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_cluster_management_parameters.go new file mode 100644 index 00000000000..692b79ba927 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_cluster_management_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigConsistentClusterManagementParams creates a new FindConfigConsistentClusterManagementParams object +// with the default values initialized. 
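// Illustrative sketch (hypothetical): unlike the int64 endpoints around it, this
// endpoint's OK payload is a bool (see the _responses.go file further down in this
// diff), so callers read it as a flag.
func exampleConsistentClusterManagementEnabled(ok *FindConfigConsistentClusterManagementOK) bool {
	return ok != nil && ok.GetPayload()
}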
+func NewFindConfigConsistentClusterManagementParams() *FindConfigConsistentClusterManagementParams {
+
+	return &FindConfigConsistentClusterManagementParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigConsistentClusterManagementParamsWithTimeout creates a new FindConfigConsistentClusterManagementParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigConsistentClusterManagementParamsWithTimeout(timeout time.Duration) *FindConfigConsistentClusterManagementParams {
+
+	return &FindConfigConsistentClusterManagementParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigConsistentClusterManagementParamsWithContext creates a new FindConfigConsistentClusterManagementParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigConsistentClusterManagementParamsWithContext(ctx context.Context) *FindConfigConsistentClusterManagementParams {
+
+	return &FindConfigConsistentClusterManagementParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigConsistentClusterManagementParamsWithHTTPClient creates a new FindConfigConsistentClusterManagementParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigConsistentClusterManagementParamsWithHTTPClient(client *http.Client) *FindConfigConsistentClusterManagementParams {
+
+	return &FindConfigConsistentClusterManagementParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigConsistentClusterManagementParams contains all the parameters to send to the API endpoint
+for the find config consistent cluster management operation typically these are written to a http.Request
+*/
+type FindConfigConsistentClusterManagementParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config consistent cluster management params
+func (o *FindConfigConsistentClusterManagementParams) WithTimeout(timeout time.Duration) *FindConfigConsistentClusterManagementParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config consistent cluster management params
+func (o *FindConfigConsistentClusterManagementParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config consistent cluster management params
+func (o *FindConfigConsistentClusterManagementParams) WithContext(ctx context.Context) *FindConfigConsistentClusterManagementParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config consistent cluster management params
+func (o *FindConfigConsistentClusterManagementParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config consistent cluster management params
+func (o *FindConfigConsistentClusterManagementParams) WithHTTPClient(client *http.Client) *FindConfigConsistentClusterManagementParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config consistent cluster management params
+func (o *FindConfigConsistentClusterManagementParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigConsistentClusterManagementParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_cluster_management_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_cluster_management_responses.go
new file mode 100644
index 00000000000..7cef53f909b
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_cluster_management_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigConsistentClusterManagementReader is a Reader for the FindConfigConsistentClusterManagement structure.
+type FindConfigConsistentClusterManagementReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigConsistentClusterManagementReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigConsistentClusterManagementOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigConsistentClusterManagementDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigConsistentClusterManagementOK creates a FindConfigConsistentClusterManagementOK with default headers values
+func NewFindConfigConsistentClusterManagementOK() *FindConfigConsistentClusterManagementOK {
+	return &FindConfigConsistentClusterManagementOK{}
+}
+
+/*
+FindConfigConsistentClusterManagementOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigConsistentClusterManagementOK struct {
+	Payload bool
+}
+
+func (o *FindConfigConsistentClusterManagementOK) GetPayload() bool {
+	return o.Payload
+}
+
+func (o *FindConfigConsistentClusterManagementOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigConsistentClusterManagementDefault creates a FindConfigConsistentClusterManagementDefault with default headers values
+func NewFindConfigConsistentClusterManagementDefault(code int) *FindConfigConsistentClusterManagementDefault {
+	return &FindConfigConsistentClusterManagementDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigConsistentClusterManagementDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigConsistentClusterManagementDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config consistent cluster management default response
+func (o *FindConfigConsistentClusterManagementDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigConsistentClusterManagementDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigConsistentClusterManagementDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigConsistentClusterManagementDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_rangemovement_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_rangemovement_parameters.go
new file mode 100644
index 00000000000..f8176d7dfb1
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_rangemovement_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigConsistentRangemovementParams creates a new FindConfigConsistentRangemovementParams object
+// with the default values initialized.
+func NewFindConfigConsistentRangemovementParams() *FindConfigConsistentRangemovementParams {
+
+	return &FindConfigConsistentRangemovementParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigConsistentRangemovementParamsWithTimeout creates a new FindConfigConsistentRangemovementParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigConsistentRangemovementParamsWithTimeout(timeout time.Duration) *FindConfigConsistentRangemovementParams {
+
+	return &FindConfigConsistentRangemovementParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigConsistentRangemovementParamsWithContext creates a new FindConfigConsistentRangemovementParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigConsistentRangemovementParamsWithContext(ctx context.Context) *FindConfigConsistentRangemovementParams {
+
+	return &FindConfigConsistentRangemovementParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigConsistentRangemovementParamsWithHTTPClient creates a new FindConfigConsistentRangemovementParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigConsistentRangemovementParamsWithHTTPClient(client *http.Client) *FindConfigConsistentRangemovementParams {
+
+	return &FindConfigConsistentRangemovementParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigConsistentRangemovementParams contains all the parameters to send to the API endpoint
+for the find config consistent rangemovement operation typically these are written to a http.Request
+*/
+type FindConfigConsistentRangemovementParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config consistent rangemovement params
+func (o *FindConfigConsistentRangemovementParams) WithTimeout(timeout time.Duration) *FindConfigConsistentRangemovementParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config consistent rangemovement params
+func (o *FindConfigConsistentRangemovementParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config consistent rangemovement params
+func (o *FindConfigConsistentRangemovementParams) WithContext(ctx context.Context) *FindConfigConsistentRangemovementParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config consistent rangemovement params
+func (o *FindConfigConsistentRangemovementParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config consistent rangemovement params
+func (o *FindConfigConsistentRangemovementParams) WithHTTPClient(client *http.Client) *FindConfigConsistentRangemovementParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config consistent rangemovement params
+func (o *FindConfigConsistentRangemovementParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigConsistentRangemovementParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_rangemovement_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_rangemovement_responses.go
new file mode 100644
index 00000000000..ed97cd9569a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_consistent_rangemovement_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigConsistentRangemovementReader is a Reader for the FindConfigConsistentRangemovement structure.
+type FindConfigConsistentRangemovementReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigConsistentRangemovementReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigConsistentRangemovementOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigConsistentRangemovementDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigConsistentRangemovementOK creates a FindConfigConsistentRangemovementOK with default headers values
+func NewFindConfigConsistentRangemovementOK() *FindConfigConsistentRangemovementOK {
+	return &FindConfigConsistentRangemovementOK{}
+}
+
+/*
+FindConfigConsistentRangemovementOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigConsistentRangemovementOK struct {
+	Payload bool
+}
+
+func (o *FindConfigConsistentRangemovementOK) GetPayload() bool {
+	return o.Payload
+}
+
+func (o *FindConfigConsistentRangemovementOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigConsistentRangemovementDefault creates a FindConfigConsistentRangemovementDefault with default headers values
+func NewFindConfigConsistentRangemovementDefault(code int) *FindConfigConsistentRangemovementDefault {
+	return &FindConfigConsistentRangemovementDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigConsistentRangemovementDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigConsistentRangemovementDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config consistent rangemovement default response
+func (o *FindConfigConsistentRangemovementDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigConsistentRangemovementDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigConsistentRangemovementDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigConsistentRangemovementDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_keys_to_save_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_keys_to_save_parameters.go
new file mode 100644
index 00000000000..3c76ed753cb
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_keys_to_save_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCounterCacheKeysToSaveParams creates a new FindConfigCounterCacheKeysToSaveParams object
+// with the default values initialized.
+func NewFindConfigCounterCacheKeysToSaveParams() *FindConfigCounterCacheKeysToSaveParams {
+
+	return &FindConfigCounterCacheKeysToSaveParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCounterCacheKeysToSaveParamsWithTimeout creates a new FindConfigCounterCacheKeysToSaveParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCounterCacheKeysToSaveParamsWithTimeout(timeout time.Duration) *FindConfigCounterCacheKeysToSaveParams {
+
+	return &FindConfigCounterCacheKeysToSaveParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCounterCacheKeysToSaveParamsWithContext creates a new FindConfigCounterCacheKeysToSaveParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCounterCacheKeysToSaveParamsWithContext(ctx context.Context) *FindConfigCounterCacheKeysToSaveParams {
+
+	return &FindConfigCounterCacheKeysToSaveParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCounterCacheKeysToSaveParamsWithHTTPClient creates a new FindConfigCounterCacheKeysToSaveParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCounterCacheKeysToSaveParamsWithHTTPClient(client *http.Client) *FindConfigCounterCacheKeysToSaveParams {
+
+	return &FindConfigCounterCacheKeysToSaveParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCounterCacheKeysToSaveParams contains all the parameters to send to the API endpoint
+for the find config counter cache keys to save operation typically these are written to a http.Request
+*/
+type FindConfigCounterCacheKeysToSaveParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config counter cache keys to save params
+func (o *FindConfigCounterCacheKeysToSaveParams) WithTimeout(timeout time.Duration) *FindConfigCounterCacheKeysToSaveParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config counter cache keys to save params
+func (o *FindConfigCounterCacheKeysToSaveParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config counter cache keys to save params
+func (o *FindConfigCounterCacheKeysToSaveParams) WithContext(ctx context.Context) *FindConfigCounterCacheKeysToSaveParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config counter cache keys to save params
+func (o *FindConfigCounterCacheKeysToSaveParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config counter cache keys to save params
+func (o *FindConfigCounterCacheKeysToSaveParams) WithHTTPClient(client *http.Client) *FindConfigCounterCacheKeysToSaveParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config counter cache keys to save params
+func (o *FindConfigCounterCacheKeysToSaveParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCounterCacheKeysToSaveParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_keys_to_save_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_keys_to_save_responses.go
new file mode 100644
index 00000000000..a1d1216679b
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_keys_to_save_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCounterCacheKeysToSaveReader is a Reader for the FindConfigCounterCacheKeysToSave structure.
+type FindConfigCounterCacheKeysToSaveReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCounterCacheKeysToSaveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCounterCacheKeysToSaveOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCounterCacheKeysToSaveDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCounterCacheKeysToSaveOK creates a FindConfigCounterCacheKeysToSaveOK with default headers values
+func NewFindConfigCounterCacheKeysToSaveOK() *FindConfigCounterCacheKeysToSaveOK {
+	return &FindConfigCounterCacheKeysToSaveOK{}
+}
+
+/*
+FindConfigCounterCacheKeysToSaveOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCounterCacheKeysToSaveOK struct {
+	Payload int64
+}
+
+func (o *FindConfigCounterCacheKeysToSaveOK) GetPayload() int64 {
+	return o.Payload
+}
+
+func (o *FindConfigCounterCacheKeysToSaveOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCounterCacheKeysToSaveDefault creates a FindConfigCounterCacheKeysToSaveDefault with default headers values
+func NewFindConfigCounterCacheKeysToSaveDefault(code int) *FindConfigCounterCacheKeysToSaveDefault {
+	return &FindConfigCounterCacheKeysToSaveDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCounterCacheKeysToSaveDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCounterCacheKeysToSaveDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config counter cache keys to save default response
+func (o *FindConfigCounterCacheKeysToSaveDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCounterCacheKeysToSaveDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCounterCacheKeysToSaveDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCounterCacheKeysToSaveDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_save_period_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_save_period_parameters.go
new file mode 100644
index 00000000000..2b449497600
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_save_period_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCounterCacheSavePeriodParams creates a new FindConfigCounterCacheSavePeriodParams object
+// with the default values initialized.
+func NewFindConfigCounterCacheSavePeriodParams() *FindConfigCounterCacheSavePeriodParams {
+
+	return &FindConfigCounterCacheSavePeriodParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCounterCacheSavePeriodParamsWithTimeout creates a new FindConfigCounterCacheSavePeriodParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCounterCacheSavePeriodParamsWithTimeout(timeout time.Duration) *FindConfigCounterCacheSavePeriodParams {
+
+	return &FindConfigCounterCacheSavePeriodParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCounterCacheSavePeriodParamsWithContext creates a new FindConfigCounterCacheSavePeriodParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCounterCacheSavePeriodParamsWithContext(ctx context.Context) *FindConfigCounterCacheSavePeriodParams {
+
+	return &FindConfigCounterCacheSavePeriodParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCounterCacheSavePeriodParamsWithHTTPClient creates a new FindConfigCounterCacheSavePeriodParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCounterCacheSavePeriodParamsWithHTTPClient(client *http.Client) *FindConfigCounterCacheSavePeriodParams {
+
+	return &FindConfigCounterCacheSavePeriodParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCounterCacheSavePeriodParams contains all the parameters to send to the API endpoint
+for the find config counter cache save period operation typically these are written to a http.Request
+*/
+type FindConfigCounterCacheSavePeriodParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config counter cache save period params
+func (o *FindConfigCounterCacheSavePeriodParams) WithTimeout(timeout time.Duration) *FindConfigCounterCacheSavePeriodParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config counter cache save period params
+func (o *FindConfigCounterCacheSavePeriodParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config counter cache save period params
+func (o *FindConfigCounterCacheSavePeriodParams) WithContext(ctx context.Context) *FindConfigCounterCacheSavePeriodParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config counter cache save period params
+func (o *FindConfigCounterCacheSavePeriodParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config counter cache save period params
+func (o *FindConfigCounterCacheSavePeriodParams) WithHTTPClient(client *http.Client) *FindConfigCounterCacheSavePeriodParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config counter cache save period params
+func (o *FindConfigCounterCacheSavePeriodParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCounterCacheSavePeriodParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_save_period_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_save_period_responses.go
new file mode 100644
index 00000000000..966c7ed5e4b
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_save_period_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCounterCacheSavePeriodReader is a Reader for the FindConfigCounterCacheSavePeriod structure.
+type FindConfigCounterCacheSavePeriodReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCounterCacheSavePeriodReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCounterCacheSavePeriodOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCounterCacheSavePeriodDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCounterCacheSavePeriodOK creates a FindConfigCounterCacheSavePeriodOK with default headers values
+func NewFindConfigCounterCacheSavePeriodOK() *FindConfigCounterCacheSavePeriodOK {
+	return &FindConfigCounterCacheSavePeriodOK{}
+}
+
+/*
+FindConfigCounterCacheSavePeriodOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCounterCacheSavePeriodOK struct {
+	Payload int64
+}
+
+func (o *FindConfigCounterCacheSavePeriodOK) GetPayload() int64 {
+	return o.Payload
+}
+
+func (o *FindConfigCounterCacheSavePeriodOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCounterCacheSavePeriodDefault creates a FindConfigCounterCacheSavePeriodDefault with default headers values
+func NewFindConfigCounterCacheSavePeriodDefault(code int) *FindConfigCounterCacheSavePeriodDefault {
+	return &FindConfigCounterCacheSavePeriodDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCounterCacheSavePeriodDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCounterCacheSavePeriodDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config counter cache save period default response
+func (o *FindConfigCounterCacheSavePeriodDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCounterCacheSavePeriodDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCounterCacheSavePeriodDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCounterCacheSavePeriodDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_size_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_size_in_mb_parameters.go
new file mode 100644
index 00000000000..d517c2897a6
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_size_in_mb_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCounterCacheSizeInMbParams creates a new FindConfigCounterCacheSizeInMbParams object
+// with the default values initialized.
+func NewFindConfigCounterCacheSizeInMbParams() *FindConfigCounterCacheSizeInMbParams {
+
+	return &FindConfigCounterCacheSizeInMbParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCounterCacheSizeInMbParamsWithTimeout creates a new FindConfigCounterCacheSizeInMbParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCounterCacheSizeInMbParamsWithTimeout(timeout time.Duration) *FindConfigCounterCacheSizeInMbParams {
+
+	return &FindConfigCounterCacheSizeInMbParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCounterCacheSizeInMbParamsWithContext creates a new FindConfigCounterCacheSizeInMbParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCounterCacheSizeInMbParamsWithContext(ctx context.Context) *FindConfigCounterCacheSizeInMbParams {
+
+	return &FindConfigCounterCacheSizeInMbParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCounterCacheSizeInMbParamsWithHTTPClient creates a new FindConfigCounterCacheSizeInMbParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCounterCacheSizeInMbParamsWithHTTPClient(client *http.Client) *FindConfigCounterCacheSizeInMbParams {
+
+	return &FindConfigCounterCacheSizeInMbParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCounterCacheSizeInMbParams contains all the parameters to send to the API endpoint
+for the find config counter cache size in mb operation typically these are written to a http.Request
+*/
+type FindConfigCounterCacheSizeInMbParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config counter cache size in mb params
+func (o *FindConfigCounterCacheSizeInMbParams) WithTimeout(timeout time.Duration) *FindConfigCounterCacheSizeInMbParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config counter cache size in mb params
+func (o *FindConfigCounterCacheSizeInMbParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config counter cache size in mb params
+func (o *FindConfigCounterCacheSizeInMbParams) WithContext(ctx context.Context) *FindConfigCounterCacheSizeInMbParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config counter cache size in mb params
+func (o *FindConfigCounterCacheSizeInMbParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config counter cache size in mb params
+func (o *FindConfigCounterCacheSizeInMbParams) WithHTTPClient(client *http.Client) *FindConfigCounterCacheSizeInMbParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config counter cache size in mb params
+func (o *FindConfigCounterCacheSizeInMbParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCounterCacheSizeInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_size_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_size_in_mb_responses.go
new file mode 100644
index 00000000000..996a74c80c7
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_cache_size_in_mb_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCounterCacheSizeInMbReader is a Reader for the FindConfigCounterCacheSizeInMb structure.
+type FindConfigCounterCacheSizeInMbReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCounterCacheSizeInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCounterCacheSizeInMbOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCounterCacheSizeInMbDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCounterCacheSizeInMbOK creates a FindConfigCounterCacheSizeInMbOK with default headers values
+func NewFindConfigCounterCacheSizeInMbOK() *FindConfigCounterCacheSizeInMbOK {
+	return &FindConfigCounterCacheSizeInMbOK{}
+}
+
+/*
+FindConfigCounterCacheSizeInMbOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCounterCacheSizeInMbOK struct {
+	Payload int64
+}
+
+func (o *FindConfigCounterCacheSizeInMbOK) GetPayload() int64 {
+	return o.Payload
+}
+
+func (o *FindConfigCounterCacheSizeInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCounterCacheSizeInMbDefault creates a FindConfigCounterCacheSizeInMbDefault with default headers values
+func NewFindConfigCounterCacheSizeInMbDefault(code int) *FindConfigCounterCacheSizeInMbDefault {
+	return &FindConfigCounterCacheSizeInMbDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCounterCacheSizeInMbDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCounterCacheSizeInMbDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config counter cache size in mb default response
+func (o *FindConfigCounterCacheSizeInMbDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCounterCacheSizeInMbDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCounterCacheSizeInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCounterCacheSizeInMbDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_write_request_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_write_request_timeout_in_ms_parameters.go
new file mode 100644
index 00000000000..aadc6cbab67
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_write_request_timeout_in_ms_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCounterWriteRequestTimeoutInMsParams creates a new FindConfigCounterWriteRequestTimeoutInMsParams object
+// with the default values initialized.
+func NewFindConfigCounterWriteRequestTimeoutInMsParams() *FindConfigCounterWriteRequestTimeoutInMsParams {
+
+	return &FindConfigCounterWriteRequestTimeoutInMsParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCounterWriteRequestTimeoutInMsParamsWithTimeout creates a new FindConfigCounterWriteRequestTimeoutInMsParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCounterWriteRequestTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigCounterWriteRequestTimeoutInMsParams {
+
+	return &FindConfigCounterWriteRequestTimeoutInMsParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCounterWriteRequestTimeoutInMsParamsWithContext creates a new FindConfigCounterWriteRequestTimeoutInMsParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCounterWriteRequestTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigCounterWriteRequestTimeoutInMsParams {
+
+	return &FindConfigCounterWriteRequestTimeoutInMsParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCounterWriteRequestTimeoutInMsParamsWithHTTPClient creates a new FindConfigCounterWriteRequestTimeoutInMsParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCounterWriteRequestTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigCounterWriteRequestTimeoutInMsParams {
+
+	return &FindConfigCounterWriteRequestTimeoutInMsParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCounterWriteRequestTimeoutInMsParams contains all the parameters to send to the API endpoint
+for the find config counter write request timeout in ms operation typically these are written to a http.Request
+*/
+type FindConfigCounterWriteRequestTimeoutInMsParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config counter write request timeout in ms params
+func (o *FindConfigCounterWriteRequestTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigCounterWriteRequestTimeoutInMsParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config counter write request timeout in ms params
+func (o *FindConfigCounterWriteRequestTimeoutInMsParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config counter write request timeout in ms params
+func (o *FindConfigCounterWriteRequestTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigCounterWriteRequestTimeoutInMsParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config counter write request timeout in ms params
+func (o *FindConfigCounterWriteRequestTimeoutInMsParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config counter write request timeout in ms params
+func (o *FindConfigCounterWriteRequestTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigCounterWriteRequestTimeoutInMsParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config counter write request timeout in ms params
+func (o *FindConfigCounterWriteRequestTimeoutInMsParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCounterWriteRequestTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_write_request_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_write_request_timeout_in_ms_responses.go
new file mode 100644
index 00000000000..3d2a81eedd9
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_counter_write_request_timeout_in_ms_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCounterWriteRequestTimeoutInMsReader is a Reader for the FindConfigCounterWriteRequestTimeoutInMs structure.
+type FindConfigCounterWriteRequestTimeoutInMsReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCounterWriteRequestTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCounterWriteRequestTimeoutInMsOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCounterWriteRequestTimeoutInMsDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCounterWriteRequestTimeoutInMsOK creates a FindConfigCounterWriteRequestTimeoutInMsOK with default headers values
+func NewFindConfigCounterWriteRequestTimeoutInMsOK() *FindConfigCounterWriteRequestTimeoutInMsOK {
+	return &FindConfigCounterWriteRequestTimeoutInMsOK{}
+}
+
+/*
+FindConfigCounterWriteRequestTimeoutInMsOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCounterWriteRequestTimeoutInMsOK struct {
+	Payload int64
+}
+
+func (o *FindConfigCounterWriteRequestTimeoutInMsOK) GetPayload() int64 {
+	return o.Payload
+}
+
+func (o *FindConfigCounterWriteRequestTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCounterWriteRequestTimeoutInMsDefault creates a FindConfigCounterWriteRequestTimeoutInMsDefault with default headers values
+func NewFindConfigCounterWriteRequestTimeoutInMsDefault(code int) *FindConfigCounterWriteRequestTimeoutInMsDefault {
+	return &FindConfigCounterWriteRequestTimeoutInMsDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCounterWriteRequestTimeoutInMsDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCounterWriteRequestTimeoutInMsDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config counter write request timeout in ms default response
+func (o *FindConfigCounterWriteRequestTimeoutInMsDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCounterWriteRequestTimeoutInMsDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCounterWriteRequestTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCounterWriteRequestTimeoutInMsDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cpu_scheduler_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cpu_scheduler_parameters.go
new file mode 100644
index 00000000000..4f648d94656
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cpu_scheduler_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCPUSchedulerParams creates a new FindConfigCPUSchedulerParams object
+// with the default values initialized.
+func NewFindConfigCPUSchedulerParams() *FindConfigCPUSchedulerParams {
+
+	return &FindConfigCPUSchedulerParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCPUSchedulerParamsWithTimeout creates a new FindConfigCPUSchedulerParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCPUSchedulerParamsWithTimeout(timeout time.Duration) *FindConfigCPUSchedulerParams {
+
+	return &FindConfigCPUSchedulerParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCPUSchedulerParamsWithContext creates a new FindConfigCPUSchedulerParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCPUSchedulerParamsWithContext(ctx context.Context) *FindConfigCPUSchedulerParams {
+
+	return &FindConfigCPUSchedulerParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCPUSchedulerParamsWithHTTPClient creates a new FindConfigCPUSchedulerParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCPUSchedulerParamsWithHTTPClient(client *http.Client) *FindConfigCPUSchedulerParams {
+
+	return &FindConfigCPUSchedulerParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCPUSchedulerParams contains all the parameters to send to the API endpoint
+for the find config cpu scheduler operation typically these are written to a http.Request
+*/
+type FindConfigCPUSchedulerParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config cpu scheduler params
+func (o *FindConfigCPUSchedulerParams) WithTimeout(timeout time.Duration) *FindConfigCPUSchedulerParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config cpu scheduler params
+func (o *FindConfigCPUSchedulerParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config cpu scheduler params
+func (o *FindConfigCPUSchedulerParams) WithContext(ctx context.Context) *FindConfigCPUSchedulerParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config cpu scheduler params
+func (o *FindConfigCPUSchedulerParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config cpu scheduler params
+func (o *FindConfigCPUSchedulerParams) WithHTTPClient(client *http.Client) *FindConfigCPUSchedulerParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config cpu scheduler params
+func (o *FindConfigCPUSchedulerParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCPUSchedulerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cpu_scheduler_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cpu_scheduler_responses.go
new file mode 100644
index 00000000000..cf8771d6dd2
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cpu_scheduler_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCPUSchedulerReader is a Reader for the FindConfigCPUScheduler structure.
+type FindConfigCPUSchedulerReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCPUSchedulerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCPUSchedulerOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCPUSchedulerDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCPUSchedulerOK creates a FindConfigCPUSchedulerOK with default headers values
+func NewFindConfigCPUSchedulerOK() *FindConfigCPUSchedulerOK {
+	return &FindConfigCPUSchedulerOK{}
+}
+
+/*
+FindConfigCPUSchedulerOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCPUSchedulerOK struct {
+	Payload bool
+}
+
+func (o *FindConfigCPUSchedulerOK) GetPayload() bool {
+	return o.Payload
+}
+
+func (o *FindConfigCPUSchedulerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCPUSchedulerDefault creates a FindConfigCPUSchedulerDefault with default headers values
+func NewFindConfigCPUSchedulerDefault(code int) *FindConfigCPUSchedulerDefault {
+	return &FindConfigCPUSchedulerDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCPUSchedulerDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCPUSchedulerDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config cpu scheduler default response
+func (o *FindConfigCPUSchedulerDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCPUSchedulerDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCPUSchedulerDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCPUSchedulerDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cross_node_timeout_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cross_node_timeout_parameters.go
new file mode 100644
index 00000000000..dd3122b38eb
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cross_node_timeout_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigCrossNodeTimeoutParams creates a new FindConfigCrossNodeTimeoutParams object
+// with the default values initialized.
+func NewFindConfigCrossNodeTimeoutParams() *FindConfigCrossNodeTimeoutParams {
+
+	return &FindConfigCrossNodeTimeoutParams{
+
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewFindConfigCrossNodeTimeoutParamsWithTimeout creates a new FindConfigCrossNodeTimeoutParams object
+// with the default values initialized, and the ability to set a timeout on a request
+func NewFindConfigCrossNodeTimeoutParamsWithTimeout(timeout time.Duration) *FindConfigCrossNodeTimeoutParams {
+
+	return &FindConfigCrossNodeTimeoutParams{
+
+		timeout: timeout,
+	}
+}
+
+// NewFindConfigCrossNodeTimeoutParamsWithContext creates a new FindConfigCrossNodeTimeoutParams object
+// with the default values initialized, and the ability to set a context for a request
+func NewFindConfigCrossNodeTimeoutParamsWithContext(ctx context.Context) *FindConfigCrossNodeTimeoutParams {
+
+	return &FindConfigCrossNodeTimeoutParams{
+
+		Context: ctx,
+	}
+}
+
+// NewFindConfigCrossNodeTimeoutParamsWithHTTPClient creates a new FindConfigCrossNodeTimeoutParams object
+// with the default values initialized, and the ability to set a custom HTTPClient for a request
+func NewFindConfigCrossNodeTimeoutParamsWithHTTPClient(client *http.Client) *FindConfigCrossNodeTimeoutParams {
+
+	return &FindConfigCrossNodeTimeoutParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+FindConfigCrossNodeTimeoutParams contains all the parameters to send to the API endpoint
+for the find config cross node timeout operation typically these are written to a http.Request
+*/
+type FindConfigCrossNodeTimeoutParams struct {
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithTimeout adds the timeout to the find config cross node timeout params
+func (o *FindConfigCrossNodeTimeoutParams) WithTimeout(timeout time.Duration) *FindConfigCrossNodeTimeoutParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the find config cross node timeout params
+func (o *FindConfigCrossNodeTimeoutParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the find config cross node timeout params
+func (o *FindConfigCrossNodeTimeoutParams) WithContext(ctx context.Context) *FindConfigCrossNodeTimeoutParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the find config cross node timeout params
+func (o *FindConfigCrossNodeTimeoutParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the find config cross node timeout params
+func (o *FindConfigCrossNodeTimeoutParams) WithHTTPClient(client *http.Client) *FindConfigCrossNodeTimeoutParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the find config cross node timeout params
+func (o *FindConfigCrossNodeTimeoutParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *FindConfigCrossNodeTimeoutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cross_node_timeout_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cross_node_timeout_responses.go
new file mode 100644
index 00000000000..67a97bc92ce
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_cross_node_timeout_responses.go
@@ -0,0 +1,114 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+
+	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models"
+)
+
+// FindConfigCrossNodeTimeoutReader is a Reader for the FindConfigCrossNodeTimeout structure.
+type FindConfigCrossNodeTimeoutReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *FindConfigCrossNodeTimeoutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewFindConfigCrossNodeTimeoutOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewFindConfigCrossNodeTimeoutDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewFindConfigCrossNodeTimeoutOK creates a FindConfigCrossNodeTimeoutOK with default headers values
+func NewFindConfigCrossNodeTimeoutOK() *FindConfigCrossNodeTimeoutOK {
+	return &FindConfigCrossNodeTimeoutOK{}
+}
+
+/*
+FindConfigCrossNodeTimeoutOK handles this case with default header values.
+
+Config value
+*/
+type FindConfigCrossNodeTimeoutOK struct {
+	Payload bool
+}
+
+func (o *FindConfigCrossNodeTimeoutOK) GetPayload() bool {
+	return o.Payload
+}
+
+func (o *FindConfigCrossNodeTimeoutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	// response payload
+	if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewFindConfigCrossNodeTimeoutDefault creates a FindConfigCrossNodeTimeoutDefault with default headers values
+func NewFindConfigCrossNodeTimeoutDefault(code int) *FindConfigCrossNodeTimeoutDefault {
+	return &FindConfigCrossNodeTimeoutDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+FindConfigCrossNodeTimeoutDefault handles this case with default header values.
+
+unexpected error
+*/
+type FindConfigCrossNodeTimeoutDefault struct {
+	_statusCode int
+
+	Payload *models.ErrorModel
+}
+
+// Code gets the status code for the find config cross node timeout default response
+func (o *FindConfigCrossNodeTimeoutDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *FindConfigCrossNodeTimeoutDefault) GetPayload() *models.ErrorModel {
+	return o.Payload
+}
+
+func (o *FindConfigCrossNodeTimeoutDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+
+	o.Payload = new(models.ErrorModel)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+func (o *FindConfigCrossNodeTimeoutDefault) Error() string {
+	return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, "."))
+}
diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_data_file_directories_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_data_file_directories_parameters.go
new file mode 100644
index 00000000000..a5121c8cb0a
--- /dev/null
+++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_data_file_directories_parameters.go
@@ -0,0 +1,113 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package config
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewFindConfigDataFileDirectoriesParams creates a new FindConfigDataFileDirectoriesParams object
+// with the default values initialized.
+func NewFindConfigDataFileDirectoriesParams() *FindConfigDataFileDirectoriesParams { + + return &FindConfigDataFileDirectoriesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDataFileDirectoriesParamsWithTimeout creates a new FindConfigDataFileDirectoriesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDataFileDirectoriesParamsWithTimeout(timeout time.Duration) *FindConfigDataFileDirectoriesParams { + + return &FindConfigDataFileDirectoriesParams{ + + timeout: timeout, + } +} + +// NewFindConfigDataFileDirectoriesParamsWithContext creates a new FindConfigDataFileDirectoriesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDataFileDirectoriesParamsWithContext(ctx context.Context) *FindConfigDataFileDirectoriesParams { + + return &FindConfigDataFileDirectoriesParams{ + + Context: ctx, + } +} + +// NewFindConfigDataFileDirectoriesParamsWithHTTPClient creates a new FindConfigDataFileDirectoriesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDataFileDirectoriesParamsWithHTTPClient(client *http.Client) *FindConfigDataFileDirectoriesParams { + + return &FindConfigDataFileDirectoriesParams{ + HTTPClient: client, + } +} + +/* +FindConfigDataFileDirectoriesParams contains all the parameters to send to the API endpoint +for the find config data file directories operation typically these are written to a http.Request +*/ +type FindConfigDataFileDirectoriesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config data file directories params +func (o *FindConfigDataFileDirectoriesParams) WithTimeout(timeout time.Duration) *FindConfigDataFileDirectoriesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config data file directories params +func (o *FindConfigDataFileDirectoriesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config data file directories params +func (o *FindConfigDataFileDirectoriesParams) WithContext(ctx context.Context) *FindConfigDataFileDirectoriesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config data file directories params +func (o *FindConfigDataFileDirectoriesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config data file directories params +func (o *FindConfigDataFileDirectoriesParams) WithHTTPClient(client *http.Client) *FindConfigDataFileDirectoriesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config data file directories params +func (o *FindConfigDataFileDirectoriesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigDataFileDirectoriesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_data_file_directories_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_data_file_directories_responses.go new file mode 100644 index 00000000000..12797a1e408 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_data_file_directories_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDataFileDirectoriesReader is a Reader for the FindConfigDataFileDirectories structure. +type FindConfigDataFileDirectoriesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDataFileDirectoriesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDataFileDirectoriesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDataFileDirectoriesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDataFileDirectoriesOK creates a FindConfigDataFileDirectoriesOK with default headers values +func NewFindConfigDataFileDirectoriesOK() *FindConfigDataFileDirectoriesOK { + return &FindConfigDataFileDirectoriesOK{} +} + +/* +FindConfigDataFileDirectoriesOK handles this case with default header values. + +Config value +*/ +type FindConfigDataFileDirectoriesOK struct { + Payload []string +} + +func (o *FindConfigDataFileDirectoriesOK) GetPayload() []string { + return o.Payload +} + +func (o *FindConfigDataFileDirectoriesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDataFileDirectoriesDefault creates a FindConfigDataFileDirectoriesDefault with default headers values +func NewFindConfigDataFileDirectoriesDefault(code int) *FindConfigDataFileDirectoriesDefault { + return &FindConfigDataFileDirectoriesDefault{ + _statusCode: code, + } +} + +/* +FindConfigDataFileDirectoriesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDataFileDirectoriesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config data file directories default response +func (o *FindConfigDataFileDirectoriesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDataFileDirectoriesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDataFileDirectoriesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDataFileDirectoriesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_default_log_level_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_default_log_level_parameters.go new file mode 100644 index 00000000000..8fba9fbe24f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_default_log_level_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigDefaultLogLevelParams creates a new FindConfigDefaultLogLevelParams object +// with the default values initialized. 
+func NewFindConfigDefaultLogLevelParams() *FindConfigDefaultLogLevelParams { + + return &FindConfigDefaultLogLevelParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDefaultLogLevelParamsWithTimeout creates a new FindConfigDefaultLogLevelParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDefaultLogLevelParamsWithTimeout(timeout time.Duration) *FindConfigDefaultLogLevelParams { + + return &FindConfigDefaultLogLevelParams{ + + timeout: timeout, + } +} + +// NewFindConfigDefaultLogLevelParamsWithContext creates a new FindConfigDefaultLogLevelParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDefaultLogLevelParamsWithContext(ctx context.Context) *FindConfigDefaultLogLevelParams { + + return &FindConfigDefaultLogLevelParams{ + + Context: ctx, + } +} + +// NewFindConfigDefaultLogLevelParamsWithHTTPClient creates a new FindConfigDefaultLogLevelParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDefaultLogLevelParamsWithHTTPClient(client *http.Client) *FindConfigDefaultLogLevelParams { + + return &FindConfigDefaultLogLevelParams{ + HTTPClient: client, + } +} + +/* +FindConfigDefaultLogLevelParams contains all the parameters to send to the API endpoint +for the find config default log level operation typically these are written to a http.Request +*/ +type FindConfigDefaultLogLevelParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config default log level params +func (o *FindConfigDefaultLogLevelParams) WithTimeout(timeout time.Duration) *FindConfigDefaultLogLevelParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config default log level params +func (o *FindConfigDefaultLogLevelParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config default log level params +func (o *FindConfigDefaultLogLevelParams) WithContext(ctx context.Context) *FindConfigDefaultLogLevelParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config default log level params +func (o *FindConfigDefaultLogLevelParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config default log level params +func (o *FindConfigDefaultLogLevelParams) WithHTTPClient(client *http.Client) *FindConfigDefaultLogLevelParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config default log level params +func (o *FindConfigDefaultLogLevelParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigDefaultLogLevelParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_default_log_level_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_default_log_level_responses.go new file mode 100644 index 00000000000..3ab76b6e3eb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_default_log_level_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDefaultLogLevelReader is a Reader for the FindConfigDefaultLogLevel structure. +type FindConfigDefaultLogLevelReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDefaultLogLevelReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDefaultLogLevelOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDefaultLogLevelDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDefaultLogLevelOK creates a FindConfigDefaultLogLevelOK with default headers values +func NewFindConfigDefaultLogLevelOK() *FindConfigDefaultLogLevelOK { + return &FindConfigDefaultLogLevelOK{} +} + +/* +FindConfigDefaultLogLevelOK handles this case with default header values. + +Config value +*/ +type FindConfigDefaultLogLevelOK struct { + Payload string +} + +func (o *FindConfigDefaultLogLevelOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigDefaultLogLevelOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDefaultLogLevelDefault creates a FindConfigDefaultLogLevelDefault with default headers values +func NewFindConfigDefaultLogLevelDefault(code int) *FindConfigDefaultLogLevelDefault { + return &FindConfigDefaultLogLevelDefault{ + _statusCode: code, + } +} + +/* +FindConfigDefaultLogLevelDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDefaultLogLevelDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config default log level default response +func (o *FindConfigDefaultLogLevelDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDefaultLogLevelDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDefaultLogLevelDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDefaultLogLevelDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_defragment_memory_on_idle_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_defragment_memory_on_idle_parameters.go new file mode 100644 index 00000000000..31f16998056 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_defragment_memory_on_idle_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigDefragmentMemoryOnIdleParams creates a new FindConfigDefragmentMemoryOnIdleParams object +// with the default values initialized. 
+func NewFindConfigDefragmentMemoryOnIdleParams() *FindConfigDefragmentMemoryOnIdleParams { + + return &FindConfigDefragmentMemoryOnIdleParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDefragmentMemoryOnIdleParamsWithTimeout creates a new FindConfigDefragmentMemoryOnIdleParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDefragmentMemoryOnIdleParamsWithTimeout(timeout time.Duration) *FindConfigDefragmentMemoryOnIdleParams { + + return &FindConfigDefragmentMemoryOnIdleParams{ + + timeout: timeout, + } +} + +// NewFindConfigDefragmentMemoryOnIdleParamsWithContext creates a new FindConfigDefragmentMemoryOnIdleParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDefragmentMemoryOnIdleParamsWithContext(ctx context.Context) *FindConfigDefragmentMemoryOnIdleParams { + + return &FindConfigDefragmentMemoryOnIdleParams{ + + Context: ctx, + } +} + +// NewFindConfigDefragmentMemoryOnIdleParamsWithHTTPClient creates a new FindConfigDefragmentMemoryOnIdleParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDefragmentMemoryOnIdleParamsWithHTTPClient(client *http.Client) *FindConfigDefragmentMemoryOnIdleParams { + + return &FindConfigDefragmentMemoryOnIdleParams{ + HTTPClient: client, + } +} + +/* +FindConfigDefragmentMemoryOnIdleParams contains all the parameters to send to the API endpoint +for the find config defragment memory on idle operation typically these are written to a http.Request +*/ +type FindConfigDefragmentMemoryOnIdleParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config defragment memory on idle params +func (o *FindConfigDefragmentMemoryOnIdleParams) WithTimeout(timeout time.Duration) *FindConfigDefragmentMemoryOnIdleParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config defragment memory on idle params +func (o *FindConfigDefragmentMemoryOnIdleParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config defragment memory on idle params +func (o *FindConfigDefragmentMemoryOnIdleParams) WithContext(ctx context.Context) *FindConfigDefragmentMemoryOnIdleParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config defragment memory on idle params +func (o *FindConfigDefragmentMemoryOnIdleParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config defragment memory on idle params +func (o *FindConfigDefragmentMemoryOnIdleParams) WithHTTPClient(client *http.Client) *FindConfigDefragmentMemoryOnIdleParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config defragment memory on idle params +func (o *FindConfigDefragmentMemoryOnIdleParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigDefragmentMemoryOnIdleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_defragment_memory_on_idle_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_defragment_memory_on_idle_responses.go new file mode 100644 index 00000000000..cdced0be124 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_defragment_memory_on_idle_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDefragmentMemoryOnIdleReader is a Reader for the FindConfigDefragmentMemoryOnIdle structure. +type FindConfigDefragmentMemoryOnIdleReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDefragmentMemoryOnIdleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDefragmentMemoryOnIdleOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDefragmentMemoryOnIdleDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDefragmentMemoryOnIdleOK creates a FindConfigDefragmentMemoryOnIdleOK with default headers values +func NewFindConfigDefragmentMemoryOnIdleOK() *FindConfigDefragmentMemoryOnIdleOK { + return &FindConfigDefragmentMemoryOnIdleOK{} +} + +/* +FindConfigDefragmentMemoryOnIdleOK handles this case with default header values. + +Config value +*/ +type FindConfigDefragmentMemoryOnIdleOK struct { + Payload bool +} + +func (o *FindConfigDefragmentMemoryOnIdleOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigDefragmentMemoryOnIdleOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDefragmentMemoryOnIdleDefault creates a FindConfigDefragmentMemoryOnIdleDefault with default headers values +func NewFindConfigDefragmentMemoryOnIdleDefault(code int) *FindConfigDefragmentMemoryOnIdleDefault { + return &FindConfigDefragmentMemoryOnIdleDefault{ + _statusCode: code, + } +} + +/* +FindConfigDefragmentMemoryOnIdleDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDefragmentMemoryOnIdleDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config defragment memory on idle default response +func (o *FindConfigDefragmentMemoryOnIdleDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDefragmentMemoryOnIdleDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDefragmentMemoryOnIdleDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDefragmentMemoryOnIdleDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_developer_mode_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_developer_mode_parameters.go new file mode 100644 index 00000000000..2df5b6607b9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_developer_mode_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigDeveloperModeParams creates a new FindConfigDeveloperModeParams object +// with the default values initialized. 
+func NewFindConfigDeveloperModeParams() *FindConfigDeveloperModeParams { + + return &FindConfigDeveloperModeParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDeveloperModeParamsWithTimeout creates a new FindConfigDeveloperModeParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDeveloperModeParamsWithTimeout(timeout time.Duration) *FindConfigDeveloperModeParams { + + return &FindConfigDeveloperModeParams{ + + timeout: timeout, + } +} + +// NewFindConfigDeveloperModeParamsWithContext creates a new FindConfigDeveloperModeParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDeveloperModeParamsWithContext(ctx context.Context) *FindConfigDeveloperModeParams { + + return &FindConfigDeveloperModeParams{ + + Context: ctx, + } +} + +// NewFindConfigDeveloperModeParamsWithHTTPClient creates a new FindConfigDeveloperModeParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDeveloperModeParamsWithHTTPClient(client *http.Client) *FindConfigDeveloperModeParams { + + return &FindConfigDeveloperModeParams{ + HTTPClient: client, + } +} + +/* +FindConfigDeveloperModeParams contains all the parameters to send to the API endpoint +for the find config developer mode operation typically these are written to a http.Request +*/ +type FindConfigDeveloperModeParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config developer mode params +func (o *FindConfigDeveloperModeParams) WithTimeout(timeout time.Duration) *FindConfigDeveloperModeParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config developer mode params +func (o *FindConfigDeveloperModeParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config developer mode params +func (o *FindConfigDeveloperModeParams) WithContext(ctx context.Context) *FindConfigDeveloperModeParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config developer mode params +func (o *FindConfigDeveloperModeParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config developer mode params +func (o *FindConfigDeveloperModeParams) WithHTTPClient(client *http.Client) *FindConfigDeveloperModeParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config developer mode params +func (o *FindConfigDeveloperModeParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigDeveloperModeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_developer_mode_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_developer_mode_responses.go new file mode 100644 index 00000000000..dead89bffea --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_developer_mode_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDeveloperModeReader is a Reader for the FindConfigDeveloperMode structure. +type FindConfigDeveloperModeReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDeveloperModeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDeveloperModeOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDeveloperModeDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDeveloperModeOK creates a FindConfigDeveloperModeOK with default headers values +func NewFindConfigDeveloperModeOK() *FindConfigDeveloperModeOK { + return &FindConfigDeveloperModeOK{} +} + +/* +FindConfigDeveloperModeOK handles this case with default header values. + +Config value +*/ +type FindConfigDeveloperModeOK struct { + Payload bool +} + +func (o *FindConfigDeveloperModeOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigDeveloperModeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDeveloperModeDefault creates a FindConfigDeveloperModeDefault with default headers values +func NewFindConfigDeveloperModeDefault(code int) *FindConfigDeveloperModeDefault { + return &FindConfigDeveloperModeDefault{ + _statusCode: code, + } +} + +/* +FindConfigDeveloperModeDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDeveloperModeDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config developer mode default response +func (o *FindConfigDeveloperModeDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDeveloperModeDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDeveloperModeDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDeveloperModeDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_disk_failure_policy_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_disk_failure_policy_parameters.go new file mode 100644 index 00000000000..f93d17aca3c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_disk_failure_policy_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigDiskFailurePolicyParams creates a new FindConfigDiskFailurePolicyParams object +// with the default values initialized. 
+func NewFindConfigDiskFailurePolicyParams() *FindConfigDiskFailurePolicyParams { + + return &FindConfigDiskFailurePolicyParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDiskFailurePolicyParamsWithTimeout creates a new FindConfigDiskFailurePolicyParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDiskFailurePolicyParamsWithTimeout(timeout time.Duration) *FindConfigDiskFailurePolicyParams { + + return &FindConfigDiskFailurePolicyParams{ + + timeout: timeout, + } +} + +// NewFindConfigDiskFailurePolicyParamsWithContext creates a new FindConfigDiskFailurePolicyParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDiskFailurePolicyParamsWithContext(ctx context.Context) *FindConfigDiskFailurePolicyParams { + + return &FindConfigDiskFailurePolicyParams{ + + Context: ctx, + } +} + +// NewFindConfigDiskFailurePolicyParamsWithHTTPClient creates a new FindConfigDiskFailurePolicyParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDiskFailurePolicyParamsWithHTTPClient(client *http.Client) *FindConfigDiskFailurePolicyParams { + + return &FindConfigDiskFailurePolicyParams{ + HTTPClient: client, + } +} + +/* +FindConfigDiskFailurePolicyParams contains all the parameters to send to the API endpoint +for the find config disk failure policy operation typically these are written to a http.Request +*/ +type FindConfigDiskFailurePolicyParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config disk failure policy params +func (o *FindConfigDiskFailurePolicyParams) WithTimeout(timeout time.Duration) *FindConfigDiskFailurePolicyParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config disk failure policy params +func (o *FindConfigDiskFailurePolicyParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config disk failure policy params +func (o *FindConfigDiskFailurePolicyParams) WithContext(ctx context.Context) *FindConfigDiskFailurePolicyParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config disk failure policy params +func (o *FindConfigDiskFailurePolicyParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config disk failure policy params +func (o *FindConfigDiskFailurePolicyParams) WithHTTPClient(client *http.Client) *FindConfigDiskFailurePolicyParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config disk failure policy params +func (o *FindConfigDiskFailurePolicyParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigDiskFailurePolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_disk_failure_policy_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_disk_failure_policy_responses.go new file mode 100644 index 00000000000..e236b39f7ba --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_disk_failure_policy_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDiskFailurePolicyReader is a Reader for the FindConfigDiskFailurePolicy structure. +type FindConfigDiskFailurePolicyReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDiskFailurePolicyReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDiskFailurePolicyOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDiskFailurePolicyDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDiskFailurePolicyOK creates a FindConfigDiskFailurePolicyOK with default headers values +func NewFindConfigDiskFailurePolicyOK() *FindConfigDiskFailurePolicyOK { + return &FindConfigDiskFailurePolicyOK{} +} + +/* +FindConfigDiskFailurePolicyOK handles this case with default header values. + +Config value +*/ +type FindConfigDiskFailurePolicyOK struct { + Payload string +} + +func (o *FindConfigDiskFailurePolicyOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigDiskFailurePolicyOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDiskFailurePolicyDefault creates a FindConfigDiskFailurePolicyDefault with default headers values +func NewFindConfigDiskFailurePolicyDefault(code int) *FindConfigDiskFailurePolicyDefault { + return &FindConfigDiskFailurePolicyDefault{ + _statusCode: code, + } +} + +/* +FindConfigDiskFailurePolicyDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDiskFailurePolicyDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config disk failure policy default response +func (o *FindConfigDiskFailurePolicyDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDiskFailurePolicyDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDiskFailurePolicyDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDiskFailurePolicyDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_badness_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_badness_threshold_parameters.go new file mode 100644 index 00000000000..0808ac8beb0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_badness_threshold_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigDynamicSnitchBadnessThresholdParams creates a new FindConfigDynamicSnitchBadnessThresholdParams object +// with the default values initialized. 
+func NewFindConfigDynamicSnitchBadnessThresholdParams() *FindConfigDynamicSnitchBadnessThresholdParams { + + return &FindConfigDynamicSnitchBadnessThresholdParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDynamicSnitchBadnessThresholdParamsWithTimeout creates a new FindConfigDynamicSnitchBadnessThresholdParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDynamicSnitchBadnessThresholdParamsWithTimeout(timeout time.Duration) *FindConfigDynamicSnitchBadnessThresholdParams { + + return &FindConfigDynamicSnitchBadnessThresholdParams{ + + timeout: timeout, + } +} + +// NewFindConfigDynamicSnitchBadnessThresholdParamsWithContext creates a new FindConfigDynamicSnitchBadnessThresholdParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDynamicSnitchBadnessThresholdParamsWithContext(ctx context.Context) *FindConfigDynamicSnitchBadnessThresholdParams { + + return &FindConfigDynamicSnitchBadnessThresholdParams{ + + Context: ctx, + } +} + +// NewFindConfigDynamicSnitchBadnessThresholdParamsWithHTTPClient creates a new FindConfigDynamicSnitchBadnessThresholdParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDynamicSnitchBadnessThresholdParamsWithHTTPClient(client *http.Client) *FindConfigDynamicSnitchBadnessThresholdParams { + + return &FindConfigDynamicSnitchBadnessThresholdParams{ + HTTPClient: client, + } +} + +/* +FindConfigDynamicSnitchBadnessThresholdParams contains all the parameters to send to the API endpoint +for the find config dynamic snitch badness threshold operation typically these are written to a http.Request +*/ +type FindConfigDynamicSnitchBadnessThresholdParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config dynamic snitch badness threshold params +func (o *FindConfigDynamicSnitchBadnessThresholdParams) WithTimeout(timeout time.Duration) *FindConfigDynamicSnitchBadnessThresholdParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config dynamic snitch badness threshold params +func (o *FindConfigDynamicSnitchBadnessThresholdParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config dynamic snitch badness threshold params +func (o *FindConfigDynamicSnitchBadnessThresholdParams) WithContext(ctx context.Context) *FindConfigDynamicSnitchBadnessThresholdParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config dynamic snitch badness threshold params +func (o *FindConfigDynamicSnitchBadnessThresholdParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config dynamic snitch badness threshold params +func (o *FindConfigDynamicSnitchBadnessThresholdParams) WithHTTPClient(client *http.Client) *FindConfigDynamicSnitchBadnessThresholdParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config dynamic snitch badness threshold params +func (o *FindConfigDynamicSnitchBadnessThresholdParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigDynamicSnitchBadnessThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) 
error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_badness_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_badness_threshold_responses.go new file mode 100644 index 00000000000..fab9795fee5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_badness_threshold_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDynamicSnitchBadnessThresholdReader is a Reader for the FindConfigDynamicSnitchBadnessThreshold structure. +type FindConfigDynamicSnitchBadnessThresholdReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDynamicSnitchBadnessThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDynamicSnitchBadnessThresholdOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDynamicSnitchBadnessThresholdDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDynamicSnitchBadnessThresholdOK creates a FindConfigDynamicSnitchBadnessThresholdOK with default headers values +func NewFindConfigDynamicSnitchBadnessThresholdOK() *FindConfigDynamicSnitchBadnessThresholdOK { + return &FindConfigDynamicSnitchBadnessThresholdOK{} +} + +/* +FindConfigDynamicSnitchBadnessThresholdOK handles this case with default header values. + +Config value +*/ +type FindConfigDynamicSnitchBadnessThresholdOK struct { + Payload float64 +} + +func (o *FindConfigDynamicSnitchBadnessThresholdOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigDynamicSnitchBadnessThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDynamicSnitchBadnessThresholdDefault creates a FindConfigDynamicSnitchBadnessThresholdDefault with default headers values +func NewFindConfigDynamicSnitchBadnessThresholdDefault(code int) *FindConfigDynamicSnitchBadnessThresholdDefault { + return &FindConfigDynamicSnitchBadnessThresholdDefault{ + _statusCode: code, + } +} + +/* +FindConfigDynamicSnitchBadnessThresholdDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDynamicSnitchBadnessThresholdDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config dynamic snitch badness threshold default response +func (o *FindConfigDynamicSnitchBadnessThresholdDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDynamicSnitchBadnessThresholdDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDynamicSnitchBadnessThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDynamicSnitchBadnessThresholdDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_reset_interval_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_reset_interval_in_ms_parameters.go new file mode 100644 index 00000000000..1da0fbba74c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_reset_interval_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigDynamicSnitchResetIntervalInMsParams creates a new FindConfigDynamicSnitchResetIntervalInMsParams object +// with the default values initialized. 
+func NewFindConfigDynamicSnitchResetIntervalInMsParams() *FindConfigDynamicSnitchResetIntervalInMsParams { + + return &FindConfigDynamicSnitchResetIntervalInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDynamicSnitchResetIntervalInMsParamsWithTimeout creates a new FindConfigDynamicSnitchResetIntervalInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDynamicSnitchResetIntervalInMsParamsWithTimeout(timeout time.Duration) *FindConfigDynamicSnitchResetIntervalInMsParams { + + return &FindConfigDynamicSnitchResetIntervalInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigDynamicSnitchResetIntervalInMsParamsWithContext creates a new FindConfigDynamicSnitchResetIntervalInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDynamicSnitchResetIntervalInMsParamsWithContext(ctx context.Context) *FindConfigDynamicSnitchResetIntervalInMsParams { + + return &FindConfigDynamicSnitchResetIntervalInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigDynamicSnitchResetIntervalInMsParamsWithHTTPClient creates a new FindConfigDynamicSnitchResetIntervalInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDynamicSnitchResetIntervalInMsParamsWithHTTPClient(client *http.Client) *FindConfigDynamicSnitchResetIntervalInMsParams { + + return &FindConfigDynamicSnitchResetIntervalInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigDynamicSnitchResetIntervalInMsParams contains all the parameters to send to the API endpoint +for the find config dynamic snitch reset interval in ms operation typically these are written to a http.Request +*/ +type FindConfigDynamicSnitchResetIntervalInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config dynamic snitch reset interval in ms params +func (o *FindConfigDynamicSnitchResetIntervalInMsParams) WithTimeout(timeout time.Duration) *FindConfigDynamicSnitchResetIntervalInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config dynamic snitch reset interval in ms params +func (o *FindConfigDynamicSnitchResetIntervalInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config dynamic snitch reset interval in ms params +func (o *FindConfigDynamicSnitchResetIntervalInMsParams) WithContext(ctx context.Context) *FindConfigDynamicSnitchResetIntervalInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config dynamic snitch reset interval in ms params +func (o *FindConfigDynamicSnitchResetIntervalInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config dynamic snitch reset interval in ms params +func (o *FindConfigDynamicSnitchResetIntervalInMsParams) WithHTTPClient(client *http.Client) *FindConfigDynamicSnitchResetIntervalInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config dynamic snitch reset interval in ms params +func (o *FindConfigDynamicSnitchResetIntervalInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigDynamicSnitchResetIntervalInMsParams) 
WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_reset_interval_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_reset_interval_in_ms_responses.go new file mode 100644 index 00000000000..760e2534d1b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_reset_interval_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDynamicSnitchResetIntervalInMsReader is a Reader for the FindConfigDynamicSnitchResetIntervalInMs structure. +type FindConfigDynamicSnitchResetIntervalInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDynamicSnitchResetIntervalInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDynamicSnitchResetIntervalInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDynamicSnitchResetIntervalInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDynamicSnitchResetIntervalInMsOK creates a FindConfigDynamicSnitchResetIntervalInMsOK with default headers values +func NewFindConfigDynamicSnitchResetIntervalInMsOK() *FindConfigDynamicSnitchResetIntervalInMsOK { + return &FindConfigDynamicSnitchResetIntervalInMsOK{} +} + +/* +FindConfigDynamicSnitchResetIntervalInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigDynamicSnitchResetIntervalInMsOK struct { + Payload int64 +} + +func (o *FindConfigDynamicSnitchResetIntervalInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigDynamicSnitchResetIntervalInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDynamicSnitchResetIntervalInMsDefault creates a FindConfigDynamicSnitchResetIntervalInMsDefault with default headers values +func NewFindConfigDynamicSnitchResetIntervalInMsDefault(code int) *FindConfigDynamicSnitchResetIntervalInMsDefault { + return &FindConfigDynamicSnitchResetIntervalInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigDynamicSnitchResetIntervalInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDynamicSnitchResetIntervalInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config dynamic snitch reset interval in ms default response +func (o *FindConfigDynamicSnitchResetIntervalInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDynamicSnitchResetIntervalInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDynamicSnitchResetIntervalInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDynamicSnitchResetIntervalInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_update_interval_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_update_interval_in_ms_parameters.go new file mode 100644 index 00000000000..63fd6ace1d5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_update_interval_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigDynamicSnitchUpdateIntervalInMsParams creates a new FindConfigDynamicSnitchUpdateIntervalInMsParams object +// with the default values initialized. 
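+//
+// Illustrative usage only (not generated code): "cfg" is assumed to be this
+// package's generated client, obtained elsewhere.
+//
+//	params := NewFindConfigDynamicSnitchUpdateIntervalInMsParams().
+//		WithTimeout(15 * time.Second)
+//	ok, err := cfg.FindConfigDynamicSnitchUpdateIntervalInMs(params)
+//	if err != nil {
+//		// handle error (see the matching _responses.go file for the Default type)
+//	}
+//	_ = ok.GetPayload() // int64 config value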
+func NewFindConfigDynamicSnitchUpdateIntervalInMsParams() *FindConfigDynamicSnitchUpdateIntervalInMsParams { + + return &FindConfigDynamicSnitchUpdateIntervalInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigDynamicSnitchUpdateIntervalInMsParamsWithTimeout creates a new FindConfigDynamicSnitchUpdateIntervalInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigDynamicSnitchUpdateIntervalInMsParamsWithTimeout(timeout time.Duration) *FindConfigDynamicSnitchUpdateIntervalInMsParams { + + return &FindConfigDynamicSnitchUpdateIntervalInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigDynamicSnitchUpdateIntervalInMsParamsWithContext creates a new FindConfigDynamicSnitchUpdateIntervalInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigDynamicSnitchUpdateIntervalInMsParamsWithContext(ctx context.Context) *FindConfigDynamicSnitchUpdateIntervalInMsParams { + + return &FindConfigDynamicSnitchUpdateIntervalInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigDynamicSnitchUpdateIntervalInMsParamsWithHTTPClient creates a new FindConfigDynamicSnitchUpdateIntervalInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigDynamicSnitchUpdateIntervalInMsParamsWithHTTPClient(client *http.Client) *FindConfigDynamicSnitchUpdateIntervalInMsParams { + + return &FindConfigDynamicSnitchUpdateIntervalInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigDynamicSnitchUpdateIntervalInMsParams contains all the parameters to send to the API endpoint +for the find config dynamic snitch update interval in ms operation typically these are written to a http.Request +*/ +type FindConfigDynamicSnitchUpdateIntervalInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config dynamic snitch update interval in ms params +func (o *FindConfigDynamicSnitchUpdateIntervalInMsParams) WithTimeout(timeout time.Duration) *FindConfigDynamicSnitchUpdateIntervalInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config dynamic snitch update interval in ms params +func (o *FindConfigDynamicSnitchUpdateIntervalInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config dynamic snitch update interval in ms params +func (o *FindConfigDynamicSnitchUpdateIntervalInMsParams) WithContext(ctx context.Context) *FindConfigDynamicSnitchUpdateIntervalInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config dynamic snitch update interval in ms params +func (o *FindConfigDynamicSnitchUpdateIntervalInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config dynamic snitch update interval in ms params +func (o *FindConfigDynamicSnitchUpdateIntervalInMsParams) WithHTTPClient(client *http.Client) *FindConfigDynamicSnitchUpdateIntervalInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config dynamic snitch update interval in ms params +func (o *FindConfigDynamicSnitchUpdateIntervalInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*FindConfigDynamicSnitchUpdateIntervalInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_update_interval_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_update_interval_in_ms_responses.go new file mode 100644 index 00000000000..184d9b93638 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_dynamic_snitch_update_interval_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigDynamicSnitchUpdateIntervalInMsReader is a Reader for the FindConfigDynamicSnitchUpdateIntervalInMs structure. +type FindConfigDynamicSnitchUpdateIntervalInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigDynamicSnitchUpdateIntervalInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigDynamicSnitchUpdateIntervalInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigDynamicSnitchUpdateIntervalInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigDynamicSnitchUpdateIntervalInMsOK creates a FindConfigDynamicSnitchUpdateIntervalInMsOK with default headers values +func NewFindConfigDynamicSnitchUpdateIntervalInMsOK() *FindConfigDynamicSnitchUpdateIntervalInMsOK { + return &FindConfigDynamicSnitchUpdateIntervalInMsOK{} +} + +/* +FindConfigDynamicSnitchUpdateIntervalInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigDynamicSnitchUpdateIntervalInMsOK struct { + Payload int64 +} + +func (o *FindConfigDynamicSnitchUpdateIntervalInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigDynamicSnitchUpdateIntervalInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigDynamicSnitchUpdateIntervalInMsDefault creates a FindConfigDynamicSnitchUpdateIntervalInMsDefault with default headers values +func NewFindConfigDynamicSnitchUpdateIntervalInMsDefault(code int) *FindConfigDynamicSnitchUpdateIntervalInMsDefault { + return &FindConfigDynamicSnitchUpdateIntervalInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigDynamicSnitchUpdateIntervalInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigDynamicSnitchUpdateIntervalInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config dynamic snitch update interval in ms default response +func (o *FindConfigDynamicSnitchUpdateIntervalInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigDynamicSnitchUpdateIntervalInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigDynamicSnitchUpdateIntervalInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigDynamicSnitchUpdateIntervalInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_cache_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_cache_parameters.go new file mode 100644 index 00000000000..848d9b8e260 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_cache_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableCacheParams creates a new FindConfigEnableCacheParams object +// with the default values initialized. 
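+//
+// Sketch of context-based cancellation, as an alternative to a plain timeout
+// (assumes the generated client "cfg"; not part of the generated code):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	params := NewFindConfigEnableCacheParamsWithContext(ctx)
+//	ok, err := cfg.FindConfigEnableCache(params)
+//	if err == nil {
+//		_ = ok.GetPayload() // bool config value
+//	}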
+func NewFindConfigEnableCacheParams() *FindConfigEnableCacheParams { + + return &FindConfigEnableCacheParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableCacheParamsWithTimeout creates a new FindConfigEnableCacheParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableCacheParamsWithTimeout(timeout time.Duration) *FindConfigEnableCacheParams { + + return &FindConfigEnableCacheParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableCacheParamsWithContext creates a new FindConfigEnableCacheParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableCacheParamsWithContext(ctx context.Context) *FindConfigEnableCacheParams { + + return &FindConfigEnableCacheParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableCacheParamsWithHTTPClient creates a new FindConfigEnableCacheParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableCacheParamsWithHTTPClient(client *http.Client) *FindConfigEnableCacheParams { + + return &FindConfigEnableCacheParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableCacheParams contains all the parameters to send to the API endpoint +for the find config enable cache operation typically these are written to a http.Request +*/ +type FindConfigEnableCacheParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable cache params +func (o *FindConfigEnableCacheParams) WithTimeout(timeout time.Duration) *FindConfigEnableCacheParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable cache params +func (o *FindConfigEnableCacheParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable cache params +func (o *FindConfigEnableCacheParams) WithContext(ctx context.Context) *FindConfigEnableCacheParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable cache params +func (o *FindConfigEnableCacheParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable cache params +func (o *FindConfigEnableCacheParams) WithHTTPClient(client *http.Client) *FindConfigEnableCacheParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable cache params +func (o *FindConfigEnableCacheParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEnableCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_cache_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_cache_responses.go new file mode 100644 index 00000000000..5e998b09393 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_cache_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableCacheReader is a Reader for the FindConfigEnableCache structure. +type FindConfigEnableCacheReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableCacheOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableCacheDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableCacheOK creates a FindConfigEnableCacheOK with default headers values +func NewFindConfigEnableCacheOK() *FindConfigEnableCacheOK { + return &FindConfigEnableCacheOK{} +} + +/* +FindConfigEnableCacheOK handles this case with default header values. + +Config value +*/ +type FindConfigEnableCacheOK struct { + Payload bool +} + +func (o *FindConfigEnableCacheOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableCacheDefault creates a FindConfigEnableCacheDefault with default headers values +func NewFindConfigEnableCacheDefault(code int) *FindConfigEnableCacheDefault { + return &FindConfigEnableCacheDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableCacheDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEnableCacheDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable cache default response +func (o *FindConfigEnableCacheDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableCacheDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableCacheDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableCacheDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_commitlog_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_commitlog_parameters.go new file mode 100644 index 00000000000..8a3d55e69b0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_commitlog_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableCommitlogParams creates a new FindConfigEnableCommitlogParams object +// with the default values initialized. 
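+//
+// End-to-end sketch; the transport wiring follows the standard go-swagger
+// pattern (httptransport is github.com/go-openapi/runtime/client, New is this
+// package's generated constructor), and the host/port are placeholders, not
+// values from this diff:
+//
+//	transport := httptransport.New("scylla-node:10000", "/v2", []string{"http"})
+//	cfg := New(transport, strfmt.Default)
+//	ok, err := cfg.FindConfigEnableCommitlog(NewFindConfigEnableCommitlogParams())
+//	if err != nil {
+//		// non-200 responses surface as *FindConfigEnableCommitlogDefault
+//	} else {
+//		_ = ok.GetPayload()
+//	}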
+func NewFindConfigEnableCommitlogParams() *FindConfigEnableCommitlogParams { + + return &FindConfigEnableCommitlogParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableCommitlogParamsWithTimeout creates a new FindConfigEnableCommitlogParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableCommitlogParamsWithTimeout(timeout time.Duration) *FindConfigEnableCommitlogParams { + + return &FindConfigEnableCommitlogParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableCommitlogParamsWithContext creates a new FindConfigEnableCommitlogParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableCommitlogParamsWithContext(ctx context.Context) *FindConfigEnableCommitlogParams { + + return &FindConfigEnableCommitlogParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableCommitlogParamsWithHTTPClient creates a new FindConfigEnableCommitlogParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableCommitlogParamsWithHTTPClient(client *http.Client) *FindConfigEnableCommitlogParams { + + return &FindConfigEnableCommitlogParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableCommitlogParams contains all the parameters to send to the API endpoint +for the find config enable commitlog operation typically these are written to a http.Request +*/ +type FindConfigEnableCommitlogParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable commitlog params +func (o *FindConfigEnableCommitlogParams) WithTimeout(timeout time.Duration) *FindConfigEnableCommitlogParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable commitlog params +func (o *FindConfigEnableCommitlogParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable commitlog params +func (o *FindConfigEnableCommitlogParams) WithContext(ctx context.Context) *FindConfigEnableCommitlogParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable commitlog params +func (o *FindConfigEnableCommitlogParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable commitlog params +func (o *FindConfigEnableCommitlogParams) WithHTTPClient(client *http.Client) *FindConfigEnableCommitlogParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable commitlog params +func (o *FindConfigEnableCommitlogParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEnableCommitlogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_commitlog_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_commitlog_responses.go new file mode 100644 index 00000000000..e99c48a5d46 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_commitlog_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableCommitlogReader is a Reader for the FindConfigEnableCommitlog structure. +type FindConfigEnableCommitlogReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableCommitlogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableCommitlogOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableCommitlogDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableCommitlogOK creates a FindConfigEnableCommitlogOK with default headers values +func NewFindConfigEnableCommitlogOK() *FindConfigEnableCommitlogOK { + return &FindConfigEnableCommitlogOK{} +} + +/* +FindConfigEnableCommitlogOK handles this case with default header values. + +Config value +*/ +type FindConfigEnableCommitlogOK struct { + Payload bool +} + +func (o *FindConfigEnableCommitlogOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableCommitlogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableCommitlogDefault creates a FindConfigEnableCommitlogDefault with default headers values +func NewFindConfigEnableCommitlogDefault(code int) *FindConfigEnableCommitlogDefault { + return &FindConfigEnableCommitlogDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableCommitlogDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEnableCommitlogDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable commitlog default response +func (o *FindConfigEnableCommitlogDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableCommitlogDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableCommitlogDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableCommitlogDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_dangerous_direct_import_of_cassandra_counters_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_dangerous_direct_import_of_cassandra_counters_parameters.go new file mode 100644 index 00000000000..a4be4689692 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_dangerous_direct_import_of_cassandra_counters_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableDangerousDirectImportOfCassandraCountersParams creates a new FindConfigEnableDangerousDirectImportOfCassandraCountersParams object +// with the default values initialized. 
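+//
+// Error-inspection sketch: ReadResponse (in the matching _responses.go file)
+// returns the *...Default value itself as the error for non-2xx codes, so a
+// type assertion recovers the status code and message ("cfg" is assumed):
+//
+//	_, err := cfg.FindConfigEnableDangerousDirectImportOfCassandraCounters(
+//		NewFindConfigEnableDangerousDirectImportOfCassandraCountersParams())
+//	if def, ok := err.(*FindConfigEnableDangerousDirectImportOfCassandraCountersDefault); ok {
+//		_ = def.Code()               // raw HTTP status from the agent
+//		_ = def.GetPayload().Message // models.ErrorModel message
+//	}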
+func NewFindConfigEnableDangerousDirectImportOfCassandraCountersParams() *FindConfigEnableDangerousDirectImportOfCassandraCountersParams { + + return &FindConfigEnableDangerousDirectImportOfCassandraCountersParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableDangerousDirectImportOfCassandraCountersParamsWithTimeout creates a new FindConfigEnableDangerousDirectImportOfCassandraCountersParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableDangerousDirectImportOfCassandraCountersParamsWithTimeout(timeout time.Duration) *FindConfigEnableDangerousDirectImportOfCassandraCountersParams { + + return &FindConfigEnableDangerousDirectImportOfCassandraCountersParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableDangerousDirectImportOfCassandraCountersParamsWithContext creates a new FindConfigEnableDangerousDirectImportOfCassandraCountersParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableDangerousDirectImportOfCassandraCountersParamsWithContext(ctx context.Context) *FindConfigEnableDangerousDirectImportOfCassandraCountersParams { + + return &FindConfigEnableDangerousDirectImportOfCassandraCountersParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableDangerousDirectImportOfCassandraCountersParamsWithHTTPClient creates a new FindConfigEnableDangerousDirectImportOfCassandraCountersParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableDangerousDirectImportOfCassandraCountersParamsWithHTTPClient(client *http.Client) *FindConfigEnableDangerousDirectImportOfCassandraCountersParams { + + return &FindConfigEnableDangerousDirectImportOfCassandraCountersParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableDangerousDirectImportOfCassandraCountersParams contains all the parameters to send to the API endpoint +for the find config enable dangerous direct import of cassandra counters operation typically these are written to a http.Request +*/ +type FindConfigEnableDangerousDirectImportOfCassandraCountersParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable dangerous direct import of cassandra counters params +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) WithTimeout(timeout time.Duration) *FindConfigEnableDangerousDirectImportOfCassandraCountersParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable dangerous direct import of cassandra counters params +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable dangerous direct import of cassandra counters params +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) WithContext(ctx context.Context) *FindConfigEnableDangerousDirectImportOfCassandraCountersParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable dangerous direct import of cassandra counters params +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable dangerous direct import of cassandra counters params +func (o 
*FindConfigEnableDangerousDirectImportOfCassandraCountersParams) WithHTTPClient(client *http.Client) *FindConfigEnableDangerousDirectImportOfCassandraCountersParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable dangerous direct import of cassandra counters params +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_dangerous_direct_import_of_cassandra_counters_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_dangerous_direct_import_of_cassandra_counters_responses.go new file mode 100644 index 00000000000..c562580b7d7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_dangerous_direct_import_of_cassandra_counters_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableDangerousDirectImportOfCassandraCountersReader is a Reader for the FindConfigEnableDangerousDirectImportOfCassandraCounters structure. +type FindConfigEnableDangerousDirectImportOfCassandraCountersReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableDangerousDirectImportOfCassandraCountersOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableDangerousDirectImportOfCassandraCountersDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableDangerousDirectImportOfCassandraCountersOK creates a FindConfigEnableDangerousDirectImportOfCassandraCountersOK with default headers values +func NewFindConfigEnableDangerousDirectImportOfCassandraCountersOK() *FindConfigEnableDangerousDirectImportOfCassandraCountersOK { + return &FindConfigEnableDangerousDirectImportOfCassandraCountersOK{} +} + +/* +FindConfigEnableDangerousDirectImportOfCassandraCountersOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigEnableDangerousDirectImportOfCassandraCountersOK struct { + Payload bool +} + +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableDangerousDirectImportOfCassandraCountersDefault creates a FindConfigEnableDangerousDirectImportOfCassandraCountersDefault with default headers values +func NewFindConfigEnableDangerousDirectImportOfCassandraCountersDefault(code int) *FindConfigEnableDangerousDirectImportOfCassandraCountersDefault { + return &FindConfigEnableDangerousDirectImportOfCassandraCountersDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableDangerousDirectImportOfCassandraCountersDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigEnableDangerousDirectImportOfCassandraCountersDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable dangerous direct import of cassandra counters default response +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableDangerousDirectImportOfCassandraCountersDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_deprecated_partitioners_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_deprecated_partitioners_parameters.go new file mode 100644 index 00000000000..137ab80c820 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_deprecated_partitioners_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableDeprecatedPartitionersParams creates a new FindConfigEnableDeprecatedPartitionersParams object +// with the default values initialized. 
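+//
+// For context: the generated operation method (defined in this package's
+// client, outside this diff) submits a runtime.ClientOperation that pairs
+// these params with the Reader from the matching _responses.go file. The
+// field values below are assumptions based on the usual go-swagger layout:
+//
+//	result, err := transport.Submit(&runtime.ClientOperation{
+//		ID:          "find_config_enable_deprecated_partitioners",
+//		Method:      "GET",
+//		PathPattern: "/config/enable_deprecated_partitioners",
+//		Schemes:     []string{"http"},
+//		Params:      params,
+//		Reader:      &FindConfigEnableDeprecatedPartitionersReader{formats: formats},
+//		Context:     params.Context,
+//		Client:      params.HTTPClient,
+//	})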
+func NewFindConfigEnableDeprecatedPartitionersParams() *FindConfigEnableDeprecatedPartitionersParams { + + return &FindConfigEnableDeprecatedPartitionersParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableDeprecatedPartitionersParamsWithTimeout creates a new FindConfigEnableDeprecatedPartitionersParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableDeprecatedPartitionersParamsWithTimeout(timeout time.Duration) *FindConfigEnableDeprecatedPartitionersParams { + + return &FindConfigEnableDeprecatedPartitionersParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableDeprecatedPartitionersParamsWithContext creates a new FindConfigEnableDeprecatedPartitionersParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableDeprecatedPartitionersParamsWithContext(ctx context.Context) *FindConfigEnableDeprecatedPartitionersParams { + + return &FindConfigEnableDeprecatedPartitionersParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableDeprecatedPartitionersParamsWithHTTPClient creates a new FindConfigEnableDeprecatedPartitionersParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableDeprecatedPartitionersParamsWithHTTPClient(client *http.Client) *FindConfigEnableDeprecatedPartitionersParams { + + return &FindConfigEnableDeprecatedPartitionersParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableDeprecatedPartitionersParams contains all the parameters to send to the API endpoint +for the find config enable deprecated partitioners operation typically these are written to a http.Request +*/ +type FindConfigEnableDeprecatedPartitionersParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable deprecated partitioners params +func (o *FindConfigEnableDeprecatedPartitionersParams) WithTimeout(timeout time.Duration) *FindConfigEnableDeprecatedPartitionersParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable deprecated partitioners params +func (o *FindConfigEnableDeprecatedPartitionersParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable deprecated partitioners params +func (o *FindConfigEnableDeprecatedPartitionersParams) WithContext(ctx context.Context) *FindConfigEnableDeprecatedPartitionersParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable deprecated partitioners params +func (o *FindConfigEnableDeprecatedPartitionersParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable deprecated partitioners params +func (o *FindConfigEnableDeprecatedPartitionersParams) WithHTTPClient(client *http.Client) *FindConfigEnableDeprecatedPartitionersParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable deprecated partitioners params +func (o *FindConfigEnableDeprecatedPartitionersParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEnableDeprecatedPartitionersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_deprecated_partitioners_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_deprecated_partitioners_responses.go new file mode 100644 index 00000000000..e17eba43089 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_deprecated_partitioners_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableDeprecatedPartitionersReader is a Reader for the FindConfigEnableDeprecatedPartitioners structure. +type FindConfigEnableDeprecatedPartitionersReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableDeprecatedPartitionersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableDeprecatedPartitionersOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableDeprecatedPartitionersDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableDeprecatedPartitionersOK creates a FindConfigEnableDeprecatedPartitionersOK with default headers values +func NewFindConfigEnableDeprecatedPartitionersOK() *FindConfigEnableDeprecatedPartitionersOK { + return &FindConfigEnableDeprecatedPartitionersOK{} +} + +/* +FindConfigEnableDeprecatedPartitionersOK handles this case with default header values. + +Config value +*/ +type FindConfigEnableDeprecatedPartitionersOK struct { + Payload bool +} + +func (o *FindConfigEnableDeprecatedPartitionersOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableDeprecatedPartitionersOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableDeprecatedPartitionersDefault creates a FindConfigEnableDeprecatedPartitionersDefault with default headers values +func NewFindConfigEnableDeprecatedPartitionersDefault(code int) *FindConfigEnableDeprecatedPartitionersDefault { + return &FindConfigEnableDeprecatedPartitionersDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableDeprecatedPartitionersDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEnableDeprecatedPartitionersDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable deprecated partitioners default response +func (o *FindConfigEnableDeprecatedPartitionersDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableDeprecatedPartitionersDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableDeprecatedPartitionersDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableDeprecatedPartitionersDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_in_memory_data_store_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_in_memory_data_store_parameters.go new file mode 100644 index 00000000000..07a740d2ff4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_in_memory_data_store_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableInMemoryDataStoreParams creates a new FindConfigEnableInMemoryDataStoreParams object +// with the default values initialized. 
+func NewFindConfigEnableInMemoryDataStoreParams() *FindConfigEnableInMemoryDataStoreParams { + + return &FindConfigEnableInMemoryDataStoreParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableInMemoryDataStoreParamsWithTimeout creates a new FindConfigEnableInMemoryDataStoreParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableInMemoryDataStoreParamsWithTimeout(timeout time.Duration) *FindConfigEnableInMemoryDataStoreParams { + + return &FindConfigEnableInMemoryDataStoreParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableInMemoryDataStoreParamsWithContext creates a new FindConfigEnableInMemoryDataStoreParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableInMemoryDataStoreParamsWithContext(ctx context.Context) *FindConfigEnableInMemoryDataStoreParams { + + return &FindConfigEnableInMemoryDataStoreParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableInMemoryDataStoreParamsWithHTTPClient creates a new FindConfigEnableInMemoryDataStoreParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableInMemoryDataStoreParamsWithHTTPClient(client *http.Client) *FindConfigEnableInMemoryDataStoreParams { + + return &FindConfigEnableInMemoryDataStoreParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableInMemoryDataStoreParams contains all the parameters to send to the API endpoint +for the find config enable in memory data store operation typically these are written to a http.Request +*/ +type FindConfigEnableInMemoryDataStoreParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable in memory data store params +func (o *FindConfigEnableInMemoryDataStoreParams) WithTimeout(timeout time.Duration) *FindConfigEnableInMemoryDataStoreParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable in memory data store params +func (o *FindConfigEnableInMemoryDataStoreParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable in memory data store params +func (o *FindConfigEnableInMemoryDataStoreParams) WithContext(ctx context.Context) *FindConfigEnableInMemoryDataStoreParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable in memory data store params +func (o *FindConfigEnableInMemoryDataStoreParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable in memory data store params +func (o *FindConfigEnableInMemoryDataStoreParams) WithHTTPClient(client *http.Client) *FindConfigEnableInMemoryDataStoreParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable in memory data store params +func (o *FindConfigEnableInMemoryDataStoreParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEnableInMemoryDataStoreParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_in_memory_data_store_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_in_memory_data_store_responses.go new file mode 100644 index 00000000000..aa9e2a58ca2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_in_memory_data_store_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableInMemoryDataStoreReader is a Reader for the FindConfigEnableInMemoryDataStore structure. +type FindConfigEnableInMemoryDataStoreReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableInMemoryDataStoreReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableInMemoryDataStoreOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableInMemoryDataStoreDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableInMemoryDataStoreOK creates a FindConfigEnableInMemoryDataStoreOK with default headers values +func NewFindConfigEnableInMemoryDataStoreOK() *FindConfigEnableInMemoryDataStoreOK { + return &FindConfigEnableInMemoryDataStoreOK{} +} + +/* +FindConfigEnableInMemoryDataStoreOK handles this case with default header values. + +Config value +*/ +type FindConfigEnableInMemoryDataStoreOK struct { + Payload bool +} + +func (o *FindConfigEnableInMemoryDataStoreOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableInMemoryDataStoreOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableInMemoryDataStoreDefault creates a FindConfigEnableInMemoryDataStoreDefault with default headers values +func NewFindConfigEnableInMemoryDataStoreDefault(code int) *FindConfigEnableInMemoryDataStoreDefault { + return &FindConfigEnableInMemoryDataStoreDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableInMemoryDataStoreDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEnableInMemoryDataStoreDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable in memory data store default response +func (o *FindConfigEnableInMemoryDataStoreDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableInMemoryDataStoreDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableInMemoryDataStoreDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableInMemoryDataStoreDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_keyspace_column_family_metrics_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_keyspace_column_family_metrics_parameters.go new file mode 100644 index 00000000000..6cc99b6ebea --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_keyspace_column_family_metrics_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableKeyspaceColumnFamilyMetricsParams creates a new FindConfigEnableKeyspaceColumnFamilyMetricsParams object +// with the default values initialized. 
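+//
+// WithHTTPClient is the hook for transport-level tuning; the settings below
+// are only an example, not defaults taken from this code:
+//
+//	hc := &http.Client{
+//		Timeout:   30 * time.Second,
+//		Transport: &http.Transport{MaxIdleConnsPerHost: 4},
+//	}
+//	params := NewFindConfigEnableKeyspaceColumnFamilyMetricsParamsWithHTTPClient(hc)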
+func NewFindConfigEnableKeyspaceColumnFamilyMetricsParams() *FindConfigEnableKeyspaceColumnFamilyMetricsParams { + + return &FindConfigEnableKeyspaceColumnFamilyMetricsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableKeyspaceColumnFamilyMetricsParamsWithTimeout creates a new FindConfigEnableKeyspaceColumnFamilyMetricsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableKeyspaceColumnFamilyMetricsParamsWithTimeout(timeout time.Duration) *FindConfigEnableKeyspaceColumnFamilyMetricsParams { + + return &FindConfigEnableKeyspaceColumnFamilyMetricsParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableKeyspaceColumnFamilyMetricsParamsWithContext creates a new FindConfigEnableKeyspaceColumnFamilyMetricsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableKeyspaceColumnFamilyMetricsParamsWithContext(ctx context.Context) *FindConfigEnableKeyspaceColumnFamilyMetricsParams { + + return &FindConfigEnableKeyspaceColumnFamilyMetricsParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableKeyspaceColumnFamilyMetricsParamsWithHTTPClient creates a new FindConfigEnableKeyspaceColumnFamilyMetricsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableKeyspaceColumnFamilyMetricsParamsWithHTTPClient(client *http.Client) *FindConfigEnableKeyspaceColumnFamilyMetricsParams { + + return &FindConfigEnableKeyspaceColumnFamilyMetricsParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableKeyspaceColumnFamilyMetricsParams contains all the parameters to send to the API endpoint +for the find config enable keyspace column family metrics operation typically these are written to a http.Request +*/ +type FindConfigEnableKeyspaceColumnFamilyMetricsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable keyspace column family metrics params +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsParams) WithTimeout(timeout time.Duration) *FindConfigEnableKeyspaceColumnFamilyMetricsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable keyspace column family metrics params +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable keyspace column family metrics params +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsParams) WithContext(ctx context.Context) *FindConfigEnableKeyspaceColumnFamilyMetricsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable keyspace column family metrics params +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable keyspace column family metrics params +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsParams) WithHTTPClient(client *http.Client) *FindConfigEnableKeyspaceColumnFamilyMetricsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable keyspace column family metrics params +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes 
these params to a swagger request +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_keyspace_column_family_metrics_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_keyspace_column_family_metrics_responses.go new file mode 100644 index 00000000000..863e9c6a5fb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_keyspace_column_family_metrics_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableKeyspaceColumnFamilyMetricsReader is a Reader for the FindConfigEnableKeyspaceColumnFamilyMetrics structure. +type FindConfigEnableKeyspaceColumnFamilyMetricsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableKeyspaceColumnFamilyMetricsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableKeyspaceColumnFamilyMetricsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableKeyspaceColumnFamilyMetricsOK creates a FindConfigEnableKeyspaceColumnFamilyMetricsOK with default headers values +func NewFindConfigEnableKeyspaceColumnFamilyMetricsOK() *FindConfigEnableKeyspaceColumnFamilyMetricsOK { + return &FindConfigEnableKeyspaceColumnFamilyMetricsOK{} +} + +/* +FindConfigEnableKeyspaceColumnFamilyMetricsOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigEnableKeyspaceColumnFamilyMetricsOK struct { + Payload bool +} + +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableKeyspaceColumnFamilyMetricsDefault creates a FindConfigEnableKeyspaceColumnFamilyMetricsDefault with default headers values +func NewFindConfigEnableKeyspaceColumnFamilyMetricsDefault(code int) *FindConfigEnableKeyspaceColumnFamilyMetricsDefault { + return &FindConfigEnableKeyspaceColumnFamilyMetricsDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableKeyspaceColumnFamilyMetricsDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigEnableKeyspaceColumnFamilyMetricsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable keyspace column family metrics default response +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableKeyspaceColumnFamilyMetricsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_shard_aware_drivers_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_shard_aware_drivers_parameters.go new file mode 100644 index 00000000000..e532a086d6a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_shard_aware_drivers_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableShardAwareDriversParams creates a new FindConfigEnableShardAwareDriversParams object +// with the default values initialized. 
+func NewFindConfigEnableShardAwareDriversParams() *FindConfigEnableShardAwareDriversParams { + + return &FindConfigEnableShardAwareDriversParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableShardAwareDriversParamsWithTimeout creates a new FindConfigEnableShardAwareDriversParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableShardAwareDriversParamsWithTimeout(timeout time.Duration) *FindConfigEnableShardAwareDriversParams { + + return &FindConfigEnableShardAwareDriversParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableShardAwareDriversParamsWithContext creates a new FindConfigEnableShardAwareDriversParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableShardAwareDriversParamsWithContext(ctx context.Context) *FindConfigEnableShardAwareDriversParams { + + return &FindConfigEnableShardAwareDriversParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableShardAwareDriversParamsWithHTTPClient creates a new FindConfigEnableShardAwareDriversParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableShardAwareDriversParamsWithHTTPClient(client *http.Client) *FindConfigEnableShardAwareDriversParams { + + return &FindConfigEnableShardAwareDriversParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableShardAwareDriversParams contains all the parameters to send to the API endpoint +for the find config enable shard aware drivers operation typically these are written to a http.Request +*/ +type FindConfigEnableShardAwareDriversParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable shard aware drivers params +func (o *FindConfigEnableShardAwareDriversParams) WithTimeout(timeout time.Duration) *FindConfigEnableShardAwareDriversParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable shard aware drivers params +func (o *FindConfigEnableShardAwareDriversParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable shard aware drivers params +func (o *FindConfigEnableShardAwareDriversParams) WithContext(ctx context.Context) *FindConfigEnableShardAwareDriversParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable shard aware drivers params +func (o *FindConfigEnableShardAwareDriversParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable shard aware drivers params +func (o *FindConfigEnableShardAwareDriversParams) WithHTTPClient(client *http.Client) *FindConfigEnableShardAwareDriversParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable shard aware drivers params +func (o *FindConfigEnableShardAwareDriversParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEnableShardAwareDriversParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_shard_aware_drivers_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_shard_aware_drivers_responses.go new file mode 100644 index 00000000000..505e6c1f98a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_shard_aware_drivers_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableShardAwareDriversReader is a Reader for the FindConfigEnableShardAwareDrivers structure. +type FindConfigEnableShardAwareDriversReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableShardAwareDriversReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableShardAwareDriversOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableShardAwareDriversDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableShardAwareDriversOK creates a FindConfigEnableShardAwareDriversOK with default headers values +func NewFindConfigEnableShardAwareDriversOK() *FindConfigEnableShardAwareDriversOK { + return &FindConfigEnableShardAwareDriversOK{} +} + +/* +FindConfigEnableShardAwareDriversOK handles this case with default header values. + +Config value +*/ +type FindConfigEnableShardAwareDriversOK struct { + Payload bool +} + +func (o *FindConfigEnableShardAwareDriversOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableShardAwareDriversOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableShardAwareDriversDefault creates a FindConfigEnableShardAwareDriversDefault with default headers values +func NewFindConfigEnableShardAwareDriversDefault(code int) *FindConfigEnableShardAwareDriversDefault { + return &FindConfigEnableShardAwareDriversDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableShardAwareDriversDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEnableShardAwareDriversDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable shard aware drivers default response +func (o *FindConfigEnableShardAwareDriversDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableShardAwareDriversDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableShardAwareDriversDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableShardAwareDriversDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstable_data_integrity_check_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstable_data_integrity_check_parameters.go new file mode 100644 index 00000000000..de1d40638f8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstable_data_integrity_check_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableSstableDataIntegrityCheckParams creates a new FindConfigEnableSstableDataIntegrityCheckParams object +// with the default values initialized. 
+func NewFindConfigEnableSstableDataIntegrityCheckParams() *FindConfigEnableSstableDataIntegrityCheckParams { + + return &FindConfigEnableSstableDataIntegrityCheckParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableSstableDataIntegrityCheckParamsWithTimeout creates a new FindConfigEnableSstableDataIntegrityCheckParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableSstableDataIntegrityCheckParamsWithTimeout(timeout time.Duration) *FindConfigEnableSstableDataIntegrityCheckParams { + + return &FindConfigEnableSstableDataIntegrityCheckParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableSstableDataIntegrityCheckParamsWithContext creates a new FindConfigEnableSstableDataIntegrityCheckParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableSstableDataIntegrityCheckParamsWithContext(ctx context.Context) *FindConfigEnableSstableDataIntegrityCheckParams { + + return &FindConfigEnableSstableDataIntegrityCheckParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableSstableDataIntegrityCheckParamsWithHTTPClient creates a new FindConfigEnableSstableDataIntegrityCheckParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableSstableDataIntegrityCheckParamsWithHTTPClient(client *http.Client) *FindConfigEnableSstableDataIntegrityCheckParams { + + return &FindConfigEnableSstableDataIntegrityCheckParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableSstableDataIntegrityCheckParams contains all the parameters to send to the API endpoint +for the find config enable sstable data integrity check operation typically these are written to a http.Request +*/ +type FindConfigEnableSstableDataIntegrityCheckParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable sstable data integrity check params +func (o *FindConfigEnableSstableDataIntegrityCheckParams) WithTimeout(timeout time.Duration) *FindConfigEnableSstableDataIntegrityCheckParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable sstable data integrity check params +func (o *FindConfigEnableSstableDataIntegrityCheckParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable sstable data integrity check params +func (o *FindConfigEnableSstableDataIntegrityCheckParams) WithContext(ctx context.Context) *FindConfigEnableSstableDataIntegrityCheckParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable sstable data integrity check params +func (o *FindConfigEnableSstableDataIntegrityCheckParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable sstable data integrity check params +func (o *FindConfigEnableSstableDataIntegrityCheckParams) WithHTTPClient(client *http.Client) *FindConfigEnableSstableDataIntegrityCheckParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable sstable data integrity check params +func (o *FindConfigEnableSstableDataIntegrityCheckParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*FindConfigEnableSstableDataIntegrityCheckParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstable_data_integrity_check_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstable_data_integrity_check_responses.go new file mode 100644 index 00000000000..62d03edc7c9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstable_data_integrity_check_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableSstableDataIntegrityCheckReader is a Reader for the FindConfigEnableSstableDataIntegrityCheck structure. +type FindConfigEnableSstableDataIntegrityCheckReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableSstableDataIntegrityCheckReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableSstableDataIntegrityCheckOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableSstableDataIntegrityCheckDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableSstableDataIntegrityCheckOK creates a FindConfigEnableSstableDataIntegrityCheckOK with default headers values +func NewFindConfigEnableSstableDataIntegrityCheckOK() *FindConfigEnableSstableDataIntegrityCheckOK { + return &FindConfigEnableSstableDataIntegrityCheckOK{} +} + +/* +FindConfigEnableSstableDataIntegrityCheckOK handles this case with default header values. + +Config value +*/ +type FindConfigEnableSstableDataIntegrityCheckOK struct { + Payload bool +} + +func (o *FindConfigEnableSstableDataIntegrityCheckOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableSstableDataIntegrityCheckOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableSstableDataIntegrityCheckDefault creates a FindConfigEnableSstableDataIntegrityCheckDefault with default headers values +func NewFindConfigEnableSstableDataIntegrityCheckDefault(code int) *FindConfigEnableSstableDataIntegrityCheckDefault { + return &FindConfigEnableSstableDataIntegrityCheckDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableSstableDataIntegrityCheckDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEnableSstableDataIntegrityCheckDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable sstable data integrity check default response +func (o *FindConfigEnableSstableDataIntegrityCheckDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableSstableDataIntegrityCheckDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableSstableDataIntegrityCheckDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableSstableDataIntegrityCheckDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstables_mc_format_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstables_mc_format_parameters.go new file mode 100644 index 00000000000..0efb9484d63 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstables_mc_format_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEnableSstablesMcFormatParams creates a new FindConfigEnableSstablesMcFormatParams object +// with the default values initialized. 
+func NewFindConfigEnableSstablesMcFormatParams() *FindConfigEnableSstablesMcFormatParams { + + return &FindConfigEnableSstablesMcFormatParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEnableSstablesMcFormatParamsWithTimeout creates a new FindConfigEnableSstablesMcFormatParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEnableSstablesMcFormatParamsWithTimeout(timeout time.Duration) *FindConfigEnableSstablesMcFormatParams { + + return &FindConfigEnableSstablesMcFormatParams{ + + timeout: timeout, + } +} + +// NewFindConfigEnableSstablesMcFormatParamsWithContext creates a new FindConfigEnableSstablesMcFormatParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEnableSstablesMcFormatParamsWithContext(ctx context.Context) *FindConfigEnableSstablesMcFormatParams { + + return &FindConfigEnableSstablesMcFormatParams{ + + Context: ctx, + } +} + +// NewFindConfigEnableSstablesMcFormatParamsWithHTTPClient creates a new FindConfigEnableSstablesMcFormatParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEnableSstablesMcFormatParamsWithHTTPClient(client *http.Client) *FindConfigEnableSstablesMcFormatParams { + + return &FindConfigEnableSstablesMcFormatParams{ + HTTPClient: client, + } +} + +/* +FindConfigEnableSstablesMcFormatParams contains all the parameters to send to the API endpoint +for the find config enable sstables mc format operation typically these are written to a http.Request +*/ +type FindConfigEnableSstablesMcFormatParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config enable sstables mc format params +func (o *FindConfigEnableSstablesMcFormatParams) WithTimeout(timeout time.Duration) *FindConfigEnableSstablesMcFormatParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config enable sstables mc format params +func (o *FindConfigEnableSstablesMcFormatParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config enable sstables mc format params +func (o *FindConfigEnableSstablesMcFormatParams) WithContext(ctx context.Context) *FindConfigEnableSstablesMcFormatParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config enable sstables mc format params +func (o *FindConfigEnableSstablesMcFormatParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config enable sstables mc format params +func (o *FindConfigEnableSstablesMcFormatParams) WithHTTPClient(client *http.Client) *FindConfigEnableSstablesMcFormatParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config enable sstables mc format params +func (o *FindConfigEnableSstablesMcFormatParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEnableSstablesMcFormatParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstables_mc_format_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstables_mc_format_responses.go new file mode 100644 index 00000000000..60115e3ca6b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_enable_sstables_mc_format_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEnableSstablesMcFormatReader is a Reader for the FindConfigEnableSstablesMcFormat structure. +type FindConfigEnableSstablesMcFormatReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEnableSstablesMcFormatReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEnableSstablesMcFormatOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEnableSstablesMcFormatDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEnableSstablesMcFormatOK creates a FindConfigEnableSstablesMcFormatOK with default headers values +func NewFindConfigEnableSstablesMcFormatOK() *FindConfigEnableSstablesMcFormatOK { + return &FindConfigEnableSstablesMcFormatOK{} +} + +/* +FindConfigEnableSstablesMcFormatOK handles this case with default header values. + +Config value +*/ +type FindConfigEnableSstablesMcFormatOK struct { + Payload bool +} + +func (o *FindConfigEnableSstablesMcFormatOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigEnableSstablesMcFormatOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEnableSstablesMcFormatDefault creates a FindConfigEnableSstablesMcFormatDefault with default headers values +func NewFindConfigEnableSstablesMcFormatDefault(code int) *FindConfigEnableSstablesMcFormatDefault { + return &FindConfigEnableSstablesMcFormatDefault{ + _statusCode: code, + } +} + +/* +FindConfigEnableSstablesMcFormatDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEnableSstablesMcFormatDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config enable sstables mc format default response +func (o *FindConfigEnableSstablesMcFormatDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEnableSstablesMcFormatDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEnableSstablesMcFormatDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEnableSstablesMcFormatDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_endpoint_snitch_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_endpoint_snitch_parameters.go new file mode 100644 index 00000000000..46f5a1ec323 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_endpoint_snitch_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigEndpointSnitchParams creates a new FindConfigEndpointSnitchParams object +// with the default values initialized. 
+func NewFindConfigEndpointSnitchParams() *FindConfigEndpointSnitchParams { + + return &FindConfigEndpointSnitchParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigEndpointSnitchParamsWithTimeout creates a new FindConfigEndpointSnitchParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigEndpointSnitchParamsWithTimeout(timeout time.Duration) *FindConfigEndpointSnitchParams { + + return &FindConfigEndpointSnitchParams{ + + timeout: timeout, + } +} + +// NewFindConfigEndpointSnitchParamsWithContext creates a new FindConfigEndpointSnitchParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigEndpointSnitchParamsWithContext(ctx context.Context) *FindConfigEndpointSnitchParams { + + return &FindConfigEndpointSnitchParams{ + + Context: ctx, + } +} + +// NewFindConfigEndpointSnitchParamsWithHTTPClient creates a new FindConfigEndpointSnitchParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigEndpointSnitchParamsWithHTTPClient(client *http.Client) *FindConfigEndpointSnitchParams { + + return &FindConfigEndpointSnitchParams{ + HTTPClient: client, + } +} + +/* +FindConfigEndpointSnitchParams contains all the parameters to send to the API endpoint +for the find config endpoint snitch operation typically these are written to a http.Request +*/ +type FindConfigEndpointSnitchParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config endpoint snitch params +func (o *FindConfigEndpointSnitchParams) WithTimeout(timeout time.Duration) *FindConfigEndpointSnitchParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config endpoint snitch params +func (o *FindConfigEndpointSnitchParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config endpoint snitch params +func (o *FindConfigEndpointSnitchParams) WithContext(ctx context.Context) *FindConfigEndpointSnitchParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config endpoint snitch params +func (o *FindConfigEndpointSnitchParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config endpoint snitch params +func (o *FindConfigEndpointSnitchParams) WithHTTPClient(client *http.Client) *FindConfigEndpointSnitchParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config endpoint snitch params +func (o *FindConfigEndpointSnitchParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigEndpointSnitchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_endpoint_snitch_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_endpoint_snitch_responses.go new file mode 100644 index 00000000000..e6d95d46a95 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_endpoint_snitch_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigEndpointSnitchReader is a Reader for the FindConfigEndpointSnitch structure. +type FindConfigEndpointSnitchReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigEndpointSnitchReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigEndpointSnitchOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigEndpointSnitchDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigEndpointSnitchOK creates a FindConfigEndpointSnitchOK with default headers values +func NewFindConfigEndpointSnitchOK() *FindConfigEndpointSnitchOK { + return &FindConfigEndpointSnitchOK{} +} + +/* +FindConfigEndpointSnitchOK handles this case with default header values. + +Config value +*/ +type FindConfigEndpointSnitchOK struct { + Payload string +} + +func (o *FindConfigEndpointSnitchOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigEndpointSnitchOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigEndpointSnitchDefault creates a FindConfigEndpointSnitchDefault with default headers values +func NewFindConfigEndpointSnitchDefault(code int) *FindConfigEndpointSnitchDefault { + return &FindConfigEndpointSnitchDefault{ + _statusCode: code, + } +} + +/* +FindConfigEndpointSnitchDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigEndpointSnitchDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config endpoint snitch default response +func (o *FindConfigEndpointSnitchDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigEndpointSnitchDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigEndpointSnitchDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigEndpointSnitchDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_experimental_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_experimental_parameters.go new file mode 100644 index 00000000000..a398adef9fc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_experimental_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigExperimentalParams creates a new FindConfigExperimentalParams object +// with the default values initialized. 
+func NewFindConfigExperimentalParams() *FindConfigExperimentalParams { + + return &FindConfigExperimentalParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigExperimentalParamsWithTimeout creates a new FindConfigExperimentalParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigExperimentalParamsWithTimeout(timeout time.Duration) *FindConfigExperimentalParams { + + return &FindConfigExperimentalParams{ + + timeout: timeout, + } +} + +// NewFindConfigExperimentalParamsWithContext creates a new FindConfigExperimentalParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigExperimentalParamsWithContext(ctx context.Context) *FindConfigExperimentalParams { + + return &FindConfigExperimentalParams{ + + Context: ctx, + } +} + +// NewFindConfigExperimentalParamsWithHTTPClient creates a new FindConfigExperimentalParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigExperimentalParamsWithHTTPClient(client *http.Client) *FindConfigExperimentalParams { + + return &FindConfigExperimentalParams{ + HTTPClient: client, + } +} + +/* +FindConfigExperimentalParams contains all the parameters to send to the API endpoint +for the find config experimental operation typically these are written to a http.Request +*/ +type FindConfigExperimentalParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config experimental params +func (o *FindConfigExperimentalParams) WithTimeout(timeout time.Duration) *FindConfigExperimentalParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config experimental params +func (o *FindConfigExperimentalParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config experimental params +func (o *FindConfigExperimentalParams) WithContext(ctx context.Context) *FindConfigExperimentalParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config experimental params +func (o *FindConfigExperimentalParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config experimental params +func (o *FindConfigExperimentalParams) WithHTTPClient(client *http.Client) *FindConfigExperimentalParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config experimental params +func (o *FindConfigExperimentalParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigExperimentalParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_experimental_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_experimental_responses.go new file mode 100644 index 00000000000..25e785cba69 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_experimental_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigExperimentalReader is a Reader for the FindConfigExperimental structure. +type FindConfigExperimentalReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigExperimentalReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigExperimentalOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigExperimentalDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigExperimentalOK creates a FindConfigExperimentalOK with default headers values +func NewFindConfigExperimentalOK() *FindConfigExperimentalOK { + return &FindConfigExperimentalOK{} +} + +/* +FindConfigExperimentalOK handles this case with default header values. + +Config value +*/ +type FindConfigExperimentalOK struct { + Payload bool +} + +func (o *FindConfigExperimentalOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigExperimentalOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigExperimentalDefault creates a FindConfigExperimentalDefault with default headers values +func NewFindConfigExperimentalDefault(code int) *FindConfigExperimentalDefault { + return &FindConfigExperimentalDefault{ + _statusCode: code, + } +} + +/* +FindConfigExperimentalDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigExperimentalDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config experimental default response +func (o *FindConfigExperimentalDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigExperimentalDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigExperimentalDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigExperimentalDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_initial_value_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_initial_value_ms_parameters.go new file mode 100644 index 00000000000..2ddd1e73777 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_initial_value_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigFdInitialValueMsParams creates a new FindConfigFdInitialValueMsParams object +// with the default values initialized. 
+func NewFindConfigFdInitialValueMsParams() *FindConfigFdInitialValueMsParams { + + return &FindConfigFdInitialValueMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigFdInitialValueMsParamsWithTimeout creates a new FindConfigFdInitialValueMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigFdInitialValueMsParamsWithTimeout(timeout time.Duration) *FindConfigFdInitialValueMsParams { + + return &FindConfigFdInitialValueMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigFdInitialValueMsParamsWithContext creates a new FindConfigFdInitialValueMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigFdInitialValueMsParamsWithContext(ctx context.Context) *FindConfigFdInitialValueMsParams { + + return &FindConfigFdInitialValueMsParams{ + + Context: ctx, + } +} + +// NewFindConfigFdInitialValueMsParamsWithHTTPClient creates a new FindConfigFdInitialValueMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigFdInitialValueMsParamsWithHTTPClient(client *http.Client) *FindConfigFdInitialValueMsParams { + + return &FindConfigFdInitialValueMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigFdInitialValueMsParams contains all the parameters to send to the API endpoint +for the find config fd initial value ms operation typically these are written to a http.Request +*/ +type FindConfigFdInitialValueMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config fd initial value ms params +func (o *FindConfigFdInitialValueMsParams) WithTimeout(timeout time.Duration) *FindConfigFdInitialValueMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config fd initial value ms params +func (o *FindConfigFdInitialValueMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config fd initial value ms params +func (o *FindConfigFdInitialValueMsParams) WithContext(ctx context.Context) *FindConfigFdInitialValueMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config fd initial value ms params +func (o *FindConfigFdInitialValueMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config fd initial value ms params +func (o *FindConfigFdInitialValueMsParams) WithHTTPClient(client *http.Client) *FindConfigFdInitialValueMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config fd initial value ms params +func (o *FindConfigFdInitialValueMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigFdInitialValueMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_initial_value_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_initial_value_ms_responses.go new file mode 100644 index 00000000000..f9aab81d6d0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_initial_value_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigFdInitialValueMsReader is a Reader for the FindConfigFdInitialValueMs structure. +type FindConfigFdInitialValueMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigFdInitialValueMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigFdInitialValueMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigFdInitialValueMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigFdInitialValueMsOK creates a FindConfigFdInitialValueMsOK with default headers values +func NewFindConfigFdInitialValueMsOK() *FindConfigFdInitialValueMsOK { + return &FindConfigFdInitialValueMsOK{} +} + +/* +FindConfigFdInitialValueMsOK handles this case with default header values. + +Config value +*/ +type FindConfigFdInitialValueMsOK struct { + Payload int64 +} + +func (o *FindConfigFdInitialValueMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigFdInitialValueMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigFdInitialValueMsDefault creates a FindConfigFdInitialValueMsDefault with default headers values +func NewFindConfigFdInitialValueMsDefault(code int) *FindConfigFdInitialValueMsDefault { + return &FindConfigFdInitialValueMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigFdInitialValueMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigFdInitialValueMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config fd initial value ms default response +func (o *FindConfigFdInitialValueMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigFdInitialValueMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigFdInitialValueMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigFdInitialValueMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_max_interval_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_max_interval_ms_parameters.go new file mode 100644 index 00000000000..2c2734af59d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_max_interval_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigFdMaxIntervalMsParams creates a new FindConfigFdMaxIntervalMsParams object +// with the default values initialized. 
+func NewFindConfigFdMaxIntervalMsParams() *FindConfigFdMaxIntervalMsParams { + + return &FindConfigFdMaxIntervalMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigFdMaxIntervalMsParamsWithTimeout creates a new FindConfigFdMaxIntervalMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigFdMaxIntervalMsParamsWithTimeout(timeout time.Duration) *FindConfigFdMaxIntervalMsParams { + + return &FindConfigFdMaxIntervalMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigFdMaxIntervalMsParamsWithContext creates a new FindConfigFdMaxIntervalMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigFdMaxIntervalMsParamsWithContext(ctx context.Context) *FindConfigFdMaxIntervalMsParams { + + return &FindConfigFdMaxIntervalMsParams{ + + Context: ctx, + } +} + +// NewFindConfigFdMaxIntervalMsParamsWithHTTPClient creates a new FindConfigFdMaxIntervalMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigFdMaxIntervalMsParamsWithHTTPClient(client *http.Client) *FindConfigFdMaxIntervalMsParams { + + return &FindConfigFdMaxIntervalMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigFdMaxIntervalMsParams contains all the parameters to send to the API endpoint +for the find config fd max interval ms operation typically these are written to a http.Request +*/ +type FindConfigFdMaxIntervalMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config fd max interval ms params +func (o *FindConfigFdMaxIntervalMsParams) WithTimeout(timeout time.Duration) *FindConfigFdMaxIntervalMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config fd max interval ms params +func (o *FindConfigFdMaxIntervalMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config fd max interval ms params +func (o *FindConfigFdMaxIntervalMsParams) WithContext(ctx context.Context) *FindConfigFdMaxIntervalMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config fd max interval ms params +func (o *FindConfigFdMaxIntervalMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config fd max interval ms params +func (o *FindConfigFdMaxIntervalMsParams) WithHTTPClient(client *http.Client) *FindConfigFdMaxIntervalMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config fd max interval ms params +func (o *FindConfigFdMaxIntervalMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigFdMaxIntervalMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_max_interval_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_max_interval_ms_responses.go new file mode 100644 index 00000000000..16ef3ee647a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_fd_max_interval_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigFdMaxIntervalMsReader is a Reader for the FindConfigFdMaxIntervalMs structure. +type FindConfigFdMaxIntervalMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigFdMaxIntervalMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigFdMaxIntervalMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigFdMaxIntervalMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigFdMaxIntervalMsOK creates a FindConfigFdMaxIntervalMsOK with default headers values +func NewFindConfigFdMaxIntervalMsOK() *FindConfigFdMaxIntervalMsOK { + return &FindConfigFdMaxIntervalMsOK{} +} + +/* +FindConfigFdMaxIntervalMsOK handles this case with default header values. + +Config value +*/ +type FindConfigFdMaxIntervalMsOK struct { + Payload int64 +} + +func (o *FindConfigFdMaxIntervalMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigFdMaxIntervalMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigFdMaxIntervalMsDefault creates a FindConfigFdMaxIntervalMsDefault with default headers values +func NewFindConfigFdMaxIntervalMsDefault(code int) *FindConfigFdMaxIntervalMsDefault { + return &FindConfigFdMaxIntervalMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigFdMaxIntervalMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigFdMaxIntervalMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config fd max interval ms default response +func (o *FindConfigFdMaxIntervalMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigFdMaxIntervalMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigFdMaxIntervalMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigFdMaxIntervalMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_file_cache_size_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_file_cache_size_in_mb_parameters.go new file mode 100644 index 00000000000..b97422ad7dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_file_cache_size_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigFileCacheSizeInMbParams creates a new FindConfigFileCacheSizeInMbParams object +// with the default values initialized. 
+func NewFindConfigFileCacheSizeInMbParams() *FindConfigFileCacheSizeInMbParams { + + return &FindConfigFileCacheSizeInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigFileCacheSizeInMbParamsWithTimeout creates a new FindConfigFileCacheSizeInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigFileCacheSizeInMbParamsWithTimeout(timeout time.Duration) *FindConfigFileCacheSizeInMbParams { + + return &FindConfigFileCacheSizeInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigFileCacheSizeInMbParamsWithContext creates a new FindConfigFileCacheSizeInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigFileCacheSizeInMbParamsWithContext(ctx context.Context) *FindConfigFileCacheSizeInMbParams { + + return &FindConfigFileCacheSizeInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigFileCacheSizeInMbParamsWithHTTPClient creates a new FindConfigFileCacheSizeInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigFileCacheSizeInMbParamsWithHTTPClient(client *http.Client) *FindConfigFileCacheSizeInMbParams { + + return &FindConfigFileCacheSizeInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigFileCacheSizeInMbParams contains all the parameters to send to the API endpoint +for the find config file cache size in mb operation typically these are written to a http.Request +*/ +type FindConfigFileCacheSizeInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config file cache size in mb params +func (o *FindConfigFileCacheSizeInMbParams) WithTimeout(timeout time.Duration) *FindConfigFileCacheSizeInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config file cache size in mb params +func (o *FindConfigFileCacheSizeInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config file cache size in mb params +func (o *FindConfigFileCacheSizeInMbParams) WithContext(ctx context.Context) *FindConfigFileCacheSizeInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config file cache size in mb params +func (o *FindConfigFileCacheSizeInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config file cache size in mb params +func (o *FindConfigFileCacheSizeInMbParams) WithHTTPClient(client *http.Client) *FindConfigFileCacheSizeInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config file cache size in mb params +func (o *FindConfigFileCacheSizeInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigFileCacheSizeInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_file_cache_size_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_file_cache_size_in_mb_responses.go new file mode 100644 index 00000000000..15104627076 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_file_cache_size_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigFileCacheSizeInMbReader is a Reader for the FindConfigFileCacheSizeInMb structure. +type FindConfigFileCacheSizeInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigFileCacheSizeInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigFileCacheSizeInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigFileCacheSizeInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigFileCacheSizeInMbOK creates a FindConfigFileCacheSizeInMbOK with default headers values +func NewFindConfigFileCacheSizeInMbOK() *FindConfigFileCacheSizeInMbOK { + return &FindConfigFileCacheSizeInMbOK{} +} + +/* +FindConfigFileCacheSizeInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigFileCacheSizeInMbOK struct { + Payload int64 +} + +func (o *FindConfigFileCacheSizeInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigFileCacheSizeInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigFileCacheSizeInMbDefault creates a FindConfigFileCacheSizeInMbDefault with default headers values +func NewFindConfigFileCacheSizeInMbDefault(code int) *FindConfigFileCacheSizeInMbDefault { + return &FindConfigFileCacheSizeInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigFileCacheSizeInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigFileCacheSizeInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config file cache size in mb default response +func (o *FindConfigFileCacheSizeInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigFileCacheSizeInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigFileCacheSizeInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigFileCacheSizeInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_enabled_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_enabled_parameters.go new file mode 100644 index 00000000000..85fed76ebd1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_enabled_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigHintedHandoffEnabledParams creates a new FindConfigHintedHandoffEnabledParams object +// with the default values initialized. 
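// ---------------------------------------------------------------------------
// Editorial sketch (illustration only; not part of the vendored files in this
// patch). Every parameters/responses pair added by this diff follows the same
// go-swagger template, so one usage example covers them all. It assumes the
// operation client generated alongside these files exposes config.New and a
// FindConfigHintedHandoffEnabled method, which is go-swagger's usual output;
// the host, port, and base path below are placeholders, not values taken from
// this diff.

package main

import (
	"context"
	"fmt"
	"time"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	// Point the go-openapi runtime at a node's config API (placeholder address).
	transport := httptransport.New("127.0.0.1:10000", "/v2", []string{"http"})
	c := config.New(transport, strfmt.Default)

	// Each getter takes the same three optional knobs: timeout, context, and a
	// custom *http.Client, set through the fluent With* methods shown above.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	params := config.NewFindConfigHintedHandoffEnabledParams().
		WithContext(ctx).
		WithTimeout(2 * time.Second)

	// A 200 yields the typed OK payload; any other status surfaces as the
	// operation's Default response, which implements error.
	resp, err := c.FindConfigHintedHandoffEnabled(params)
	if err != nil {
		fmt.Println("config read failed:", err) // e.g. "agent [HTTP 500] ..."
		return
	}
	fmt.Println("hinted_handoff_enabled =", resp.GetPayload()) // string payload
}
// ---------------------------------------------------------------------------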
+func NewFindConfigHintedHandoffEnabledParams() *FindConfigHintedHandoffEnabledParams { + + return &FindConfigHintedHandoffEnabledParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigHintedHandoffEnabledParamsWithTimeout creates a new FindConfigHintedHandoffEnabledParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigHintedHandoffEnabledParamsWithTimeout(timeout time.Duration) *FindConfigHintedHandoffEnabledParams { + + return &FindConfigHintedHandoffEnabledParams{ + + timeout: timeout, + } +} + +// NewFindConfigHintedHandoffEnabledParamsWithContext creates a new FindConfigHintedHandoffEnabledParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigHintedHandoffEnabledParamsWithContext(ctx context.Context) *FindConfigHintedHandoffEnabledParams { + + return &FindConfigHintedHandoffEnabledParams{ + + Context: ctx, + } +} + +// NewFindConfigHintedHandoffEnabledParamsWithHTTPClient creates a new FindConfigHintedHandoffEnabledParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigHintedHandoffEnabledParamsWithHTTPClient(client *http.Client) *FindConfigHintedHandoffEnabledParams { + + return &FindConfigHintedHandoffEnabledParams{ + HTTPClient: client, + } +} + +/* +FindConfigHintedHandoffEnabledParams contains all the parameters to send to the API endpoint +for the find config hinted handoff enabled operation typically these are written to a http.Request +*/ +type FindConfigHintedHandoffEnabledParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config hinted handoff enabled params +func (o *FindConfigHintedHandoffEnabledParams) WithTimeout(timeout time.Duration) *FindConfigHintedHandoffEnabledParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config hinted handoff enabled params +func (o *FindConfigHintedHandoffEnabledParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config hinted handoff enabled params +func (o *FindConfigHintedHandoffEnabledParams) WithContext(ctx context.Context) *FindConfigHintedHandoffEnabledParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config hinted handoff enabled params +func (o *FindConfigHintedHandoffEnabledParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config hinted handoff enabled params +func (o *FindConfigHintedHandoffEnabledParams) WithHTTPClient(client *http.Client) *FindConfigHintedHandoffEnabledParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config hinted handoff enabled params +func (o *FindConfigHintedHandoffEnabledParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigHintedHandoffEnabledParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_enabled_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_enabled_responses.go new file mode 100644 index 00000000000..9e228552b0c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_enabled_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigHintedHandoffEnabledReader is a Reader for the FindConfigHintedHandoffEnabled structure. +type FindConfigHintedHandoffEnabledReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigHintedHandoffEnabledReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigHintedHandoffEnabledOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigHintedHandoffEnabledDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigHintedHandoffEnabledOK creates a FindConfigHintedHandoffEnabledOK with default headers values +func NewFindConfigHintedHandoffEnabledOK() *FindConfigHintedHandoffEnabledOK { + return &FindConfigHintedHandoffEnabledOK{} +} + +/* +FindConfigHintedHandoffEnabledOK handles this case with default header values. + +Config value +*/ +type FindConfigHintedHandoffEnabledOK struct { + Payload string +} + +func (o *FindConfigHintedHandoffEnabledOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigHintedHandoffEnabledOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigHintedHandoffEnabledDefault creates a FindConfigHintedHandoffEnabledDefault with default headers values +func NewFindConfigHintedHandoffEnabledDefault(code int) *FindConfigHintedHandoffEnabledDefault { + return &FindConfigHintedHandoffEnabledDefault{ + _statusCode: code, + } +} + +/* +FindConfigHintedHandoffEnabledDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigHintedHandoffEnabledDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config hinted handoff enabled default response +func (o *FindConfigHintedHandoffEnabledDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigHintedHandoffEnabledDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigHintedHandoffEnabledDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigHintedHandoffEnabledDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_throttle_in_kb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_throttle_in_kb_parameters.go new file mode 100644 index 00000000000..f87c75fc8fd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_throttle_in_kb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigHintedHandoffThrottleInKbParams creates a new FindConfigHintedHandoffThrottleInKbParams object +// with the default values initialized. 
+func NewFindConfigHintedHandoffThrottleInKbParams() *FindConfigHintedHandoffThrottleInKbParams { + + return &FindConfigHintedHandoffThrottleInKbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigHintedHandoffThrottleInKbParamsWithTimeout creates a new FindConfigHintedHandoffThrottleInKbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigHintedHandoffThrottleInKbParamsWithTimeout(timeout time.Duration) *FindConfigHintedHandoffThrottleInKbParams { + + return &FindConfigHintedHandoffThrottleInKbParams{ + + timeout: timeout, + } +} + +// NewFindConfigHintedHandoffThrottleInKbParamsWithContext creates a new FindConfigHintedHandoffThrottleInKbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigHintedHandoffThrottleInKbParamsWithContext(ctx context.Context) *FindConfigHintedHandoffThrottleInKbParams { + + return &FindConfigHintedHandoffThrottleInKbParams{ + + Context: ctx, + } +} + +// NewFindConfigHintedHandoffThrottleInKbParamsWithHTTPClient creates a new FindConfigHintedHandoffThrottleInKbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigHintedHandoffThrottleInKbParamsWithHTTPClient(client *http.Client) *FindConfigHintedHandoffThrottleInKbParams { + + return &FindConfigHintedHandoffThrottleInKbParams{ + HTTPClient: client, + } +} + +/* +FindConfigHintedHandoffThrottleInKbParams contains all the parameters to send to the API endpoint +for the find config hinted handoff throttle in kb operation typically these are written to a http.Request +*/ +type FindConfigHintedHandoffThrottleInKbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config hinted handoff throttle in kb params +func (o *FindConfigHintedHandoffThrottleInKbParams) WithTimeout(timeout time.Duration) *FindConfigHintedHandoffThrottleInKbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config hinted handoff throttle in kb params +func (o *FindConfigHintedHandoffThrottleInKbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config hinted handoff throttle in kb params +func (o *FindConfigHintedHandoffThrottleInKbParams) WithContext(ctx context.Context) *FindConfigHintedHandoffThrottleInKbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config hinted handoff throttle in kb params +func (o *FindConfigHintedHandoffThrottleInKbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config hinted handoff throttle in kb params +func (o *FindConfigHintedHandoffThrottleInKbParams) WithHTTPClient(client *http.Client) *FindConfigHintedHandoffThrottleInKbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config hinted handoff throttle in kb params +func (o *FindConfigHintedHandoffThrottleInKbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigHintedHandoffThrottleInKbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_throttle_in_kb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_throttle_in_kb_responses.go new file mode 100644 index 00000000000..30bc8094a24 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hinted_handoff_throttle_in_kb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigHintedHandoffThrottleInKbReader is a Reader for the FindConfigHintedHandoffThrottleInKb structure. +type FindConfigHintedHandoffThrottleInKbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigHintedHandoffThrottleInKbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigHintedHandoffThrottleInKbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigHintedHandoffThrottleInKbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigHintedHandoffThrottleInKbOK creates a FindConfigHintedHandoffThrottleInKbOK with default headers values +func NewFindConfigHintedHandoffThrottleInKbOK() *FindConfigHintedHandoffThrottleInKbOK { + return &FindConfigHintedHandoffThrottleInKbOK{} +} + +/* +FindConfigHintedHandoffThrottleInKbOK handles this case with default header values. + +Config value +*/ +type FindConfigHintedHandoffThrottleInKbOK struct { + Payload int64 +} + +func (o *FindConfigHintedHandoffThrottleInKbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigHintedHandoffThrottleInKbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigHintedHandoffThrottleInKbDefault creates a FindConfigHintedHandoffThrottleInKbDefault with default headers values +func NewFindConfigHintedHandoffThrottleInKbDefault(code int) *FindConfigHintedHandoffThrottleInKbDefault { + return &FindConfigHintedHandoffThrottleInKbDefault{ + _statusCode: code, + } +} + +/* +FindConfigHintedHandoffThrottleInKbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigHintedHandoffThrottleInKbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config hinted handoff throttle in kb default response +func (o *FindConfigHintedHandoffThrottleInKbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigHintedHandoffThrottleInKbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigHintedHandoffThrottleInKbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigHintedHandoffThrottleInKbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hints_directory_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hints_directory_parameters.go new file mode 100644 index 00000000000..ef3bf712fd3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hints_directory_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigHintsDirectoryParams creates a new FindConfigHintsDirectoryParams object +// with the default values initialized. 
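// ---------------------------------------------------------------------------
// Editorial sketch (illustration only; not part of the vendored files in this
// patch). Besides timeout and context, each params object carries an optional
// *http.Client; go-swagger's generated client methods normally pass
// params.HTTPClient through to the transport, so callers can pin their own
// pooling or TLS settings per request. Client wiring mirrors the earlier
// sketch; all names are assumptions based on the generator's conventions.

package main

import (
	"fmt"
	"net/http"
	"time"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	// Caller-owned HTTP client, e.g. with its own overall timeout.
	hc := &http.Client{Timeout: 10 * time.Second}

	transport := httptransport.New("127.0.0.1:10000", "/v2", []string{"http"})
	c := config.New(transport, strfmt.Default)

	// Equivalent to NewFindConfigHintsDirectoryParams().WithHTTPClient(hc).
	params := config.NewFindConfigHintsDirectoryParamsWithHTTPClient(hc)

	resp, err := c.FindConfigHintsDirectory(params)
	if err != nil {
		fmt.Println("config read failed:", err)
		return
	}
	fmt.Println("hints_directory =", resp.GetPayload())
}
// ---------------------------------------------------------------------------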
+func NewFindConfigHintsDirectoryParams() *FindConfigHintsDirectoryParams { + + return &FindConfigHintsDirectoryParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigHintsDirectoryParamsWithTimeout creates a new FindConfigHintsDirectoryParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigHintsDirectoryParamsWithTimeout(timeout time.Duration) *FindConfigHintsDirectoryParams { + + return &FindConfigHintsDirectoryParams{ + + timeout: timeout, + } +} + +// NewFindConfigHintsDirectoryParamsWithContext creates a new FindConfigHintsDirectoryParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigHintsDirectoryParamsWithContext(ctx context.Context) *FindConfigHintsDirectoryParams { + + return &FindConfigHintsDirectoryParams{ + + Context: ctx, + } +} + +// NewFindConfigHintsDirectoryParamsWithHTTPClient creates a new FindConfigHintsDirectoryParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigHintsDirectoryParamsWithHTTPClient(client *http.Client) *FindConfigHintsDirectoryParams { + + return &FindConfigHintsDirectoryParams{ + HTTPClient: client, + } +} + +/* +FindConfigHintsDirectoryParams contains all the parameters to send to the API endpoint +for the find config hints directory operation typically these are written to a http.Request +*/ +type FindConfigHintsDirectoryParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config hints directory params +func (o *FindConfigHintsDirectoryParams) WithTimeout(timeout time.Duration) *FindConfigHintsDirectoryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config hints directory params +func (o *FindConfigHintsDirectoryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config hints directory params +func (o *FindConfigHintsDirectoryParams) WithContext(ctx context.Context) *FindConfigHintsDirectoryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config hints directory params +func (o *FindConfigHintsDirectoryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config hints directory params +func (o *FindConfigHintsDirectoryParams) WithHTTPClient(client *http.Client) *FindConfigHintsDirectoryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config hints directory params +func (o *FindConfigHintsDirectoryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigHintsDirectoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hints_directory_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hints_directory_responses.go new file mode 100644 index 00000000000..864679aad7a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_hints_directory_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigHintsDirectoryReader is a Reader for the FindConfigHintsDirectory structure. +type FindConfigHintsDirectoryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigHintsDirectoryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigHintsDirectoryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigHintsDirectoryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigHintsDirectoryOK creates a FindConfigHintsDirectoryOK with default headers values +func NewFindConfigHintsDirectoryOK() *FindConfigHintsDirectoryOK { + return &FindConfigHintsDirectoryOK{} +} + +/* +FindConfigHintsDirectoryOK handles this case with default header values. + +Config value +*/ +type FindConfigHintsDirectoryOK struct { + Payload string +} + +func (o *FindConfigHintsDirectoryOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigHintsDirectoryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigHintsDirectoryDefault creates a FindConfigHintsDirectoryDefault with default headers values +func NewFindConfigHintsDirectoryDefault(code int) *FindConfigHintsDirectoryDefault { + return &FindConfigHintsDirectoryDefault{ + _statusCode: code, + } +} + +/* +FindConfigHintsDirectoryDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigHintsDirectoryDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config hints directory default response +func (o *FindConfigHintsDirectoryDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigHintsDirectoryDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigHintsDirectoryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigHintsDirectoryDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_in_memory_compaction_limit_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_in_memory_compaction_limit_in_mb_parameters.go new file mode 100644 index 00000000000..c5ef4c8989c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_in_memory_compaction_limit_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInMemoryCompactionLimitInMbParams creates a new FindConfigInMemoryCompactionLimitInMbParams object +// with the default values initialized. 
+func NewFindConfigInMemoryCompactionLimitInMbParams() *FindConfigInMemoryCompactionLimitInMbParams { + + return &FindConfigInMemoryCompactionLimitInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInMemoryCompactionLimitInMbParamsWithTimeout creates a new FindConfigInMemoryCompactionLimitInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInMemoryCompactionLimitInMbParamsWithTimeout(timeout time.Duration) *FindConfigInMemoryCompactionLimitInMbParams { + + return &FindConfigInMemoryCompactionLimitInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigInMemoryCompactionLimitInMbParamsWithContext creates a new FindConfigInMemoryCompactionLimitInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInMemoryCompactionLimitInMbParamsWithContext(ctx context.Context) *FindConfigInMemoryCompactionLimitInMbParams { + + return &FindConfigInMemoryCompactionLimitInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigInMemoryCompactionLimitInMbParamsWithHTTPClient creates a new FindConfigInMemoryCompactionLimitInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInMemoryCompactionLimitInMbParamsWithHTTPClient(client *http.Client) *FindConfigInMemoryCompactionLimitInMbParams { + + return &FindConfigInMemoryCompactionLimitInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigInMemoryCompactionLimitInMbParams contains all the parameters to send to the API endpoint +for the find config in memory compaction limit in mb operation typically these are written to a http.Request +*/ +type FindConfigInMemoryCompactionLimitInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config in memory compaction limit in mb params +func (o *FindConfigInMemoryCompactionLimitInMbParams) WithTimeout(timeout time.Duration) *FindConfigInMemoryCompactionLimitInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config in memory compaction limit in mb params +func (o *FindConfigInMemoryCompactionLimitInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config in memory compaction limit in mb params +func (o *FindConfigInMemoryCompactionLimitInMbParams) WithContext(ctx context.Context) *FindConfigInMemoryCompactionLimitInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config in memory compaction limit in mb params +func (o *FindConfigInMemoryCompactionLimitInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config in memory compaction limit in mb params +func (o *FindConfigInMemoryCompactionLimitInMbParams) WithHTTPClient(client *http.Client) *FindConfigInMemoryCompactionLimitInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config in memory compaction limit in mb params +func (o *FindConfigInMemoryCompactionLimitInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInMemoryCompactionLimitInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + 
return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_in_memory_compaction_limit_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_in_memory_compaction_limit_in_mb_responses.go new file mode 100644 index 00000000000..6e22d0225cd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_in_memory_compaction_limit_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInMemoryCompactionLimitInMbReader is a Reader for the FindConfigInMemoryCompactionLimitInMb structure. +type FindConfigInMemoryCompactionLimitInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInMemoryCompactionLimitInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInMemoryCompactionLimitInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInMemoryCompactionLimitInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInMemoryCompactionLimitInMbOK creates a FindConfigInMemoryCompactionLimitInMbOK with default headers values +func NewFindConfigInMemoryCompactionLimitInMbOK() *FindConfigInMemoryCompactionLimitInMbOK { + return &FindConfigInMemoryCompactionLimitInMbOK{} +} + +/* +FindConfigInMemoryCompactionLimitInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigInMemoryCompactionLimitInMbOK struct { + Payload int64 +} + +func (o *FindConfigInMemoryCompactionLimitInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigInMemoryCompactionLimitInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInMemoryCompactionLimitInMbDefault creates a FindConfigInMemoryCompactionLimitInMbDefault with default headers values +func NewFindConfigInMemoryCompactionLimitInMbDefault(code int) *FindConfigInMemoryCompactionLimitInMbDefault { + return &FindConfigInMemoryCompactionLimitInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigInMemoryCompactionLimitInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigInMemoryCompactionLimitInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config in memory compaction limit in mb default response +func (o *FindConfigInMemoryCompactionLimitInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInMemoryCompactionLimitInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInMemoryCompactionLimitInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInMemoryCompactionLimitInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_incremental_backups_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_incremental_backups_parameters.go new file mode 100644 index 00000000000..b55f93f0a54 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_incremental_backups_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigIncrementalBackupsParams creates a new FindConfigIncrementalBackupsParams object +// with the default values initialized. 
+func NewFindConfigIncrementalBackupsParams() *FindConfigIncrementalBackupsParams { + + return &FindConfigIncrementalBackupsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigIncrementalBackupsParamsWithTimeout creates a new FindConfigIncrementalBackupsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigIncrementalBackupsParamsWithTimeout(timeout time.Duration) *FindConfigIncrementalBackupsParams { + + return &FindConfigIncrementalBackupsParams{ + + timeout: timeout, + } +} + +// NewFindConfigIncrementalBackupsParamsWithContext creates a new FindConfigIncrementalBackupsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigIncrementalBackupsParamsWithContext(ctx context.Context) *FindConfigIncrementalBackupsParams { + + return &FindConfigIncrementalBackupsParams{ + + Context: ctx, + } +} + +// NewFindConfigIncrementalBackupsParamsWithHTTPClient creates a new FindConfigIncrementalBackupsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigIncrementalBackupsParamsWithHTTPClient(client *http.Client) *FindConfigIncrementalBackupsParams { + + return &FindConfigIncrementalBackupsParams{ + HTTPClient: client, + } +} + +/* +FindConfigIncrementalBackupsParams contains all the parameters to send to the API endpoint +for the find config incremental backups operation typically these are written to a http.Request +*/ +type FindConfigIncrementalBackupsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config incremental backups params +func (o *FindConfigIncrementalBackupsParams) WithTimeout(timeout time.Duration) *FindConfigIncrementalBackupsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config incremental backups params +func (o *FindConfigIncrementalBackupsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config incremental backups params +func (o *FindConfigIncrementalBackupsParams) WithContext(ctx context.Context) *FindConfigIncrementalBackupsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config incremental backups params +func (o *FindConfigIncrementalBackupsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config incremental backups params +func (o *FindConfigIncrementalBackupsParams) WithHTTPClient(client *http.Client) *FindConfigIncrementalBackupsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config incremental backups params +func (o *FindConfigIncrementalBackupsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigIncrementalBackupsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_incremental_backups_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_incremental_backups_responses.go new file mode 100644 index 00000000000..28e8d8f8a3d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_incremental_backups_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigIncrementalBackupsReader is a Reader for the FindConfigIncrementalBackups structure. +type FindConfigIncrementalBackupsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigIncrementalBackupsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigIncrementalBackupsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigIncrementalBackupsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigIncrementalBackupsOK creates a FindConfigIncrementalBackupsOK with default headers values +func NewFindConfigIncrementalBackupsOK() *FindConfigIncrementalBackupsOK { + return &FindConfigIncrementalBackupsOK{} +} + +/* +FindConfigIncrementalBackupsOK handles this case with default header values. + +Config value +*/ +type FindConfigIncrementalBackupsOK struct { + Payload bool +} + +func (o *FindConfigIncrementalBackupsOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigIncrementalBackupsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigIncrementalBackupsDefault creates a FindConfigIncrementalBackupsDefault with default headers values +func NewFindConfigIncrementalBackupsDefault(code int) *FindConfigIncrementalBackupsDefault { + return &FindConfigIncrementalBackupsDefault{ + _statusCode: code, + } +} + +/* +FindConfigIncrementalBackupsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigIncrementalBackupsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config incremental backups default response +func (o *FindConfigIncrementalBackupsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigIncrementalBackupsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigIncrementalBackupsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigIncrementalBackupsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_capacity_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_capacity_in_mb_parameters.go new file mode 100644 index 00000000000..c49cb4e6e77 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_capacity_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigIndexSummaryCapacityInMbParams creates a new FindConfigIndexSummaryCapacityInMbParams object +// with the default values initialized. 
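// ---------------------------------------------------------------------------
// Editorial sketch (illustration only; not part of the vendored files in this
// patch). The Default responses above implement error via Error(), so a
// caller can recover the HTTP status code and the ErrorModel payload with
// errors.As. The *config.Client type and method shape are assumed per
// go-swagger's usual output, as in the earlier sketches.

package example

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

// reportIncrementalBackups is a hypothetical helper showing how to unwrap the
// typed default response from the error returned by a generated call.
func reportIncrementalBackups(c *config.Client) {
	resp, err := c.FindConfigIncrementalBackups(config.NewFindConfigIncrementalBackupsParams())
	if err != nil {
		var def *config.FindConfigIncrementalBackupsDefault
		if errors.As(err, &def) {
			// Code() and GetPayload() come straight from the generated type above.
			fmt.Printf("agent returned HTTP %d: %s\n", def.Code(), def.GetPayload().Message)
		} else {
			fmt.Println("transport error:", err)
		}
		return
	}
	fmt.Println("incremental_backups =", resp.GetPayload()) // bool payload
}
// ---------------------------------------------------------------------------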
+func NewFindConfigIndexSummaryCapacityInMbParams() *FindConfigIndexSummaryCapacityInMbParams { + + return &FindConfigIndexSummaryCapacityInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigIndexSummaryCapacityInMbParamsWithTimeout creates a new FindConfigIndexSummaryCapacityInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigIndexSummaryCapacityInMbParamsWithTimeout(timeout time.Duration) *FindConfigIndexSummaryCapacityInMbParams { + + return &FindConfigIndexSummaryCapacityInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigIndexSummaryCapacityInMbParamsWithContext creates a new FindConfigIndexSummaryCapacityInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigIndexSummaryCapacityInMbParamsWithContext(ctx context.Context) *FindConfigIndexSummaryCapacityInMbParams { + + return &FindConfigIndexSummaryCapacityInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigIndexSummaryCapacityInMbParamsWithHTTPClient creates a new FindConfigIndexSummaryCapacityInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigIndexSummaryCapacityInMbParamsWithHTTPClient(client *http.Client) *FindConfigIndexSummaryCapacityInMbParams { + + return &FindConfigIndexSummaryCapacityInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigIndexSummaryCapacityInMbParams contains all the parameters to send to the API endpoint +for the find config index summary capacity in mb operation typically these are written to a http.Request +*/ +type FindConfigIndexSummaryCapacityInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config index summary capacity in mb params +func (o *FindConfigIndexSummaryCapacityInMbParams) WithTimeout(timeout time.Duration) *FindConfigIndexSummaryCapacityInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config index summary capacity in mb params +func (o *FindConfigIndexSummaryCapacityInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config index summary capacity in mb params +func (o *FindConfigIndexSummaryCapacityInMbParams) WithContext(ctx context.Context) *FindConfigIndexSummaryCapacityInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config index summary capacity in mb params +func (o *FindConfigIndexSummaryCapacityInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config index summary capacity in mb params +func (o *FindConfigIndexSummaryCapacityInMbParams) WithHTTPClient(client *http.Client) *FindConfigIndexSummaryCapacityInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config index summary capacity in mb params +func (o *FindConfigIndexSummaryCapacityInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigIndexSummaryCapacityInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_capacity_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_capacity_in_mb_responses.go new file mode 100644 index 00000000000..d6d6e40fc3f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_capacity_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigIndexSummaryCapacityInMbReader is a Reader for the FindConfigIndexSummaryCapacityInMb structure. +type FindConfigIndexSummaryCapacityInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigIndexSummaryCapacityInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigIndexSummaryCapacityInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigIndexSummaryCapacityInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigIndexSummaryCapacityInMbOK creates a FindConfigIndexSummaryCapacityInMbOK with default headers values +func NewFindConfigIndexSummaryCapacityInMbOK() *FindConfigIndexSummaryCapacityInMbOK { + return &FindConfigIndexSummaryCapacityInMbOK{} +} + +/* +FindConfigIndexSummaryCapacityInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigIndexSummaryCapacityInMbOK struct { + Payload int64 +} + +func (o *FindConfigIndexSummaryCapacityInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigIndexSummaryCapacityInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigIndexSummaryCapacityInMbDefault creates a FindConfigIndexSummaryCapacityInMbDefault with default headers values +func NewFindConfigIndexSummaryCapacityInMbDefault(code int) *FindConfigIndexSummaryCapacityInMbDefault { + return &FindConfigIndexSummaryCapacityInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigIndexSummaryCapacityInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigIndexSummaryCapacityInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config index summary capacity in mb default response +func (o *FindConfigIndexSummaryCapacityInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigIndexSummaryCapacityInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigIndexSummaryCapacityInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigIndexSummaryCapacityInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_resize_interval_in_minutes_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_resize_interval_in_minutes_parameters.go new file mode 100644 index 00000000000..f038617ef97 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_resize_interval_in_minutes_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigIndexSummaryResizeIntervalInMinutesParams creates a new FindConfigIndexSummaryResizeIntervalInMinutesParams object +// with the default values initialized. 
+func NewFindConfigIndexSummaryResizeIntervalInMinutesParams() *FindConfigIndexSummaryResizeIntervalInMinutesParams { + + return &FindConfigIndexSummaryResizeIntervalInMinutesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigIndexSummaryResizeIntervalInMinutesParamsWithTimeout creates a new FindConfigIndexSummaryResizeIntervalInMinutesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigIndexSummaryResizeIntervalInMinutesParamsWithTimeout(timeout time.Duration) *FindConfigIndexSummaryResizeIntervalInMinutesParams { + + return &FindConfigIndexSummaryResizeIntervalInMinutesParams{ + + timeout: timeout, + } +} + +// NewFindConfigIndexSummaryResizeIntervalInMinutesParamsWithContext creates a new FindConfigIndexSummaryResizeIntervalInMinutesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigIndexSummaryResizeIntervalInMinutesParamsWithContext(ctx context.Context) *FindConfigIndexSummaryResizeIntervalInMinutesParams { + + return &FindConfigIndexSummaryResizeIntervalInMinutesParams{ + + Context: ctx, + } +} + +// NewFindConfigIndexSummaryResizeIntervalInMinutesParamsWithHTTPClient creates a new FindConfigIndexSummaryResizeIntervalInMinutesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigIndexSummaryResizeIntervalInMinutesParamsWithHTTPClient(client *http.Client) *FindConfigIndexSummaryResizeIntervalInMinutesParams { + + return &FindConfigIndexSummaryResizeIntervalInMinutesParams{ + HTTPClient: client, + } +} + +/* +FindConfigIndexSummaryResizeIntervalInMinutesParams contains all the parameters to send to the API endpoint +for the find config index summary resize interval in minutes operation typically these are written to a http.Request +*/ +type FindConfigIndexSummaryResizeIntervalInMinutesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config index summary resize interval in minutes params +func (o *FindConfigIndexSummaryResizeIntervalInMinutesParams) WithTimeout(timeout time.Duration) *FindConfigIndexSummaryResizeIntervalInMinutesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config index summary resize interval in minutes params +func (o *FindConfigIndexSummaryResizeIntervalInMinutesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config index summary resize interval in minutes params +func (o *FindConfigIndexSummaryResizeIntervalInMinutesParams) WithContext(ctx context.Context) *FindConfigIndexSummaryResizeIntervalInMinutesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config index summary resize interval in minutes params +func (o *FindConfigIndexSummaryResizeIntervalInMinutesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config index summary resize interval in minutes params +func (o *FindConfigIndexSummaryResizeIntervalInMinutesParams) WithHTTPClient(client *http.Client) *FindConfigIndexSummaryResizeIntervalInMinutesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config index summary resize interval in minutes params +func (o *FindConfigIndexSummaryResizeIntervalInMinutesParams) 
SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigIndexSummaryResizeIntervalInMinutesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_resize_interval_in_minutes_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_resize_interval_in_minutes_responses.go new file mode 100644 index 00000000000..e15812013dc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_index_summary_resize_interval_in_minutes_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigIndexSummaryResizeIntervalInMinutesReader is a Reader for the FindConfigIndexSummaryResizeIntervalInMinutes structure. +type FindConfigIndexSummaryResizeIntervalInMinutesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigIndexSummaryResizeIntervalInMinutesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigIndexSummaryResizeIntervalInMinutesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigIndexSummaryResizeIntervalInMinutesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigIndexSummaryResizeIntervalInMinutesOK creates a FindConfigIndexSummaryResizeIntervalInMinutesOK with default headers values +func NewFindConfigIndexSummaryResizeIntervalInMinutesOK() *FindConfigIndexSummaryResizeIntervalInMinutesOK { + return &FindConfigIndexSummaryResizeIntervalInMinutesOK{} +} + +/* +FindConfigIndexSummaryResizeIntervalInMinutesOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigIndexSummaryResizeIntervalInMinutesOK struct { + Payload int64 +} + +func (o *FindConfigIndexSummaryResizeIntervalInMinutesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigIndexSummaryResizeIntervalInMinutesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigIndexSummaryResizeIntervalInMinutesDefault creates a FindConfigIndexSummaryResizeIntervalInMinutesDefault with default headers values +func NewFindConfigIndexSummaryResizeIntervalInMinutesDefault(code int) *FindConfigIndexSummaryResizeIntervalInMinutesDefault { + return &FindConfigIndexSummaryResizeIntervalInMinutesDefault{ + _statusCode: code, + } +} + +/* +FindConfigIndexSummaryResizeIntervalInMinutesDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigIndexSummaryResizeIntervalInMinutesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config index summary resize interval in minutes default response +func (o *FindConfigIndexSummaryResizeIntervalInMinutesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigIndexSummaryResizeIntervalInMinutesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigIndexSummaryResizeIntervalInMinutesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigIndexSummaryResizeIntervalInMinutesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_initial_token_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_initial_token_parameters.go new file mode 100644 index 00000000000..037ed282129 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_initial_token_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInitialTokenParams creates a new FindConfigInitialTokenParams object +// with the default values initialized. 
+func NewFindConfigInitialTokenParams() *FindConfigInitialTokenParams { + + return &FindConfigInitialTokenParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInitialTokenParamsWithTimeout creates a new FindConfigInitialTokenParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInitialTokenParamsWithTimeout(timeout time.Duration) *FindConfigInitialTokenParams { + + return &FindConfigInitialTokenParams{ + + timeout: timeout, + } +} + +// NewFindConfigInitialTokenParamsWithContext creates a new FindConfigInitialTokenParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInitialTokenParamsWithContext(ctx context.Context) *FindConfigInitialTokenParams { + + return &FindConfigInitialTokenParams{ + + Context: ctx, + } +} + +// NewFindConfigInitialTokenParamsWithHTTPClient creates a new FindConfigInitialTokenParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInitialTokenParamsWithHTTPClient(client *http.Client) *FindConfigInitialTokenParams { + + return &FindConfigInitialTokenParams{ + HTTPClient: client, + } +} + +/* +FindConfigInitialTokenParams contains all the parameters to send to the API endpoint +for the find config initial token operation typically these are written to a http.Request +*/ +type FindConfigInitialTokenParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config initial token params +func (o *FindConfigInitialTokenParams) WithTimeout(timeout time.Duration) *FindConfigInitialTokenParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config initial token params +func (o *FindConfigInitialTokenParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config initial token params +func (o *FindConfigInitialTokenParams) WithContext(ctx context.Context) *FindConfigInitialTokenParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config initial token params +func (o *FindConfigInitialTokenParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config initial token params +func (o *FindConfigInitialTokenParams) WithHTTPClient(client *http.Client) *FindConfigInitialTokenParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config initial token params +func (o *FindConfigInitialTokenParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInitialTokenParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_initial_token_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_initial_token_responses.go new file mode 100644 index 00000000000..f2ea2c7bc4b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_initial_token_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInitialTokenReader is a Reader for the FindConfigInitialToken structure. +type FindConfigInitialTokenReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInitialTokenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInitialTokenOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInitialTokenDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInitialTokenOK creates a FindConfigInitialTokenOK with default headers values +func NewFindConfigInitialTokenOK() *FindConfigInitialTokenOK { + return &FindConfigInitialTokenOK{} +} + +/* +FindConfigInitialTokenOK handles this case with default header values. + +Config value +*/ +type FindConfigInitialTokenOK struct { + Payload string +} + +func (o *FindConfigInitialTokenOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigInitialTokenOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInitialTokenDefault creates a FindConfigInitialTokenDefault with default headers values +func NewFindConfigInitialTokenDefault(code int) *FindConfigInitialTokenDefault { + return &FindConfigInitialTokenDefault{ + _statusCode: code, + } +} + +/* +FindConfigInitialTokenDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigInitialTokenDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config initial token default response +func (o *FindConfigInitialTokenDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInitialTokenDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInitialTokenDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInitialTokenDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_stream_throughput_outbound_megabits_per_sec_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_stream_throughput_outbound_megabits_per_sec_parameters.go new file mode 100644 index 00000000000..8706a0ab78a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_stream_throughput_outbound_megabits_per_sec_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams creates a new FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized. 
+func NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams() *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParamsWithTimeout creates a new FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParamsWithTimeout(timeout time.Duration) *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams{ + + timeout: timeout, + } +} + +// NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParamsWithContext creates a new FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParamsWithContext(ctx context.Context) *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams{ + + Context: ctx, + } +} + +// NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParamsWithHTTPClient creates a new FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecParamsWithHTTPClient(client *http.Client) *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams{ + HTTPClient: client, + } +} + +/* +FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams contains all the parameters to send to the API endpoint +for the find config inter dc stream throughput outbound megabits per sec operation typically these are written to a http.Request +*/ +type FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config inter dc stream throughput outbound megabits per sec params +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) WithTimeout(timeout time.Duration) *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config inter dc stream throughput outbound megabits per sec params +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config inter dc stream throughput outbound megabits per sec params +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) WithContext(ctx context.Context) *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config inter dc stream throughput outbound megabits per sec params +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config inter dc stream throughput outbound megabits per sec params +func (o 
*FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) WithHTTPClient(client *http.Client) *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config inter dc stream throughput outbound megabits per sec params +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_stream_throughput_outbound_megabits_per_sec_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_stream_throughput_outbound_megabits_per_sec_responses.go new file mode 100644 index 00000000000..da132f051d9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_stream_throughput_outbound_megabits_per_sec_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInterDcStreamThroughputOutboundMegabitsPerSecReader is a Reader for the FindConfigInterDcStreamThroughputOutboundMegabitsPerSec structure. +type FindConfigInterDcStreamThroughputOutboundMegabitsPerSecReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK creates a FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK with default headers values +func NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK() *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK { + return &FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK{} +} + +/* +FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK struct { + Payload int64 +} + +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault creates a FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault with default headers values +func NewFindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault(code int) *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault { + return &FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault{ + _statusCode: code, + } +} + +/* +FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config inter dc stream throughput outbound megabits per sec default response +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInterDcStreamThroughputOutboundMegabitsPerSecDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_tcp_nodelay_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_tcp_nodelay_parameters.go new file mode 100644 index 00000000000..d6b6054f22b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_tcp_nodelay_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInterDcTCPNodelayParams creates a new FindConfigInterDcTCPNodelayParams object +// with the default values initialized. 
+func NewFindConfigInterDcTCPNodelayParams() *FindConfigInterDcTCPNodelayParams { + + return &FindConfigInterDcTCPNodelayParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInterDcTCPNodelayParamsWithTimeout creates a new FindConfigInterDcTCPNodelayParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInterDcTCPNodelayParamsWithTimeout(timeout time.Duration) *FindConfigInterDcTCPNodelayParams { + + return &FindConfigInterDcTCPNodelayParams{ + + timeout: timeout, + } +} + +// NewFindConfigInterDcTCPNodelayParamsWithContext creates a new FindConfigInterDcTCPNodelayParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInterDcTCPNodelayParamsWithContext(ctx context.Context) *FindConfigInterDcTCPNodelayParams { + + return &FindConfigInterDcTCPNodelayParams{ + + Context: ctx, + } +} + +// NewFindConfigInterDcTCPNodelayParamsWithHTTPClient creates a new FindConfigInterDcTCPNodelayParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInterDcTCPNodelayParamsWithHTTPClient(client *http.Client) *FindConfigInterDcTCPNodelayParams { + + return &FindConfigInterDcTCPNodelayParams{ + HTTPClient: client, + } +} + +/* +FindConfigInterDcTCPNodelayParams contains all the parameters to send to the API endpoint +for the find config inter dc tcp nodelay operation typically these are written to a http.Request +*/ +type FindConfigInterDcTCPNodelayParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config inter dc tcp nodelay params +func (o *FindConfigInterDcTCPNodelayParams) WithTimeout(timeout time.Duration) *FindConfigInterDcTCPNodelayParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config inter dc tcp nodelay params +func (o *FindConfigInterDcTCPNodelayParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config inter dc tcp nodelay params +func (o *FindConfigInterDcTCPNodelayParams) WithContext(ctx context.Context) *FindConfigInterDcTCPNodelayParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config inter dc tcp nodelay params +func (o *FindConfigInterDcTCPNodelayParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config inter dc tcp nodelay params +func (o *FindConfigInterDcTCPNodelayParams) WithHTTPClient(client *http.Client) *FindConfigInterDcTCPNodelayParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config inter dc tcp nodelay params +func (o *FindConfigInterDcTCPNodelayParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInterDcTCPNodelayParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_tcp_nodelay_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_tcp_nodelay_responses.go new file mode 100644 index 00000000000..c2c17e3b948 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_inter_dc_tcp_nodelay_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInterDcTCPNodelayReader is a Reader for the FindConfigInterDcTCPNodelay structure. +type FindConfigInterDcTCPNodelayReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInterDcTCPNodelayReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInterDcTCPNodelayOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInterDcTCPNodelayDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInterDcTCPNodelayOK creates a FindConfigInterDcTCPNodelayOK with default headers values +func NewFindConfigInterDcTCPNodelayOK() *FindConfigInterDcTCPNodelayOK { + return &FindConfigInterDcTCPNodelayOK{} +} + +/* +FindConfigInterDcTCPNodelayOK handles this case with default header values. + +Config value +*/ +type FindConfigInterDcTCPNodelayOK struct { + Payload bool +} + +func (o *FindConfigInterDcTCPNodelayOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigInterDcTCPNodelayOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInterDcTCPNodelayDefault creates a FindConfigInterDcTCPNodelayDefault with default headers values +func NewFindConfigInterDcTCPNodelayDefault(code int) *FindConfigInterDcTCPNodelayDefault { + return &FindConfigInterDcTCPNodelayDefault{ + _statusCode: code, + } +} + +/* +FindConfigInterDcTCPNodelayDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigInterDcTCPNodelayDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config inter dc tcp nodelay default response +func (o *FindConfigInterDcTCPNodelayDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInterDcTCPNodelayDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInterDcTCPNodelayDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInterDcTCPNodelayDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_authenticator_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_authenticator_parameters.go new file mode 100644 index 00000000000..28ced9e7bbb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_authenticator_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInternodeAuthenticatorParams creates a new FindConfigInternodeAuthenticatorParams object +// with the default values initialized. 
+func NewFindConfigInternodeAuthenticatorParams() *FindConfigInternodeAuthenticatorParams { + + return &FindConfigInternodeAuthenticatorParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInternodeAuthenticatorParamsWithTimeout creates a new FindConfigInternodeAuthenticatorParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInternodeAuthenticatorParamsWithTimeout(timeout time.Duration) *FindConfigInternodeAuthenticatorParams { + + return &FindConfigInternodeAuthenticatorParams{ + + timeout: timeout, + } +} + +// NewFindConfigInternodeAuthenticatorParamsWithContext creates a new FindConfigInternodeAuthenticatorParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInternodeAuthenticatorParamsWithContext(ctx context.Context) *FindConfigInternodeAuthenticatorParams { + + return &FindConfigInternodeAuthenticatorParams{ + + Context: ctx, + } +} + +// NewFindConfigInternodeAuthenticatorParamsWithHTTPClient creates a new FindConfigInternodeAuthenticatorParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInternodeAuthenticatorParamsWithHTTPClient(client *http.Client) *FindConfigInternodeAuthenticatorParams { + + return &FindConfigInternodeAuthenticatorParams{ + HTTPClient: client, + } +} + +/* +FindConfigInternodeAuthenticatorParams contains all the parameters to send to the API endpoint +for the find config internode authenticator operation typically these are written to a http.Request +*/ +type FindConfigInternodeAuthenticatorParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config internode authenticator params +func (o *FindConfigInternodeAuthenticatorParams) WithTimeout(timeout time.Duration) *FindConfigInternodeAuthenticatorParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config internode authenticator params +func (o *FindConfigInternodeAuthenticatorParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config internode authenticator params +func (o *FindConfigInternodeAuthenticatorParams) WithContext(ctx context.Context) *FindConfigInternodeAuthenticatorParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config internode authenticator params +func (o *FindConfigInternodeAuthenticatorParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config internode authenticator params +func (o *FindConfigInternodeAuthenticatorParams) WithHTTPClient(client *http.Client) *FindConfigInternodeAuthenticatorParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config internode authenticator params +func (o *FindConfigInternodeAuthenticatorParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInternodeAuthenticatorParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_authenticator_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_authenticator_responses.go new file mode 100644 index 00000000000..c5ff359f56b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_authenticator_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInternodeAuthenticatorReader is a Reader for the FindConfigInternodeAuthenticator structure. +type FindConfigInternodeAuthenticatorReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInternodeAuthenticatorReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInternodeAuthenticatorOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInternodeAuthenticatorDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInternodeAuthenticatorOK creates a FindConfigInternodeAuthenticatorOK with default headers values +func NewFindConfigInternodeAuthenticatorOK() *FindConfigInternodeAuthenticatorOK { + return &FindConfigInternodeAuthenticatorOK{} +} + +/* +FindConfigInternodeAuthenticatorOK handles this case with default header values. + +Config value +*/ +type FindConfigInternodeAuthenticatorOK struct { + Payload string +} + +func (o *FindConfigInternodeAuthenticatorOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigInternodeAuthenticatorOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInternodeAuthenticatorDefault creates a FindConfigInternodeAuthenticatorDefault with default headers values +func NewFindConfigInternodeAuthenticatorDefault(code int) *FindConfigInternodeAuthenticatorDefault { + return &FindConfigInternodeAuthenticatorDefault{ + _statusCode: code, + } +} + +/* +FindConfigInternodeAuthenticatorDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigInternodeAuthenticatorDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config internode authenticator default response +func (o *FindConfigInternodeAuthenticatorDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInternodeAuthenticatorDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInternodeAuthenticatorDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInternodeAuthenticatorDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_compression_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_compression_parameters.go new file mode 100644 index 00000000000..39e2f75f4ca --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_compression_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInternodeCompressionParams creates a new FindConfigInternodeCompressionParams object +// with the default values initialized. 
+func NewFindConfigInternodeCompressionParams() *FindConfigInternodeCompressionParams { + + return &FindConfigInternodeCompressionParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInternodeCompressionParamsWithTimeout creates a new FindConfigInternodeCompressionParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInternodeCompressionParamsWithTimeout(timeout time.Duration) *FindConfigInternodeCompressionParams { + + return &FindConfigInternodeCompressionParams{ + + timeout: timeout, + } +} + +// NewFindConfigInternodeCompressionParamsWithContext creates a new FindConfigInternodeCompressionParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInternodeCompressionParamsWithContext(ctx context.Context) *FindConfigInternodeCompressionParams { + + return &FindConfigInternodeCompressionParams{ + + Context: ctx, + } +} + +// NewFindConfigInternodeCompressionParamsWithHTTPClient creates a new FindConfigInternodeCompressionParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInternodeCompressionParamsWithHTTPClient(client *http.Client) *FindConfigInternodeCompressionParams { + + return &FindConfigInternodeCompressionParams{ + HTTPClient: client, + } +} + +/* +FindConfigInternodeCompressionParams contains all the parameters to send to the API endpoint +for the find config internode compression operation typically these are written to a http.Request +*/ +type FindConfigInternodeCompressionParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config internode compression params +func (o *FindConfigInternodeCompressionParams) WithTimeout(timeout time.Duration) *FindConfigInternodeCompressionParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config internode compression params +func (o *FindConfigInternodeCompressionParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config internode compression params +func (o *FindConfigInternodeCompressionParams) WithContext(ctx context.Context) *FindConfigInternodeCompressionParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config internode compression params +func (o *FindConfigInternodeCompressionParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config internode compression params +func (o *FindConfigInternodeCompressionParams) WithHTTPClient(client *http.Client) *FindConfigInternodeCompressionParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config internode compression params +func (o *FindConfigInternodeCompressionParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInternodeCompressionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_compression_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_compression_responses.go new file mode 100644 index 00000000000..64ec3572805 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_compression_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInternodeCompressionReader is a Reader for the FindConfigInternodeCompression structure. +type FindConfigInternodeCompressionReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInternodeCompressionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInternodeCompressionOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInternodeCompressionDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInternodeCompressionOK creates a FindConfigInternodeCompressionOK with default headers values +func NewFindConfigInternodeCompressionOK() *FindConfigInternodeCompressionOK { + return &FindConfigInternodeCompressionOK{} +} + +/* +FindConfigInternodeCompressionOK handles this case with default header values. + +Config value +*/ +type FindConfigInternodeCompressionOK struct { + Payload string +} + +func (o *FindConfigInternodeCompressionOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigInternodeCompressionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInternodeCompressionDefault creates a FindConfigInternodeCompressionDefault with default headers values +func NewFindConfigInternodeCompressionDefault(code int) *FindConfigInternodeCompressionDefault { + return &FindConfigInternodeCompressionDefault{ + _statusCode: code, + } +} + +/* +FindConfigInternodeCompressionDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigInternodeCompressionDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config internode compression default response +func (o *FindConfigInternodeCompressionDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInternodeCompressionDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInternodeCompressionDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInternodeCompressionDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_recv_buff_size_in_bytes_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_recv_buff_size_in_bytes_parameters.go new file mode 100644 index 00000000000..34e10cf8d04 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_recv_buff_size_in_bytes_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInternodeRecvBuffSizeInBytesParams creates a new FindConfigInternodeRecvBuffSizeInBytesParams object +// with the default values initialized. 
+func NewFindConfigInternodeRecvBuffSizeInBytesParams() *FindConfigInternodeRecvBuffSizeInBytesParams { + + return &FindConfigInternodeRecvBuffSizeInBytesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInternodeRecvBuffSizeInBytesParamsWithTimeout creates a new FindConfigInternodeRecvBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInternodeRecvBuffSizeInBytesParamsWithTimeout(timeout time.Duration) *FindConfigInternodeRecvBuffSizeInBytesParams { + + return &FindConfigInternodeRecvBuffSizeInBytesParams{ + + timeout: timeout, + } +} + +// NewFindConfigInternodeRecvBuffSizeInBytesParamsWithContext creates a new FindConfigInternodeRecvBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInternodeRecvBuffSizeInBytesParamsWithContext(ctx context.Context) *FindConfigInternodeRecvBuffSizeInBytesParams { + + return &FindConfigInternodeRecvBuffSizeInBytesParams{ + + Context: ctx, + } +} + +// NewFindConfigInternodeRecvBuffSizeInBytesParamsWithHTTPClient creates a new FindConfigInternodeRecvBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInternodeRecvBuffSizeInBytesParamsWithHTTPClient(client *http.Client) *FindConfigInternodeRecvBuffSizeInBytesParams { + + return &FindConfigInternodeRecvBuffSizeInBytesParams{ + HTTPClient: client, + } +} + +/* +FindConfigInternodeRecvBuffSizeInBytesParams contains all the parameters to send to the API endpoint +for the find config internode recv buff size in bytes operation typically these are written to a http.Request +*/ +type FindConfigInternodeRecvBuffSizeInBytesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config internode recv buff size in bytes params +func (o *FindConfigInternodeRecvBuffSizeInBytesParams) WithTimeout(timeout time.Duration) *FindConfigInternodeRecvBuffSizeInBytesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config internode recv buff size in bytes params +func (o *FindConfigInternodeRecvBuffSizeInBytesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config internode recv buff size in bytes params +func (o *FindConfigInternodeRecvBuffSizeInBytesParams) WithContext(ctx context.Context) *FindConfigInternodeRecvBuffSizeInBytesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config internode recv buff size in bytes params +func (o *FindConfigInternodeRecvBuffSizeInBytesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config internode recv buff size in bytes params +func (o *FindConfigInternodeRecvBuffSizeInBytesParams) WithHTTPClient(client *http.Client) *FindConfigInternodeRecvBuffSizeInBytesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config internode recv buff size in bytes params +func (o *FindConfigInternodeRecvBuffSizeInBytesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInternodeRecvBuffSizeInBytesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_recv_buff_size_in_bytes_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_recv_buff_size_in_bytes_responses.go new file mode 100644 index 00000000000..4a6627975d6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_recv_buff_size_in_bytes_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInternodeRecvBuffSizeInBytesReader is a Reader for the FindConfigInternodeRecvBuffSizeInBytes structure. +type FindConfigInternodeRecvBuffSizeInBytesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInternodeRecvBuffSizeInBytesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInternodeRecvBuffSizeInBytesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInternodeRecvBuffSizeInBytesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInternodeRecvBuffSizeInBytesOK creates a FindConfigInternodeRecvBuffSizeInBytesOK with default headers values +func NewFindConfigInternodeRecvBuffSizeInBytesOK() *FindConfigInternodeRecvBuffSizeInBytesOK { + return &FindConfigInternodeRecvBuffSizeInBytesOK{} +} + +/* +FindConfigInternodeRecvBuffSizeInBytesOK handles this case with default header values. + +Config value +*/ +type FindConfigInternodeRecvBuffSizeInBytesOK struct { + Payload int64 +} + +func (o *FindConfigInternodeRecvBuffSizeInBytesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigInternodeRecvBuffSizeInBytesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInternodeRecvBuffSizeInBytesDefault creates a FindConfigInternodeRecvBuffSizeInBytesDefault with default headers values +func NewFindConfigInternodeRecvBuffSizeInBytesDefault(code int) *FindConfigInternodeRecvBuffSizeInBytesDefault { + return &FindConfigInternodeRecvBuffSizeInBytesDefault{ + _statusCode: code, + } +} + +/* +FindConfigInternodeRecvBuffSizeInBytesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigInternodeRecvBuffSizeInBytesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config internode recv buff size in bytes default response +func (o *FindConfigInternodeRecvBuffSizeInBytesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInternodeRecvBuffSizeInBytesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInternodeRecvBuffSizeInBytesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInternodeRecvBuffSizeInBytesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_send_buff_size_in_bytes_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_send_buff_size_in_bytes_parameters.go new file mode 100644 index 00000000000..37c24aa22c5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_send_buff_size_in_bytes_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigInternodeSendBuffSizeInBytesParams creates a new FindConfigInternodeSendBuffSizeInBytesParams object +// with the default values initialized. 
+func NewFindConfigInternodeSendBuffSizeInBytesParams() *FindConfigInternodeSendBuffSizeInBytesParams { + + return &FindConfigInternodeSendBuffSizeInBytesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigInternodeSendBuffSizeInBytesParamsWithTimeout creates a new FindConfigInternodeSendBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigInternodeSendBuffSizeInBytesParamsWithTimeout(timeout time.Duration) *FindConfigInternodeSendBuffSizeInBytesParams { + + return &FindConfigInternodeSendBuffSizeInBytesParams{ + + timeout: timeout, + } +} + +// NewFindConfigInternodeSendBuffSizeInBytesParamsWithContext creates a new FindConfigInternodeSendBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigInternodeSendBuffSizeInBytesParamsWithContext(ctx context.Context) *FindConfigInternodeSendBuffSizeInBytesParams { + + return &FindConfigInternodeSendBuffSizeInBytesParams{ + + Context: ctx, + } +} + +// NewFindConfigInternodeSendBuffSizeInBytesParamsWithHTTPClient creates a new FindConfigInternodeSendBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigInternodeSendBuffSizeInBytesParamsWithHTTPClient(client *http.Client) *FindConfigInternodeSendBuffSizeInBytesParams { + + return &FindConfigInternodeSendBuffSizeInBytesParams{ + HTTPClient: client, + } +} + +/* +FindConfigInternodeSendBuffSizeInBytesParams contains all the parameters to send to the API endpoint +for the find config internode send buff size in bytes operation typically these are written to a http.Request +*/ +type FindConfigInternodeSendBuffSizeInBytesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config internode send buff size in bytes params +func (o *FindConfigInternodeSendBuffSizeInBytesParams) WithTimeout(timeout time.Duration) *FindConfigInternodeSendBuffSizeInBytesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config internode send buff size in bytes params +func (o *FindConfigInternodeSendBuffSizeInBytesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config internode send buff size in bytes params +func (o *FindConfigInternodeSendBuffSizeInBytesParams) WithContext(ctx context.Context) *FindConfigInternodeSendBuffSizeInBytesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config internode send buff size in bytes params +func (o *FindConfigInternodeSendBuffSizeInBytesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config internode send buff size in bytes params +func (o *FindConfigInternodeSendBuffSizeInBytesParams) WithHTTPClient(client *http.Client) *FindConfigInternodeSendBuffSizeInBytesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config internode send buff size in bytes params +func (o *FindConfigInternodeSendBuffSizeInBytesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigInternodeSendBuffSizeInBytesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := 
r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_send_buff_size_in_bytes_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_send_buff_size_in_bytes_responses.go new file mode 100644 index 00000000000..244f2ac4244 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_internode_send_buff_size_in_bytes_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigInternodeSendBuffSizeInBytesReader is a Reader for the FindConfigInternodeSendBuffSizeInBytes structure. +type FindConfigInternodeSendBuffSizeInBytesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigInternodeSendBuffSizeInBytesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigInternodeSendBuffSizeInBytesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigInternodeSendBuffSizeInBytesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigInternodeSendBuffSizeInBytesOK creates a FindConfigInternodeSendBuffSizeInBytesOK with default headers values +func NewFindConfigInternodeSendBuffSizeInBytesOK() *FindConfigInternodeSendBuffSizeInBytesOK { + return &FindConfigInternodeSendBuffSizeInBytesOK{} +} + +/* +FindConfigInternodeSendBuffSizeInBytesOK handles this case with default header values. + +Config value +*/ +type FindConfigInternodeSendBuffSizeInBytesOK struct { + Payload int64 +} + +func (o *FindConfigInternodeSendBuffSizeInBytesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigInternodeSendBuffSizeInBytesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigInternodeSendBuffSizeInBytesDefault creates a FindConfigInternodeSendBuffSizeInBytesDefault with default headers values +func NewFindConfigInternodeSendBuffSizeInBytesDefault(code int) *FindConfigInternodeSendBuffSizeInBytesDefault { + return &FindConfigInternodeSendBuffSizeInBytesDefault{ + _statusCode: code, + } +} + +/* +FindConfigInternodeSendBuffSizeInBytesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigInternodeSendBuffSizeInBytesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config internode send buff size in bytes default response +func (o *FindConfigInternodeSendBuffSizeInBytesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigInternodeSendBuffSizeInBytesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigInternodeSendBuffSizeInBytesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigInternodeSendBuffSizeInBytesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_join_ring_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_join_ring_parameters.go new file mode 100644 index 00000000000..58331904fe2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_join_ring_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigJoinRingParams creates a new FindConfigJoinRingParams object +// with the default values initialized. 
+func NewFindConfigJoinRingParams() *FindConfigJoinRingParams { + + return &FindConfigJoinRingParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigJoinRingParamsWithTimeout creates a new FindConfigJoinRingParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigJoinRingParamsWithTimeout(timeout time.Duration) *FindConfigJoinRingParams { + + return &FindConfigJoinRingParams{ + + timeout: timeout, + } +} + +// NewFindConfigJoinRingParamsWithContext creates a new FindConfigJoinRingParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigJoinRingParamsWithContext(ctx context.Context) *FindConfigJoinRingParams { + + return &FindConfigJoinRingParams{ + + Context: ctx, + } +} + +// NewFindConfigJoinRingParamsWithHTTPClient creates a new FindConfigJoinRingParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigJoinRingParamsWithHTTPClient(client *http.Client) *FindConfigJoinRingParams { + + return &FindConfigJoinRingParams{ + HTTPClient: client, + } +} + +/* +FindConfigJoinRingParams contains all the parameters to send to the API endpoint +for the find config join ring operation typically these are written to a http.Request +*/ +type FindConfigJoinRingParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config join ring params +func (o *FindConfigJoinRingParams) WithTimeout(timeout time.Duration) *FindConfigJoinRingParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config join ring params +func (o *FindConfigJoinRingParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config join ring params +func (o *FindConfigJoinRingParams) WithContext(ctx context.Context) *FindConfigJoinRingParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config join ring params +func (o *FindConfigJoinRingParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config join ring params +func (o *FindConfigJoinRingParams) WithHTTPClient(client *http.Client) *FindConfigJoinRingParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config join ring params +func (o *FindConfigJoinRingParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigJoinRingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_join_ring_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_join_ring_responses.go new file mode 100644 index 00000000000..77229477d4c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_join_ring_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
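Editor's note: every params type in these hunks has the same shape — no operation-specific fields, so WriteToRequest only propagates the timeout and the validation slice `res` is always empty — which means one usage sketch covers them all. Below is a minimal, hypothetical caller, assuming only the constructors and fluent setters defined in the join-ring hunk above; it is a sketch, not part of the generated code.

```go
// Hypothetical caller; import path taken from this vendored tree.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// The generated With* setters chain, so a request-scoped context and a
	// per-request timeout can be combined on a single params value.
	params := config.NewFindConfigJoinRingParamsWithContext(ctx).
		WithTimeout(10 * time.Second)

	fmt.Println(params) // normally handed to the generated config client
}
```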
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigJoinRingReader is a Reader for the FindConfigJoinRing structure. +type FindConfigJoinRingReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigJoinRingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigJoinRingOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigJoinRingDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigJoinRingOK creates a FindConfigJoinRingOK with default headers values +func NewFindConfigJoinRingOK() *FindConfigJoinRingOK { + return &FindConfigJoinRingOK{} +} + +/* +FindConfigJoinRingOK handles this case with default header values. + +Config value +*/ +type FindConfigJoinRingOK struct { + Payload bool +} + +func (o *FindConfigJoinRingOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigJoinRingOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigJoinRingDefault creates a FindConfigJoinRingDefault with default headers values +func NewFindConfigJoinRingDefault(code int) *FindConfigJoinRingDefault { + return &FindConfigJoinRingDefault{ + _statusCode: code, + } +} + +/* +FindConfigJoinRingDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigJoinRingDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config join ring default response +func (o *FindConfigJoinRingDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigJoinRingDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigJoinRingDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigJoinRingDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_keys_to_save_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_keys_to_save_parameters.go new file mode 100644 index 00000000000..33e2b3fa5c8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_keys_to_save_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
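Editor's note: the responses files above all dispatch the same way — ReadResponse returns the OK value for HTTP 200 and returns the Default struct *as an error* for any non-2xx status (an unexpected 2xx is returned as a value). A hedged sketch of unwrapping that typed error follows; the `config.Client` type and its `FindConfigJoinRing` method are assumptions — they are what go-swagger conventionally generates alongside these files, but they sit outside this hunk.

```go
package scyllaconfutil

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

// JoinRingEnabled sketches how a caller recovers the server's ErrorModel
// from the typed default response defined in the hunk above.
func JoinRingEnabled(c *config.Client, params *config.FindConfigJoinRingParams) (bool, error) {
	ok, err := c.FindConfigJoinRing(params) // assumed generated method
	if err != nil {
		// Non-2xx responses surface as *FindConfigJoinRingDefault, which
		// implements error and carries the decoded models.ErrorModel.
		if dflt, isDefault := err.(*config.FindConfigJoinRingDefault); isDefault {
			return false, fmt.Errorf("scylla config API [HTTP %d]: %s",
				dflt.Code(), dflt.GetPayload().Message)
		}
		return false, err
	}
	return ok.GetPayload(), nil
}
```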
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigKeyCacheKeysToSaveParams creates a new FindConfigKeyCacheKeysToSaveParams object +// with the default values initialized. +func NewFindConfigKeyCacheKeysToSaveParams() *FindConfigKeyCacheKeysToSaveParams { + + return &FindConfigKeyCacheKeysToSaveParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigKeyCacheKeysToSaveParamsWithTimeout creates a new FindConfigKeyCacheKeysToSaveParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigKeyCacheKeysToSaveParamsWithTimeout(timeout time.Duration) *FindConfigKeyCacheKeysToSaveParams { + + return &FindConfigKeyCacheKeysToSaveParams{ + + timeout: timeout, + } +} + +// NewFindConfigKeyCacheKeysToSaveParamsWithContext creates a new FindConfigKeyCacheKeysToSaveParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigKeyCacheKeysToSaveParamsWithContext(ctx context.Context) *FindConfigKeyCacheKeysToSaveParams { + + return &FindConfigKeyCacheKeysToSaveParams{ + + Context: ctx, + } +} + +// NewFindConfigKeyCacheKeysToSaveParamsWithHTTPClient creates a new FindConfigKeyCacheKeysToSaveParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigKeyCacheKeysToSaveParamsWithHTTPClient(client *http.Client) *FindConfigKeyCacheKeysToSaveParams { + + return &FindConfigKeyCacheKeysToSaveParams{ + HTTPClient: client, + } +} + +/* +FindConfigKeyCacheKeysToSaveParams contains all the parameters to send to the API endpoint +for the find config key cache keys to save operation typically these are written to a http.Request +*/ +type FindConfigKeyCacheKeysToSaveParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config key cache keys to save params +func (o *FindConfigKeyCacheKeysToSaveParams) WithTimeout(timeout time.Duration) *FindConfigKeyCacheKeysToSaveParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config key cache keys to save params +func (o *FindConfigKeyCacheKeysToSaveParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config key cache keys to save params +func (o *FindConfigKeyCacheKeysToSaveParams) WithContext(ctx context.Context) *FindConfigKeyCacheKeysToSaveParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config key cache keys to save params +func (o *FindConfigKeyCacheKeysToSaveParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config key cache keys to save params +func (o *FindConfigKeyCacheKeysToSaveParams) WithHTTPClient(client *http.Client) *FindConfigKeyCacheKeysToSaveParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config key cache keys to save params +func (o *FindConfigKeyCacheKeysToSaveParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigKeyCacheKeysToSaveParams) WriteToRequest(r runtime.ClientRequest, 
reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_keys_to_save_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_keys_to_save_responses.go new file mode 100644 index 00000000000..0b9bc47a3b9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_keys_to_save_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigKeyCacheKeysToSaveReader is a Reader for the FindConfigKeyCacheKeysToSave structure. +type FindConfigKeyCacheKeysToSaveReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigKeyCacheKeysToSaveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigKeyCacheKeysToSaveOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigKeyCacheKeysToSaveDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigKeyCacheKeysToSaveOK creates a FindConfigKeyCacheKeysToSaveOK with default headers values +func NewFindConfigKeyCacheKeysToSaveOK() *FindConfigKeyCacheKeysToSaveOK { + return &FindConfigKeyCacheKeysToSaveOK{} +} + +/* +FindConfigKeyCacheKeysToSaveOK handles this case with default header values. + +Config value +*/ +type FindConfigKeyCacheKeysToSaveOK struct { + Payload int64 +} + +func (o *FindConfigKeyCacheKeysToSaveOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigKeyCacheKeysToSaveOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigKeyCacheKeysToSaveDefault creates a FindConfigKeyCacheKeysToSaveDefault with default headers values +func NewFindConfigKeyCacheKeysToSaveDefault(code int) *FindConfigKeyCacheKeysToSaveDefault { + return &FindConfigKeyCacheKeysToSaveDefault{ + _statusCode: code, + } +} + +/* +FindConfigKeyCacheKeysToSaveDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigKeyCacheKeysToSaveDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config key cache keys to save default response +func (o *FindConfigKeyCacheKeysToSaveDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigKeyCacheKeysToSaveDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigKeyCacheKeysToSaveDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigKeyCacheKeysToSaveDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_save_period_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_save_period_parameters.go new file mode 100644 index 00000000000..08fe2540369 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_save_period_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigKeyCacheSavePeriodParams creates a new FindConfigKeyCacheSavePeriodParams object +// with the default values initialized. 
+func NewFindConfigKeyCacheSavePeriodParams() *FindConfigKeyCacheSavePeriodParams { + + return &FindConfigKeyCacheSavePeriodParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigKeyCacheSavePeriodParamsWithTimeout creates a new FindConfigKeyCacheSavePeriodParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigKeyCacheSavePeriodParamsWithTimeout(timeout time.Duration) *FindConfigKeyCacheSavePeriodParams { + + return &FindConfigKeyCacheSavePeriodParams{ + + timeout: timeout, + } +} + +// NewFindConfigKeyCacheSavePeriodParamsWithContext creates a new FindConfigKeyCacheSavePeriodParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigKeyCacheSavePeriodParamsWithContext(ctx context.Context) *FindConfigKeyCacheSavePeriodParams { + + return &FindConfigKeyCacheSavePeriodParams{ + + Context: ctx, + } +} + +// NewFindConfigKeyCacheSavePeriodParamsWithHTTPClient creates a new FindConfigKeyCacheSavePeriodParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigKeyCacheSavePeriodParamsWithHTTPClient(client *http.Client) *FindConfigKeyCacheSavePeriodParams { + + return &FindConfigKeyCacheSavePeriodParams{ + HTTPClient: client, + } +} + +/* +FindConfigKeyCacheSavePeriodParams contains all the parameters to send to the API endpoint +for the find config key cache save period operation typically these are written to a http.Request +*/ +type FindConfigKeyCacheSavePeriodParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config key cache save period params +func (o *FindConfigKeyCacheSavePeriodParams) WithTimeout(timeout time.Duration) *FindConfigKeyCacheSavePeriodParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config key cache save period params +func (o *FindConfigKeyCacheSavePeriodParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config key cache save period params +func (o *FindConfigKeyCacheSavePeriodParams) WithContext(ctx context.Context) *FindConfigKeyCacheSavePeriodParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config key cache save period params +func (o *FindConfigKeyCacheSavePeriodParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config key cache save period params +func (o *FindConfigKeyCacheSavePeriodParams) WithHTTPClient(client *http.Client) *FindConfigKeyCacheSavePeriodParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config key cache save period params +func (o *FindConfigKeyCacheSavePeriodParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigKeyCacheSavePeriodParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_save_period_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_save_period_responses.go new file mode 100644 index 00000000000..d77f6347563 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_save_period_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigKeyCacheSavePeriodReader is a Reader for the FindConfigKeyCacheSavePeriod structure. +type FindConfigKeyCacheSavePeriodReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigKeyCacheSavePeriodReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigKeyCacheSavePeriodOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigKeyCacheSavePeriodDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigKeyCacheSavePeriodOK creates a FindConfigKeyCacheSavePeriodOK with default headers values +func NewFindConfigKeyCacheSavePeriodOK() *FindConfigKeyCacheSavePeriodOK { + return &FindConfigKeyCacheSavePeriodOK{} +} + +/* +FindConfigKeyCacheSavePeriodOK handles this case with default header values. + +Config value +*/ +type FindConfigKeyCacheSavePeriodOK struct { + Payload int64 +} + +func (o *FindConfigKeyCacheSavePeriodOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigKeyCacheSavePeriodOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigKeyCacheSavePeriodDefault creates a FindConfigKeyCacheSavePeriodDefault with default headers values +func NewFindConfigKeyCacheSavePeriodDefault(code int) *FindConfigKeyCacheSavePeriodDefault { + return &FindConfigKeyCacheSavePeriodDefault{ + _statusCode: code, + } +} + +/* +FindConfigKeyCacheSavePeriodDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigKeyCacheSavePeriodDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config key cache save period default response +func (o *FindConfigKeyCacheSavePeriodDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigKeyCacheSavePeriodDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigKeyCacheSavePeriodDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigKeyCacheSavePeriodDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_size_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_size_in_mb_parameters.go new file mode 100644 index 00000000000..74c3d0adda2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_size_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigKeyCacheSizeInMbParams creates a new FindConfigKeyCacheSizeInMbParams object +// with the default values initialized. 
+func NewFindConfigKeyCacheSizeInMbParams() *FindConfigKeyCacheSizeInMbParams { + + return &FindConfigKeyCacheSizeInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigKeyCacheSizeInMbParamsWithTimeout creates a new FindConfigKeyCacheSizeInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigKeyCacheSizeInMbParamsWithTimeout(timeout time.Duration) *FindConfigKeyCacheSizeInMbParams { + + return &FindConfigKeyCacheSizeInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigKeyCacheSizeInMbParamsWithContext creates a new FindConfigKeyCacheSizeInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigKeyCacheSizeInMbParamsWithContext(ctx context.Context) *FindConfigKeyCacheSizeInMbParams { + + return &FindConfigKeyCacheSizeInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigKeyCacheSizeInMbParamsWithHTTPClient creates a new FindConfigKeyCacheSizeInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigKeyCacheSizeInMbParamsWithHTTPClient(client *http.Client) *FindConfigKeyCacheSizeInMbParams { + + return &FindConfigKeyCacheSizeInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigKeyCacheSizeInMbParams contains all the parameters to send to the API endpoint +for the find config key cache size in mb operation typically these are written to a http.Request +*/ +type FindConfigKeyCacheSizeInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config key cache size in mb params +func (o *FindConfigKeyCacheSizeInMbParams) WithTimeout(timeout time.Duration) *FindConfigKeyCacheSizeInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config key cache size in mb params +func (o *FindConfigKeyCacheSizeInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config key cache size in mb params +func (o *FindConfigKeyCacheSizeInMbParams) WithContext(ctx context.Context) *FindConfigKeyCacheSizeInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config key cache size in mb params +func (o *FindConfigKeyCacheSizeInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config key cache size in mb params +func (o *FindConfigKeyCacheSizeInMbParams) WithHTTPClient(client *http.Client) *FindConfigKeyCacheSizeInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config key cache size in mb params +func (o *FindConfigKeyCacheSizeInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigKeyCacheSizeInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_size_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_size_in_mb_responses.go new file mode 100644 index 00000000000..a6df456e3ed --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_key_cache_size_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigKeyCacheSizeInMbReader is a Reader for the FindConfigKeyCacheSizeInMb structure. +type FindConfigKeyCacheSizeInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigKeyCacheSizeInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigKeyCacheSizeInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigKeyCacheSizeInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigKeyCacheSizeInMbOK creates a FindConfigKeyCacheSizeInMbOK with default headers values +func NewFindConfigKeyCacheSizeInMbOK() *FindConfigKeyCacheSizeInMbOK { + return &FindConfigKeyCacheSizeInMbOK{} +} + +/* +FindConfigKeyCacheSizeInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigKeyCacheSizeInMbOK struct { + Payload int64 +} + +func (o *FindConfigKeyCacheSizeInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigKeyCacheSizeInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigKeyCacheSizeInMbDefault creates a FindConfigKeyCacheSizeInMbDefault with default headers values +func NewFindConfigKeyCacheSizeInMbDefault(code int) *FindConfigKeyCacheSizeInMbDefault { + return &FindConfigKeyCacheSizeInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigKeyCacheSizeInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigKeyCacheSizeInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config key cache size in mb default response +func (o *FindConfigKeyCacheSizeInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigKeyCacheSizeInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigKeyCacheSizeInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigKeyCacheSizeInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_large_memory_allocation_warning_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_large_memory_allocation_warning_threshold_parameters.go new file mode 100644 index 00000000000..c79fc408d80 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_large_memory_allocation_warning_threshold_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigLargeMemoryAllocationWarningThresholdParams creates a new FindConfigLargeMemoryAllocationWarningThresholdParams object +// with the default values initialized. 
+func NewFindConfigLargeMemoryAllocationWarningThresholdParams() *FindConfigLargeMemoryAllocationWarningThresholdParams { + + return &FindConfigLargeMemoryAllocationWarningThresholdParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigLargeMemoryAllocationWarningThresholdParamsWithTimeout creates a new FindConfigLargeMemoryAllocationWarningThresholdParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigLargeMemoryAllocationWarningThresholdParamsWithTimeout(timeout time.Duration) *FindConfigLargeMemoryAllocationWarningThresholdParams { + + return &FindConfigLargeMemoryAllocationWarningThresholdParams{ + + timeout: timeout, + } +} + +// NewFindConfigLargeMemoryAllocationWarningThresholdParamsWithContext creates a new FindConfigLargeMemoryAllocationWarningThresholdParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigLargeMemoryAllocationWarningThresholdParamsWithContext(ctx context.Context) *FindConfigLargeMemoryAllocationWarningThresholdParams { + + return &FindConfigLargeMemoryAllocationWarningThresholdParams{ + + Context: ctx, + } +} + +// NewFindConfigLargeMemoryAllocationWarningThresholdParamsWithHTTPClient creates a new FindConfigLargeMemoryAllocationWarningThresholdParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigLargeMemoryAllocationWarningThresholdParamsWithHTTPClient(client *http.Client) *FindConfigLargeMemoryAllocationWarningThresholdParams { + + return &FindConfigLargeMemoryAllocationWarningThresholdParams{ + HTTPClient: client, + } +} + +/* +FindConfigLargeMemoryAllocationWarningThresholdParams contains all the parameters to send to the API endpoint +for the find config large memory allocation warning threshold operation typically these are written to a http.Request +*/ +type FindConfigLargeMemoryAllocationWarningThresholdParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config large memory allocation warning threshold params +func (o *FindConfigLargeMemoryAllocationWarningThresholdParams) WithTimeout(timeout time.Duration) *FindConfigLargeMemoryAllocationWarningThresholdParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config large memory allocation warning threshold params +func (o *FindConfigLargeMemoryAllocationWarningThresholdParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config large memory allocation warning threshold params +func (o *FindConfigLargeMemoryAllocationWarningThresholdParams) WithContext(ctx context.Context) *FindConfigLargeMemoryAllocationWarningThresholdParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config large memory allocation warning threshold params +func (o *FindConfigLargeMemoryAllocationWarningThresholdParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config large memory allocation warning threshold params +func (o *FindConfigLargeMemoryAllocationWarningThresholdParams) WithHTTPClient(client *http.Client) *FindConfigLargeMemoryAllocationWarningThresholdParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config large memory allocation warning threshold params +func (o 
*FindConfigLargeMemoryAllocationWarningThresholdParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigLargeMemoryAllocationWarningThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_large_memory_allocation_warning_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_large_memory_allocation_warning_threshold_responses.go new file mode 100644 index 00000000000..85be34f8da9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_large_memory_allocation_warning_threshold_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigLargeMemoryAllocationWarningThresholdReader is a Reader for the FindConfigLargeMemoryAllocationWarningThreshold structure. +type FindConfigLargeMemoryAllocationWarningThresholdReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigLargeMemoryAllocationWarningThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigLargeMemoryAllocationWarningThresholdOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigLargeMemoryAllocationWarningThresholdDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigLargeMemoryAllocationWarningThresholdOK creates a FindConfigLargeMemoryAllocationWarningThresholdOK with default headers values +func NewFindConfigLargeMemoryAllocationWarningThresholdOK() *FindConfigLargeMemoryAllocationWarningThresholdOK { + return &FindConfigLargeMemoryAllocationWarningThresholdOK{} +} + +/* +FindConfigLargeMemoryAllocationWarningThresholdOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigLargeMemoryAllocationWarningThresholdOK struct { + Payload int64 +} + +func (o *FindConfigLargeMemoryAllocationWarningThresholdOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigLargeMemoryAllocationWarningThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigLargeMemoryAllocationWarningThresholdDefault creates a FindConfigLargeMemoryAllocationWarningThresholdDefault with default headers values +func NewFindConfigLargeMemoryAllocationWarningThresholdDefault(code int) *FindConfigLargeMemoryAllocationWarningThresholdDefault { + return &FindConfigLargeMemoryAllocationWarningThresholdDefault{ + _statusCode: code, + } +} + +/* +FindConfigLargeMemoryAllocationWarningThresholdDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigLargeMemoryAllocationWarningThresholdDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config large memory allocation warning threshold default response +func (o *FindConfigLargeMemoryAllocationWarningThresholdDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigLargeMemoryAllocationWarningThresholdDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigLargeMemoryAllocationWarningThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigLargeMemoryAllocationWarningThresholdDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_address_parameters.go new file mode 100644 index 00000000000..a9b170e64e1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigListenAddressParams creates a new FindConfigListenAddressParams object +// with the default values initialized. 
+func NewFindConfigListenAddressParams() *FindConfigListenAddressParams { + + return &FindConfigListenAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigListenAddressParamsWithTimeout creates a new FindConfigListenAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigListenAddressParamsWithTimeout(timeout time.Duration) *FindConfigListenAddressParams { + + return &FindConfigListenAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigListenAddressParamsWithContext creates a new FindConfigListenAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigListenAddressParamsWithContext(ctx context.Context) *FindConfigListenAddressParams { + + return &FindConfigListenAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigListenAddressParamsWithHTTPClient creates a new FindConfigListenAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigListenAddressParamsWithHTTPClient(client *http.Client) *FindConfigListenAddressParams { + + return &FindConfigListenAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigListenAddressParams contains all the parameters to send to the API endpoint +for the find config listen address operation typically these are written to a http.Request +*/ +type FindConfigListenAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config listen address params +func (o *FindConfigListenAddressParams) WithTimeout(timeout time.Duration) *FindConfigListenAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config listen address params +func (o *FindConfigListenAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config listen address params +func (o *FindConfigListenAddressParams) WithContext(ctx context.Context) *FindConfigListenAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config listen address params +func (o *FindConfigListenAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config listen address params +func (o *FindConfigListenAddressParams) WithHTTPClient(client *http.Client) *FindConfigListenAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config listen address params +func (o *FindConfigListenAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigListenAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_address_responses.go new file mode 100644 index 00000000000..419fa653386 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigListenAddressReader is a Reader for the FindConfigListenAddress structure. +type FindConfigListenAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigListenAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigListenAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigListenAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigListenAddressOK creates a FindConfigListenAddressOK with default headers values +func NewFindConfigListenAddressOK() *FindConfigListenAddressOK { + return &FindConfigListenAddressOK{} +} + +/* +FindConfigListenAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigListenAddressOK struct { + Payload string +} + +func (o *FindConfigListenAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigListenAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigListenAddressDefault creates a FindConfigListenAddressDefault with default headers values +func NewFindConfigListenAddressDefault(code int) *FindConfigListenAddressDefault { + return &FindConfigListenAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigListenAddressDefault handles this case with default header values. 
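+
+For any non-2xx status this type is returned as the operation's error
+(see Error() below), so a caller can recover the server's ErrorModel
+with errors.As (a sketch, assuming the same hypothetical operations
+client as in the params file):
+
+	_, err := scyllaClient.Config.FindConfigListenAddress(params)
+	var dflt *config.FindConfigListenAddressDefault
+	if errors.As(err, &dflt) {
+		log.Printf("HTTP %d: %s", dflt.Code(), dflt.GetPayload().Message)
+	}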
+ +unexpected error +*/ +type FindConfigListenAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config listen address default response +func (o *FindConfigListenAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigListenAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigListenAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigListenAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_interface_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_interface_parameters.go new file mode 100644 index 00000000000..da55e931763 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_interface_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigListenInterfaceParams creates a new FindConfigListenInterfaceParams object +// with the default values initialized. 
+func NewFindConfigListenInterfaceParams() *FindConfigListenInterfaceParams { + + return &FindConfigListenInterfaceParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigListenInterfaceParamsWithTimeout creates a new FindConfigListenInterfaceParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigListenInterfaceParamsWithTimeout(timeout time.Duration) *FindConfigListenInterfaceParams { + + return &FindConfigListenInterfaceParams{ + + timeout: timeout, + } +} + +// NewFindConfigListenInterfaceParamsWithContext creates a new FindConfigListenInterfaceParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigListenInterfaceParamsWithContext(ctx context.Context) *FindConfigListenInterfaceParams { + + return &FindConfigListenInterfaceParams{ + + Context: ctx, + } +} + +// NewFindConfigListenInterfaceParamsWithHTTPClient creates a new FindConfigListenInterfaceParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigListenInterfaceParamsWithHTTPClient(client *http.Client) *FindConfigListenInterfaceParams { + + return &FindConfigListenInterfaceParams{ + HTTPClient: client, + } +} + +/* +FindConfigListenInterfaceParams contains all the parameters to send to the API endpoint +for the find config listen interface operation typically these are written to a http.Request +*/ +type FindConfigListenInterfaceParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config listen interface params +func (o *FindConfigListenInterfaceParams) WithTimeout(timeout time.Duration) *FindConfigListenInterfaceParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config listen interface params +func (o *FindConfigListenInterfaceParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config listen interface params +func (o *FindConfigListenInterfaceParams) WithContext(ctx context.Context) *FindConfigListenInterfaceParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config listen interface params +func (o *FindConfigListenInterfaceParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config listen interface params +func (o *FindConfigListenInterfaceParams) WithHTTPClient(client *http.Client) *FindConfigListenInterfaceParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config listen interface params +func (o *FindConfigListenInterfaceParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigListenInterfaceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_interface_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_interface_responses.go new file mode 100644 index 00000000000..8ec39837001 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_interface_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigListenInterfaceReader is a Reader for the FindConfigListenInterface structure. +type FindConfigListenInterfaceReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigListenInterfaceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigListenInterfaceOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigListenInterfaceDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigListenInterfaceOK creates a FindConfigListenInterfaceOK with default headers values +func NewFindConfigListenInterfaceOK() *FindConfigListenInterfaceOK { + return &FindConfigListenInterfaceOK{} +} + +/* +FindConfigListenInterfaceOK handles this case with default header values. + +Config value +*/ +type FindConfigListenInterfaceOK struct { + Payload string +} + +func (o *FindConfigListenInterfaceOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigListenInterfaceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigListenInterfaceDefault creates a FindConfigListenInterfaceDefault with default headers values +func NewFindConfigListenInterfaceDefault(code int) *FindConfigListenInterfaceDefault { + return &FindConfigListenInterfaceDefault{ + _statusCode: code, + } +} + +/* +FindConfigListenInterfaceDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigListenInterfaceDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config listen interface default response +func (o *FindConfigListenInterfaceDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigListenInterfaceDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigListenInterfaceDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigListenInterfaceDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_on_broadcast_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_on_broadcast_address_parameters.go new file mode 100644 index 00000000000..3d7314983f5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_on_broadcast_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigListenOnBroadcastAddressParams creates a new FindConfigListenOnBroadcastAddressParams object +// with the default values initialized. 
+func NewFindConfigListenOnBroadcastAddressParams() *FindConfigListenOnBroadcastAddressParams { + + return &FindConfigListenOnBroadcastAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigListenOnBroadcastAddressParamsWithTimeout creates a new FindConfigListenOnBroadcastAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigListenOnBroadcastAddressParamsWithTimeout(timeout time.Duration) *FindConfigListenOnBroadcastAddressParams { + + return &FindConfigListenOnBroadcastAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigListenOnBroadcastAddressParamsWithContext creates a new FindConfigListenOnBroadcastAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigListenOnBroadcastAddressParamsWithContext(ctx context.Context) *FindConfigListenOnBroadcastAddressParams { + + return &FindConfigListenOnBroadcastAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigListenOnBroadcastAddressParamsWithHTTPClient creates a new FindConfigListenOnBroadcastAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigListenOnBroadcastAddressParamsWithHTTPClient(client *http.Client) *FindConfigListenOnBroadcastAddressParams { + + return &FindConfigListenOnBroadcastAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigListenOnBroadcastAddressParams contains all the parameters to send to the API endpoint +for the find config listen on broadcast address operation typically these are written to a http.Request +*/ +type FindConfigListenOnBroadcastAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config listen on broadcast address params +func (o *FindConfigListenOnBroadcastAddressParams) WithTimeout(timeout time.Duration) *FindConfigListenOnBroadcastAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config listen on broadcast address params +func (o *FindConfigListenOnBroadcastAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config listen on broadcast address params +func (o *FindConfigListenOnBroadcastAddressParams) WithContext(ctx context.Context) *FindConfigListenOnBroadcastAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config listen on broadcast address params +func (o *FindConfigListenOnBroadcastAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config listen on broadcast address params +func (o *FindConfigListenOnBroadcastAddressParams) WithHTTPClient(client *http.Client) *FindConfigListenOnBroadcastAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config listen on broadcast address params +func (o *FindConfigListenOnBroadcastAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigListenOnBroadcastAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_on_broadcast_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_on_broadcast_address_responses.go new file mode 100644 index 00000000000..b0bef0c03ea --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_listen_on_broadcast_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigListenOnBroadcastAddressReader is a Reader for the FindConfigListenOnBroadcastAddress structure. +type FindConfigListenOnBroadcastAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigListenOnBroadcastAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigListenOnBroadcastAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigListenOnBroadcastAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigListenOnBroadcastAddressOK creates a FindConfigListenOnBroadcastAddressOK with default headers values +func NewFindConfigListenOnBroadcastAddressOK() *FindConfigListenOnBroadcastAddressOK { + return &FindConfigListenOnBroadcastAddressOK{} +} + +/* +FindConfigListenOnBroadcastAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigListenOnBroadcastAddressOK struct { + Payload bool +} + +func (o *FindConfigListenOnBroadcastAddressOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigListenOnBroadcastAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigListenOnBroadcastAddressDefault creates a FindConfigListenOnBroadcastAddressDefault with default headers values +func NewFindConfigListenOnBroadcastAddressDefault(code int) *FindConfigListenOnBroadcastAddressDefault { + return &FindConfigListenOnBroadcastAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigListenOnBroadcastAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigListenOnBroadcastAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config listen on broadcast address default response +func (o *FindConfigListenOnBroadcastAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigListenOnBroadcastAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigListenOnBroadcastAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigListenOnBroadcastAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_balance_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_balance_parameters.go new file mode 100644 index 00000000000..a40e870a9d3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_balance_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigLoadBalanceParams creates a new FindConfigLoadBalanceParams object +// with the default values initialized. 
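+//
+// The request honors the supplied context; cancellation or deadline
+// expiry aborts the in-flight HTTP call. A sketch (operations client
+// assumed, as above):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+//	defer cancel()
+//	params := config.NewFindConfigLoadBalanceParamsWithContext(ctx)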
+func NewFindConfigLoadBalanceParams() *FindConfigLoadBalanceParams { + + return &FindConfigLoadBalanceParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigLoadBalanceParamsWithTimeout creates a new FindConfigLoadBalanceParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigLoadBalanceParamsWithTimeout(timeout time.Duration) *FindConfigLoadBalanceParams { + + return &FindConfigLoadBalanceParams{ + + timeout: timeout, + } +} + +// NewFindConfigLoadBalanceParamsWithContext creates a new FindConfigLoadBalanceParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigLoadBalanceParamsWithContext(ctx context.Context) *FindConfigLoadBalanceParams { + + return &FindConfigLoadBalanceParams{ + + Context: ctx, + } +} + +// NewFindConfigLoadBalanceParamsWithHTTPClient creates a new FindConfigLoadBalanceParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigLoadBalanceParamsWithHTTPClient(client *http.Client) *FindConfigLoadBalanceParams { + + return &FindConfigLoadBalanceParams{ + HTTPClient: client, + } +} + +/* +FindConfigLoadBalanceParams contains all the parameters to send to the API endpoint +for the find config load balance operation typically these are written to a http.Request +*/ +type FindConfigLoadBalanceParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config load balance params +func (o *FindConfigLoadBalanceParams) WithTimeout(timeout time.Duration) *FindConfigLoadBalanceParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config load balance params +func (o *FindConfigLoadBalanceParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config load balance params +func (o *FindConfigLoadBalanceParams) WithContext(ctx context.Context) *FindConfigLoadBalanceParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config load balance params +func (o *FindConfigLoadBalanceParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config load balance params +func (o *FindConfigLoadBalanceParams) WithHTTPClient(client *http.Client) *FindConfigLoadBalanceParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config load balance params +func (o *FindConfigLoadBalanceParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigLoadBalanceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_balance_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_balance_responses.go new file mode 100644 index 00000000000..b681adaa27a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_balance_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigLoadBalanceReader is a Reader for the FindConfigLoadBalance structure. +type FindConfigLoadBalanceReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigLoadBalanceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigLoadBalanceOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigLoadBalanceDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigLoadBalanceOK creates a FindConfigLoadBalanceOK with default headers values +func NewFindConfigLoadBalanceOK() *FindConfigLoadBalanceOK { + return &FindConfigLoadBalanceOK{} +} + +/* +FindConfigLoadBalanceOK handles this case with default header values. + +Config value +*/ +type FindConfigLoadBalanceOK struct { + Payload string +} + +func (o *FindConfigLoadBalanceOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigLoadBalanceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigLoadBalanceDefault creates a FindConfigLoadBalanceDefault with default headers values +func NewFindConfigLoadBalanceDefault(code int) *FindConfigLoadBalanceDefault { + return &FindConfigLoadBalanceDefault{ + _statusCode: code, + } +} + +/* +FindConfigLoadBalanceDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigLoadBalanceDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config load balance default response +func (o *FindConfigLoadBalanceDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigLoadBalanceDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigLoadBalanceDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigLoadBalanceDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_ring_state_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_ring_state_parameters.go new file mode 100644 index 00000000000..290c158cd6c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_ring_state_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigLoadRingStateParams creates a new FindConfigLoadRingStateParams object +// with the default values initialized. 
+func NewFindConfigLoadRingStateParams() *FindConfigLoadRingStateParams { + + return &FindConfigLoadRingStateParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigLoadRingStateParamsWithTimeout creates a new FindConfigLoadRingStateParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigLoadRingStateParamsWithTimeout(timeout time.Duration) *FindConfigLoadRingStateParams { + + return &FindConfigLoadRingStateParams{ + + timeout: timeout, + } +} + +// NewFindConfigLoadRingStateParamsWithContext creates a new FindConfigLoadRingStateParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigLoadRingStateParamsWithContext(ctx context.Context) *FindConfigLoadRingStateParams { + + return &FindConfigLoadRingStateParams{ + + Context: ctx, + } +} + +// NewFindConfigLoadRingStateParamsWithHTTPClient creates a new FindConfigLoadRingStateParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigLoadRingStateParamsWithHTTPClient(client *http.Client) *FindConfigLoadRingStateParams { + + return &FindConfigLoadRingStateParams{ + HTTPClient: client, + } +} + +/* +FindConfigLoadRingStateParams contains all the parameters to send to the API endpoint +for the find config load ring state operation typically these are written to a http.Request +*/ +type FindConfigLoadRingStateParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config load ring state params +func (o *FindConfigLoadRingStateParams) WithTimeout(timeout time.Duration) *FindConfigLoadRingStateParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config load ring state params +func (o *FindConfigLoadRingStateParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config load ring state params +func (o *FindConfigLoadRingStateParams) WithContext(ctx context.Context) *FindConfigLoadRingStateParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config load ring state params +func (o *FindConfigLoadRingStateParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config load ring state params +func (o *FindConfigLoadRingStateParams) WithHTTPClient(client *http.Client) *FindConfigLoadRingStateParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config load ring state params +func (o *FindConfigLoadRingStateParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigLoadRingStateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_ring_state_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_ring_state_responses.go new file mode 100644 index 00000000000..3e6352dc012 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_load_ring_state_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigLoadRingStateReader is a Reader for the FindConfigLoadRingState structure. +type FindConfigLoadRingStateReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigLoadRingStateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigLoadRingStateOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigLoadRingStateDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigLoadRingStateOK creates a FindConfigLoadRingStateOK with default headers values +func NewFindConfigLoadRingStateOK() *FindConfigLoadRingStateOK { + return &FindConfigLoadRingStateOK{} +} + +/* +FindConfigLoadRingStateOK handles this case with default header values. + +Config value +*/ +type FindConfigLoadRingStateOK struct { + Payload bool +} + +func (o *FindConfigLoadRingStateOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigLoadRingStateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigLoadRingStateDefault creates a FindConfigLoadRingStateDefault with default headers values +func NewFindConfigLoadRingStateDefault(code int) *FindConfigLoadRingStateDefault { + return &FindConfigLoadRingStateDefault{ + _statusCode: code, + } +} + +/* +FindConfigLoadRingStateDefault handles this case with default header values. 
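+
+Note the dispatch in ReadResponse above: an undeclared 2xx status still
+lands in this Default result but is returned as a success, since
+response.Code()/100 == 2 yields (result, nil) rather than an error.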
+ +unexpected error +*/ +type FindConfigLoadRingStateDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config load ring state default response +func (o *FindConfigLoadRingStateDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigLoadRingStateDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigLoadRingStateDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigLoadRingStateDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_stdout_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_stdout_parameters.go new file mode 100644 index 00000000000..32180b8a296 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_stdout_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigLogToStdoutParams creates a new FindConfigLogToStdoutParams object +// with the default values initialized. 
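+//
+// A per-request *http.Client can be injected, e.g. to tune transport
+// behavior independently of the shared runtime (sketch; the timeout
+// value is illustrative only):
+//
+//	hc := &http.Client{Timeout: 30 * time.Second}
+//	params := config.NewFindConfigLogToStdoutParamsWithHTTPClient(hc)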
+func NewFindConfigLogToStdoutParams() *FindConfigLogToStdoutParams { + + return &FindConfigLogToStdoutParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigLogToStdoutParamsWithTimeout creates a new FindConfigLogToStdoutParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigLogToStdoutParamsWithTimeout(timeout time.Duration) *FindConfigLogToStdoutParams { + + return &FindConfigLogToStdoutParams{ + + timeout: timeout, + } +} + +// NewFindConfigLogToStdoutParamsWithContext creates a new FindConfigLogToStdoutParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigLogToStdoutParamsWithContext(ctx context.Context) *FindConfigLogToStdoutParams { + + return &FindConfigLogToStdoutParams{ + + Context: ctx, + } +} + +// NewFindConfigLogToStdoutParamsWithHTTPClient creates a new FindConfigLogToStdoutParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigLogToStdoutParamsWithHTTPClient(client *http.Client) *FindConfigLogToStdoutParams { + + return &FindConfigLogToStdoutParams{ + HTTPClient: client, + } +} + +/* +FindConfigLogToStdoutParams contains all the parameters to send to the API endpoint +for the find config log to stdout operation typically these are written to a http.Request +*/ +type FindConfigLogToStdoutParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config log to stdout params +func (o *FindConfigLogToStdoutParams) WithTimeout(timeout time.Duration) *FindConfigLogToStdoutParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config log to stdout params +func (o *FindConfigLogToStdoutParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config log to stdout params +func (o *FindConfigLogToStdoutParams) WithContext(ctx context.Context) *FindConfigLogToStdoutParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config log to stdout params +func (o *FindConfigLogToStdoutParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config log to stdout params +func (o *FindConfigLogToStdoutParams) WithHTTPClient(client *http.Client) *FindConfigLogToStdoutParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config log to stdout params +func (o *FindConfigLogToStdoutParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigLogToStdoutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_stdout_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_stdout_responses.go new file mode 100644 index 00000000000..769d4632fde --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_stdout_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigLogToStdoutReader is a Reader for the FindConfigLogToStdout structure. +type FindConfigLogToStdoutReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigLogToStdoutReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigLogToStdoutOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigLogToStdoutDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigLogToStdoutOK creates a FindConfigLogToStdoutOK with default headers values +func NewFindConfigLogToStdoutOK() *FindConfigLogToStdoutOK { + return &FindConfigLogToStdoutOK{} +} + +/* +FindConfigLogToStdoutOK handles this case with default header values. + +Config value +*/ +type FindConfigLogToStdoutOK struct { + Payload bool +} + +func (o *FindConfigLogToStdoutOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigLogToStdoutOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigLogToStdoutDefault creates a FindConfigLogToStdoutDefault with default headers values +func NewFindConfigLogToStdoutDefault(code int) *FindConfigLogToStdoutDefault { + return &FindConfigLogToStdoutDefault{ + _statusCode: code, + } +} + +/* +FindConfigLogToStdoutDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigLogToStdoutDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config log to stdout default response +func (o *FindConfigLogToStdoutDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigLogToStdoutDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigLogToStdoutDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigLogToStdoutDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_syslog_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_syslog_parameters.go new file mode 100644 index 00000000000..cd33acbd511 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_syslog_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigLogToSyslogParams creates a new FindConfigLogToSyslogParams object +// with the default values initialized. 
+func NewFindConfigLogToSyslogParams() *FindConfigLogToSyslogParams { + + return &FindConfigLogToSyslogParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigLogToSyslogParamsWithTimeout creates a new FindConfigLogToSyslogParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigLogToSyslogParamsWithTimeout(timeout time.Duration) *FindConfigLogToSyslogParams { + + return &FindConfigLogToSyslogParams{ + + timeout: timeout, + } +} + +// NewFindConfigLogToSyslogParamsWithContext creates a new FindConfigLogToSyslogParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigLogToSyslogParamsWithContext(ctx context.Context) *FindConfigLogToSyslogParams { + + return &FindConfigLogToSyslogParams{ + + Context: ctx, + } +} + +// NewFindConfigLogToSyslogParamsWithHTTPClient creates a new FindConfigLogToSyslogParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigLogToSyslogParamsWithHTTPClient(client *http.Client) *FindConfigLogToSyslogParams { + + return &FindConfigLogToSyslogParams{ + HTTPClient: client, + } +} + +/* +FindConfigLogToSyslogParams contains all the parameters to send to the API endpoint +for the find config log to syslog operation typically these are written to a http.Request +*/ +type FindConfigLogToSyslogParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config log to syslog params +func (o *FindConfigLogToSyslogParams) WithTimeout(timeout time.Duration) *FindConfigLogToSyslogParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config log to syslog params +func (o *FindConfigLogToSyslogParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config log to syslog params +func (o *FindConfigLogToSyslogParams) WithContext(ctx context.Context) *FindConfigLogToSyslogParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config log to syslog params +func (o *FindConfigLogToSyslogParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config log to syslog params +func (o *FindConfigLogToSyslogParams) WithHTTPClient(client *http.Client) *FindConfigLogToSyslogParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config log to syslog params +func (o *FindConfigLogToSyslogParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigLogToSyslogParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_syslog_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_syslog_responses.go new file mode 100644 index 00000000000..1f30ad1507f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_log_to_syslog_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigLogToSyslogReader is a Reader for the FindConfigLogToSyslog structure. +type FindConfigLogToSyslogReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigLogToSyslogReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigLogToSyslogOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigLogToSyslogDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigLogToSyslogOK creates a FindConfigLogToSyslogOK with default headers values +func NewFindConfigLogToSyslogOK() *FindConfigLogToSyslogOK { + return &FindConfigLogToSyslogOK{} +} + +/* +FindConfigLogToSyslogOK handles this case with default header values. + +Config value +*/ +type FindConfigLogToSyslogOK struct { + Payload bool +} + +func (o *FindConfigLogToSyslogOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigLogToSyslogOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigLogToSyslogDefault creates a FindConfigLogToSyslogDefault with default headers values +func NewFindConfigLogToSyslogDefault(code int) *FindConfigLogToSyslogDefault { + return &FindConfigLogToSyslogDefault{ + _statusCode: code, + } +} + +/* +FindConfigLogToSyslogDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigLogToSyslogDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config log to syslog default response +func (o *FindConfigLogToSyslogDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigLogToSyslogDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigLogToSyslogDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigLogToSyslogDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_logger_log_level_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_logger_log_level_parameters.go new file mode 100644 index 00000000000..4671581e1f1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_logger_log_level_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigLoggerLogLevelParams creates a new FindConfigLoggerLogLevelParams object +// with the default values initialized. 
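+//
+// Unlike the scalar endpoints above, the OK payload for this operation
+// decodes into []string (see the responses file): the consumer
+// unmarshals the body straight into the typed Payload field, so the
+// caller gets the slice without extra parsing:
+//
+//	levels := ok.GetPayload() // []string of configured logger levels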
+func NewFindConfigLoggerLogLevelParams() *FindConfigLoggerLogLevelParams { + + return &FindConfigLoggerLogLevelParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigLoggerLogLevelParamsWithTimeout creates a new FindConfigLoggerLogLevelParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigLoggerLogLevelParamsWithTimeout(timeout time.Duration) *FindConfigLoggerLogLevelParams { + + return &FindConfigLoggerLogLevelParams{ + + timeout: timeout, + } +} + +// NewFindConfigLoggerLogLevelParamsWithContext creates a new FindConfigLoggerLogLevelParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigLoggerLogLevelParamsWithContext(ctx context.Context) *FindConfigLoggerLogLevelParams { + + return &FindConfigLoggerLogLevelParams{ + + Context: ctx, + } +} + +// NewFindConfigLoggerLogLevelParamsWithHTTPClient creates a new FindConfigLoggerLogLevelParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigLoggerLogLevelParamsWithHTTPClient(client *http.Client) *FindConfigLoggerLogLevelParams { + + return &FindConfigLoggerLogLevelParams{ + HTTPClient: client, + } +} + +/* +FindConfigLoggerLogLevelParams contains all the parameters to send to the API endpoint +for the find config logger log level operation typically these are written to a http.Request +*/ +type FindConfigLoggerLogLevelParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config logger log level params +func (o *FindConfigLoggerLogLevelParams) WithTimeout(timeout time.Duration) *FindConfigLoggerLogLevelParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config logger log level params +func (o *FindConfigLoggerLogLevelParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config logger log level params +func (o *FindConfigLoggerLogLevelParams) WithContext(ctx context.Context) *FindConfigLoggerLogLevelParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config logger log level params +func (o *FindConfigLoggerLogLevelParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config logger log level params +func (o *FindConfigLoggerLogLevelParams) WithHTTPClient(client *http.Client) *FindConfigLoggerLogLevelParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config logger log level params +func (o *FindConfigLoggerLogLevelParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigLoggerLogLevelParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_logger_log_level_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_logger_log_level_responses.go new file mode 100644 index 00000000000..9fc3c1f0eb2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_logger_log_level_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigLoggerLogLevelReader is a Reader for the FindConfigLoggerLogLevel structure. +type FindConfigLoggerLogLevelReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigLoggerLogLevelReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigLoggerLogLevelOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigLoggerLogLevelDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigLoggerLogLevelOK creates a FindConfigLoggerLogLevelOK with default headers values +func NewFindConfigLoggerLogLevelOK() *FindConfigLoggerLogLevelOK { + return &FindConfigLoggerLogLevelOK{} +} + +/* +FindConfigLoggerLogLevelOK handles this case with default header values. + +Config value +*/ +type FindConfigLoggerLogLevelOK struct { + Payload []string +} + +func (o *FindConfigLoggerLogLevelOK) GetPayload() []string { + return o.Payload +} + +func (o *FindConfigLoggerLogLevelOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigLoggerLogLevelDefault creates a FindConfigLoggerLogLevelDefault with default headers values +func NewFindConfigLoggerLogLevelDefault(code int) *FindConfigLoggerLogLevelDefault { + return &FindConfigLoggerLogLevelDefault{ + _statusCode: code, + } +} + +/* +FindConfigLoggerLogLevelDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigLoggerLogLevelDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config logger log level default response +func (o *FindConfigLoggerLogLevelDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigLoggerLogLevelDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigLoggerLogLevelDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigLoggerLogLevelDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_lsa_reclamation_step_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_lsa_reclamation_step_parameters.go new file mode 100644 index 00000000000..2d96d41472c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_lsa_reclamation_step_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigLsaReclamationStepParams creates a new FindConfigLsaReclamationStepParams object +// with the default values initialized. 
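// ---------------------------------------------------------------------------
// Editor's sketch (not generated code): how a params/responses pair such as
// the find_config_logger_log_level files above is typically driven. The
// config.New constructor, the Client type, and its FindConfigLoggerLogLevel
// method live in this package's generated client file, which this hunk does
// not include, so their exact signatures are an assumption based on the
// usual go-swagger layout; host, base path, and scheme are placeholders.
//
//	package main
//
//	import (
//		"context"
//		"fmt"
//		"log"
//		"time"
//
//		httptransport "github.com/go-openapi/runtime/client"
//		"github.com/go-openapi/strfmt"
//
//		"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
//	)
//
//	func main() {
//		transport := httptransport.New("127.0.0.1:10000", "/", []string{"http"})
//		client := config.New(transport, strfmt.Default)
//
//		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//		defer cancel()
//
//		// The fluent With* setters defined above chain onto the constructor.
//		params := config.NewFindConfigLoggerLogLevelParamsWithContext(ctx).
//			WithTimeout(10 * time.Second) // per-request timeout, on top of ctx
//
//		resp, err := client.FindConfigLoggerLogLevel(params)
//		if err != nil {
//			log.Fatal(err)
//		}
//		fmt.Println("logger log levels:", resp.GetPayload()) // []string payload
//	}
// ---------------------------------------------------------------------------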
+func NewFindConfigLsaReclamationStepParams() *FindConfigLsaReclamationStepParams { + + return &FindConfigLsaReclamationStepParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigLsaReclamationStepParamsWithTimeout creates a new FindConfigLsaReclamationStepParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigLsaReclamationStepParamsWithTimeout(timeout time.Duration) *FindConfigLsaReclamationStepParams { + + return &FindConfigLsaReclamationStepParams{ + + timeout: timeout, + } +} + +// NewFindConfigLsaReclamationStepParamsWithContext creates a new FindConfigLsaReclamationStepParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigLsaReclamationStepParamsWithContext(ctx context.Context) *FindConfigLsaReclamationStepParams { + + return &FindConfigLsaReclamationStepParams{ + + Context: ctx, + } +} + +// NewFindConfigLsaReclamationStepParamsWithHTTPClient creates a new FindConfigLsaReclamationStepParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigLsaReclamationStepParamsWithHTTPClient(client *http.Client) *FindConfigLsaReclamationStepParams { + + return &FindConfigLsaReclamationStepParams{ + HTTPClient: client, + } +} + +/* +FindConfigLsaReclamationStepParams contains all the parameters to send to the API endpoint +for the find config lsa reclamation step operation typically these are written to a http.Request +*/ +type FindConfigLsaReclamationStepParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config lsa reclamation step params +func (o *FindConfigLsaReclamationStepParams) WithTimeout(timeout time.Duration) *FindConfigLsaReclamationStepParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config lsa reclamation step params +func (o *FindConfigLsaReclamationStepParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config lsa reclamation step params +func (o *FindConfigLsaReclamationStepParams) WithContext(ctx context.Context) *FindConfigLsaReclamationStepParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config lsa reclamation step params +func (o *FindConfigLsaReclamationStepParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config lsa reclamation step params +func (o *FindConfigLsaReclamationStepParams) WithHTTPClient(client *http.Client) *FindConfigLsaReclamationStepParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config lsa reclamation step params +func (o *FindConfigLsaReclamationStepParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigLsaReclamationStepParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_lsa_reclamation_step_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_lsa_reclamation_step_responses.go new file mode 100644 index 00000000000..13b4bd2c073 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_lsa_reclamation_step_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigLsaReclamationStepReader is a Reader for the FindConfigLsaReclamationStep structure. +type FindConfigLsaReclamationStepReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigLsaReclamationStepReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigLsaReclamationStepOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigLsaReclamationStepDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigLsaReclamationStepOK creates a FindConfigLsaReclamationStepOK with default headers values +func NewFindConfigLsaReclamationStepOK() *FindConfigLsaReclamationStepOK { + return &FindConfigLsaReclamationStepOK{} +} + +/* +FindConfigLsaReclamationStepOK handles this case with default header values. + +Config value +*/ +type FindConfigLsaReclamationStepOK struct { + Payload int64 +} + +func (o *FindConfigLsaReclamationStepOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigLsaReclamationStepOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigLsaReclamationStepDefault creates a FindConfigLsaReclamationStepDefault with default headers values +func NewFindConfigLsaReclamationStepDefault(code int) *FindConfigLsaReclamationStepDefault { + return &FindConfigLsaReclamationStepDefault{ + _statusCode: code, + } +} + +/* +FindConfigLsaReclamationStepDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigLsaReclamationStepDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config lsa reclamation step default response +func (o *FindConfigLsaReclamationStepDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigLsaReclamationStepDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigLsaReclamationStepDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigLsaReclamationStepDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hint_window_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hint_window_in_ms_parameters.go new file mode 100644 index 00000000000..89d46720604 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hint_window_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMaxHintWindowInMsParams creates a new FindConfigMaxHintWindowInMsParams object +// with the default values initialized. 
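// ---------------------------------------------------------------------------
// Editor's sketch (not generated code): non-2xx responses surface from the
// FindConfigLsaReclamationStepReader above as an error value of type
// *FindConfigLsaReclamationStepDefault, since that type implements the error
// interface via its Error() method. A caller can recover the typed payload
// as below; the config.Client type and its operation method are assumed from
// the generated client file, as in the earlier sketch.
//
//	func lsaReclamationStep(client *config.Client, params *config.FindConfigLsaReclamationStepParams) (int64, error) {
//		value, err := client.FindConfigLsaReclamationStep(params)
//		if err != nil {
//			if apiErr, ok := err.(*config.FindConfigLsaReclamationStepDefault); ok {
//				return 0, fmt.Errorf("agent rejected request (HTTP %d): %s",
//					apiErr.Code(), apiErr.GetPayload().Message)
//			}
//			return 0, err
//		}
//		return value.GetPayload(), nil // int64 config value on HTTP 200
//	}
// ---------------------------------------------------------------------------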
+func NewFindConfigMaxHintWindowInMsParams() *FindConfigMaxHintWindowInMsParams { + + return &FindConfigMaxHintWindowInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMaxHintWindowInMsParamsWithTimeout creates a new FindConfigMaxHintWindowInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMaxHintWindowInMsParamsWithTimeout(timeout time.Duration) *FindConfigMaxHintWindowInMsParams { + + return &FindConfigMaxHintWindowInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigMaxHintWindowInMsParamsWithContext creates a new FindConfigMaxHintWindowInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMaxHintWindowInMsParamsWithContext(ctx context.Context) *FindConfigMaxHintWindowInMsParams { + + return &FindConfigMaxHintWindowInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigMaxHintWindowInMsParamsWithHTTPClient creates a new FindConfigMaxHintWindowInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMaxHintWindowInMsParamsWithHTTPClient(client *http.Client) *FindConfigMaxHintWindowInMsParams { + + return &FindConfigMaxHintWindowInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigMaxHintWindowInMsParams contains all the parameters to send to the API endpoint +for the find config max hint window in ms operation typically these are written to a http.Request +*/ +type FindConfigMaxHintWindowInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config max hint window in ms params +func (o *FindConfigMaxHintWindowInMsParams) WithTimeout(timeout time.Duration) *FindConfigMaxHintWindowInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config max hint window in ms params +func (o *FindConfigMaxHintWindowInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config max hint window in ms params +func (o *FindConfigMaxHintWindowInMsParams) WithContext(ctx context.Context) *FindConfigMaxHintWindowInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config max hint window in ms params +func (o *FindConfigMaxHintWindowInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config max hint window in ms params +func (o *FindConfigMaxHintWindowInMsParams) WithHTTPClient(client *http.Client) *FindConfigMaxHintWindowInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config max hint window in ms params +func (o *FindConfigMaxHintWindowInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMaxHintWindowInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hint_window_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hint_window_in_ms_responses.go new file mode 100644 index 00000000000..4249efd02ce --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hint_window_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMaxHintWindowInMsReader is a Reader for the FindConfigMaxHintWindowInMs structure. +type FindConfigMaxHintWindowInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMaxHintWindowInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMaxHintWindowInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMaxHintWindowInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMaxHintWindowInMsOK creates a FindConfigMaxHintWindowInMsOK with default headers values +func NewFindConfigMaxHintWindowInMsOK() *FindConfigMaxHintWindowInMsOK { + return &FindConfigMaxHintWindowInMsOK{} +} + +/* +FindConfigMaxHintWindowInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigMaxHintWindowInMsOK struct { + Payload int64 +} + +func (o *FindConfigMaxHintWindowInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMaxHintWindowInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMaxHintWindowInMsDefault creates a FindConfigMaxHintWindowInMsDefault with default headers values +func NewFindConfigMaxHintWindowInMsDefault(code int) *FindConfigMaxHintWindowInMsDefault { + return &FindConfigMaxHintWindowInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigMaxHintWindowInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMaxHintWindowInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config max hint window in ms default response +func (o *FindConfigMaxHintWindowInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMaxHintWindowInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMaxHintWindowInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMaxHintWindowInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hints_delivery_threads_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hints_delivery_threads_parameters.go new file mode 100644 index 00000000000..0897f87d5fa --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hints_delivery_threads_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMaxHintsDeliveryThreadsParams creates a new FindConfigMaxHintsDeliveryThreadsParams object +// with the default values initialized. 
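// ---------------------------------------------------------------------------
// Editor's note (not generated code): the Error() implementations in this
// hunk trim any trailing periods from the server-supplied message so the
// composed string stays uniform. The formatting in isolation, using a
// hypothetical message:
//
//	msg := strings.TrimRight("max hint window not set.", ".")
//	fmt.Printf("agent [HTTP %d] %s\n", 400, msg)
//	// prints: agent [HTTP 400] max hint window not set
// ---------------------------------------------------------------------------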
+func NewFindConfigMaxHintsDeliveryThreadsParams() *FindConfigMaxHintsDeliveryThreadsParams { + + return &FindConfigMaxHintsDeliveryThreadsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMaxHintsDeliveryThreadsParamsWithTimeout creates a new FindConfigMaxHintsDeliveryThreadsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMaxHintsDeliveryThreadsParamsWithTimeout(timeout time.Duration) *FindConfigMaxHintsDeliveryThreadsParams { + + return &FindConfigMaxHintsDeliveryThreadsParams{ + + timeout: timeout, + } +} + +// NewFindConfigMaxHintsDeliveryThreadsParamsWithContext creates a new FindConfigMaxHintsDeliveryThreadsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMaxHintsDeliveryThreadsParamsWithContext(ctx context.Context) *FindConfigMaxHintsDeliveryThreadsParams { + + return &FindConfigMaxHintsDeliveryThreadsParams{ + + Context: ctx, + } +} + +// NewFindConfigMaxHintsDeliveryThreadsParamsWithHTTPClient creates a new FindConfigMaxHintsDeliveryThreadsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMaxHintsDeliveryThreadsParamsWithHTTPClient(client *http.Client) *FindConfigMaxHintsDeliveryThreadsParams { + + return &FindConfigMaxHintsDeliveryThreadsParams{ + HTTPClient: client, + } +} + +/* +FindConfigMaxHintsDeliveryThreadsParams contains all the parameters to send to the API endpoint +for the find config max hints delivery threads operation typically these are written to a http.Request +*/ +type FindConfigMaxHintsDeliveryThreadsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config max hints delivery threads params +func (o *FindConfigMaxHintsDeliveryThreadsParams) WithTimeout(timeout time.Duration) *FindConfigMaxHintsDeliveryThreadsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config max hints delivery threads params +func (o *FindConfigMaxHintsDeliveryThreadsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config max hints delivery threads params +func (o *FindConfigMaxHintsDeliveryThreadsParams) WithContext(ctx context.Context) *FindConfigMaxHintsDeliveryThreadsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config max hints delivery threads params +func (o *FindConfigMaxHintsDeliveryThreadsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config max hints delivery threads params +func (o *FindConfigMaxHintsDeliveryThreadsParams) WithHTTPClient(client *http.Client) *FindConfigMaxHintsDeliveryThreadsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config max hints delivery threads params +func (o *FindConfigMaxHintsDeliveryThreadsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMaxHintsDeliveryThreadsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hints_delivery_threads_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hints_delivery_threads_responses.go new file mode 100644 index 00000000000..a5f350f39a6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_max_hints_delivery_threads_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMaxHintsDeliveryThreadsReader is a Reader for the FindConfigMaxHintsDeliveryThreads structure. +type FindConfigMaxHintsDeliveryThreadsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMaxHintsDeliveryThreadsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMaxHintsDeliveryThreadsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMaxHintsDeliveryThreadsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMaxHintsDeliveryThreadsOK creates a FindConfigMaxHintsDeliveryThreadsOK with default headers values +func NewFindConfigMaxHintsDeliveryThreadsOK() *FindConfigMaxHintsDeliveryThreadsOK { + return &FindConfigMaxHintsDeliveryThreadsOK{} +} + +/* +FindConfigMaxHintsDeliveryThreadsOK handles this case with default header values. + +Config value +*/ +type FindConfigMaxHintsDeliveryThreadsOK struct { + Payload int64 +} + +func (o *FindConfigMaxHintsDeliveryThreadsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMaxHintsDeliveryThreadsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMaxHintsDeliveryThreadsDefault creates a FindConfigMaxHintsDeliveryThreadsDefault with default headers values +func NewFindConfigMaxHintsDeliveryThreadsDefault(code int) *FindConfigMaxHintsDeliveryThreadsDefault { + return &FindConfigMaxHintsDeliveryThreadsDefault{ + _statusCode: code, + } +} + +/* +FindConfigMaxHintsDeliveryThreadsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMaxHintsDeliveryThreadsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config max hints delivery threads default response +func (o *FindConfigMaxHintsDeliveryThreadsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMaxHintsDeliveryThreadsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMaxHintsDeliveryThreadsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMaxHintsDeliveryThreadsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memory_allocator_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memory_allocator_parameters.go new file mode 100644 index 00000000000..f9f44bd14a8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memory_allocator_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemoryAllocatorParams creates a new FindConfigMemoryAllocatorParams object +// with the default values initialized. 
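// ---------------------------------------------------------------------------
// Editor's note (not generated code): the four New* constructors generated
// above are conveniences over one zero value plus a setter; in particular
// these two forms leave the params in the same state, since the plain
// constructor starts from cr.DefaultTimeout and SetTimeout then overrides it:
//
//	p1 := config.NewFindConfigMaxHintsDeliveryThreadsParamsWithTimeout(5 * time.Second)
//
//	p2 := config.NewFindConfigMaxHintsDeliveryThreadsParams()
//	p2.SetTimeout(5 * time.Second)
// ---------------------------------------------------------------------------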
+func NewFindConfigMemoryAllocatorParams() *FindConfigMemoryAllocatorParams { + + return &FindConfigMemoryAllocatorParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemoryAllocatorParamsWithTimeout creates a new FindConfigMemoryAllocatorParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemoryAllocatorParamsWithTimeout(timeout time.Duration) *FindConfigMemoryAllocatorParams { + + return &FindConfigMemoryAllocatorParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemoryAllocatorParamsWithContext creates a new FindConfigMemoryAllocatorParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemoryAllocatorParamsWithContext(ctx context.Context) *FindConfigMemoryAllocatorParams { + + return &FindConfigMemoryAllocatorParams{ + + Context: ctx, + } +} + +// NewFindConfigMemoryAllocatorParamsWithHTTPClient creates a new FindConfigMemoryAllocatorParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemoryAllocatorParamsWithHTTPClient(client *http.Client) *FindConfigMemoryAllocatorParams { + + return &FindConfigMemoryAllocatorParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemoryAllocatorParams contains all the parameters to send to the API endpoint +for the find config memory allocator operation typically these are written to a http.Request +*/ +type FindConfigMemoryAllocatorParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memory allocator params +func (o *FindConfigMemoryAllocatorParams) WithTimeout(timeout time.Duration) *FindConfigMemoryAllocatorParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memory allocator params +func (o *FindConfigMemoryAllocatorParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memory allocator params +func (o *FindConfigMemoryAllocatorParams) WithContext(ctx context.Context) *FindConfigMemoryAllocatorParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memory allocator params +func (o *FindConfigMemoryAllocatorParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memory allocator params +func (o *FindConfigMemoryAllocatorParams) WithHTTPClient(client *http.Client) *FindConfigMemoryAllocatorParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memory allocator params +func (o *FindConfigMemoryAllocatorParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemoryAllocatorParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memory_allocator_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memory_allocator_responses.go new file mode 100644 index 00000000000..06c459c391c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memory_allocator_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemoryAllocatorReader is a Reader for the FindConfigMemoryAllocator structure. +type FindConfigMemoryAllocatorReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemoryAllocatorReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemoryAllocatorOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemoryAllocatorDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemoryAllocatorOK creates a FindConfigMemoryAllocatorOK with default headers values +func NewFindConfigMemoryAllocatorOK() *FindConfigMemoryAllocatorOK { + return &FindConfigMemoryAllocatorOK{} +} + +/* +FindConfigMemoryAllocatorOK handles this case with default header values. + +Config value +*/ +type FindConfigMemoryAllocatorOK struct { + Payload string +} + +func (o *FindConfigMemoryAllocatorOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigMemoryAllocatorOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemoryAllocatorDefault creates a FindConfigMemoryAllocatorDefault with default headers values +func NewFindConfigMemoryAllocatorDefault(code int) *FindConfigMemoryAllocatorDefault { + return &FindConfigMemoryAllocatorDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemoryAllocatorDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemoryAllocatorDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memory allocator default response +func (o *FindConfigMemoryAllocatorDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemoryAllocatorDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemoryAllocatorDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemoryAllocatorDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_allocation_type_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_allocation_type_parameters.go new file mode 100644 index 00000000000..68335b87053 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_allocation_type_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableAllocationTypeParams creates a new FindConfigMemtableAllocationTypeParams object +// with the default values initialized. 
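// ---------------------------------------------------------------------------
// Editor's note (not generated code): each readResponse above treats io.EOF
// from consumer.Consume as success, so an empty response body leaves the
// payload at its zero value instead of failing. The guarded condition can be
// reproduced with the stock JSON consumer from go-openapi/runtime:
//
//	consumer := runtime.JSONConsumer()
//	var payload string
//	err := consumer.Consume(strings.NewReader(""), &payload) // io.EOF: empty body
//	if err != nil && err != io.EOF {
//		log.Fatal(err) // only genuine decode failures land here
//	}
//	fmt.Printf("payload=%q\n", payload) // payload=""
// ---------------------------------------------------------------------------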
+func NewFindConfigMemtableAllocationTypeParams() *FindConfigMemtableAllocationTypeParams { + + return &FindConfigMemtableAllocationTypeParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableAllocationTypeParamsWithTimeout creates a new FindConfigMemtableAllocationTypeParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableAllocationTypeParamsWithTimeout(timeout time.Duration) *FindConfigMemtableAllocationTypeParams { + + return &FindConfigMemtableAllocationTypeParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableAllocationTypeParamsWithContext creates a new FindConfigMemtableAllocationTypeParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableAllocationTypeParamsWithContext(ctx context.Context) *FindConfigMemtableAllocationTypeParams { + + return &FindConfigMemtableAllocationTypeParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableAllocationTypeParamsWithHTTPClient creates a new FindConfigMemtableAllocationTypeParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableAllocationTypeParamsWithHTTPClient(client *http.Client) *FindConfigMemtableAllocationTypeParams { + + return &FindConfigMemtableAllocationTypeParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableAllocationTypeParams contains all the parameters to send to the API endpoint +for the find config memtable allocation type operation typically these are written to a http.Request +*/ +type FindConfigMemtableAllocationTypeParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable allocation type params +func (o *FindConfigMemtableAllocationTypeParams) WithTimeout(timeout time.Duration) *FindConfigMemtableAllocationTypeParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable allocation type params +func (o *FindConfigMemtableAllocationTypeParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable allocation type params +func (o *FindConfigMemtableAllocationTypeParams) WithContext(ctx context.Context) *FindConfigMemtableAllocationTypeParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable allocation type params +func (o *FindConfigMemtableAllocationTypeParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable allocation type params +func (o *FindConfigMemtableAllocationTypeParams) WithHTTPClient(client *http.Client) *FindConfigMemtableAllocationTypeParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable allocation type params +func (o *FindConfigMemtableAllocationTypeParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableAllocationTypeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_allocation_type_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_allocation_type_responses.go new file mode 100644 index 00000000000..5c334e823ac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_allocation_type_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableAllocationTypeReader is a Reader for the FindConfigMemtableAllocationType structure. +type FindConfigMemtableAllocationTypeReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableAllocationTypeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableAllocationTypeOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableAllocationTypeDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableAllocationTypeOK creates a FindConfigMemtableAllocationTypeOK with default headers values +func NewFindConfigMemtableAllocationTypeOK() *FindConfigMemtableAllocationTypeOK { + return &FindConfigMemtableAllocationTypeOK{} +} + +/* +FindConfigMemtableAllocationTypeOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableAllocationTypeOK struct { + Payload string +} + +func (o *FindConfigMemtableAllocationTypeOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigMemtableAllocationTypeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableAllocationTypeDefault creates a FindConfigMemtableAllocationTypeDefault with default headers values +func NewFindConfigMemtableAllocationTypeDefault(code int) *FindConfigMemtableAllocationTypeDefault { + return &FindConfigMemtableAllocationTypeDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableAllocationTypeDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableAllocationTypeDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable allocation type default response +func (o *FindConfigMemtableAllocationTypeDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableAllocationTypeDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableAllocationTypeDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableAllocationTypeDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_cleanup_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_cleanup_threshold_parameters.go new file mode 100644 index 00000000000..3c924bd1560 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_cleanup_threshold_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableCleanupThresholdParams creates a new FindConfigMemtableCleanupThresholdParams object +// with the default values initialized. 
+func NewFindConfigMemtableCleanupThresholdParams() *FindConfigMemtableCleanupThresholdParams { + + return &FindConfigMemtableCleanupThresholdParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableCleanupThresholdParamsWithTimeout creates a new FindConfigMemtableCleanupThresholdParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableCleanupThresholdParamsWithTimeout(timeout time.Duration) *FindConfigMemtableCleanupThresholdParams { + + return &FindConfigMemtableCleanupThresholdParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableCleanupThresholdParamsWithContext creates a new FindConfigMemtableCleanupThresholdParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableCleanupThresholdParamsWithContext(ctx context.Context) *FindConfigMemtableCleanupThresholdParams { + + return &FindConfigMemtableCleanupThresholdParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableCleanupThresholdParamsWithHTTPClient creates a new FindConfigMemtableCleanupThresholdParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableCleanupThresholdParamsWithHTTPClient(client *http.Client) *FindConfigMemtableCleanupThresholdParams { + + return &FindConfigMemtableCleanupThresholdParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableCleanupThresholdParams contains all the parameters to send to the API endpoint +for the find config memtable cleanup threshold operation typically these are written to a http.Request +*/ +type FindConfigMemtableCleanupThresholdParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable cleanup threshold params +func (o *FindConfigMemtableCleanupThresholdParams) WithTimeout(timeout time.Duration) *FindConfigMemtableCleanupThresholdParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable cleanup threshold params +func (o *FindConfigMemtableCleanupThresholdParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable cleanup threshold params +func (o *FindConfigMemtableCleanupThresholdParams) WithContext(ctx context.Context) *FindConfigMemtableCleanupThresholdParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable cleanup threshold params +func (o *FindConfigMemtableCleanupThresholdParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable cleanup threshold params +func (o *FindConfigMemtableCleanupThresholdParams) WithHTTPClient(client *http.Client) *FindConfigMemtableCleanupThresholdParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable cleanup threshold params +func (o *FindConfigMemtableCleanupThresholdParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableCleanupThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_cleanup_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_cleanup_threshold_responses.go new file mode 100644 index 00000000000..3c68ed4526e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_cleanup_threshold_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableCleanupThresholdReader is a Reader for the FindConfigMemtableCleanupThreshold structure. +type FindConfigMemtableCleanupThresholdReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableCleanupThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableCleanupThresholdOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableCleanupThresholdDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableCleanupThresholdOK creates a FindConfigMemtableCleanupThresholdOK with default headers values +func NewFindConfigMemtableCleanupThresholdOK() *FindConfigMemtableCleanupThresholdOK { + return &FindConfigMemtableCleanupThresholdOK{} +} + +/* +FindConfigMemtableCleanupThresholdOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableCleanupThresholdOK struct { + Payload float64 +} + +func (o *FindConfigMemtableCleanupThresholdOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigMemtableCleanupThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableCleanupThresholdDefault creates a FindConfigMemtableCleanupThresholdDefault with default headers values +func NewFindConfigMemtableCleanupThresholdDefault(code int) *FindConfigMemtableCleanupThresholdDefault { + return &FindConfigMemtableCleanupThresholdDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableCleanupThresholdDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableCleanupThresholdDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable cleanup threshold default response +func (o *FindConfigMemtableCleanupThresholdDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableCleanupThresholdDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableCleanupThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableCleanupThresholdDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_queue_size_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_queue_size_parameters.go new file mode 100644 index 00000000000..09c8c26e95f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_queue_size_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableFlushQueueSizeParams creates a new FindConfigMemtableFlushQueueSizeParams object +// with the default values initialized. 
+func NewFindConfigMemtableFlushQueueSizeParams() *FindConfigMemtableFlushQueueSizeParams { + + return &FindConfigMemtableFlushQueueSizeParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableFlushQueueSizeParamsWithTimeout creates a new FindConfigMemtableFlushQueueSizeParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableFlushQueueSizeParamsWithTimeout(timeout time.Duration) *FindConfigMemtableFlushQueueSizeParams { + + return &FindConfigMemtableFlushQueueSizeParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableFlushQueueSizeParamsWithContext creates a new FindConfigMemtableFlushQueueSizeParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableFlushQueueSizeParamsWithContext(ctx context.Context) *FindConfigMemtableFlushQueueSizeParams { + + return &FindConfigMemtableFlushQueueSizeParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableFlushQueueSizeParamsWithHTTPClient creates a new FindConfigMemtableFlushQueueSizeParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableFlushQueueSizeParamsWithHTTPClient(client *http.Client) *FindConfigMemtableFlushQueueSizeParams { + + return &FindConfigMemtableFlushQueueSizeParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableFlushQueueSizeParams contains all the parameters to send to the API endpoint +for the find config memtable flush queue size operation typically these are written to a http.Request +*/ +type FindConfigMemtableFlushQueueSizeParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable flush queue size params +func (o *FindConfigMemtableFlushQueueSizeParams) WithTimeout(timeout time.Duration) *FindConfigMemtableFlushQueueSizeParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable flush queue size params +func (o *FindConfigMemtableFlushQueueSizeParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable flush queue size params +func (o *FindConfigMemtableFlushQueueSizeParams) WithContext(ctx context.Context) *FindConfigMemtableFlushQueueSizeParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable flush queue size params +func (o *FindConfigMemtableFlushQueueSizeParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable flush queue size params +func (o *FindConfigMemtableFlushQueueSizeParams) WithHTTPClient(client *http.Client) *FindConfigMemtableFlushQueueSizeParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable flush queue size params +func (o *FindConfigMemtableFlushQueueSizeParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableFlushQueueSizeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_queue_size_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_queue_size_responses.go new file mode 100644 index 00000000000..e042c8aa148 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_queue_size_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableFlushQueueSizeReader is a Reader for the FindConfigMemtableFlushQueueSize structure. +type FindConfigMemtableFlushQueueSizeReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableFlushQueueSizeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableFlushQueueSizeOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableFlushQueueSizeDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableFlushQueueSizeOK creates a FindConfigMemtableFlushQueueSizeOK with default headers values +func NewFindConfigMemtableFlushQueueSizeOK() *FindConfigMemtableFlushQueueSizeOK { + return &FindConfigMemtableFlushQueueSizeOK{} +} + +/* +FindConfigMemtableFlushQueueSizeOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableFlushQueueSizeOK struct { + Payload int64 +} + +func (o *FindConfigMemtableFlushQueueSizeOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMemtableFlushQueueSizeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableFlushQueueSizeDefault creates a FindConfigMemtableFlushQueueSizeDefault with default headers values +func NewFindConfigMemtableFlushQueueSizeDefault(code int) *FindConfigMemtableFlushQueueSizeDefault { + return &FindConfigMemtableFlushQueueSizeDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableFlushQueueSizeDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableFlushQueueSizeDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable flush queue size default response +func (o *FindConfigMemtableFlushQueueSizeDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableFlushQueueSizeDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableFlushQueueSizeDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableFlushQueueSizeDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_static_shares_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_static_shares_parameters.go new file mode 100644 index 00000000000..930ecc05780 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_static_shares_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableFlushStaticSharesParams creates a new FindConfigMemtableFlushStaticSharesParams object +// with the default values initialized. 
+func NewFindConfigMemtableFlushStaticSharesParams() *FindConfigMemtableFlushStaticSharesParams { + + return &FindConfigMemtableFlushStaticSharesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableFlushStaticSharesParamsWithTimeout creates a new FindConfigMemtableFlushStaticSharesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableFlushStaticSharesParamsWithTimeout(timeout time.Duration) *FindConfigMemtableFlushStaticSharesParams { + + return &FindConfigMemtableFlushStaticSharesParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableFlushStaticSharesParamsWithContext creates a new FindConfigMemtableFlushStaticSharesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableFlushStaticSharesParamsWithContext(ctx context.Context) *FindConfigMemtableFlushStaticSharesParams { + + return &FindConfigMemtableFlushStaticSharesParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableFlushStaticSharesParamsWithHTTPClient creates a new FindConfigMemtableFlushStaticSharesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableFlushStaticSharesParamsWithHTTPClient(client *http.Client) *FindConfigMemtableFlushStaticSharesParams { + + return &FindConfigMemtableFlushStaticSharesParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableFlushStaticSharesParams contains all the parameters to send to the API endpoint +for the find config memtable flush static shares operation typically these are written to a http.Request +*/ +type FindConfigMemtableFlushStaticSharesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable flush static shares params +func (o *FindConfigMemtableFlushStaticSharesParams) WithTimeout(timeout time.Duration) *FindConfigMemtableFlushStaticSharesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable flush static shares params +func (o *FindConfigMemtableFlushStaticSharesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable flush static shares params +func (o *FindConfigMemtableFlushStaticSharesParams) WithContext(ctx context.Context) *FindConfigMemtableFlushStaticSharesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable flush static shares params +func (o *FindConfigMemtableFlushStaticSharesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable flush static shares params +func (o *FindConfigMemtableFlushStaticSharesParams) WithHTTPClient(client *http.Client) *FindConfigMemtableFlushStaticSharesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable flush static shares params +func (o *FindConfigMemtableFlushStaticSharesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableFlushStaticSharesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_static_shares_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_static_shares_responses.go new file mode 100644 index 00000000000..73a34edc879 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_static_shares_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableFlushStaticSharesReader is a Reader for the FindConfigMemtableFlushStaticShares structure. +type FindConfigMemtableFlushStaticSharesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableFlushStaticSharesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableFlushStaticSharesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableFlushStaticSharesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableFlushStaticSharesOK creates a FindConfigMemtableFlushStaticSharesOK with default headers values +func NewFindConfigMemtableFlushStaticSharesOK() *FindConfigMemtableFlushStaticSharesOK { + return &FindConfigMemtableFlushStaticSharesOK{} +} + +/* +FindConfigMemtableFlushStaticSharesOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableFlushStaticSharesOK struct { + Payload float64 +} + +func (o *FindConfigMemtableFlushStaticSharesOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigMemtableFlushStaticSharesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableFlushStaticSharesDefault creates a FindConfigMemtableFlushStaticSharesDefault with default headers values +func NewFindConfigMemtableFlushStaticSharesDefault(code int) *FindConfigMemtableFlushStaticSharesDefault { + return &FindConfigMemtableFlushStaticSharesDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableFlushStaticSharesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableFlushStaticSharesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable flush static shares default response +func (o *FindConfigMemtableFlushStaticSharesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableFlushStaticSharesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableFlushStaticSharesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableFlushStaticSharesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_writers_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_writers_parameters.go new file mode 100644 index 00000000000..e842a2f5f8e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_writers_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableFlushWritersParams creates a new FindConfigMemtableFlushWritersParams object +// with the default values initialized. 
+func NewFindConfigMemtableFlushWritersParams() *FindConfigMemtableFlushWritersParams { + + return &FindConfigMemtableFlushWritersParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableFlushWritersParamsWithTimeout creates a new FindConfigMemtableFlushWritersParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableFlushWritersParamsWithTimeout(timeout time.Duration) *FindConfigMemtableFlushWritersParams { + + return &FindConfigMemtableFlushWritersParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableFlushWritersParamsWithContext creates a new FindConfigMemtableFlushWritersParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableFlushWritersParamsWithContext(ctx context.Context) *FindConfigMemtableFlushWritersParams { + + return &FindConfigMemtableFlushWritersParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableFlushWritersParamsWithHTTPClient creates a new FindConfigMemtableFlushWritersParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableFlushWritersParamsWithHTTPClient(client *http.Client) *FindConfigMemtableFlushWritersParams { + + return &FindConfigMemtableFlushWritersParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableFlushWritersParams contains all the parameters to send to the API endpoint +for the find config memtable flush writers operation typically these are written to a http.Request +*/ +type FindConfigMemtableFlushWritersParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable flush writers params +func (o *FindConfigMemtableFlushWritersParams) WithTimeout(timeout time.Duration) *FindConfigMemtableFlushWritersParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable flush writers params +func (o *FindConfigMemtableFlushWritersParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable flush writers params +func (o *FindConfigMemtableFlushWritersParams) WithContext(ctx context.Context) *FindConfigMemtableFlushWritersParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable flush writers params +func (o *FindConfigMemtableFlushWritersParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable flush writers params +func (o *FindConfigMemtableFlushWritersParams) WithHTTPClient(client *http.Client) *FindConfigMemtableFlushWritersParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable flush writers params +func (o *FindConfigMemtableFlushWritersParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableFlushWritersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_writers_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_writers_responses.go new file mode 100644 index 00000000000..5e06e445190 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_flush_writers_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableFlushWritersReader is a Reader for the FindConfigMemtableFlushWriters structure. +type FindConfigMemtableFlushWritersReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableFlushWritersReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableFlushWritersOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableFlushWritersDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableFlushWritersOK creates a FindConfigMemtableFlushWritersOK with default headers values +func NewFindConfigMemtableFlushWritersOK() *FindConfigMemtableFlushWritersOK { + return &FindConfigMemtableFlushWritersOK{} +} + +/* +FindConfigMemtableFlushWritersOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableFlushWritersOK struct { + Payload int64 +} + +func (o *FindConfigMemtableFlushWritersOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMemtableFlushWritersOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableFlushWritersDefault creates a FindConfigMemtableFlushWritersDefault with default headers values +func NewFindConfigMemtableFlushWritersDefault(code int) *FindConfigMemtableFlushWritersDefault { + return &FindConfigMemtableFlushWritersDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableFlushWritersDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableFlushWritersDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable flush writers default response +func (o *FindConfigMemtableFlushWritersDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableFlushWritersDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableFlushWritersDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableFlushWritersDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_heap_space_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_heap_space_in_mb_parameters.go new file mode 100644 index 00000000000..eab74d9684a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_heap_space_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableHeapSpaceInMbParams creates a new FindConfigMemtableHeapSpaceInMbParams object +// with the default values initialized. 
+func NewFindConfigMemtableHeapSpaceInMbParams() *FindConfigMemtableHeapSpaceInMbParams { + + return &FindConfigMemtableHeapSpaceInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableHeapSpaceInMbParamsWithTimeout creates a new FindConfigMemtableHeapSpaceInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableHeapSpaceInMbParamsWithTimeout(timeout time.Duration) *FindConfigMemtableHeapSpaceInMbParams { + + return &FindConfigMemtableHeapSpaceInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableHeapSpaceInMbParamsWithContext creates a new FindConfigMemtableHeapSpaceInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableHeapSpaceInMbParamsWithContext(ctx context.Context) *FindConfigMemtableHeapSpaceInMbParams { + + return &FindConfigMemtableHeapSpaceInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableHeapSpaceInMbParamsWithHTTPClient creates a new FindConfigMemtableHeapSpaceInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableHeapSpaceInMbParamsWithHTTPClient(client *http.Client) *FindConfigMemtableHeapSpaceInMbParams { + + return &FindConfigMemtableHeapSpaceInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableHeapSpaceInMbParams contains all the parameters to send to the API endpoint +for the find config memtable heap space in mb operation typically these are written to a http.Request +*/ +type FindConfigMemtableHeapSpaceInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable heap space in mb params +func (o *FindConfigMemtableHeapSpaceInMbParams) WithTimeout(timeout time.Duration) *FindConfigMemtableHeapSpaceInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable heap space in mb params +func (o *FindConfigMemtableHeapSpaceInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable heap space in mb params +func (o *FindConfigMemtableHeapSpaceInMbParams) WithContext(ctx context.Context) *FindConfigMemtableHeapSpaceInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable heap space in mb params +func (o *FindConfigMemtableHeapSpaceInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable heap space in mb params +func (o *FindConfigMemtableHeapSpaceInMbParams) WithHTTPClient(client *http.Client) *FindConfigMemtableHeapSpaceInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable heap space in mb params +func (o *FindConfigMemtableHeapSpaceInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableHeapSpaceInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_heap_space_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_heap_space_in_mb_responses.go new file mode 100644 index 00000000000..8b49e2e3bdb --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_heap_space_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableHeapSpaceInMbReader is a Reader for the FindConfigMemtableHeapSpaceInMb structure. +type FindConfigMemtableHeapSpaceInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableHeapSpaceInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableHeapSpaceInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableHeapSpaceInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableHeapSpaceInMbOK creates a FindConfigMemtableHeapSpaceInMbOK with default headers values +func NewFindConfigMemtableHeapSpaceInMbOK() *FindConfigMemtableHeapSpaceInMbOK { + return &FindConfigMemtableHeapSpaceInMbOK{} +} + +/* +FindConfigMemtableHeapSpaceInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableHeapSpaceInMbOK struct { + Payload int64 +} + +func (o *FindConfigMemtableHeapSpaceInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMemtableHeapSpaceInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableHeapSpaceInMbDefault creates a FindConfigMemtableHeapSpaceInMbDefault with default headers values +func NewFindConfigMemtableHeapSpaceInMbDefault(code int) *FindConfigMemtableHeapSpaceInMbDefault { + return &FindConfigMemtableHeapSpaceInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableHeapSpaceInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableHeapSpaceInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable heap space in mb default response +func (o *FindConfigMemtableHeapSpaceInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableHeapSpaceInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableHeapSpaceInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableHeapSpaceInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_offheap_space_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_offheap_space_in_mb_parameters.go new file mode 100644 index 00000000000..5a5beab8b53 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_offheap_space_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableOffheapSpaceInMbParams creates a new FindConfigMemtableOffheapSpaceInMbParams object +// with the default values initialized. 
+func NewFindConfigMemtableOffheapSpaceInMbParams() *FindConfigMemtableOffheapSpaceInMbParams { + + return &FindConfigMemtableOffheapSpaceInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableOffheapSpaceInMbParamsWithTimeout creates a new FindConfigMemtableOffheapSpaceInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableOffheapSpaceInMbParamsWithTimeout(timeout time.Duration) *FindConfigMemtableOffheapSpaceInMbParams { + + return &FindConfigMemtableOffheapSpaceInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableOffheapSpaceInMbParamsWithContext creates a new FindConfigMemtableOffheapSpaceInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableOffheapSpaceInMbParamsWithContext(ctx context.Context) *FindConfigMemtableOffheapSpaceInMbParams { + + return &FindConfigMemtableOffheapSpaceInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableOffheapSpaceInMbParamsWithHTTPClient creates a new FindConfigMemtableOffheapSpaceInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableOffheapSpaceInMbParamsWithHTTPClient(client *http.Client) *FindConfigMemtableOffheapSpaceInMbParams { + + return &FindConfigMemtableOffheapSpaceInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableOffheapSpaceInMbParams contains all the parameters to send to the API endpoint +for the find config memtable offheap space in mb operation typically these are written to a http.Request +*/ +type FindConfigMemtableOffheapSpaceInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable offheap space in mb params +func (o *FindConfigMemtableOffheapSpaceInMbParams) WithTimeout(timeout time.Duration) *FindConfigMemtableOffheapSpaceInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable offheap space in mb params +func (o *FindConfigMemtableOffheapSpaceInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable offheap space in mb params +func (o *FindConfigMemtableOffheapSpaceInMbParams) WithContext(ctx context.Context) *FindConfigMemtableOffheapSpaceInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable offheap space in mb params +func (o *FindConfigMemtableOffheapSpaceInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable offheap space in mb params +func (o *FindConfigMemtableOffheapSpaceInMbParams) WithHTTPClient(client *http.Client) *FindConfigMemtableOffheapSpaceInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable offheap space in mb params +func (o *FindConfigMemtableOffheapSpaceInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableOffheapSpaceInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_offheap_space_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_offheap_space_in_mb_responses.go new file mode 100644 index 00000000000..a2b3fbc8209 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_offheap_space_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableOffheapSpaceInMbReader is a Reader for the FindConfigMemtableOffheapSpaceInMb structure. +type FindConfigMemtableOffheapSpaceInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableOffheapSpaceInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableOffheapSpaceInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableOffheapSpaceInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableOffheapSpaceInMbOK creates a FindConfigMemtableOffheapSpaceInMbOK with default headers values +func NewFindConfigMemtableOffheapSpaceInMbOK() *FindConfigMemtableOffheapSpaceInMbOK { + return &FindConfigMemtableOffheapSpaceInMbOK{} +} + +/* +FindConfigMemtableOffheapSpaceInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableOffheapSpaceInMbOK struct { + Payload int64 +} + +func (o *FindConfigMemtableOffheapSpaceInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMemtableOffheapSpaceInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableOffheapSpaceInMbDefault creates a FindConfigMemtableOffheapSpaceInMbDefault with default headers values +func NewFindConfigMemtableOffheapSpaceInMbDefault(code int) *FindConfigMemtableOffheapSpaceInMbDefault { + return &FindConfigMemtableOffheapSpaceInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableOffheapSpaceInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableOffheapSpaceInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable offheap space in mb default response +func (o *FindConfigMemtableOffheapSpaceInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableOffheapSpaceInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableOffheapSpaceInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableOffheapSpaceInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_total_space_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_total_space_in_mb_parameters.go new file mode 100644 index 00000000000..5523e5e8a3e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_total_space_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMemtableTotalSpaceInMbParams creates a new FindConfigMemtableTotalSpaceInMbParams object +// with the default values initialized. 
+func NewFindConfigMemtableTotalSpaceInMbParams() *FindConfigMemtableTotalSpaceInMbParams { + + return &FindConfigMemtableTotalSpaceInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMemtableTotalSpaceInMbParamsWithTimeout creates a new FindConfigMemtableTotalSpaceInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMemtableTotalSpaceInMbParamsWithTimeout(timeout time.Duration) *FindConfigMemtableTotalSpaceInMbParams { + + return &FindConfigMemtableTotalSpaceInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigMemtableTotalSpaceInMbParamsWithContext creates a new FindConfigMemtableTotalSpaceInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMemtableTotalSpaceInMbParamsWithContext(ctx context.Context) *FindConfigMemtableTotalSpaceInMbParams { + + return &FindConfigMemtableTotalSpaceInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigMemtableTotalSpaceInMbParamsWithHTTPClient creates a new FindConfigMemtableTotalSpaceInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMemtableTotalSpaceInMbParamsWithHTTPClient(client *http.Client) *FindConfigMemtableTotalSpaceInMbParams { + + return &FindConfigMemtableTotalSpaceInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigMemtableTotalSpaceInMbParams contains all the parameters to send to the API endpoint +for the find config memtable total space in mb operation typically these are written to a http.Request +*/ +type FindConfigMemtableTotalSpaceInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config memtable total space in mb params +func (o *FindConfigMemtableTotalSpaceInMbParams) WithTimeout(timeout time.Duration) *FindConfigMemtableTotalSpaceInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config memtable total space in mb params +func (o *FindConfigMemtableTotalSpaceInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config memtable total space in mb params +func (o *FindConfigMemtableTotalSpaceInMbParams) WithContext(ctx context.Context) *FindConfigMemtableTotalSpaceInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config memtable total space in mb params +func (o *FindConfigMemtableTotalSpaceInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config memtable total space in mb params +func (o *FindConfigMemtableTotalSpaceInMbParams) WithHTTPClient(client *http.Client) *FindConfigMemtableTotalSpaceInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config memtable total space in mb params +func (o *FindConfigMemtableTotalSpaceInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigMemtableTotalSpaceInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_total_space_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_total_space_in_mb_responses.go new file mode 100644 index 00000000000..40d628f4d7d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_memtable_total_space_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMemtableTotalSpaceInMbReader is a Reader for the FindConfigMemtableTotalSpaceInMb structure. +type FindConfigMemtableTotalSpaceInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMemtableTotalSpaceInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMemtableTotalSpaceInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMemtableTotalSpaceInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMemtableTotalSpaceInMbOK creates a FindConfigMemtableTotalSpaceInMbOK with default headers values +func NewFindConfigMemtableTotalSpaceInMbOK() *FindConfigMemtableTotalSpaceInMbOK { + return &FindConfigMemtableTotalSpaceInMbOK{} +} + +/* +FindConfigMemtableTotalSpaceInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigMemtableTotalSpaceInMbOK struct { + Payload int64 +} + +func (o *FindConfigMemtableTotalSpaceInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMemtableTotalSpaceInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMemtableTotalSpaceInMbDefault creates a FindConfigMemtableTotalSpaceInMbDefault with default headers values +func NewFindConfigMemtableTotalSpaceInMbDefault(code int) *FindConfigMemtableTotalSpaceInMbDefault { + return &FindConfigMemtableTotalSpaceInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigMemtableTotalSpaceInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMemtableTotalSpaceInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config memtable total space in mb default response +func (o *FindConfigMemtableTotalSpaceInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMemtableTotalSpaceInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMemtableTotalSpaceInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMemtableTotalSpaceInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_murmur3_partitioner_ignore_msb_bits_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_murmur3_partitioner_ignore_msb_bits_parameters.go new file mode 100644 index 00000000000..7e6f73ed766 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_murmur3_partitioner_ignore_msb_bits_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigMurmur3PartitionerIgnoreMsbBitsParams creates a new FindConfigMurmur3PartitionerIgnoreMsbBitsParams object +// with the default values initialized. 
+func NewFindConfigMurmur3PartitionerIgnoreMsbBitsParams() *FindConfigMurmur3PartitionerIgnoreMsbBitsParams { + + return &FindConfigMurmur3PartitionerIgnoreMsbBitsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigMurmur3PartitionerIgnoreMsbBitsParamsWithTimeout creates a new FindConfigMurmur3PartitionerIgnoreMsbBitsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigMurmur3PartitionerIgnoreMsbBitsParamsWithTimeout(timeout time.Duration) *FindConfigMurmur3PartitionerIgnoreMsbBitsParams { + + return &FindConfigMurmur3PartitionerIgnoreMsbBitsParams{ + + timeout: timeout, + } +} + +// NewFindConfigMurmur3PartitionerIgnoreMsbBitsParamsWithContext creates a new FindConfigMurmur3PartitionerIgnoreMsbBitsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigMurmur3PartitionerIgnoreMsbBitsParamsWithContext(ctx context.Context) *FindConfigMurmur3PartitionerIgnoreMsbBitsParams { + + return &FindConfigMurmur3PartitionerIgnoreMsbBitsParams{ + + Context: ctx, + } +} + +// NewFindConfigMurmur3PartitionerIgnoreMsbBitsParamsWithHTTPClient creates a new FindConfigMurmur3PartitionerIgnoreMsbBitsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigMurmur3PartitionerIgnoreMsbBitsParamsWithHTTPClient(client *http.Client) *FindConfigMurmur3PartitionerIgnoreMsbBitsParams { + + return &FindConfigMurmur3PartitionerIgnoreMsbBitsParams{ + HTTPClient: client, + } +} + +/* +FindConfigMurmur3PartitionerIgnoreMsbBitsParams contains all the parameters to send to the API endpoint +for the find config murmur3 partitioner ignore msb bits operation typically these are written to a http.Request +*/ +type FindConfigMurmur3PartitionerIgnoreMsbBitsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config murmur3 partitioner ignore msb bits params +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) WithTimeout(timeout time.Duration) *FindConfigMurmur3PartitionerIgnoreMsbBitsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config murmur3 partitioner ignore msb bits params +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config murmur3 partitioner ignore msb bits params +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) WithContext(ctx context.Context) *FindConfigMurmur3PartitionerIgnoreMsbBitsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config murmur3 partitioner ignore msb bits params +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config murmur3 partitioner ignore msb bits params +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) WithHTTPClient(client *http.Client) *FindConfigMurmur3PartitionerIgnoreMsbBitsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config murmur3 partitioner ignore msb bits params +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*FindConfigMurmur3PartitionerIgnoreMsbBitsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_murmur3_partitioner_ignore_msb_bits_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_murmur3_partitioner_ignore_msb_bits_responses.go new file mode 100644 index 00000000000..84725710e79 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_murmur3_partitioner_ignore_msb_bits_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigMurmur3PartitionerIgnoreMsbBitsReader is a Reader for the FindConfigMurmur3PartitionerIgnoreMsbBits structure. +type FindConfigMurmur3PartitionerIgnoreMsbBitsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigMurmur3PartitionerIgnoreMsbBitsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigMurmur3PartitionerIgnoreMsbBitsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigMurmur3PartitionerIgnoreMsbBitsOK creates a FindConfigMurmur3PartitionerIgnoreMsbBitsOK with default headers values +func NewFindConfigMurmur3PartitionerIgnoreMsbBitsOK() *FindConfigMurmur3PartitionerIgnoreMsbBitsOK { + return &FindConfigMurmur3PartitionerIgnoreMsbBitsOK{} +} + +/* +FindConfigMurmur3PartitionerIgnoreMsbBitsOK handles this case with default header values. + +Config value +*/ +type FindConfigMurmur3PartitionerIgnoreMsbBitsOK struct { + Payload int64 +} + +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigMurmur3PartitionerIgnoreMsbBitsDefault creates a FindConfigMurmur3PartitionerIgnoreMsbBitsDefault with default headers values +func NewFindConfigMurmur3PartitionerIgnoreMsbBitsDefault(code int) *FindConfigMurmur3PartitionerIgnoreMsbBitsDefault { + return &FindConfigMurmur3PartitionerIgnoreMsbBitsDefault{ + _statusCode: code, + } +} + +/* +FindConfigMurmur3PartitionerIgnoreMsbBitsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigMurmur3PartitionerIgnoreMsbBitsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config murmur3 partitioner ignore msb bits default response +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigMurmur3PartitionerIgnoreMsbBitsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_frame_size_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_frame_size_in_mb_parameters.go new file mode 100644 index 00000000000..a03d9e26e33 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_frame_size_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigNativeTransportMaxFrameSizeInMbParams creates a new FindConfigNativeTransportMaxFrameSizeInMbParams object +// with the default values initialized. 
+func NewFindConfigNativeTransportMaxFrameSizeInMbParams() *FindConfigNativeTransportMaxFrameSizeInMbParams { + + return &FindConfigNativeTransportMaxFrameSizeInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigNativeTransportMaxFrameSizeInMbParamsWithTimeout creates a new FindConfigNativeTransportMaxFrameSizeInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigNativeTransportMaxFrameSizeInMbParamsWithTimeout(timeout time.Duration) *FindConfigNativeTransportMaxFrameSizeInMbParams { + + return &FindConfigNativeTransportMaxFrameSizeInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigNativeTransportMaxFrameSizeInMbParamsWithContext creates a new FindConfigNativeTransportMaxFrameSizeInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigNativeTransportMaxFrameSizeInMbParamsWithContext(ctx context.Context) *FindConfigNativeTransportMaxFrameSizeInMbParams { + + return &FindConfigNativeTransportMaxFrameSizeInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigNativeTransportMaxFrameSizeInMbParamsWithHTTPClient creates a new FindConfigNativeTransportMaxFrameSizeInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigNativeTransportMaxFrameSizeInMbParamsWithHTTPClient(client *http.Client) *FindConfigNativeTransportMaxFrameSizeInMbParams { + + return &FindConfigNativeTransportMaxFrameSizeInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigNativeTransportMaxFrameSizeInMbParams contains all the parameters to send to the API endpoint +for the find config native transport max frame size in mb operation typically these are written to a http.Request +*/ +type FindConfigNativeTransportMaxFrameSizeInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config native transport max frame size in mb params +func (o *FindConfigNativeTransportMaxFrameSizeInMbParams) WithTimeout(timeout time.Duration) *FindConfigNativeTransportMaxFrameSizeInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config native transport max frame size in mb params +func (o *FindConfigNativeTransportMaxFrameSizeInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config native transport max frame size in mb params +func (o *FindConfigNativeTransportMaxFrameSizeInMbParams) WithContext(ctx context.Context) *FindConfigNativeTransportMaxFrameSizeInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config native transport max frame size in mb params +func (o *FindConfigNativeTransportMaxFrameSizeInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config native transport max frame size in mb params +func (o *FindConfigNativeTransportMaxFrameSizeInMbParams) WithHTTPClient(client *http.Client) *FindConfigNativeTransportMaxFrameSizeInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config native transport max frame size in mb params +func (o *FindConfigNativeTransportMaxFrameSizeInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o 
*FindConfigNativeTransportMaxFrameSizeInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_frame_size_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_frame_size_in_mb_responses.go new file mode 100644 index 00000000000..54767d6cfb0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_frame_size_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigNativeTransportMaxFrameSizeInMbReader is a Reader for the FindConfigNativeTransportMaxFrameSizeInMb structure. +type FindConfigNativeTransportMaxFrameSizeInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigNativeTransportMaxFrameSizeInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigNativeTransportMaxFrameSizeInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigNativeTransportMaxFrameSizeInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigNativeTransportMaxFrameSizeInMbOK creates a FindConfigNativeTransportMaxFrameSizeInMbOK with default headers values +func NewFindConfigNativeTransportMaxFrameSizeInMbOK() *FindConfigNativeTransportMaxFrameSizeInMbOK { + return &FindConfigNativeTransportMaxFrameSizeInMbOK{} +} + +/* +FindConfigNativeTransportMaxFrameSizeInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigNativeTransportMaxFrameSizeInMbOK struct { + Payload int64 +} + +func (o *FindConfigNativeTransportMaxFrameSizeInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigNativeTransportMaxFrameSizeInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigNativeTransportMaxFrameSizeInMbDefault creates a FindConfigNativeTransportMaxFrameSizeInMbDefault with default headers values +func NewFindConfigNativeTransportMaxFrameSizeInMbDefault(code int) *FindConfigNativeTransportMaxFrameSizeInMbDefault { + return &FindConfigNativeTransportMaxFrameSizeInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigNativeTransportMaxFrameSizeInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigNativeTransportMaxFrameSizeInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config native transport max frame size in mb default response +func (o *FindConfigNativeTransportMaxFrameSizeInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigNativeTransportMaxFrameSizeInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigNativeTransportMaxFrameSizeInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigNativeTransportMaxFrameSizeInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_threads_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_threads_parameters.go new file mode 100644 index 00000000000..4b46ce001a5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_threads_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigNativeTransportMaxThreadsParams creates a new FindConfigNativeTransportMaxThreadsParams object +// with the default values initialized. 
+func NewFindConfigNativeTransportMaxThreadsParams() *FindConfigNativeTransportMaxThreadsParams { + + return &FindConfigNativeTransportMaxThreadsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigNativeTransportMaxThreadsParamsWithTimeout creates a new FindConfigNativeTransportMaxThreadsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigNativeTransportMaxThreadsParamsWithTimeout(timeout time.Duration) *FindConfigNativeTransportMaxThreadsParams { + + return &FindConfigNativeTransportMaxThreadsParams{ + + timeout: timeout, + } +} + +// NewFindConfigNativeTransportMaxThreadsParamsWithContext creates a new FindConfigNativeTransportMaxThreadsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigNativeTransportMaxThreadsParamsWithContext(ctx context.Context) *FindConfigNativeTransportMaxThreadsParams { + + return &FindConfigNativeTransportMaxThreadsParams{ + + Context: ctx, + } +} + +// NewFindConfigNativeTransportMaxThreadsParamsWithHTTPClient creates a new FindConfigNativeTransportMaxThreadsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigNativeTransportMaxThreadsParamsWithHTTPClient(client *http.Client) *FindConfigNativeTransportMaxThreadsParams { + + return &FindConfigNativeTransportMaxThreadsParams{ + HTTPClient: client, + } +} + +/* +FindConfigNativeTransportMaxThreadsParams contains all the parameters to send to the API endpoint +for the find config native transport max threads operation typically these are written to a http.Request +*/ +type FindConfigNativeTransportMaxThreadsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config native transport max threads params +func (o *FindConfigNativeTransportMaxThreadsParams) WithTimeout(timeout time.Duration) *FindConfigNativeTransportMaxThreadsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config native transport max threads params +func (o *FindConfigNativeTransportMaxThreadsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config native transport max threads params +func (o *FindConfigNativeTransportMaxThreadsParams) WithContext(ctx context.Context) *FindConfigNativeTransportMaxThreadsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config native transport max threads params +func (o *FindConfigNativeTransportMaxThreadsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config native transport max threads params +func (o *FindConfigNativeTransportMaxThreadsParams) WithHTTPClient(client *http.Client) *FindConfigNativeTransportMaxThreadsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config native transport max threads params +func (o *FindConfigNativeTransportMaxThreadsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigNativeTransportMaxThreadsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_threads_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_threads_responses.go new file mode 100644 index 00000000000..dbbb10543fc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_max_threads_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigNativeTransportMaxThreadsReader is a Reader for the FindConfigNativeTransportMaxThreads structure. +type FindConfigNativeTransportMaxThreadsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigNativeTransportMaxThreadsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigNativeTransportMaxThreadsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigNativeTransportMaxThreadsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigNativeTransportMaxThreadsOK creates a FindConfigNativeTransportMaxThreadsOK with default headers values +func NewFindConfigNativeTransportMaxThreadsOK() *FindConfigNativeTransportMaxThreadsOK { + return &FindConfigNativeTransportMaxThreadsOK{} +} + +/* +FindConfigNativeTransportMaxThreadsOK handles this case with default header values. + +Config value +*/ +type FindConfigNativeTransportMaxThreadsOK struct { + Payload int64 +} + +func (o *FindConfigNativeTransportMaxThreadsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigNativeTransportMaxThreadsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigNativeTransportMaxThreadsDefault creates a FindConfigNativeTransportMaxThreadsDefault with default headers values +func NewFindConfigNativeTransportMaxThreadsDefault(code int) *FindConfigNativeTransportMaxThreadsDefault { + return &FindConfigNativeTransportMaxThreadsDefault{ + _statusCode: code, + } +} + +/* +FindConfigNativeTransportMaxThreadsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigNativeTransportMaxThreadsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config native transport max threads default response +func (o *FindConfigNativeTransportMaxThreadsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigNativeTransportMaxThreadsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigNativeTransportMaxThreadsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigNativeTransportMaxThreadsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_parameters.go new file mode 100644 index 00000000000..55b929caf15 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigNativeTransportPortParams creates a new FindConfigNativeTransportPortParams object +// with the default values initialized. 
+func NewFindConfigNativeTransportPortParams() *FindConfigNativeTransportPortParams { + + return &FindConfigNativeTransportPortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigNativeTransportPortParamsWithTimeout creates a new FindConfigNativeTransportPortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigNativeTransportPortParamsWithTimeout(timeout time.Duration) *FindConfigNativeTransportPortParams { + + return &FindConfigNativeTransportPortParams{ + + timeout: timeout, + } +} + +// NewFindConfigNativeTransportPortParamsWithContext creates a new FindConfigNativeTransportPortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigNativeTransportPortParamsWithContext(ctx context.Context) *FindConfigNativeTransportPortParams { + + return &FindConfigNativeTransportPortParams{ + + Context: ctx, + } +} + +// NewFindConfigNativeTransportPortParamsWithHTTPClient creates a new FindConfigNativeTransportPortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigNativeTransportPortParamsWithHTTPClient(client *http.Client) *FindConfigNativeTransportPortParams { + + return &FindConfigNativeTransportPortParams{ + HTTPClient: client, + } +} + +/* +FindConfigNativeTransportPortParams contains all the parameters to send to the API endpoint +for the find config native transport port operation typically these are written to a http.Request +*/ +type FindConfigNativeTransportPortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config native transport port params +func (o *FindConfigNativeTransportPortParams) WithTimeout(timeout time.Duration) *FindConfigNativeTransportPortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config native transport port params +func (o *FindConfigNativeTransportPortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config native transport port params +func (o *FindConfigNativeTransportPortParams) WithContext(ctx context.Context) *FindConfigNativeTransportPortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config native transport port params +func (o *FindConfigNativeTransportPortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config native transport port params +func (o *FindConfigNativeTransportPortParams) WithHTTPClient(client *http.Client) *FindConfigNativeTransportPortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config native transport port params +func (o *FindConfigNativeTransportPortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigNativeTransportPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_responses.go new file mode 100644 index 00000000000..d0ffe8af1e2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigNativeTransportPortReader is a Reader for the FindConfigNativeTransportPort structure. +type FindConfigNativeTransportPortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigNativeTransportPortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigNativeTransportPortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigNativeTransportPortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigNativeTransportPortOK creates a FindConfigNativeTransportPortOK with default headers values +func NewFindConfigNativeTransportPortOK() *FindConfigNativeTransportPortOK { + return &FindConfigNativeTransportPortOK{} +} + +/* +FindConfigNativeTransportPortOK handles this case with default header values. + +Config value +*/ +type FindConfigNativeTransportPortOK struct { + Payload int64 +} + +func (o *FindConfigNativeTransportPortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigNativeTransportPortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigNativeTransportPortDefault creates a FindConfigNativeTransportPortDefault with default headers values +func NewFindConfigNativeTransportPortDefault(code int) *FindConfigNativeTransportPortDefault { + return &FindConfigNativeTransportPortDefault{ + _statusCode: code, + } +} + +/* +FindConfigNativeTransportPortDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigNativeTransportPortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config native transport port default response +func (o *FindConfigNativeTransportPortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigNativeTransportPortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigNativeTransportPortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigNativeTransportPortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_ssl_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_ssl_parameters.go new file mode 100644 index 00000000000..a377569994f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_ssl_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigNativeTransportPortSslParams creates a new FindConfigNativeTransportPortSslParams object +// with the default values initialized. 
+func NewFindConfigNativeTransportPortSslParams() *FindConfigNativeTransportPortSslParams { + + return &FindConfigNativeTransportPortSslParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigNativeTransportPortSslParamsWithTimeout creates a new FindConfigNativeTransportPortSslParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigNativeTransportPortSslParamsWithTimeout(timeout time.Duration) *FindConfigNativeTransportPortSslParams { + + return &FindConfigNativeTransportPortSslParams{ + + timeout: timeout, + } +} + +// NewFindConfigNativeTransportPortSslParamsWithContext creates a new FindConfigNativeTransportPortSslParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigNativeTransportPortSslParamsWithContext(ctx context.Context) *FindConfigNativeTransportPortSslParams { + + return &FindConfigNativeTransportPortSslParams{ + + Context: ctx, + } +} + +// NewFindConfigNativeTransportPortSslParamsWithHTTPClient creates a new FindConfigNativeTransportPortSslParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigNativeTransportPortSslParamsWithHTTPClient(client *http.Client) *FindConfigNativeTransportPortSslParams { + + return &FindConfigNativeTransportPortSslParams{ + HTTPClient: client, + } +} + +/* +FindConfigNativeTransportPortSslParams contains all the parameters to send to the API endpoint +for the find config native transport port ssl operation typically these are written to a http.Request +*/ +type FindConfigNativeTransportPortSslParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config native transport port ssl params +func (o *FindConfigNativeTransportPortSslParams) WithTimeout(timeout time.Duration) *FindConfigNativeTransportPortSslParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config native transport port ssl params +func (o *FindConfigNativeTransportPortSslParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config native transport port ssl params +func (o *FindConfigNativeTransportPortSslParams) WithContext(ctx context.Context) *FindConfigNativeTransportPortSslParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config native transport port ssl params +func (o *FindConfigNativeTransportPortSslParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config native transport port ssl params +func (o *FindConfigNativeTransportPortSslParams) WithHTTPClient(client *http.Client) *FindConfigNativeTransportPortSslParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config native transport port ssl params +func (o *FindConfigNativeTransportPortSslParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigNativeTransportPortSslParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_ssl_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_ssl_responses.go new file mode 100644 index 00000000000..7813ea40b46 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_native_transport_port_ssl_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigNativeTransportPortSslReader is a Reader for the FindConfigNativeTransportPortSsl structure. +type FindConfigNativeTransportPortSslReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigNativeTransportPortSslReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigNativeTransportPortSslOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigNativeTransportPortSslDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigNativeTransportPortSslOK creates a FindConfigNativeTransportPortSslOK with default headers values +func NewFindConfigNativeTransportPortSslOK() *FindConfigNativeTransportPortSslOK { + return &FindConfigNativeTransportPortSslOK{} +} + +/* +FindConfigNativeTransportPortSslOK handles this case with default header values. + +Config value +*/ +type FindConfigNativeTransportPortSslOK struct { + Payload int64 +} + +func (o *FindConfigNativeTransportPortSslOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigNativeTransportPortSslOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigNativeTransportPortSslDefault creates a FindConfigNativeTransportPortSslDefault with default headers values +func NewFindConfigNativeTransportPortSslDefault(code int) *FindConfigNativeTransportPortSslDefault { + return &FindConfigNativeTransportPortSslDefault{ + _statusCode: code, + } +} + +/* +FindConfigNativeTransportPortSslDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigNativeTransportPortSslDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config native transport port ssl default response +func (o *FindConfigNativeTransportPortSslDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigNativeTransportPortSslDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigNativeTransportPortSslDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigNativeTransportPortSslDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_num_tokens_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_num_tokens_parameters.go new file mode 100644 index 00000000000..da86b11c183 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_num_tokens_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigNumTokensParams creates a new FindConfigNumTokensParams object +// with the default values initialized. 
+func NewFindConfigNumTokensParams() *FindConfigNumTokensParams { + + return &FindConfigNumTokensParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigNumTokensParamsWithTimeout creates a new FindConfigNumTokensParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigNumTokensParamsWithTimeout(timeout time.Duration) *FindConfigNumTokensParams { + + return &FindConfigNumTokensParams{ + + timeout: timeout, + } +} + +// NewFindConfigNumTokensParamsWithContext creates a new FindConfigNumTokensParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigNumTokensParamsWithContext(ctx context.Context) *FindConfigNumTokensParams { + + return &FindConfigNumTokensParams{ + + Context: ctx, + } +} + +// NewFindConfigNumTokensParamsWithHTTPClient creates a new FindConfigNumTokensParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigNumTokensParamsWithHTTPClient(client *http.Client) *FindConfigNumTokensParams { + + return &FindConfigNumTokensParams{ + HTTPClient: client, + } +} + +/* +FindConfigNumTokensParams contains all the parameters to send to the API endpoint +for the find config num tokens operation typically these are written to a http.Request +*/ +type FindConfigNumTokensParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config num tokens params +func (o *FindConfigNumTokensParams) WithTimeout(timeout time.Duration) *FindConfigNumTokensParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config num tokens params +func (o *FindConfigNumTokensParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config num tokens params +func (o *FindConfigNumTokensParams) WithContext(ctx context.Context) *FindConfigNumTokensParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config num tokens params +func (o *FindConfigNumTokensParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config num tokens params +func (o *FindConfigNumTokensParams) WithHTTPClient(client *http.Client) *FindConfigNumTokensParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config num tokens params +func (o *FindConfigNumTokensParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigNumTokensParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_num_tokens_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_num_tokens_responses.go new file mode 100644 index 00000000000..d882ad4506d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_num_tokens_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigNumTokensReader is a Reader for the FindConfigNumTokens structure. +type FindConfigNumTokensReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigNumTokensReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigNumTokensOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigNumTokensDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigNumTokensOK creates a FindConfigNumTokensOK with default headers values +func NewFindConfigNumTokensOK() *FindConfigNumTokensOK { + return &FindConfigNumTokensOK{} +} + +/* +FindConfigNumTokensOK handles this case with default header values. + +Config value +*/ +type FindConfigNumTokensOK struct { + Payload int64 +} + +func (o *FindConfigNumTokensOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigNumTokensOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigNumTokensDefault creates a FindConfigNumTokensDefault with default headers values +func NewFindConfigNumTokensDefault(code int) *FindConfigNumTokensDefault { + return &FindConfigNumTokensDefault{ + _statusCode: code, + } +} + +/* +FindConfigNumTokensDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigNumTokensDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config num tokens default response +func (o *FindConfigNumTokensDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigNumTokensDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigNumTokensDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigNumTokensDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_override_decommission_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_override_decommission_parameters.go new file mode 100644 index 00000000000..3845f4c65e9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_override_decommission_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigOverrideDecommissionParams creates a new FindConfigOverrideDecommissionParams object +// with the default values initialized. 
+func NewFindConfigOverrideDecommissionParams() *FindConfigOverrideDecommissionParams { + + return &FindConfigOverrideDecommissionParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigOverrideDecommissionParamsWithTimeout creates a new FindConfigOverrideDecommissionParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigOverrideDecommissionParamsWithTimeout(timeout time.Duration) *FindConfigOverrideDecommissionParams { + + return &FindConfigOverrideDecommissionParams{ + + timeout: timeout, + } +} + +// NewFindConfigOverrideDecommissionParamsWithContext creates a new FindConfigOverrideDecommissionParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigOverrideDecommissionParamsWithContext(ctx context.Context) *FindConfigOverrideDecommissionParams { + + return &FindConfigOverrideDecommissionParams{ + + Context: ctx, + } +} + +// NewFindConfigOverrideDecommissionParamsWithHTTPClient creates a new FindConfigOverrideDecommissionParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigOverrideDecommissionParamsWithHTTPClient(client *http.Client) *FindConfigOverrideDecommissionParams { + + return &FindConfigOverrideDecommissionParams{ + HTTPClient: client, + } +} + +/* +FindConfigOverrideDecommissionParams contains all the parameters to send to the API endpoint +for the find config override decommission operation typically these are written to a http.Request +*/ +type FindConfigOverrideDecommissionParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config override decommission params +func (o *FindConfigOverrideDecommissionParams) WithTimeout(timeout time.Duration) *FindConfigOverrideDecommissionParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config override decommission params +func (o *FindConfigOverrideDecommissionParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config override decommission params +func (o *FindConfigOverrideDecommissionParams) WithContext(ctx context.Context) *FindConfigOverrideDecommissionParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config override decommission params +func (o *FindConfigOverrideDecommissionParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config override decommission params +func (o *FindConfigOverrideDecommissionParams) WithHTTPClient(client *http.Client) *FindConfigOverrideDecommissionParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config override decommission params +func (o *FindConfigOverrideDecommissionParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigOverrideDecommissionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_override_decommission_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_override_decommission_responses.go new file mode 100644 index 00000000000..9eb8db659d1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_override_decommission_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigOverrideDecommissionReader is a Reader for the FindConfigOverrideDecommission structure. +type FindConfigOverrideDecommissionReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigOverrideDecommissionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigOverrideDecommissionOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigOverrideDecommissionDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigOverrideDecommissionOK creates a FindConfigOverrideDecommissionOK with default headers values +func NewFindConfigOverrideDecommissionOK() *FindConfigOverrideDecommissionOK { + return &FindConfigOverrideDecommissionOK{} +} + +/* +FindConfigOverrideDecommissionOK handles this case with default header values. + +Config value +*/ +type FindConfigOverrideDecommissionOK struct { + Payload bool +} + +func (o *FindConfigOverrideDecommissionOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigOverrideDecommissionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigOverrideDecommissionDefault creates a FindConfigOverrideDecommissionDefault with default headers values +func NewFindConfigOverrideDecommissionDefault(code int) *FindConfigOverrideDecommissionDefault { + return &FindConfigOverrideDecommissionDefault{ + _statusCode: code, + } +} + +/* +FindConfigOverrideDecommissionDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigOverrideDecommissionDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config override decommission default response +func (o *FindConfigOverrideDecommissionDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigOverrideDecommissionDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigOverrideDecommissionDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigOverrideDecommissionDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_partitioner_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_partitioner_parameters.go new file mode 100644 index 00000000000..534356c548f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_partitioner_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPartitionerParams creates a new FindConfigPartitionerParams object +// with the default values initialized. 
+func NewFindConfigPartitionerParams() *FindConfigPartitionerParams { + + return &FindConfigPartitionerParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPartitionerParamsWithTimeout creates a new FindConfigPartitionerParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPartitionerParamsWithTimeout(timeout time.Duration) *FindConfigPartitionerParams { + + return &FindConfigPartitionerParams{ + + timeout: timeout, + } +} + +// NewFindConfigPartitionerParamsWithContext creates a new FindConfigPartitionerParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPartitionerParamsWithContext(ctx context.Context) *FindConfigPartitionerParams { + + return &FindConfigPartitionerParams{ + + Context: ctx, + } +} + +// NewFindConfigPartitionerParamsWithHTTPClient creates a new FindConfigPartitionerParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPartitionerParamsWithHTTPClient(client *http.Client) *FindConfigPartitionerParams { + + return &FindConfigPartitionerParams{ + HTTPClient: client, + } +} + +/* +FindConfigPartitionerParams contains all the parameters to send to the API endpoint +for the find config partitioner operation typically these are written to a http.Request +*/ +type FindConfigPartitionerParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config partitioner params +func (o *FindConfigPartitionerParams) WithTimeout(timeout time.Duration) *FindConfigPartitionerParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config partitioner params +func (o *FindConfigPartitionerParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config partitioner params +func (o *FindConfigPartitionerParams) WithContext(ctx context.Context) *FindConfigPartitionerParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config partitioner params +func (o *FindConfigPartitionerParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config partitioner params +func (o *FindConfigPartitionerParams) WithHTTPClient(client *http.Client) *FindConfigPartitionerParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config partitioner params +func (o *FindConfigPartitionerParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPartitionerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_partitioner_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_partitioner_responses.go new file mode 100644 index 00000000000..4309568ff33 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_partitioner_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPartitionerReader is a Reader for the FindConfigPartitioner structure. +type FindConfigPartitionerReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPartitionerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPartitionerOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPartitionerDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPartitionerOK creates a FindConfigPartitionerOK with default headers values +func NewFindConfigPartitionerOK() *FindConfigPartitionerOK { + return &FindConfigPartitionerOK{} +} + +/* +FindConfigPartitionerOK handles this case with default header values. + +Config value +*/ +type FindConfigPartitionerOK struct { + Payload string +} + +func (o *FindConfigPartitionerOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigPartitionerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPartitionerDefault creates a FindConfigPartitionerDefault with default headers values +func NewFindConfigPartitionerDefault(code int) *FindConfigPartitionerDefault { + return &FindConfigPartitionerDefault{ + _statusCode: code, + } +} + +/* +FindConfigPartitionerDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPartitionerDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config partitioner default response +func (o *FindConfigPartitionerDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPartitionerDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPartitionerDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPartitionerDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_cache_max_entries_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_cache_max_entries_parameters.go new file mode 100644 index 00000000000..c548d94d845 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_cache_max_entries_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPermissionsCacheMaxEntriesParams creates a new FindConfigPermissionsCacheMaxEntriesParams object +// with the default values initialized. 
+func NewFindConfigPermissionsCacheMaxEntriesParams() *FindConfigPermissionsCacheMaxEntriesParams { + + return &FindConfigPermissionsCacheMaxEntriesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPermissionsCacheMaxEntriesParamsWithTimeout creates a new FindConfigPermissionsCacheMaxEntriesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPermissionsCacheMaxEntriesParamsWithTimeout(timeout time.Duration) *FindConfigPermissionsCacheMaxEntriesParams { + + return &FindConfigPermissionsCacheMaxEntriesParams{ + + timeout: timeout, + } +} + +// NewFindConfigPermissionsCacheMaxEntriesParamsWithContext creates a new FindConfigPermissionsCacheMaxEntriesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPermissionsCacheMaxEntriesParamsWithContext(ctx context.Context) *FindConfigPermissionsCacheMaxEntriesParams { + + return &FindConfigPermissionsCacheMaxEntriesParams{ + + Context: ctx, + } +} + +// NewFindConfigPermissionsCacheMaxEntriesParamsWithHTTPClient creates a new FindConfigPermissionsCacheMaxEntriesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPermissionsCacheMaxEntriesParamsWithHTTPClient(client *http.Client) *FindConfigPermissionsCacheMaxEntriesParams { + + return &FindConfigPermissionsCacheMaxEntriesParams{ + HTTPClient: client, + } +} + +/* +FindConfigPermissionsCacheMaxEntriesParams contains all the parameters to send to the API endpoint +for the find config permissions cache max entries operation typically these are written to a http.Request +*/ +type FindConfigPermissionsCacheMaxEntriesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config permissions cache max entries params +func (o *FindConfigPermissionsCacheMaxEntriesParams) WithTimeout(timeout time.Duration) *FindConfigPermissionsCacheMaxEntriesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config permissions cache max entries params +func (o *FindConfigPermissionsCacheMaxEntriesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config permissions cache max entries params +func (o *FindConfigPermissionsCacheMaxEntriesParams) WithContext(ctx context.Context) *FindConfigPermissionsCacheMaxEntriesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config permissions cache max entries params +func (o *FindConfigPermissionsCacheMaxEntriesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config permissions cache max entries params +func (o *FindConfigPermissionsCacheMaxEntriesParams) WithHTTPClient(client *http.Client) *FindConfigPermissionsCacheMaxEntriesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config permissions cache max entries params +func (o *FindConfigPermissionsCacheMaxEntriesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPermissionsCacheMaxEntriesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 
0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_cache_max_entries_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_cache_max_entries_responses.go new file mode 100644 index 00000000000..8c95ae92a56 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_cache_max_entries_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPermissionsCacheMaxEntriesReader is a Reader for the FindConfigPermissionsCacheMaxEntries structure. +type FindConfigPermissionsCacheMaxEntriesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPermissionsCacheMaxEntriesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPermissionsCacheMaxEntriesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPermissionsCacheMaxEntriesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPermissionsCacheMaxEntriesOK creates a FindConfigPermissionsCacheMaxEntriesOK with default headers values +func NewFindConfigPermissionsCacheMaxEntriesOK() *FindConfigPermissionsCacheMaxEntriesOK { + return &FindConfigPermissionsCacheMaxEntriesOK{} +} + +/* +FindConfigPermissionsCacheMaxEntriesOK handles this case with default header values. + +Config value +*/ +type FindConfigPermissionsCacheMaxEntriesOK struct { + Payload int64 +} + +func (o *FindConfigPermissionsCacheMaxEntriesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigPermissionsCacheMaxEntriesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPermissionsCacheMaxEntriesDefault creates a FindConfigPermissionsCacheMaxEntriesDefault with default headers values +func NewFindConfigPermissionsCacheMaxEntriesDefault(code int) *FindConfigPermissionsCacheMaxEntriesDefault { + return &FindConfigPermissionsCacheMaxEntriesDefault{ + _statusCode: code, + } +} + +/* +FindConfigPermissionsCacheMaxEntriesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPermissionsCacheMaxEntriesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config permissions cache max entries default response +func (o *FindConfigPermissionsCacheMaxEntriesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPermissionsCacheMaxEntriesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPermissionsCacheMaxEntriesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPermissionsCacheMaxEntriesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_update_interval_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_update_interval_in_ms_parameters.go new file mode 100644 index 00000000000..67eb7c0d276 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_update_interval_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPermissionsUpdateIntervalInMsParams creates a new FindConfigPermissionsUpdateIntervalInMsParams object +// with the default values initialized. 
+func NewFindConfigPermissionsUpdateIntervalInMsParams() *FindConfigPermissionsUpdateIntervalInMsParams { + + return &FindConfigPermissionsUpdateIntervalInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPermissionsUpdateIntervalInMsParamsWithTimeout creates a new FindConfigPermissionsUpdateIntervalInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPermissionsUpdateIntervalInMsParamsWithTimeout(timeout time.Duration) *FindConfigPermissionsUpdateIntervalInMsParams { + + return &FindConfigPermissionsUpdateIntervalInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigPermissionsUpdateIntervalInMsParamsWithContext creates a new FindConfigPermissionsUpdateIntervalInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPermissionsUpdateIntervalInMsParamsWithContext(ctx context.Context) *FindConfigPermissionsUpdateIntervalInMsParams { + + return &FindConfigPermissionsUpdateIntervalInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigPermissionsUpdateIntervalInMsParamsWithHTTPClient creates a new FindConfigPermissionsUpdateIntervalInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPermissionsUpdateIntervalInMsParamsWithHTTPClient(client *http.Client) *FindConfigPermissionsUpdateIntervalInMsParams { + + return &FindConfigPermissionsUpdateIntervalInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigPermissionsUpdateIntervalInMsParams contains all the parameters to send to the API endpoint +for the find config permissions update interval in ms operation typically these are written to a http.Request +*/ +type FindConfigPermissionsUpdateIntervalInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config permissions update interval in ms params +func (o *FindConfigPermissionsUpdateIntervalInMsParams) WithTimeout(timeout time.Duration) *FindConfigPermissionsUpdateIntervalInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config permissions update interval in ms params +func (o *FindConfigPermissionsUpdateIntervalInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config permissions update interval in ms params +func (o *FindConfigPermissionsUpdateIntervalInMsParams) WithContext(ctx context.Context) *FindConfigPermissionsUpdateIntervalInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config permissions update interval in ms params +func (o *FindConfigPermissionsUpdateIntervalInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config permissions update interval in ms params +func (o *FindConfigPermissionsUpdateIntervalInMsParams) WithHTTPClient(client *http.Client) *FindConfigPermissionsUpdateIntervalInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config permissions update interval in ms params +func (o *FindConfigPermissionsUpdateIntervalInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPermissionsUpdateIntervalInMsParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_update_interval_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_update_interval_in_ms_responses.go new file mode 100644 index 00000000000..c2edd964d70 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_update_interval_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPermissionsUpdateIntervalInMsReader is a Reader for the FindConfigPermissionsUpdateIntervalInMs structure. +type FindConfigPermissionsUpdateIntervalInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPermissionsUpdateIntervalInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPermissionsUpdateIntervalInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPermissionsUpdateIntervalInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPermissionsUpdateIntervalInMsOK creates a FindConfigPermissionsUpdateIntervalInMsOK with default headers values +func NewFindConfigPermissionsUpdateIntervalInMsOK() *FindConfigPermissionsUpdateIntervalInMsOK { + return &FindConfigPermissionsUpdateIntervalInMsOK{} +} + +/* +FindConfigPermissionsUpdateIntervalInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigPermissionsUpdateIntervalInMsOK struct { + Payload int64 +} + +func (o *FindConfigPermissionsUpdateIntervalInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigPermissionsUpdateIntervalInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPermissionsUpdateIntervalInMsDefault creates a FindConfigPermissionsUpdateIntervalInMsDefault with default headers values +func NewFindConfigPermissionsUpdateIntervalInMsDefault(code int) *FindConfigPermissionsUpdateIntervalInMsDefault { + return &FindConfigPermissionsUpdateIntervalInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigPermissionsUpdateIntervalInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPermissionsUpdateIntervalInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config permissions update interval in ms default response +func (o *FindConfigPermissionsUpdateIntervalInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPermissionsUpdateIntervalInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPermissionsUpdateIntervalInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPermissionsUpdateIntervalInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_validity_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_validity_in_ms_parameters.go new file mode 100644 index 00000000000..01c7f8b5c94 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_validity_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPermissionsValidityInMsParams creates a new FindConfigPermissionsValidityInMsParams object +// with the default values initialized. 
+func NewFindConfigPermissionsValidityInMsParams() *FindConfigPermissionsValidityInMsParams { + + return &FindConfigPermissionsValidityInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPermissionsValidityInMsParamsWithTimeout creates a new FindConfigPermissionsValidityInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPermissionsValidityInMsParamsWithTimeout(timeout time.Duration) *FindConfigPermissionsValidityInMsParams { + + return &FindConfigPermissionsValidityInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigPermissionsValidityInMsParamsWithContext creates a new FindConfigPermissionsValidityInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPermissionsValidityInMsParamsWithContext(ctx context.Context) *FindConfigPermissionsValidityInMsParams { + + return &FindConfigPermissionsValidityInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigPermissionsValidityInMsParamsWithHTTPClient creates a new FindConfigPermissionsValidityInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPermissionsValidityInMsParamsWithHTTPClient(client *http.Client) *FindConfigPermissionsValidityInMsParams { + + return &FindConfigPermissionsValidityInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigPermissionsValidityInMsParams contains all the parameters to send to the API endpoint +for the find config permissions validity in ms operation typically these are written to a http.Request +*/ +type FindConfigPermissionsValidityInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config permissions validity in ms params +func (o *FindConfigPermissionsValidityInMsParams) WithTimeout(timeout time.Duration) *FindConfigPermissionsValidityInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config permissions validity in ms params +func (o *FindConfigPermissionsValidityInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config permissions validity in ms params +func (o *FindConfigPermissionsValidityInMsParams) WithContext(ctx context.Context) *FindConfigPermissionsValidityInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config permissions validity in ms params +func (o *FindConfigPermissionsValidityInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config permissions validity in ms params +func (o *FindConfigPermissionsValidityInMsParams) WithHTTPClient(client *http.Client) *FindConfigPermissionsValidityInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config permissions validity in ms params +func (o *FindConfigPermissionsValidityInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPermissionsValidityInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_validity_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_validity_in_ms_responses.go new file mode 100644 index 00000000000..8c0c62cc89d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_permissions_validity_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPermissionsValidityInMsReader is a Reader for the FindConfigPermissionsValidityInMs structure. +type FindConfigPermissionsValidityInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPermissionsValidityInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPermissionsValidityInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPermissionsValidityInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPermissionsValidityInMsOK creates a FindConfigPermissionsValidityInMsOK with default headers values +func NewFindConfigPermissionsValidityInMsOK() *FindConfigPermissionsValidityInMsOK { + return &FindConfigPermissionsValidityInMsOK{} +} + +/* +FindConfigPermissionsValidityInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigPermissionsValidityInMsOK struct { + Payload int64 +} + +func (o *FindConfigPermissionsValidityInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigPermissionsValidityInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPermissionsValidityInMsDefault creates a FindConfigPermissionsValidityInMsDefault with default headers values +func NewFindConfigPermissionsValidityInMsDefault(code int) *FindConfigPermissionsValidityInMsDefault { + return &FindConfigPermissionsValidityInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigPermissionsValidityInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPermissionsValidityInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config permissions validity in ms default response +func (o *FindConfigPermissionsValidityInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPermissionsValidityInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPermissionsValidityInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPermissionsValidityInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_phi_convict_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_phi_convict_threshold_parameters.go new file mode 100644 index 00000000000..d2bb467a898 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_phi_convict_threshold_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPhiConvictThresholdParams creates a new FindConfigPhiConvictThresholdParams object +// with the default values initialized. 
+func NewFindConfigPhiConvictThresholdParams() *FindConfigPhiConvictThresholdParams { + + return &FindConfigPhiConvictThresholdParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPhiConvictThresholdParamsWithTimeout creates a new FindConfigPhiConvictThresholdParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPhiConvictThresholdParamsWithTimeout(timeout time.Duration) *FindConfigPhiConvictThresholdParams { + + return &FindConfigPhiConvictThresholdParams{ + + timeout: timeout, + } +} + +// NewFindConfigPhiConvictThresholdParamsWithContext creates a new FindConfigPhiConvictThresholdParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPhiConvictThresholdParamsWithContext(ctx context.Context) *FindConfigPhiConvictThresholdParams { + + return &FindConfigPhiConvictThresholdParams{ + + Context: ctx, + } +} + +// NewFindConfigPhiConvictThresholdParamsWithHTTPClient creates a new FindConfigPhiConvictThresholdParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPhiConvictThresholdParamsWithHTTPClient(client *http.Client) *FindConfigPhiConvictThresholdParams { + + return &FindConfigPhiConvictThresholdParams{ + HTTPClient: client, + } +} + +/* +FindConfigPhiConvictThresholdParams contains all the parameters to send to the API endpoint +for the find config phi convict threshold operation typically these are written to a http.Request +*/ +type FindConfigPhiConvictThresholdParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config phi convict threshold params +func (o *FindConfigPhiConvictThresholdParams) WithTimeout(timeout time.Duration) *FindConfigPhiConvictThresholdParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config phi convict threshold params +func (o *FindConfigPhiConvictThresholdParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config phi convict threshold params +func (o *FindConfigPhiConvictThresholdParams) WithContext(ctx context.Context) *FindConfigPhiConvictThresholdParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config phi convict threshold params +func (o *FindConfigPhiConvictThresholdParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config phi convict threshold params +func (o *FindConfigPhiConvictThresholdParams) WithHTTPClient(client *http.Client) *FindConfigPhiConvictThresholdParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config phi convict threshold params +func (o *FindConfigPhiConvictThresholdParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPhiConvictThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_phi_convict_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_phi_convict_threshold_responses.go new file mode 100644 index 00000000000..78cdcbb1760 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_phi_convict_threshold_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPhiConvictThresholdReader is a Reader for the FindConfigPhiConvictThreshold structure. +type FindConfigPhiConvictThresholdReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPhiConvictThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPhiConvictThresholdOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPhiConvictThresholdDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPhiConvictThresholdOK creates a FindConfigPhiConvictThresholdOK with default headers values +func NewFindConfigPhiConvictThresholdOK() *FindConfigPhiConvictThresholdOK { + return &FindConfigPhiConvictThresholdOK{} +} + +/* +FindConfigPhiConvictThresholdOK handles this case with default header values. + +Config value +*/ +type FindConfigPhiConvictThresholdOK struct { + Payload int64 +} + +func (o *FindConfigPhiConvictThresholdOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigPhiConvictThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPhiConvictThresholdDefault creates a FindConfigPhiConvictThresholdDefault with default headers values +func NewFindConfigPhiConvictThresholdDefault(code int) *FindConfigPhiConvictThresholdDefault { + return &FindConfigPhiConvictThresholdDefault{ + _statusCode: code, + } +} + +/* +FindConfigPhiConvictThresholdDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPhiConvictThresholdDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config phi convict threshold default response +func (o *FindConfigPhiConvictThresholdDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPhiConvictThresholdDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPhiConvictThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPhiConvictThresholdDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_preheat_kernel_page_cache_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_preheat_kernel_page_cache_parameters.go new file mode 100644 index 00000000000..22a5d22abd1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_preheat_kernel_page_cache_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPreheatKernelPageCacheParams creates a new FindConfigPreheatKernelPageCacheParams object +// with the default values initialized. 
+func NewFindConfigPreheatKernelPageCacheParams() *FindConfigPreheatKernelPageCacheParams { + + return &FindConfigPreheatKernelPageCacheParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPreheatKernelPageCacheParamsWithTimeout creates a new FindConfigPreheatKernelPageCacheParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPreheatKernelPageCacheParamsWithTimeout(timeout time.Duration) *FindConfigPreheatKernelPageCacheParams { + + return &FindConfigPreheatKernelPageCacheParams{ + + timeout: timeout, + } +} + +// NewFindConfigPreheatKernelPageCacheParamsWithContext creates a new FindConfigPreheatKernelPageCacheParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPreheatKernelPageCacheParamsWithContext(ctx context.Context) *FindConfigPreheatKernelPageCacheParams { + + return &FindConfigPreheatKernelPageCacheParams{ + + Context: ctx, + } +} + +// NewFindConfigPreheatKernelPageCacheParamsWithHTTPClient creates a new FindConfigPreheatKernelPageCacheParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPreheatKernelPageCacheParamsWithHTTPClient(client *http.Client) *FindConfigPreheatKernelPageCacheParams { + + return &FindConfigPreheatKernelPageCacheParams{ + HTTPClient: client, + } +} + +/* +FindConfigPreheatKernelPageCacheParams contains all the parameters to send to the API endpoint +for the find config preheat kernel page cache operation typically these are written to a http.Request +*/ +type FindConfigPreheatKernelPageCacheParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config preheat kernel page cache params +func (o *FindConfigPreheatKernelPageCacheParams) WithTimeout(timeout time.Duration) *FindConfigPreheatKernelPageCacheParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config preheat kernel page cache params +func (o *FindConfigPreheatKernelPageCacheParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config preheat kernel page cache params +func (o *FindConfigPreheatKernelPageCacheParams) WithContext(ctx context.Context) *FindConfigPreheatKernelPageCacheParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config preheat kernel page cache params +func (o *FindConfigPreheatKernelPageCacheParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config preheat kernel page cache params +func (o *FindConfigPreheatKernelPageCacheParams) WithHTTPClient(client *http.Client) *FindConfigPreheatKernelPageCacheParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config preheat kernel page cache params +func (o *FindConfigPreheatKernelPageCacheParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPreheatKernelPageCacheParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_preheat_kernel_page_cache_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_preheat_kernel_page_cache_responses.go new file mode 100644 index 00000000000..9e28a8d8fd2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_preheat_kernel_page_cache_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPreheatKernelPageCacheReader is a Reader for the FindConfigPreheatKernelPageCache structure. +type FindConfigPreheatKernelPageCacheReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPreheatKernelPageCacheReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPreheatKernelPageCacheOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPreheatKernelPageCacheDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPreheatKernelPageCacheOK creates a FindConfigPreheatKernelPageCacheOK with default headers values +func NewFindConfigPreheatKernelPageCacheOK() *FindConfigPreheatKernelPageCacheOK { + return &FindConfigPreheatKernelPageCacheOK{} +} + +/* +FindConfigPreheatKernelPageCacheOK handles this case with default header values. + +Config value +*/ +type FindConfigPreheatKernelPageCacheOK struct { + Payload bool +} + +func (o *FindConfigPreheatKernelPageCacheOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigPreheatKernelPageCacheOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPreheatKernelPageCacheDefault creates a FindConfigPreheatKernelPageCacheDefault with default headers values +func NewFindConfigPreheatKernelPageCacheDefault(code int) *FindConfigPreheatKernelPageCacheDefault { + return &FindConfigPreheatKernelPageCacheDefault{ + _statusCode: code, + } +} + +/* +FindConfigPreheatKernelPageCacheDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPreheatKernelPageCacheDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config preheat kernel page cache default response +func (o *FindConfigPreheatKernelPageCacheDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPreheatKernelPageCacheDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPreheatKernelPageCacheDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPreheatKernelPageCacheDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_address_parameters.go new file mode 100644 index 00000000000..2679993b196 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPrometheusAddressParams creates a new FindConfigPrometheusAddressParams object +// with the default values initialized. 
+func NewFindConfigPrometheusAddressParams() *FindConfigPrometheusAddressParams { + + return &FindConfigPrometheusAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPrometheusAddressParamsWithTimeout creates a new FindConfigPrometheusAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPrometheusAddressParamsWithTimeout(timeout time.Duration) *FindConfigPrometheusAddressParams { + + return &FindConfigPrometheusAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigPrometheusAddressParamsWithContext creates a new FindConfigPrometheusAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPrometheusAddressParamsWithContext(ctx context.Context) *FindConfigPrometheusAddressParams { + + return &FindConfigPrometheusAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigPrometheusAddressParamsWithHTTPClient creates a new FindConfigPrometheusAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPrometheusAddressParamsWithHTTPClient(client *http.Client) *FindConfigPrometheusAddressParams { + + return &FindConfigPrometheusAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigPrometheusAddressParams contains all the parameters to send to the API endpoint +for the find config prometheus address operation typically these are written to a http.Request +*/ +type FindConfigPrometheusAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config prometheus address params +func (o *FindConfigPrometheusAddressParams) WithTimeout(timeout time.Duration) *FindConfigPrometheusAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config prometheus address params +func (o *FindConfigPrometheusAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config prometheus address params +func (o *FindConfigPrometheusAddressParams) WithContext(ctx context.Context) *FindConfigPrometheusAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config prometheus address params +func (o *FindConfigPrometheusAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config prometheus address params +func (o *FindConfigPrometheusAddressParams) WithHTTPClient(client *http.Client) *FindConfigPrometheusAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config prometheus address params +func (o *FindConfigPrometheusAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPrometheusAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_address_responses.go new file mode 100644 index 00000000000..1b257c3d4bd --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPrometheusAddressReader is a Reader for the FindConfigPrometheusAddress structure. +type FindConfigPrometheusAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPrometheusAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPrometheusAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPrometheusAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPrometheusAddressOK creates a FindConfigPrometheusAddressOK with default headers values +func NewFindConfigPrometheusAddressOK() *FindConfigPrometheusAddressOK { + return &FindConfigPrometheusAddressOK{} +} + +/* +FindConfigPrometheusAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigPrometheusAddressOK struct { + Payload string +} + +func (o *FindConfigPrometheusAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigPrometheusAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPrometheusAddressDefault creates a FindConfigPrometheusAddressDefault with default headers values +func NewFindConfigPrometheusAddressDefault(code int) *FindConfigPrometheusAddressDefault { + return &FindConfigPrometheusAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigPrometheusAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPrometheusAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config prometheus address default response +func (o *FindConfigPrometheusAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPrometheusAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPrometheusAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPrometheusAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_port_parameters.go new file mode 100644 index 00000000000..0af89913f85 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPrometheusPortParams creates a new FindConfigPrometheusPortParams object +// with the default values initialized. 
+func NewFindConfigPrometheusPortParams() *FindConfigPrometheusPortParams { + + return &FindConfigPrometheusPortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPrometheusPortParamsWithTimeout creates a new FindConfigPrometheusPortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPrometheusPortParamsWithTimeout(timeout time.Duration) *FindConfigPrometheusPortParams { + + return &FindConfigPrometheusPortParams{ + + timeout: timeout, + } +} + +// NewFindConfigPrometheusPortParamsWithContext creates a new FindConfigPrometheusPortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPrometheusPortParamsWithContext(ctx context.Context) *FindConfigPrometheusPortParams { + + return &FindConfigPrometheusPortParams{ + + Context: ctx, + } +} + +// NewFindConfigPrometheusPortParamsWithHTTPClient creates a new FindConfigPrometheusPortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPrometheusPortParamsWithHTTPClient(client *http.Client) *FindConfigPrometheusPortParams { + + return &FindConfigPrometheusPortParams{ + HTTPClient: client, + } +} + +/* +FindConfigPrometheusPortParams contains all the parameters to send to the API endpoint +for the find config prometheus port operation typically these are written to a http.Request +*/ +type FindConfigPrometheusPortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config prometheus port params +func (o *FindConfigPrometheusPortParams) WithTimeout(timeout time.Duration) *FindConfigPrometheusPortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config prometheus port params +func (o *FindConfigPrometheusPortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config prometheus port params +func (o *FindConfigPrometheusPortParams) WithContext(ctx context.Context) *FindConfigPrometheusPortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config prometheus port params +func (o *FindConfigPrometheusPortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config prometheus port params +func (o *FindConfigPrometheusPortParams) WithHTTPClient(client *http.Client) *FindConfigPrometheusPortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config prometheus port params +func (o *FindConfigPrometheusPortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPrometheusPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_port_responses.go new file mode 100644 index 00000000000..0ac5f98d5b6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPrometheusPortReader is a Reader for the FindConfigPrometheusPort structure. +type FindConfigPrometheusPortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPrometheusPortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPrometheusPortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPrometheusPortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPrometheusPortOK creates a FindConfigPrometheusPortOK with default headers values +func NewFindConfigPrometheusPortOK() *FindConfigPrometheusPortOK { + return &FindConfigPrometheusPortOK{} +} + +/* +FindConfigPrometheusPortOK handles this case with default header values. + +Config value +*/ +type FindConfigPrometheusPortOK struct { + Payload int64 +} + +func (o *FindConfigPrometheusPortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigPrometheusPortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPrometheusPortDefault creates a FindConfigPrometheusPortDefault with default headers values +func NewFindConfigPrometheusPortDefault(code int) *FindConfigPrometheusPortDefault { + return &FindConfigPrometheusPortDefault{ + _statusCode: code, + } +} + +/* +FindConfigPrometheusPortDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPrometheusPortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config prometheus port default response +func (o *FindConfigPrometheusPortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPrometheusPortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPrometheusPortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPrometheusPortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_prefix_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_prefix_parameters.go new file mode 100644 index 00000000000..35a7c994580 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_prefix_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigPrometheusPrefixParams creates a new FindConfigPrometheusPrefixParams object +// with the default values initialized. 
+func NewFindConfigPrometheusPrefixParams() *FindConfigPrometheusPrefixParams { + + return &FindConfigPrometheusPrefixParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigPrometheusPrefixParamsWithTimeout creates a new FindConfigPrometheusPrefixParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigPrometheusPrefixParamsWithTimeout(timeout time.Duration) *FindConfigPrometheusPrefixParams { + + return &FindConfigPrometheusPrefixParams{ + + timeout: timeout, + } +} + +// NewFindConfigPrometheusPrefixParamsWithContext creates a new FindConfigPrometheusPrefixParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigPrometheusPrefixParamsWithContext(ctx context.Context) *FindConfigPrometheusPrefixParams { + + return &FindConfigPrometheusPrefixParams{ + + Context: ctx, + } +} + +// NewFindConfigPrometheusPrefixParamsWithHTTPClient creates a new FindConfigPrometheusPrefixParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigPrometheusPrefixParamsWithHTTPClient(client *http.Client) *FindConfigPrometheusPrefixParams { + + return &FindConfigPrometheusPrefixParams{ + HTTPClient: client, + } +} + +/* +FindConfigPrometheusPrefixParams contains all the parameters to send to the API endpoint +for the find config prometheus prefix operation typically these are written to a http.Request +*/ +type FindConfigPrometheusPrefixParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config prometheus prefix params +func (o *FindConfigPrometheusPrefixParams) WithTimeout(timeout time.Duration) *FindConfigPrometheusPrefixParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config prometheus prefix params +func (o *FindConfigPrometheusPrefixParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config prometheus prefix params +func (o *FindConfigPrometheusPrefixParams) WithContext(ctx context.Context) *FindConfigPrometheusPrefixParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config prometheus prefix params +func (o *FindConfigPrometheusPrefixParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config prometheus prefix params +func (o *FindConfigPrometheusPrefixParams) WithHTTPClient(client *http.Client) *FindConfigPrometheusPrefixParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config prometheus prefix params +func (o *FindConfigPrometheusPrefixParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigPrometheusPrefixParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_prefix_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_prefix_responses.go new file mode 100644 index 00000000000..465293ee247 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_prometheus_prefix_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigPrometheusPrefixReader is a Reader for the FindConfigPrometheusPrefix structure. +type FindConfigPrometheusPrefixReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigPrometheusPrefixReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigPrometheusPrefixOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigPrometheusPrefixDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigPrometheusPrefixOK creates a FindConfigPrometheusPrefixOK with default headers values +func NewFindConfigPrometheusPrefixOK() *FindConfigPrometheusPrefixOK { + return &FindConfigPrometheusPrefixOK{} +} + +/* +FindConfigPrometheusPrefixOK handles this case with default header values. + +Config value +*/ +type FindConfigPrometheusPrefixOK struct { + Payload string +} + +func (o *FindConfigPrometheusPrefixOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigPrometheusPrefixOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigPrometheusPrefixDefault creates a FindConfigPrometheusPrefixDefault with default headers values +func NewFindConfigPrometheusPrefixDefault(code int) *FindConfigPrometheusPrefixDefault { + return &FindConfigPrometheusPrefixDefault{ + _statusCode: code, + } +} + +/* +FindConfigPrometheusPrefixDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigPrometheusPrefixDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config prometheus prefix default response +func (o *FindConfigPrometheusPrefixDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigPrometheusPrefixDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigPrometheusPrefixDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigPrometheusPrefixDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_range_request_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_range_request_timeout_in_ms_parameters.go new file mode 100644 index 00000000000..831e317e8ff --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_range_request_timeout_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRangeRequestTimeoutInMsParams creates a new FindConfigRangeRequestTimeoutInMsParams object +// with the default values initialized. 
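Aside for review: every *Params file in this batch is the same go-swagger template, differing only in type names. A minimal usage sketch in Go, assuming callers reach the operation through the generated config client (the operation method itself sits outside this hunk); only constructors and fluent setters defined in the files above are used:

package main

import (
	"context"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	// Start from the defaults (timeout = cr.DefaultTimeout) and override per
	// request; every With* setter returns the receiver, so calls chain.
	params := config.NewFindConfigPrometheusPrefixParams().
		WithTimeout(10 * time.Second).
		WithContext(context.Background())

	// params would then be passed to the generated client's operation
	// method, which is defined elsewhere in this vendor drop.
	_ = params
}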
+func NewFindConfigRangeRequestTimeoutInMsParams() *FindConfigRangeRequestTimeoutInMsParams { + + return &FindConfigRangeRequestTimeoutInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRangeRequestTimeoutInMsParamsWithTimeout creates a new FindConfigRangeRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRangeRequestTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigRangeRequestTimeoutInMsParams { + + return &FindConfigRangeRequestTimeoutInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigRangeRequestTimeoutInMsParamsWithContext creates a new FindConfigRangeRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRangeRequestTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigRangeRequestTimeoutInMsParams { + + return &FindConfigRangeRequestTimeoutInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigRangeRequestTimeoutInMsParamsWithHTTPClient creates a new FindConfigRangeRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRangeRequestTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigRangeRequestTimeoutInMsParams { + + return &FindConfigRangeRequestTimeoutInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigRangeRequestTimeoutInMsParams contains all the parameters to send to the API endpoint +for the find config range request timeout in ms operation typically these are written to a http.Request +*/ +type FindConfigRangeRequestTimeoutInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config range request timeout in ms params +func (o *FindConfigRangeRequestTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigRangeRequestTimeoutInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config range request timeout in ms params +func (o *FindConfigRangeRequestTimeoutInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config range request timeout in ms params +func (o *FindConfigRangeRequestTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigRangeRequestTimeoutInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config range request timeout in ms params +func (o *FindConfigRangeRequestTimeoutInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config range request timeout in ms params +func (o *FindConfigRangeRequestTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigRangeRequestTimeoutInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config range request timeout in ms params +func (o *FindConfigRangeRequestTimeoutInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRangeRequestTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_range_request_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_range_request_timeout_in_ms_responses.go new file mode 100644 index 00000000000..762a1b81100 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_range_request_timeout_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRangeRequestTimeoutInMsReader is a Reader for the FindConfigRangeRequestTimeoutInMs structure. +type FindConfigRangeRequestTimeoutInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRangeRequestTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRangeRequestTimeoutInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRangeRequestTimeoutInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRangeRequestTimeoutInMsOK creates a FindConfigRangeRequestTimeoutInMsOK with default headers values +func NewFindConfigRangeRequestTimeoutInMsOK() *FindConfigRangeRequestTimeoutInMsOK { + return &FindConfigRangeRequestTimeoutInMsOK{} +} + +/* +FindConfigRangeRequestTimeoutInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigRangeRequestTimeoutInMsOK struct { + Payload int64 +} + +func (o *FindConfigRangeRequestTimeoutInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRangeRequestTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRangeRequestTimeoutInMsDefault creates a FindConfigRangeRequestTimeoutInMsDefault with default headers values +func NewFindConfigRangeRequestTimeoutInMsDefault(code int) *FindConfigRangeRequestTimeoutInMsDefault { + return &FindConfigRangeRequestTimeoutInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigRangeRequestTimeoutInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRangeRequestTimeoutInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config range request timeout in ms default response +func (o *FindConfigRangeRequestTimeoutInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRangeRequestTimeoutInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRangeRequestTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRangeRequestTimeoutInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_read_request_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_read_request_timeout_in_ms_parameters.go new file mode 100644 index 00000000000..0b1547e93e5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_read_request_timeout_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigReadRequestTimeoutInMsParams creates a new FindConfigReadRequestTimeoutInMsParams object +// with the default values initialized. 
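Aside for review: the *Responses files share one dispatch shape: ReadResponse returns the OK type for HTTP 200 and, for every other status, returns the Default type as an error (its Error method formats the "agent [HTTP %d]" message seen above). A sketch of unwrapping that error, assuming err came back from the generated FindConfigRangeRequestTimeoutInMs call (not part of this hunk):

package main

import (
	"errors"
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

// handleErr recovers the status code and error payload from the generated
// Default response type, which satisfies the error interface via Error().
func handleErr(err error) {
	var apiErr *config.FindConfigRangeRequestTimeoutInMsDefault
	if errors.As(err, &apiErr) {
		// readResponse always allocates the payload, so Message is safe to read.
		fmt.Println("status:", apiErr.Code())
		fmt.Println("message:", apiErr.GetPayload().Message)
	}
}

func main() {
	handleErr(nil) // placeholder; in practice err comes from the client call
}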
+func NewFindConfigReadRequestTimeoutInMsParams() *FindConfigReadRequestTimeoutInMsParams { + + return &FindConfigReadRequestTimeoutInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigReadRequestTimeoutInMsParamsWithTimeout creates a new FindConfigReadRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigReadRequestTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigReadRequestTimeoutInMsParams { + + return &FindConfigReadRequestTimeoutInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigReadRequestTimeoutInMsParamsWithContext creates a new FindConfigReadRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigReadRequestTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigReadRequestTimeoutInMsParams { + + return &FindConfigReadRequestTimeoutInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigReadRequestTimeoutInMsParamsWithHTTPClient creates a new FindConfigReadRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigReadRequestTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigReadRequestTimeoutInMsParams { + + return &FindConfigReadRequestTimeoutInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigReadRequestTimeoutInMsParams contains all the parameters to send to the API endpoint +for the find config read request timeout in ms operation typically these are written to a http.Request +*/ +type FindConfigReadRequestTimeoutInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config read request timeout in ms params +func (o *FindConfigReadRequestTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigReadRequestTimeoutInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config read request timeout in ms params +func (o *FindConfigReadRequestTimeoutInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config read request timeout in ms params +func (o *FindConfigReadRequestTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigReadRequestTimeoutInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config read request timeout in ms params +func (o *FindConfigReadRequestTimeoutInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config read request timeout in ms params +func (o *FindConfigReadRequestTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigReadRequestTimeoutInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config read request timeout in ms params +func (o *FindConfigReadRequestTimeoutInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigReadRequestTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_read_request_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_read_request_timeout_in_ms_responses.go new file mode 100644 index 00000000000..1a1175541bf --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_read_request_timeout_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigReadRequestTimeoutInMsReader is a Reader for the FindConfigReadRequestTimeoutInMs structure. +type FindConfigReadRequestTimeoutInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigReadRequestTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigReadRequestTimeoutInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigReadRequestTimeoutInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigReadRequestTimeoutInMsOK creates a FindConfigReadRequestTimeoutInMsOK with default headers values +func NewFindConfigReadRequestTimeoutInMsOK() *FindConfigReadRequestTimeoutInMsOK { + return &FindConfigReadRequestTimeoutInMsOK{} +} + +/* +FindConfigReadRequestTimeoutInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigReadRequestTimeoutInMsOK struct { + Payload int64 +} + +func (o *FindConfigReadRequestTimeoutInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigReadRequestTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigReadRequestTimeoutInMsDefault creates a FindConfigReadRequestTimeoutInMsDefault with default headers values +func NewFindConfigReadRequestTimeoutInMsDefault(code int) *FindConfigReadRequestTimeoutInMsDefault { + return &FindConfigReadRequestTimeoutInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigReadRequestTimeoutInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigReadRequestTimeoutInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config read request timeout in ms default response +func (o *FindConfigReadRequestTimeoutInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigReadRequestTimeoutInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigReadRequestTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigReadRequestTimeoutInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_capacity_to_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_capacity_to_parameters.go new file mode 100644 index 00000000000..35e855d0b15 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_capacity_to_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigReduceCacheCapacityToParams creates a new FindConfigReduceCacheCapacityToParams object +// with the default values initialized. 
+func NewFindConfigReduceCacheCapacityToParams() *FindConfigReduceCacheCapacityToParams { + + return &FindConfigReduceCacheCapacityToParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigReduceCacheCapacityToParamsWithTimeout creates a new FindConfigReduceCacheCapacityToParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigReduceCacheCapacityToParamsWithTimeout(timeout time.Duration) *FindConfigReduceCacheCapacityToParams { + + return &FindConfigReduceCacheCapacityToParams{ + + timeout: timeout, + } +} + +// NewFindConfigReduceCacheCapacityToParamsWithContext creates a new FindConfigReduceCacheCapacityToParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigReduceCacheCapacityToParamsWithContext(ctx context.Context) *FindConfigReduceCacheCapacityToParams { + + return &FindConfigReduceCacheCapacityToParams{ + + Context: ctx, + } +} + +// NewFindConfigReduceCacheCapacityToParamsWithHTTPClient creates a new FindConfigReduceCacheCapacityToParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigReduceCacheCapacityToParamsWithHTTPClient(client *http.Client) *FindConfigReduceCacheCapacityToParams { + + return &FindConfigReduceCacheCapacityToParams{ + HTTPClient: client, + } +} + +/* +FindConfigReduceCacheCapacityToParams contains all the parameters to send to the API endpoint +for the find config reduce cache capacity to operation typically these are written to a http.Request +*/ +type FindConfigReduceCacheCapacityToParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config reduce cache capacity to params +func (o *FindConfigReduceCacheCapacityToParams) WithTimeout(timeout time.Duration) *FindConfigReduceCacheCapacityToParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config reduce cache capacity to params +func (o *FindConfigReduceCacheCapacityToParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config reduce cache capacity to params +func (o *FindConfigReduceCacheCapacityToParams) WithContext(ctx context.Context) *FindConfigReduceCacheCapacityToParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config reduce cache capacity to params +func (o *FindConfigReduceCacheCapacityToParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config reduce cache capacity to params +func (o *FindConfigReduceCacheCapacityToParams) WithHTTPClient(client *http.Client) *FindConfigReduceCacheCapacityToParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config reduce cache capacity to params +func (o *FindConfigReduceCacheCapacityToParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigReduceCacheCapacityToParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_capacity_to_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_capacity_to_responses.go new file mode 100644 index 00000000000..21318952066 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_capacity_to_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigReduceCacheCapacityToReader is a Reader for the FindConfigReduceCacheCapacityTo structure. +type FindConfigReduceCacheCapacityToReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigReduceCacheCapacityToReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigReduceCacheCapacityToOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigReduceCacheCapacityToDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigReduceCacheCapacityToOK creates a FindConfigReduceCacheCapacityToOK with default headers values +func NewFindConfigReduceCacheCapacityToOK() *FindConfigReduceCacheCapacityToOK { + return &FindConfigReduceCacheCapacityToOK{} +} + +/* +FindConfigReduceCacheCapacityToOK handles this case with default header values. + +Config value +*/ +type FindConfigReduceCacheCapacityToOK struct { + Payload float64 +} + +func (o *FindConfigReduceCacheCapacityToOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigReduceCacheCapacityToOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigReduceCacheCapacityToDefault creates a FindConfigReduceCacheCapacityToDefault with default headers values +func NewFindConfigReduceCacheCapacityToDefault(code int) *FindConfigReduceCacheCapacityToDefault { + return &FindConfigReduceCacheCapacityToDefault{ + _statusCode: code, + } +} + +/* +FindConfigReduceCacheCapacityToDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigReduceCacheCapacityToDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config reduce cache capacity to default response +func (o *FindConfigReduceCacheCapacityToDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigReduceCacheCapacityToDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigReduceCacheCapacityToDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigReduceCacheCapacityToDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_sizes_at_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_sizes_at_parameters.go new file mode 100644 index 00000000000..a8fd0bcf249 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_sizes_at_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigReduceCacheSizesAtParams creates a new FindConfigReduceCacheSizesAtParams object +// with the default values initialized. 
+func NewFindConfigReduceCacheSizesAtParams() *FindConfigReduceCacheSizesAtParams { + + return &FindConfigReduceCacheSizesAtParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigReduceCacheSizesAtParamsWithTimeout creates a new FindConfigReduceCacheSizesAtParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigReduceCacheSizesAtParamsWithTimeout(timeout time.Duration) *FindConfigReduceCacheSizesAtParams { + + return &FindConfigReduceCacheSizesAtParams{ + + timeout: timeout, + } +} + +// NewFindConfigReduceCacheSizesAtParamsWithContext creates a new FindConfigReduceCacheSizesAtParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigReduceCacheSizesAtParamsWithContext(ctx context.Context) *FindConfigReduceCacheSizesAtParams { + + return &FindConfigReduceCacheSizesAtParams{ + + Context: ctx, + } +} + +// NewFindConfigReduceCacheSizesAtParamsWithHTTPClient creates a new FindConfigReduceCacheSizesAtParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigReduceCacheSizesAtParamsWithHTTPClient(client *http.Client) *FindConfigReduceCacheSizesAtParams { + + return &FindConfigReduceCacheSizesAtParams{ + HTTPClient: client, + } +} + +/* +FindConfigReduceCacheSizesAtParams contains all the parameters to send to the API endpoint +for the find config reduce cache sizes at operation typically these are written to a http.Request +*/ +type FindConfigReduceCacheSizesAtParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config reduce cache sizes at params +func (o *FindConfigReduceCacheSizesAtParams) WithTimeout(timeout time.Duration) *FindConfigReduceCacheSizesAtParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config reduce cache sizes at params +func (o *FindConfigReduceCacheSizesAtParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config reduce cache sizes at params +func (o *FindConfigReduceCacheSizesAtParams) WithContext(ctx context.Context) *FindConfigReduceCacheSizesAtParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config reduce cache sizes at params +func (o *FindConfigReduceCacheSizesAtParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config reduce cache sizes at params +func (o *FindConfigReduceCacheSizesAtParams) WithHTTPClient(client *http.Client) *FindConfigReduceCacheSizesAtParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config reduce cache sizes at params +func (o *FindConfigReduceCacheSizesAtParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigReduceCacheSizesAtParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_sizes_at_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_sizes_at_responses.go new file mode 100644 index 00000000000..d20e0b45f11 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_reduce_cache_sizes_at_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigReduceCacheSizesAtReader is a Reader for the FindConfigReduceCacheSizesAt structure. +type FindConfigReduceCacheSizesAtReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigReduceCacheSizesAtReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigReduceCacheSizesAtOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigReduceCacheSizesAtDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigReduceCacheSizesAtOK creates a FindConfigReduceCacheSizesAtOK with default headers values +func NewFindConfigReduceCacheSizesAtOK() *FindConfigReduceCacheSizesAtOK { + return &FindConfigReduceCacheSizesAtOK{} +} + +/* +FindConfigReduceCacheSizesAtOK handles this case with default header values. + +Config value +*/ +type FindConfigReduceCacheSizesAtOK struct { + Payload float64 +} + +func (o *FindConfigReduceCacheSizesAtOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigReduceCacheSizesAtOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigReduceCacheSizesAtDefault creates a FindConfigReduceCacheSizesAtDefault with default headers values +func NewFindConfigReduceCacheSizesAtDefault(code int) *FindConfigReduceCacheSizesAtDefault { + return &FindConfigReduceCacheSizesAtDefault{ + _statusCode: code, + } +} + +/* +FindConfigReduceCacheSizesAtDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigReduceCacheSizesAtDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config reduce cache sizes at default response +func (o *FindConfigReduceCacheSizesAtDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigReduceCacheSizesAtDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigReduceCacheSizesAtDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigReduceCacheSizesAtDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_first_boot_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_first_boot_parameters.go new file mode 100644 index 00000000000..90103e8080a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_first_boot_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigReplaceAddressFirstBootParams creates a new FindConfigReplaceAddressFirstBootParams object +// with the default values initialized. 
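Aside for review: the OK payload type tracks each config value's swagger schema rather than a single shape: string for prometheus_prefix and the replace_address family, int64 for the *_request_timeout_in_ms values, float64 for the reduce_cache_* knobs. A sketch reading a numeric payload; the OK value is built by hand here purely for illustration, and 0.85 is a made-up number:

package main

import (
	"fmt"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	// Normally returned by the client call; constructed directly here only
	// to show the concrete payload type carried by this endpoint's OK response.
	ok := config.NewFindConfigReduceCacheSizesAtOK()
	ok.Payload = 0.85 // hypothetical value
	ratio := ok.GetPayload()
	fmt.Printf("reduce_cache_sizes_at = %.2f\n", ratio)
}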
+func NewFindConfigReplaceAddressFirstBootParams() *FindConfigReplaceAddressFirstBootParams { + + return &FindConfigReplaceAddressFirstBootParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigReplaceAddressFirstBootParamsWithTimeout creates a new FindConfigReplaceAddressFirstBootParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigReplaceAddressFirstBootParamsWithTimeout(timeout time.Duration) *FindConfigReplaceAddressFirstBootParams { + + return &FindConfigReplaceAddressFirstBootParams{ + + timeout: timeout, + } +} + +// NewFindConfigReplaceAddressFirstBootParamsWithContext creates a new FindConfigReplaceAddressFirstBootParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigReplaceAddressFirstBootParamsWithContext(ctx context.Context) *FindConfigReplaceAddressFirstBootParams { + + return &FindConfigReplaceAddressFirstBootParams{ + + Context: ctx, + } +} + +// NewFindConfigReplaceAddressFirstBootParamsWithHTTPClient creates a new FindConfigReplaceAddressFirstBootParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigReplaceAddressFirstBootParamsWithHTTPClient(client *http.Client) *FindConfigReplaceAddressFirstBootParams { + + return &FindConfigReplaceAddressFirstBootParams{ + HTTPClient: client, + } +} + +/* +FindConfigReplaceAddressFirstBootParams contains all the parameters to send to the API endpoint +for the find config replace address first boot operation typically these are written to a http.Request +*/ +type FindConfigReplaceAddressFirstBootParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config replace address first boot params +func (o *FindConfigReplaceAddressFirstBootParams) WithTimeout(timeout time.Duration) *FindConfigReplaceAddressFirstBootParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config replace address first boot params +func (o *FindConfigReplaceAddressFirstBootParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config replace address first boot params +func (o *FindConfigReplaceAddressFirstBootParams) WithContext(ctx context.Context) *FindConfigReplaceAddressFirstBootParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config replace address first boot params +func (o *FindConfigReplaceAddressFirstBootParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config replace address first boot params +func (o *FindConfigReplaceAddressFirstBootParams) WithHTTPClient(client *http.Client) *FindConfigReplaceAddressFirstBootParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config replace address first boot params +func (o *FindConfigReplaceAddressFirstBootParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigReplaceAddressFirstBootParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_first_boot_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_first_boot_responses.go new file mode 100644 index 00000000000..3c86ded06ec --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_first_boot_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigReplaceAddressFirstBootReader is a Reader for the FindConfigReplaceAddressFirstBoot structure. +type FindConfigReplaceAddressFirstBootReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigReplaceAddressFirstBootReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigReplaceAddressFirstBootOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigReplaceAddressFirstBootDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigReplaceAddressFirstBootOK creates a FindConfigReplaceAddressFirstBootOK with default headers values +func NewFindConfigReplaceAddressFirstBootOK() *FindConfigReplaceAddressFirstBootOK { + return &FindConfigReplaceAddressFirstBootOK{} +} + +/* +FindConfigReplaceAddressFirstBootOK handles this case with default header values. + +Config value +*/ +type FindConfigReplaceAddressFirstBootOK struct { + Payload string +} + +func (o *FindConfigReplaceAddressFirstBootOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigReplaceAddressFirstBootOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigReplaceAddressFirstBootDefault creates a FindConfigReplaceAddressFirstBootDefault with default headers values +func NewFindConfigReplaceAddressFirstBootDefault(code int) *FindConfigReplaceAddressFirstBootDefault { + return &FindConfigReplaceAddressFirstBootDefault{ + _statusCode: code, + } +} + +/* +FindConfigReplaceAddressFirstBootDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigReplaceAddressFirstBootDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config replace address first boot default response +func (o *FindConfigReplaceAddressFirstBootDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigReplaceAddressFirstBootDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigReplaceAddressFirstBootDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigReplaceAddressFirstBootDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_parameters.go new file mode 100644 index 00000000000..34b0577d4e8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigReplaceAddressParams creates a new FindConfigReplaceAddressParams object +// with the default values initialized. 
+func NewFindConfigReplaceAddressParams() *FindConfigReplaceAddressParams { + + return &FindConfigReplaceAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigReplaceAddressParamsWithTimeout creates a new FindConfigReplaceAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigReplaceAddressParamsWithTimeout(timeout time.Duration) *FindConfigReplaceAddressParams { + + return &FindConfigReplaceAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigReplaceAddressParamsWithContext creates a new FindConfigReplaceAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigReplaceAddressParamsWithContext(ctx context.Context) *FindConfigReplaceAddressParams { + + return &FindConfigReplaceAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigReplaceAddressParamsWithHTTPClient creates a new FindConfigReplaceAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigReplaceAddressParamsWithHTTPClient(client *http.Client) *FindConfigReplaceAddressParams { + + return &FindConfigReplaceAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigReplaceAddressParams contains all the parameters to send to the API endpoint +for the find config replace address operation typically these are written to a http.Request +*/ +type FindConfigReplaceAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config replace address params +func (o *FindConfigReplaceAddressParams) WithTimeout(timeout time.Duration) *FindConfigReplaceAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config replace address params +func (o *FindConfigReplaceAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config replace address params +func (o *FindConfigReplaceAddressParams) WithContext(ctx context.Context) *FindConfigReplaceAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config replace address params +func (o *FindConfigReplaceAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config replace address params +func (o *FindConfigReplaceAddressParams) WithHTTPClient(client *http.Client) *FindConfigReplaceAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config replace address params +func (o *FindConfigReplaceAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigReplaceAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_responses.go new file mode 100644 index 00000000000..5460e69244b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigReplaceAddressReader is a Reader for the FindConfigReplaceAddress structure. +type FindConfigReplaceAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigReplaceAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigReplaceAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigReplaceAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigReplaceAddressOK creates a FindConfigReplaceAddressOK with default headers values +func NewFindConfigReplaceAddressOK() *FindConfigReplaceAddressOK { + return &FindConfigReplaceAddressOK{} +} + +/* +FindConfigReplaceAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigReplaceAddressOK struct { + Payload string +} + +func (o *FindConfigReplaceAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigReplaceAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigReplaceAddressDefault creates a FindConfigReplaceAddressDefault with default headers values +func NewFindConfigReplaceAddressDefault(code int) *FindConfigReplaceAddressDefault { + return &FindConfigReplaceAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigReplaceAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigReplaceAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config replace address default response +func (o *FindConfigReplaceAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigReplaceAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigReplaceAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigReplaceAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_node_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_node_parameters.go new file mode 100644 index 00000000000..512e28b12ac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_node_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigReplaceNodeParams creates a new FindConfigReplaceNodeParams object +// with the default values initialized. 
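Aside for review: besides the fluent setters, each params file also generates a WithHTTPClient constructor for injecting a caller-owned client. A sketch, where the 30-second client timeout is an arbitrary illustrative choice:

package main

import (
	"net/http"
	"time"

	"github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config"
)

func main() {
	// The generated constructor stores this pointer as-is, so transport,
	// proxy, and timeout settings on the client apply to the request.
	httpClient := &http.Client{Timeout: 30 * time.Second}
	params := config.NewFindConfigReplaceNodeParamsWithHTTPClient(httpClient)
	_ = params
}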
+func NewFindConfigReplaceNodeParams() *FindConfigReplaceNodeParams { + + return &FindConfigReplaceNodeParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigReplaceNodeParamsWithTimeout creates a new FindConfigReplaceNodeParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigReplaceNodeParamsWithTimeout(timeout time.Duration) *FindConfigReplaceNodeParams { + + return &FindConfigReplaceNodeParams{ + + timeout: timeout, + } +} + +// NewFindConfigReplaceNodeParamsWithContext creates a new FindConfigReplaceNodeParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigReplaceNodeParamsWithContext(ctx context.Context) *FindConfigReplaceNodeParams { + + return &FindConfigReplaceNodeParams{ + + Context: ctx, + } +} + +// NewFindConfigReplaceNodeParamsWithHTTPClient creates a new FindConfigReplaceNodeParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigReplaceNodeParamsWithHTTPClient(client *http.Client) *FindConfigReplaceNodeParams { + + return &FindConfigReplaceNodeParams{ + HTTPClient: client, + } +} + +/* +FindConfigReplaceNodeParams contains all the parameters to send to the API endpoint +for the find config replace node operation typically these are written to a http.Request +*/ +type FindConfigReplaceNodeParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config replace node params +func (o *FindConfigReplaceNodeParams) WithTimeout(timeout time.Duration) *FindConfigReplaceNodeParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config replace node params +func (o *FindConfigReplaceNodeParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config replace node params +func (o *FindConfigReplaceNodeParams) WithContext(ctx context.Context) *FindConfigReplaceNodeParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config replace node params +func (o *FindConfigReplaceNodeParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config replace node params +func (o *FindConfigReplaceNodeParams) WithHTTPClient(client *http.Client) *FindConfigReplaceNodeParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config replace node params +func (o *FindConfigReplaceNodeParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigReplaceNodeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_node_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_node_responses.go new file mode 100644 index 00000000000..fcfc32ddb3c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_node_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigReplaceNodeReader is a Reader for the FindConfigReplaceNode structure. +type FindConfigReplaceNodeReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigReplaceNodeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigReplaceNodeOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigReplaceNodeDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigReplaceNodeOK creates a FindConfigReplaceNodeOK with default headers values +func NewFindConfigReplaceNodeOK() *FindConfigReplaceNodeOK { + return &FindConfigReplaceNodeOK{} +} + +/* +FindConfigReplaceNodeOK handles this case with default header values. + +Config value +*/ +type FindConfigReplaceNodeOK struct { + Payload string +} + +func (o *FindConfigReplaceNodeOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigReplaceNodeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigReplaceNodeDefault creates a FindConfigReplaceNodeDefault with default headers values +func NewFindConfigReplaceNodeDefault(code int) *FindConfigReplaceNodeDefault { + return &FindConfigReplaceNodeDefault{ + _statusCode: code, + } +} + +/* +FindConfigReplaceNodeDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigReplaceNodeDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config replace node default response +func (o *FindConfigReplaceNodeDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigReplaceNodeDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigReplaceNodeDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigReplaceNodeDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_token_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_token_parameters.go new file mode 100644 index 00000000000..fd034e42144 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_token_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigReplaceTokenParams creates a new FindConfigReplaceTokenParams object +// with the default values initialized. 
+func NewFindConfigReplaceTokenParams() *FindConfigReplaceTokenParams { + + return &FindConfigReplaceTokenParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigReplaceTokenParamsWithTimeout creates a new FindConfigReplaceTokenParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigReplaceTokenParamsWithTimeout(timeout time.Duration) *FindConfigReplaceTokenParams { + + return &FindConfigReplaceTokenParams{ + + timeout: timeout, + } +} + +// NewFindConfigReplaceTokenParamsWithContext creates a new FindConfigReplaceTokenParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigReplaceTokenParamsWithContext(ctx context.Context) *FindConfigReplaceTokenParams { + + return &FindConfigReplaceTokenParams{ + + Context: ctx, + } +} + +// NewFindConfigReplaceTokenParamsWithHTTPClient creates a new FindConfigReplaceTokenParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigReplaceTokenParamsWithHTTPClient(client *http.Client) *FindConfigReplaceTokenParams { + + return &FindConfigReplaceTokenParams{ + HTTPClient: client, + } +} + +/* +FindConfigReplaceTokenParams contains all the parameters to send to the API endpoint +for the find config replace token operation typically these are written to a http.Request +*/ +type FindConfigReplaceTokenParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config replace token params +func (o *FindConfigReplaceTokenParams) WithTimeout(timeout time.Duration) *FindConfigReplaceTokenParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config replace token params +func (o *FindConfigReplaceTokenParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config replace token params +func (o *FindConfigReplaceTokenParams) WithContext(ctx context.Context) *FindConfigReplaceTokenParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config replace token params +func (o *FindConfigReplaceTokenParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config replace token params +func (o *FindConfigReplaceTokenParams) WithHTTPClient(client *http.Client) *FindConfigReplaceTokenParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config replace token params +func (o *FindConfigReplaceTokenParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigReplaceTokenParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_token_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_token_responses.go new file mode 100644 index 00000000000..5a3ce523b2d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_replace_token_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigReplaceTokenReader is a Reader for the FindConfigReplaceToken structure. +type FindConfigReplaceTokenReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigReplaceTokenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigReplaceTokenOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigReplaceTokenDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigReplaceTokenOK creates a FindConfigReplaceTokenOK with default headers values +func NewFindConfigReplaceTokenOK() *FindConfigReplaceTokenOK { + return &FindConfigReplaceTokenOK{} +} + +/* +FindConfigReplaceTokenOK handles this case with default header values. + +Config value +*/ +type FindConfigReplaceTokenOK struct { + Payload string +} + +func (o *FindConfigReplaceTokenOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigReplaceTokenOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigReplaceTokenDefault creates a FindConfigReplaceTokenDefault with default headers values +func NewFindConfigReplaceTokenDefault(code int) *FindConfigReplaceTokenDefault { + return &FindConfigReplaceTokenDefault{ + _statusCode: code, + } +} + +/* +FindConfigReplaceTokenDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigReplaceTokenDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config replace token default response +func (o *FindConfigReplaceTokenDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigReplaceTokenDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigReplaceTokenDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigReplaceTokenDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_id_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_id_parameters.go new file mode 100644 index 00000000000..3fc13138314 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_id_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRequestSchedulerIDParams creates a new FindConfigRequestSchedulerIDParams object +// with the default values initialized. 
+func NewFindConfigRequestSchedulerIDParams() *FindConfigRequestSchedulerIDParams { + + return &FindConfigRequestSchedulerIDParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRequestSchedulerIDParamsWithTimeout creates a new FindConfigRequestSchedulerIDParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRequestSchedulerIDParamsWithTimeout(timeout time.Duration) *FindConfigRequestSchedulerIDParams { + + return &FindConfigRequestSchedulerIDParams{ + + timeout: timeout, + } +} + +// NewFindConfigRequestSchedulerIDParamsWithContext creates a new FindConfigRequestSchedulerIDParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRequestSchedulerIDParamsWithContext(ctx context.Context) *FindConfigRequestSchedulerIDParams { + + return &FindConfigRequestSchedulerIDParams{ + + Context: ctx, + } +} + +// NewFindConfigRequestSchedulerIDParamsWithHTTPClient creates a new FindConfigRequestSchedulerIDParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRequestSchedulerIDParamsWithHTTPClient(client *http.Client) *FindConfigRequestSchedulerIDParams { + + return &FindConfigRequestSchedulerIDParams{ + HTTPClient: client, + } +} + +/* +FindConfigRequestSchedulerIDParams contains all the parameters to send to the API endpoint +for the find config request scheduler id operation typically these are written to a http.Request +*/ +type FindConfigRequestSchedulerIDParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config request scheduler id params +func (o *FindConfigRequestSchedulerIDParams) WithTimeout(timeout time.Duration) *FindConfigRequestSchedulerIDParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config request scheduler id params +func (o *FindConfigRequestSchedulerIDParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config request scheduler id params +func (o *FindConfigRequestSchedulerIDParams) WithContext(ctx context.Context) *FindConfigRequestSchedulerIDParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config request scheduler id params +func (o *FindConfigRequestSchedulerIDParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config request scheduler id params +func (o *FindConfigRequestSchedulerIDParams) WithHTTPClient(client *http.Client) *FindConfigRequestSchedulerIDParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config request scheduler id params +func (o *FindConfigRequestSchedulerIDParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRequestSchedulerIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_id_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_id_responses.go new file mode 100644 index 00000000000..bd03a8a24c0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_id_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRequestSchedulerIDReader is a Reader for the FindConfigRequestSchedulerID structure. +type FindConfigRequestSchedulerIDReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRequestSchedulerIDReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRequestSchedulerIDOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRequestSchedulerIDDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRequestSchedulerIDOK creates a FindConfigRequestSchedulerIDOK with default headers values +func NewFindConfigRequestSchedulerIDOK() *FindConfigRequestSchedulerIDOK { + return &FindConfigRequestSchedulerIDOK{} +} + +/* +FindConfigRequestSchedulerIDOK handles this case with default header values. + +Config value +*/ +type FindConfigRequestSchedulerIDOK struct { + Payload string +} + +func (o *FindConfigRequestSchedulerIDOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigRequestSchedulerIDOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRequestSchedulerIDDefault creates a FindConfigRequestSchedulerIDDefault with default headers values +func NewFindConfigRequestSchedulerIDDefault(code int) *FindConfigRequestSchedulerIDDefault { + return &FindConfigRequestSchedulerIDDefault{ + _statusCode: code, + } +} + +/* +FindConfigRequestSchedulerIDDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRequestSchedulerIDDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config request scheduler id default response +func (o *FindConfigRequestSchedulerIDDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRequestSchedulerIDDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRequestSchedulerIDDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRequestSchedulerIDDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_options_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_options_parameters.go new file mode 100644 index 00000000000..a0991856a6a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_options_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRequestSchedulerOptionsParams creates a new FindConfigRequestSchedulerOptionsParams object +// with the default values initialized. 
+func NewFindConfigRequestSchedulerOptionsParams() *FindConfigRequestSchedulerOptionsParams { + + return &FindConfigRequestSchedulerOptionsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRequestSchedulerOptionsParamsWithTimeout creates a new FindConfigRequestSchedulerOptionsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRequestSchedulerOptionsParamsWithTimeout(timeout time.Duration) *FindConfigRequestSchedulerOptionsParams { + + return &FindConfigRequestSchedulerOptionsParams{ + + timeout: timeout, + } +} + +// NewFindConfigRequestSchedulerOptionsParamsWithContext creates a new FindConfigRequestSchedulerOptionsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRequestSchedulerOptionsParamsWithContext(ctx context.Context) *FindConfigRequestSchedulerOptionsParams { + + return &FindConfigRequestSchedulerOptionsParams{ + + Context: ctx, + } +} + +// NewFindConfigRequestSchedulerOptionsParamsWithHTTPClient creates a new FindConfigRequestSchedulerOptionsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRequestSchedulerOptionsParamsWithHTTPClient(client *http.Client) *FindConfigRequestSchedulerOptionsParams { + + return &FindConfigRequestSchedulerOptionsParams{ + HTTPClient: client, + } +} + +/* +FindConfigRequestSchedulerOptionsParams contains all the parameters to send to the API endpoint +for the find config request scheduler options operation typically these are written to a http.Request +*/ +type FindConfigRequestSchedulerOptionsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config request scheduler options params +func (o *FindConfigRequestSchedulerOptionsParams) WithTimeout(timeout time.Duration) *FindConfigRequestSchedulerOptionsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config request scheduler options params +func (o *FindConfigRequestSchedulerOptionsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config request scheduler options params +func (o *FindConfigRequestSchedulerOptionsParams) WithContext(ctx context.Context) *FindConfigRequestSchedulerOptionsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config request scheduler options params +func (o *FindConfigRequestSchedulerOptionsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config request scheduler options params +func (o *FindConfigRequestSchedulerOptionsParams) WithHTTPClient(client *http.Client) *FindConfigRequestSchedulerOptionsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config request scheduler options params +func (o *FindConfigRequestSchedulerOptionsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRequestSchedulerOptionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_options_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_options_responses.go new file mode 100644 index 00000000000..b3cbebed3df --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_options_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRequestSchedulerOptionsReader is a Reader for the FindConfigRequestSchedulerOptions structure. +type FindConfigRequestSchedulerOptionsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRequestSchedulerOptionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRequestSchedulerOptionsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRequestSchedulerOptionsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRequestSchedulerOptionsOK creates a FindConfigRequestSchedulerOptionsOK with default headers values +func NewFindConfigRequestSchedulerOptionsOK() *FindConfigRequestSchedulerOptionsOK { + return &FindConfigRequestSchedulerOptionsOK{} +} + +/* +FindConfigRequestSchedulerOptionsOK handles this case with default header values. + +Config value +*/ +type FindConfigRequestSchedulerOptionsOK struct { + Payload []string +} + +func (o *FindConfigRequestSchedulerOptionsOK) GetPayload() []string { + return o.Payload +} + +func (o *FindConfigRequestSchedulerOptionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRequestSchedulerOptionsDefault creates a FindConfigRequestSchedulerOptionsDefault with default headers values +func NewFindConfigRequestSchedulerOptionsDefault(code int) *FindConfigRequestSchedulerOptionsDefault { + return &FindConfigRequestSchedulerOptionsDefault{ + _statusCode: code, + } +} + +/* +FindConfigRequestSchedulerOptionsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRequestSchedulerOptionsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config request scheduler options default response +func (o *FindConfigRequestSchedulerOptionsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRequestSchedulerOptionsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRequestSchedulerOptionsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRequestSchedulerOptionsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_parameters.go new file mode 100644 index 00000000000..54ba69cbd20 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRequestSchedulerParams creates a new FindConfigRequestSchedulerParams object +// with the default values initialized. 
+func NewFindConfigRequestSchedulerParams() *FindConfigRequestSchedulerParams { + + return &FindConfigRequestSchedulerParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRequestSchedulerParamsWithTimeout creates a new FindConfigRequestSchedulerParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRequestSchedulerParamsWithTimeout(timeout time.Duration) *FindConfigRequestSchedulerParams { + + return &FindConfigRequestSchedulerParams{ + + timeout: timeout, + } +} + +// NewFindConfigRequestSchedulerParamsWithContext creates a new FindConfigRequestSchedulerParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRequestSchedulerParamsWithContext(ctx context.Context) *FindConfigRequestSchedulerParams { + + return &FindConfigRequestSchedulerParams{ + + Context: ctx, + } +} + +// NewFindConfigRequestSchedulerParamsWithHTTPClient creates a new FindConfigRequestSchedulerParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRequestSchedulerParamsWithHTTPClient(client *http.Client) *FindConfigRequestSchedulerParams { + + return &FindConfigRequestSchedulerParams{ + HTTPClient: client, + } +} + +/* +FindConfigRequestSchedulerParams contains all the parameters to send to the API endpoint +for the find config request scheduler operation typically these are written to a http.Request +*/ +type FindConfigRequestSchedulerParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config request scheduler params +func (o *FindConfigRequestSchedulerParams) WithTimeout(timeout time.Duration) *FindConfigRequestSchedulerParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config request scheduler params +func (o *FindConfigRequestSchedulerParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config request scheduler params +func (o *FindConfigRequestSchedulerParams) WithContext(ctx context.Context) *FindConfigRequestSchedulerParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config request scheduler params +func (o *FindConfigRequestSchedulerParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config request scheduler params +func (o *FindConfigRequestSchedulerParams) WithHTTPClient(client *http.Client) *FindConfigRequestSchedulerParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config request scheduler params +func (o *FindConfigRequestSchedulerParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRequestSchedulerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_responses.go new file mode 100644 index 00000000000..a342bb7ef40 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_scheduler_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRequestSchedulerReader is a Reader for the FindConfigRequestScheduler structure. +type FindConfigRequestSchedulerReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRequestSchedulerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRequestSchedulerOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRequestSchedulerDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRequestSchedulerOK creates a FindConfigRequestSchedulerOK with default headers values +func NewFindConfigRequestSchedulerOK() *FindConfigRequestSchedulerOK { + return &FindConfigRequestSchedulerOK{} +} + +/* +FindConfigRequestSchedulerOK handles this case with default header values. + +Config value +*/ +type FindConfigRequestSchedulerOK struct { + Payload string +} + +func (o *FindConfigRequestSchedulerOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigRequestSchedulerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRequestSchedulerDefault creates a FindConfigRequestSchedulerDefault with default headers values +func NewFindConfigRequestSchedulerDefault(code int) *FindConfigRequestSchedulerDefault { + return &FindConfigRequestSchedulerDefault{ + _statusCode: code, + } +} + +/* +FindConfigRequestSchedulerDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRequestSchedulerDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config request scheduler default response +func (o *FindConfigRequestSchedulerDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRequestSchedulerDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRequestSchedulerDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRequestSchedulerDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_timeout_in_ms_parameters.go new file mode 100644 index 00000000000..f6b37c4c928 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_timeout_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRequestTimeoutInMsParams creates a new FindConfigRequestTimeoutInMsParams object +// with the default values initialized. 
+func NewFindConfigRequestTimeoutInMsParams() *FindConfigRequestTimeoutInMsParams { + + return &FindConfigRequestTimeoutInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRequestTimeoutInMsParamsWithTimeout creates a new FindConfigRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRequestTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigRequestTimeoutInMsParams { + + return &FindConfigRequestTimeoutInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigRequestTimeoutInMsParamsWithContext creates a new FindConfigRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRequestTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigRequestTimeoutInMsParams { + + return &FindConfigRequestTimeoutInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigRequestTimeoutInMsParamsWithHTTPClient creates a new FindConfigRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRequestTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigRequestTimeoutInMsParams { + + return &FindConfigRequestTimeoutInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigRequestTimeoutInMsParams contains all the parameters to send to the API endpoint +for the find config request timeout in ms operation typically these are written to a http.Request +*/ +type FindConfigRequestTimeoutInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config request timeout in ms params +func (o *FindConfigRequestTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigRequestTimeoutInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config request timeout in ms params +func (o *FindConfigRequestTimeoutInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config request timeout in ms params +func (o *FindConfigRequestTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigRequestTimeoutInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config request timeout in ms params +func (o *FindConfigRequestTimeoutInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config request timeout in ms params +func (o *FindConfigRequestTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigRequestTimeoutInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config request timeout in ms params +func (o *FindConfigRequestTimeoutInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRequestTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_timeout_in_ms_responses.go new file mode 100644 index 00000000000..aa6aae37806 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_request_timeout_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRequestTimeoutInMsReader is a Reader for the FindConfigRequestTimeoutInMs structure. +type FindConfigRequestTimeoutInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRequestTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRequestTimeoutInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRequestTimeoutInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRequestTimeoutInMsOK creates a FindConfigRequestTimeoutInMsOK with default headers values +func NewFindConfigRequestTimeoutInMsOK() *FindConfigRequestTimeoutInMsOK { + return &FindConfigRequestTimeoutInMsOK{} +} + +/* +FindConfigRequestTimeoutInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigRequestTimeoutInMsOK struct { + Payload int64 +} + +func (o *FindConfigRequestTimeoutInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRequestTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRequestTimeoutInMsDefault creates a FindConfigRequestTimeoutInMsDefault with default headers values +func NewFindConfigRequestTimeoutInMsDefault(code int) *FindConfigRequestTimeoutInMsDefault { + return &FindConfigRequestTimeoutInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigRequestTimeoutInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRequestTimeoutInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config request timeout in ms default response +func (o *FindConfigRequestTimeoutInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRequestTimeoutInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRequestTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRequestTimeoutInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ring_delay_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ring_delay_ms_parameters.go new file mode 100644 index 00000000000..5a70db3f460 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ring_delay_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRingDelayMsParams creates a new FindConfigRingDelayMsParams object +// with the default values initialized. 
+func NewFindConfigRingDelayMsParams() *FindConfigRingDelayMsParams { + + return &FindConfigRingDelayMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRingDelayMsParamsWithTimeout creates a new FindConfigRingDelayMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRingDelayMsParamsWithTimeout(timeout time.Duration) *FindConfigRingDelayMsParams { + + return &FindConfigRingDelayMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigRingDelayMsParamsWithContext creates a new FindConfigRingDelayMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRingDelayMsParamsWithContext(ctx context.Context) *FindConfigRingDelayMsParams { + + return &FindConfigRingDelayMsParams{ + + Context: ctx, + } +} + +// NewFindConfigRingDelayMsParamsWithHTTPClient creates a new FindConfigRingDelayMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRingDelayMsParamsWithHTTPClient(client *http.Client) *FindConfigRingDelayMsParams { + + return &FindConfigRingDelayMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigRingDelayMsParams contains all the parameters to send to the API endpoint +for the find config ring delay ms operation typically these are written to a http.Request +*/ +type FindConfigRingDelayMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config ring delay ms params +func (o *FindConfigRingDelayMsParams) WithTimeout(timeout time.Duration) *FindConfigRingDelayMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config ring delay ms params +func (o *FindConfigRingDelayMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config ring delay ms params +func (o *FindConfigRingDelayMsParams) WithContext(ctx context.Context) *FindConfigRingDelayMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config ring delay ms params +func (o *FindConfigRingDelayMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config ring delay ms params +func (o *FindConfigRingDelayMsParams) WithHTTPClient(client *http.Client) *FindConfigRingDelayMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config ring delay ms params +func (o *FindConfigRingDelayMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRingDelayMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ring_delay_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ring_delay_ms_responses.go new file mode 100644 index 00000000000..bb1a4202c4a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ring_delay_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRingDelayMsReader is a Reader for the FindConfigRingDelayMs structure. +type FindConfigRingDelayMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRingDelayMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRingDelayMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRingDelayMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRingDelayMsOK creates a FindConfigRingDelayMsOK with default headers values +func NewFindConfigRingDelayMsOK() *FindConfigRingDelayMsOK { + return &FindConfigRingDelayMsOK{} +} + +/* +FindConfigRingDelayMsOK handles this case with default header values. + +Config value +*/ +type FindConfigRingDelayMsOK struct { + Payload int64 +} + +func (o *FindConfigRingDelayMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRingDelayMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRingDelayMsDefault creates a FindConfigRingDelayMsDefault with default headers values +func NewFindConfigRingDelayMsDefault(code int) *FindConfigRingDelayMsDefault { + return &FindConfigRingDelayMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigRingDelayMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRingDelayMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config ring delay ms default response +func (o *FindConfigRingDelayMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRingDelayMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRingDelayMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRingDelayMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_role_manager_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_role_manager_parameters.go new file mode 100644 index 00000000000..b7542b6c7b1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_role_manager_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRoleManagerParams creates a new FindConfigRoleManagerParams object +// with the default values initialized. 
+func NewFindConfigRoleManagerParams() *FindConfigRoleManagerParams { + + return &FindConfigRoleManagerParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRoleManagerParamsWithTimeout creates a new FindConfigRoleManagerParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRoleManagerParamsWithTimeout(timeout time.Duration) *FindConfigRoleManagerParams { + + return &FindConfigRoleManagerParams{ + + timeout: timeout, + } +} + +// NewFindConfigRoleManagerParamsWithContext creates a new FindConfigRoleManagerParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRoleManagerParamsWithContext(ctx context.Context) *FindConfigRoleManagerParams { + + return &FindConfigRoleManagerParams{ + + Context: ctx, + } +} + +// NewFindConfigRoleManagerParamsWithHTTPClient creates a new FindConfigRoleManagerParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRoleManagerParamsWithHTTPClient(client *http.Client) *FindConfigRoleManagerParams { + + return &FindConfigRoleManagerParams{ + HTTPClient: client, + } +} + +/* +FindConfigRoleManagerParams contains all the parameters to send to the API endpoint +for the find config role manager operation typically these are written to a http.Request +*/ +type FindConfigRoleManagerParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config role manager params +func (o *FindConfigRoleManagerParams) WithTimeout(timeout time.Duration) *FindConfigRoleManagerParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config role manager params +func (o *FindConfigRoleManagerParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config role manager params +func (o *FindConfigRoleManagerParams) WithContext(ctx context.Context) *FindConfigRoleManagerParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config role manager params +func (o *FindConfigRoleManagerParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config role manager params +func (o *FindConfigRoleManagerParams) WithHTTPClient(client *http.Client) *FindConfigRoleManagerParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config role manager params +func (o *FindConfigRoleManagerParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRoleManagerParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_role_manager_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_role_manager_responses.go new file mode 100644 index 00000000000..428b06ca356 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_role_manager_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRoleManagerReader is a Reader for the FindConfigRoleManager structure. +type FindConfigRoleManagerReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRoleManagerReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRoleManagerOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRoleManagerDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRoleManagerOK creates a FindConfigRoleManagerOK with default headers values +func NewFindConfigRoleManagerOK() *FindConfigRoleManagerOK { + return &FindConfigRoleManagerOK{} +} + +/* +FindConfigRoleManagerOK handles this case with default header values. + +Config value +*/ +type FindConfigRoleManagerOK struct { + Payload string +} + +func (o *FindConfigRoleManagerOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigRoleManagerOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRoleManagerDefault creates a FindConfigRoleManagerDefault with default headers values +func NewFindConfigRoleManagerDefault(code int) *FindConfigRoleManagerDefault { + return &FindConfigRoleManagerDefault{ + _statusCode: code, + } +} + +/* +FindConfigRoleManagerDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRoleManagerDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config role manager default response +func (o *FindConfigRoleManagerDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRoleManagerDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRoleManagerDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRoleManagerDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_keys_to_save_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_keys_to_save_parameters.go new file mode 100644 index 00000000000..7900ca5cc71 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_keys_to_save_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRowCacheKeysToSaveParams creates a new FindConfigRowCacheKeysToSaveParams object +// with the default values initialized. 
+func NewFindConfigRowCacheKeysToSaveParams() *FindConfigRowCacheKeysToSaveParams { + + return &FindConfigRowCacheKeysToSaveParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRowCacheKeysToSaveParamsWithTimeout creates a new FindConfigRowCacheKeysToSaveParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRowCacheKeysToSaveParamsWithTimeout(timeout time.Duration) *FindConfigRowCacheKeysToSaveParams { + + return &FindConfigRowCacheKeysToSaveParams{ + + timeout: timeout, + } +} + +// NewFindConfigRowCacheKeysToSaveParamsWithContext creates a new FindConfigRowCacheKeysToSaveParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRowCacheKeysToSaveParamsWithContext(ctx context.Context) *FindConfigRowCacheKeysToSaveParams { + + return &FindConfigRowCacheKeysToSaveParams{ + + Context: ctx, + } +} + +// NewFindConfigRowCacheKeysToSaveParamsWithHTTPClient creates a new FindConfigRowCacheKeysToSaveParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRowCacheKeysToSaveParamsWithHTTPClient(client *http.Client) *FindConfigRowCacheKeysToSaveParams { + + return &FindConfigRowCacheKeysToSaveParams{ + HTTPClient: client, + } +} + +/* +FindConfigRowCacheKeysToSaveParams contains all the parameters to send to the API endpoint +for the find config row cache keys to save operation typically these are written to a http.Request +*/ +type FindConfigRowCacheKeysToSaveParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config row cache keys to save params +func (o *FindConfigRowCacheKeysToSaveParams) WithTimeout(timeout time.Duration) *FindConfigRowCacheKeysToSaveParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config row cache keys to save params +func (o *FindConfigRowCacheKeysToSaveParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config row cache keys to save params +func (o *FindConfigRowCacheKeysToSaveParams) WithContext(ctx context.Context) *FindConfigRowCacheKeysToSaveParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config row cache keys to save params +func (o *FindConfigRowCacheKeysToSaveParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config row cache keys to save params +func (o *FindConfigRowCacheKeysToSaveParams) WithHTTPClient(client *http.Client) *FindConfigRowCacheKeysToSaveParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config row cache keys to save params +func (o *FindConfigRowCacheKeysToSaveParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRowCacheKeysToSaveParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_keys_to_save_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_keys_to_save_responses.go new file mode 100644 index 00000000000..40929ae3123 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_keys_to_save_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRowCacheKeysToSaveReader is a Reader for the FindConfigRowCacheKeysToSave structure. +type FindConfigRowCacheKeysToSaveReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRowCacheKeysToSaveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRowCacheKeysToSaveOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRowCacheKeysToSaveDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRowCacheKeysToSaveOK creates a FindConfigRowCacheKeysToSaveOK with default headers values +func NewFindConfigRowCacheKeysToSaveOK() *FindConfigRowCacheKeysToSaveOK { + return &FindConfigRowCacheKeysToSaveOK{} +} + +/* +FindConfigRowCacheKeysToSaveOK handles this case with default header values. + +Config value +*/ +type FindConfigRowCacheKeysToSaveOK struct { + Payload int64 +} + +func (o *FindConfigRowCacheKeysToSaveOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRowCacheKeysToSaveOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRowCacheKeysToSaveDefault creates a FindConfigRowCacheKeysToSaveDefault with default headers values +func NewFindConfigRowCacheKeysToSaveDefault(code int) *FindConfigRowCacheKeysToSaveDefault { + return &FindConfigRowCacheKeysToSaveDefault{ + _statusCode: code, + } +} + +/* +FindConfigRowCacheKeysToSaveDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRowCacheKeysToSaveDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config row cache keys to save default response +func (o *FindConfigRowCacheKeysToSaveDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRowCacheKeysToSaveDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRowCacheKeysToSaveDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRowCacheKeysToSaveDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_save_period_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_save_period_parameters.go new file mode 100644 index 00000000000..71b0bb9708d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_save_period_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRowCacheSavePeriodParams creates a new FindConfigRowCacheSavePeriodParams object +// with the default values initialized. 
+func NewFindConfigRowCacheSavePeriodParams() *FindConfigRowCacheSavePeriodParams { + + return &FindConfigRowCacheSavePeriodParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRowCacheSavePeriodParamsWithTimeout creates a new FindConfigRowCacheSavePeriodParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRowCacheSavePeriodParamsWithTimeout(timeout time.Duration) *FindConfigRowCacheSavePeriodParams { + + return &FindConfigRowCacheSavePeriodParams{ + + timeout: timeout, + } +} + +// NewFindConfigRowCacheSavePeriodParamsWithContext creates a new FindConfigRowCacheSavePeriodParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRowCacheSavePeriodParamsWithContext(ctx context.Context) *FindConfigRowCacheSavePeriodParams { + + return &FindConfigRowCacheSavePeriodParams{ + + Context: ctx, + } +} + +// NewFindConfigRowCacheSavePeriodParamsWithHTTPClient creates a new FindConfigRowCacheSavePeriodParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRowCacheSavePeriodParamsWithHTTPClient(client *http.Client) *FindConfigRowCacheSavePeriodParams { + + return &FindConfigRowCacheSavePeriodParams{ + HTTPClient: client, + } +} + +/* +FindConfigRowCacheSavePeriodParams contains all the parameters to send to the API endpoint +for the find config row cache save period operation typically these are written to a http.Request +*/ +type FindConfigRowCacheSavePeriodParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config row cache save period params +func (o *FindConfigRowCacheSavePeriodParams) WithTimeout(timeout time.Duration) *FindConfigRowCacheSavePeriodParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config row cache save period params +func (o *FindConfigRowCacheSavePeriodParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config row cache save period params +func (o *FindConfigRowCacheSavePeriodParams) WithContext(ctx context.Context) *FindConfigRowCacheSavePeriodParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config row cache save period params +func (o *FindConfigRowCacheSavePeriodParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config row cache save period params +func (o *FindConfigRowCacheSavePeriodParams) WithHTTPClient(client *http.Client) *FindConfigRowCacheSavePeriodParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config row cache save period params +func (o *FindConfigRowCacheSavePeriodParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRowCacheSavePeriodParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_save_period_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_save_period_responses.go new file mode 100644 index 00000000000..4cc8e05ac01 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_save_period_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRowCacheSavePeriodReader is a Reader for the FindConfigRowCacheSavePeriod structure. +type FindConfigRowCacheSavePeriodReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRowCacheSavePeriodReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRowCacheSavePeriodOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRowCacheSavePeriodDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRowCacheSavePeriodOK creates a FindConfigRowCacheSavePeriodOK with default headers values +func NewFindConfigRowCacheSavePeriodOK() *FindConfigRowCacheSavePeriodOK { + return &FindConfigRowCacheSavePeriodOK{} +} + +/* +FindConfigRowCacheSavePeriodOK handles this case with default header values. + +Config value +*/ +type FindConfigRowCacheSavePeriodOK struct { + Payload int64 +} + +func (o *FindConfigRowCacheSavePeriodOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRowCacheSavePeriodOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRowCacheSavePeriodDefault creates a FindConfigRowCacheSavePeriodDefault with default headers values +func NewFindConfigRowCacheSavePeriodDefault(code int) *FindConfigRowCacheSavePeriodDefault { + return &FindConfigRowCacheSavePeriodDefault{ + _statusCode: code, + } +} + +/* +FindConfigRowCacheSavePeriodDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRowCacheSavePeriodDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config row cache save period default response +func (o *FindConfigRowCacheSavePeriodDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRowCacheSavePeriodDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRowCacheSavePeriodDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRowCacheSavePeriodDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_size_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_size_in_mb_parameters.go new file mode 100644 index 00000000000..44958b24729 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_size_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRowCacheSizeInMbParams creates a new FindConfigRowCacheSizeInMbParams object +// with the default values initialized. 
+func NewFindConfigRowCacheSizeInMbParams() *FindConfigRowCacheSizeInMbParams { + + return &FindConfigRowCacheSizeInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRowCacheSizeInMbParamsWithTimeout creates a new FindConfigRowCacheSizeInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRowCacheSizeInMbParamsWithTimeout(timeout time.Duration) *FindConfigRowCacheSizeInMbParams { + + return &FindConfigRowCacheSizeInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigRowCacheSizeInMbParamsWithContext creates a new FindConfigRowCacheSizeInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRowCacheSizeInMbParamsWithContext(ctx context.Context) *FindConfigRowCacheSizeInMbParams { + + return &FindConfigRowCacheSizeInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigRowCacheSizeInMbParamsWithHTTPClient creates a new FindConfigRowCacheSizeInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRowCacheSizeInMbParamsWithHTTPClient(client *http.Client) *FindConfigRowCacheSizeInMbParams { + + return &FindConfigRowCacheSizeInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigRowCacheSizeInMbParams contains all the parameters to send to the API endpoint +for the find config row cache size in mb operation typically these are written to a http.Request +*/ +type FindConfigRowCacheSizeInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config row cache size in mb params +func (o *FindConfigRowCacheSizeInMbParams) WithTimeout(timeout time.Duration) *FindConfigRowCacheSizeInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config row cache size in mb params +func (o *FindConfigRowCacheSizeInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config row cache size in mb params +func (o *FindConfigRowCacheSizeInMbParams) WithContext(ctx context.Context) *FindConfigRowCacheSizeInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config row cache size in mb params +func (o *FindConfigRowCacheSizeInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config row cache size in mb params +func (o *FindConfigRowCacheSizeInMbParams) WithHTTPClient(client *http.Client) *FindConfigRowCacheSizeInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config row cache size in mb params +func (o *FindConfigRowCacheSizeInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRowCacheSizeInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_size_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_size_in_mb_responses.go new file mode 100644 index 00000000000..3fcd49c6cb8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_row_cache_size_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRowCacheSizeInMbReader is a Reader for the FindConfigRowCacheSizeInMb structure. +type FindConfigRowCacheSizeInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRowCacheSizeInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRowCacheSizeInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRowCacheSizeInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRowCacheSizeInMbOK creates a FindConfigRowCacheSizeInMbOK with default headers values +func NewFindConfigRowCacheSizeInMbOK() *FindConfigRowCacheSizeInMbOK { + return &FindConfigRowCacheSizeInMbOK{} +} + +/* +FindConfigRowCacheSizeInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigRowCacheSizeInMbOK struct { + Payload int64 +} + +func (o *FindConfigRowCacheSizeInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRowCacheSizeInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRowCacheSizeInMbDefault creates a FindConfigRowCacheSizeInMbDefault with default headers values +func NewFindConfigRowCacheSizeInMbDefault(code int) *FindConfigRowCacheSizeInMbDefault { + return &FindConfigRowCacheSizeInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigRowCacheSizeInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRowCacheSizeInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config row cache size in mb default response +func (o *FindConfigRowCacheSizeInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRowCacheSizeInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRowCacheSizeInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRowCacheSizeInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_address_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_address_parameters.go new file mode 100644 index 00000000000..07688bc6497 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_address_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCAddressParams creates a new FindConfigRPCAddressParams object +// with the default values initialized. 
+func NewFindConfigRPCAddressParams() *FindConfigRPCAddressParams { + + return &FindConfigRPCAddressParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCAddressParamsWithTimeout creates a new FindConfigRPCAddressParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCAddressParamsWithTimeout(timeout time.Duration) *FindConfigRPCAddressParams { + + return &FindConfigRPCAddressParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCAddressParamsWithContext creates a new FindConfigRPCAddressParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCAddressParamsWithContext(ctx context.Context) *FindConfigRPCAddressParams { + + return &FindConfigRPCAddressParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCAddressParamsWithHTTPClient creates a new FindConfigRPCAddressParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCAddressParamsWithHTTPClient(client *http.Client) *FindConfigRPCAddressParams { + + return &FindConfigRPCAddressParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCAddressParams contains all the parameters to send to the API endpoint +for the find config rpc address operation typically these are written to a http.Request +*/ +type FindConfigRPCAddressParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc address params +func (o *FindConfigRPCAddressParams) WithTimeout(timeout time.Duration) *FindConfigRPCAddressParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc address params +func (o *FindConfigRPCAddressParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc address params +func (o *FindConfigRPCAddressParams) WithContext(ctx context.Context) *FindConfigRPCAddressParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc address params +func (o *FindConfigRPCAddressParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc address params +func (o *FindConfigRPCAddressParams) WithHTTPClient(client *http.Client) *FindConfigRPCAddressParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc address params +func (o *FindConfigRPCAddressParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCAddressParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_address_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_address_responses.go new file mode 100644 index 00000000000..7639bc2174d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_address_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. 
+ +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCAddressReader is a Reader for the FindConfigRPCAddress structure. +type FindConfigRPCAddressReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCAddressReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCAddressOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCAddressDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCAddressOK creates a FindConfigRPCAddressOK with default headers values +func NewFindConfigRPCAddressOK() *FindConfigRPCAddressOK { + return &FindConfigRPCAddressOK{} +} + +/* +FindConfigRPCAddressOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCAddressOK struct { + Payload string +} + +func (o *FindConfigRPCAddressOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigRPCAddressOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCAddressDefault creates a FindConfigRPCAddressDefault with default headers values +func NewFindConfigRPCAddressDefault(code int) *FindConfigRPCAddressDefault { + return &FindConfigRPCAddressDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCAddressDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCAddressDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc address default response +func (o *FindConfigRPCAddressDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCAddressDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCAddressDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCAddressDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_interface_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_interface_parameters.go new file mode 100644 index 00000000000..1e880de257e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_interface_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCInterfaceParams creates a new FindConfigRPCInterfaceParams object +// with the default values initialized. 
+func NewFindConfigRPCInterfaceParams() *FindConfigRPCInterfaceParams { + + return &FindConfigRPCInterfaceParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCInterfaceParamsWithTimeout creates a new FindConfigRPCInterfaceParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCInterfaceParamsWithTimeout(timeout time.Duration) *FindConfigRPCInterfaceParams { + + return &FindConfigRPCInterfaceParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCInterfaceParamsWithContext creates a new FindConfigRPCInterfaceParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCInterfaceParamsWithContext(ctx context.Context) *FindConfigRPCInterfaceParams { + + return &FindConfigRPCInterfaceParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCInterfaceParamsWithHTTPClient creates a new FindConfigRPCInterfaceParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCInterfaceParamsWithHTTPClient(client *http.Client) *FindConfigRPCInterfaceParams { + + return &FindConfigRPCInterfaceParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCInterfaceParams contains all the parameters to send to the API endpoint +for the find config rpc interface operation typically these are written to a http.Request +*/ +type FindConfigRPCInterfaceParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc interface params +func (o *FindConfigRPCInterfaceParams) WithTimeout(timeout time.Duration) *FindConfigRPCInterfaceParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc interface params +func (o *FindConfigRPCInterfaceParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc interface params +func (o *FindConfigRPCInterfaceParams) WithContext(ctx context.Context) *FindConfigRPCInterfaceParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc interface params +func (o *FindConfigRPCInterfaceParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc interface params +func (o *FindConfigRPCInterfaceParams) WithHTTPClient(client *http.Client) *FindConfigRPCInterfaceParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc interface params +func (o *FindConfigRPCInterfaceParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCInterfaceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_interface_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_interface_responses.go new file mode 100644 index 00000000000..58969f608d9 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_interface_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCInterfaceReader is a Reader for the FindConfigRPCInterface structure. +type FindConfigRPCInterfaceReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCInterfaceReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCInterfaceOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCInterfaceDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCInterfaceOK creates a FindConfigRPCInterfaceOK with default headers values +func NewFindConfigRPCInterfaceOK() *FindConfigRPCInterfaceOK { + return &FindConfigRPCInterfaceOK{} +} + +/* +FindConfigRPCInterfaceOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCInterfaceOK struct { + Payload string +} + +func (o *FindConfigRPCInterfaceOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigRPCInterfaceOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCInterfaceDefault creates a FindConfigRPCInterfaceDefault with default headers values +func NewFindConfigRPCInterfaceDefault(code int) *FindConfigRPCInterfaceDefault { + return &FindConfigRPCInterfaceDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCInterfaceDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCInterfaceDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc interface default response +func (o *FindConfigRPCInterfaceDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCInterfaceDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCInterfaceDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCInterfaceDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_keepalive_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_keepalive_parameters.go new file mode 100644 index 00000000000..ad6da430947 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_keepalive_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCKeepaliveParams creates a new FindConfigRPCKeepaliveParams object +// with the default values initialized. 
+func NewFindConfigRPCKeepaliveParams() *FindConfigRPCKeepaliveParams { + + return &FindConfigRPCKeepaliveParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCKeepaliveParamsWithTimeout creates a new FindConfigRPCKeepaliveParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCKeepaliveParamsWithTimeout(timeout time.Duration) *FindConfigRPCKeepaliveParams { + + return &FindConfigRPCKeepaliveParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCKeepaliveParamsWithContext creates a new FindConfigRPCKeepaliveParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCKeepaliveParamsWithContext(ctx context.Context) *FindConfigRPCKeepaliveParams { + + return &FindConfigRPCKeepaliveParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCKeepaliveParamsWithHTTPClient creates a new FindConfigRPCKeepaliveParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCKeepaliveParamsWithHTTPClient(client *http.Client) *FindConfigRPCKeepaliveParams { + + return &FindConfigRPCKeepaliveParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCKeepaliveParams contains all the parameters to send to the API endpoint +for the find config rpc keepalive operation typically these are written to a http.Request +*/ +type FindConfigRPCKeepaliveParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc keepalive params +func (o *FindConfigRPCKeepaliveParams) WithTimeout(timeout time.Duration) *FindConfigRPCKeepaliveParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc keepalive params +func (o *FindConfigRPCKeepaliveParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc keepalive params +func (o *FindConfigRPCKeepaliveParams) WithContext(ctx context.Context) *FindConfigRPCKeepaliveParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc keepalive params +func (o *FindConfigRPCKeepaliveParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc keepalive params +func (o *FindConfigRPCKeepaliveParams) WithHTTPClient(client *http.Client) *FindConfigRPCKeepaliveParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc keepalive params +func (o *FindConfigRPCKeepaliveParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCKeepaliveParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_keepalive_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_keepalive_responses.go new file mode 100644 index 00000000000..6f96c48e9ad --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_keepalive_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCKeepaliveReader is a Reader for the FindConfigRPCKeepalive structure. +type FindConfigRPCKeepaliveReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCKeepaliveReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCKeepaliveOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCKeepaliveDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCKeepaliveOK creates a FindConfigRPCKeepaliveOK with default headers values +func NewFindConfigRPCKeepaliveOK() *FindConfigRPCKeepaliveOK { + return &FindConfigRPCKeepaliveOK{} +} + +/* +FindConfigRPCKeepaliveOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCKeepaliveOK struct { + Payload bool +} + +func (o *FindConfigRPCKeepaliveOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigRPCKeepaliveOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCKeepaliveDefault creates a FindConfigRPCKeepaliveDefault with default headers values +func NewFindConfigRPCKeepaliveDefault(code int) *FindConfigRPCKeepaliveDefault { + return &FindConfigRPCKeepaliveDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCKeepaliveDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCKeepaliveDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc keepalive default response +func (o *FindConfigRPCKeepaliveDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCKeepaliveDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCKeepaliveDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCKeepaliveDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_max_threads_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_max_threads_parameters.go new file mode 100644 index 00000000000..aa9f057c1c2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_max_threads_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCMaxThreadsParams creates a new FindConfigRPCMaxThreadsParams object +// with the default values initialized. 
+func NewFindConfigRPCMaxThreadsParams() *FindConfigRPCMaxThreadsParams { + + return &FindConfigRPCMaxThreadsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCMaxThreadsParamsWithTimeout creates a new FindConfigRPCMaxThreadsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCMaxThreadsParamsWithTimeout(timeout time.Duration) *FindConfigRPCMaxThreadsParams { + + return &FindConfigRPCMaxThreadsParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCMaxThreadsParamsWithContext creates a new FindConfigRPCMaxThreadsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCMaxThreadsParamsWithContext(ctx context.Context) *FindConfigRPCMaxThreadsParams { + + return &FindConfigRPCMaxThreadsParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCMaxThreadsParamsWithHTTPClient creates a new FindConfigRPCMaxThreadsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCMaxThreadsParamsWithHTTPClient(client *http.Client) *FindConfigRPCMaxThreadsParams { + + return &FindConfigRPCMaxThreadsParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCMaxThreadsParams contains all the parameters to send to the API endpoint +for the find config rpc max threads operation typically these are written to a http.Request +*/ +type FindConfigRPCMaxThreadsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc max threads params +func (o *FindConfigRPCMaxThreadsParams) WithTimeout(timeout time.Duration) *FindConfigRPCMaxThreadsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc max threads params +func (o *FindConfigRPCMaxThreadsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc max threads params +func (o *FindConfigRPCMaxThreadsParams) WithContext(ctx context.Context) *FindConfigRPCMaxThreadsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc max threads params +func (o *FindConfigRPCMaxThreadsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc max threads params +func (o *FindConfigRPCMaxThreadsParams) WithHTTPClient(client *http.Client) *FindConfigRPCMaxThreadsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc max threads params +func (o *FindConfigRPCMaxThreadsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCMaxThreadsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_max_threads_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_max_threads_responses.go new file mode 100644 index 00000000000..3c051d36aa8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_max_threads_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCMaxThreadsReader is a Reader for the FindConfigRPCMaxThreads structure. +type FindConfigRPCMaxThreadsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCMaxThreadsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCMaxThreadsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCMaxThreadsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCMaxThreadsOK creates a FindConfigRPCMaxThreadsOK with default headers values +func NewFindConfigRPCMaxThreadsOK() *FindConfigRPCMaxThreadsOK { + return &FindConfigRPCMaxThreadsOK{} +} + +/* +FindConfigRPCMaxThreadsOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCMaxThreadsOK struct { + Payload int64 +} + +func (o *FindConfigRPCMaxThreadsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRPCMaxThreadsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCMaxThreadsDefault creates a FindConfigRPCMaxThreadsDefault with default headers values +func NewFindConfigRPCMaxThreadsDefault(code int) *FindConfigRPCMaxThreadsDefault { + return &FindConfigRPCMaxThreadsDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCMaxThreadsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCMaxThreadsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc max threads default response +func (o *FindConfigRPCMaxThreadsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCMaxThreadsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCMaxThreadsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCMaxThreadsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_min_threads_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_min_threads_parameters.go new file mode 100644 index 00000000000..a961eb02262 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_min_threads_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCMinThreadsParams creates a new FindConfigRPCMinThreadsParams object +// with the default values initialized. 
+func NewFindConfigRPCMinThreadsParams() *FindConfigRPCMinThreadsParams { + + return &FindConfigRPCMinThreadsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCMinThreadsParamsWithTimeout creates a new FindConfigRPCMinThreadsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCMinThreadsParamsWithTimeout(timeout time.Duration) *FindConfigRPCMinThreadsParams { + + return &FindConfigRPCMinThreadsParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCMinThreadsParamsWithContext creates a new FindConfigRPCMinThreadsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCMinThreadsParamsWithContext(ctx context.Context) *FindConfigRPCMinThreadsParams { + + return &FindConfigRPCMinThreadsParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCMinThreadsParamsWithHTTPClient creates a new FindConfigRPCMinThreadsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCMinThreadsParamsWithHTTPClient(client *http.Client) *FindConfigRPCMinThreadsParams { + + return &FindConfigRPCMinThreadsParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCMinThreadsParams contains all the parameters to send to the API endpoint +for the find config rpc min threads operation typically these are written to a http.Request +*/ +type FindConfigRPCMinThreadsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc min threads params +func (o *FindConfigRPCMinThreadsParams) WithTimeout(timeout time.Duration) *FindConfigRPCMinThreadsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc min threads params +func (o *FindConfigRPCMinThreadsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc min threads params +func (o *FindConfigRPCMinThreadsParams) WithContext(ctx context.Context) *FindConfigRPCMinThreadsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc min threads params +func (o *FindConfigRPCMinThreadsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc min threads params +func (o *FindConfigRPCMinThreadsParams) WithHTTPClient(client *http.Client) *FindConfigRPCMinThreadsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc min threads params +func (o *FindConfigRPCMinThreadsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCMinThreadsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_min_threads_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_min_threads_responses.go new file mode 100644 index 00000000000..6e4641516d0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_min_threads_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCMinThreadsReader is a Reader for the FindConfigRPCMinThreads structure. +type FindConfigRPCMinThreadsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCMinThreadsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCMinThreadsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCMinThreadsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCMinThreadsOK creates a FindConfigRPCMinThreadsOK with default headers values +func NewFindConfigRPCMinThreadsOK() *FindConfigRPCMinThreadsOK { + return &FindConfigRPCMinThreadsOK{} +} + +/* +FindConfigRPCMinThreadsOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCMinThreadsOK struct { + Payload int64 +} + +func (o *FindConfigRPCMinThreadsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRPCMinThreadsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCMinThreadsDefault creates a FindConfigRPCMinThreadsDefault with default headers values +func NewFindConfigRPCMinThreadsDefault(code int) *FindConfigRPCMinThreadsDefault { + return &FindConfigRPCMinThreadsDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCMinThreadsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCMinThreadsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc min threads default response +func (o *FindConfigRPCMinThreadsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCMinThreadsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCMinThreadsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCMinThreadsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_port_parameters.go new file mode 100644 index 00000000000..35c1e54fb2d --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCPortParams creates a new FindConfigRPCPortParams object +// with the default values initialized. 
+func NewFindConfigRPCPortParams() *FindConfigRPCPortParams { + + return &FindConfigRPCPortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCPortParamsWithTimeout creates a new FindConfigRPCPortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCPortParamsWithTimeout(timeout time.Duration) *FindConfigRPCPortParams { + + return &FindConfigRPCPortParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCPortParamsWithContext creates a new FindConfigRPCPortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCPortParamsWithContext(ctx context.Context) *FindConfigRPCPortParams { + + return &FindConfigRPCPortParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCPortParamsWithHTTPClient creates a new FindConfigRPCPortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCPortParamsWithHTTPClient(client *http.Client) *FindConfigRPCPortParams { + + return &FindConfigRPCPortParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCPortParams contains all the parameters to send to the API endpoint +for the find config rpc port operation typically these are written to a http.Request +*/ +type FindConfigRPCPortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc port params +func (o *FindConfigRPCPortParams) WithTimeout(timeout time.Duration) *FindConfigRPCPortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc port params +func (o *FindConfigRPCPortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc port params +func (o *FindConfigRPCPortParams) WithContext(ctx context.Context) *FindConfigRPCPortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc port params +func (o *FindConfigRPCPortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc port params +func (o *FindConfigRPCPortParams) WithHTTPClient(client *http.Client) *FindConfigRPCPortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc port params +func (o *FindConfigRPCPortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCPortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_port_responses.go new file mode 100644 index 00000000000..040f6daa920 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCPortReader is a Reader for the FindConfigRPCPort structure. +type FindConfigRPCPortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCPortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCPortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCPortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCPortOK creates a FindConfigRPCPortOK with default headers values +func NewFindConfigRPCPortOK() *FindConfigRPCPortOK { + return &FindConfigRPCPortOK{} +} + +/* +FindConfigRPCPortOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCPortOK struct { + Payload int64 +} + +func (o *FindConfigRPCPortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRPCPortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCPortDefault creates a FindConfigRPCPortDefault with default headers values +func NewFindConfigRPCPortDefault(code int) *FindConfigRPCPortDefault { + return &FindConfigRPCPortDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCPortDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigRPCPortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc port default response +func (o *FindConfigRPCPortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCPortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCPortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCPortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_recv_buff_size_in_bytes_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_recv_buff_size_in_bytes_parameters.go new file mode 100644 index 00000000000..b5fa566d561 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_recv_buff_size_in_bytes_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCRecvBuffSizeInBytesParams creates a new FindConfigRPCRecvBuffSizeInBytesParams object +// with the default values initialized. +func NewFindConfigRPCRecvBuffSizeInBytesParams() *FindConfigRPCRecvBuffSizeInBytesParams { + + return &FindConfigRPCRecvBuffSizeInBytesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCRecvBuffSizeInBytesParamsWithTimeout creates a new FindConfigRPCRecvBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCRecvBuffSizeInBytesParamsWithTimeout(timeout time.Duration) *FindConfigRPCRecvBuffSizeInBytesParams { + + return &FindConfigRPCRecvBuffSizeInBytesParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCRecvBuffSizeInBytesParamsWithContext creates a new FindConfigRPCRecvBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCRecvBuffSizeInBytesParamsWithContext(ctx context.Context) *FindConfigRPCRecvBuffSizeInBytesParams { + + return &FindConfigRPCRecvBuffSizeInBytesParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCRecvBuffSizeInBytesParamsWithHTTPClient creates a new FindConfigRPCRecvBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCRecvBuffSizeInBytesParamsWithHTTPClient(client *http.Client) *FindConfigRPCRecvBuffSizeInBytesParams { + + return &FindConfigRPCRecvBuffSizeInBytesParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCRecvBuffSizeInBytesParams contains all the parameters to send to the API endpoint +for the find config rpc recv buff size in bytes operation typically these are written to a http.Request +*/ +type FindConfigRPCRecvBuffSizeInBytesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc recv buff size in bytes params +func (o *FindConfigRPCRecvBuffSizeInBytesParams) WithTimeout(timeout time.Duration) *FindConfigRPCRecvBuffSizeInBytesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc recv buff size in bytes params +func (o *FindConfigRPCRecvBuffSizeInBytesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc recv buff size in bytes params +func (o *FindConfigRPCRecvBuffSizeInBytesParams) WithContext(ctx context.Context) *FindConfigRPCRecvBuffSizeInBytesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc recv buff size in bytes params +func (o *FindConfigRPCRecvBuffSizeInBytesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc recv buff size in bytes params +func (o *FindConfigRPCRecvBuffSizeInBytesParams) WithHTTPClient(client *http.Client) *FindConfigRPCRecvBuffSizeInBytesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc recv buff size in bytes params +func (o *FindConfigRPCRecvBuffSizeInBytesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = 
client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCRecvBuffSizeInBytesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_recv_buff_size_in_bytes_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_recv_buff_size_in_bytes_responses.go new file mode 100644 index 00000000000..fa3ce31b815 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_recv_buff_size_in_bytes_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCRecvBuffSizeInBytesReader is a Reader for the FindConfigRPCRecvBuffSizeInBytes structure. +type FindConfigRPCRecvBuffSizeInBytesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCRecvBuffSizeInBytesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCRecvBuffSizeInBytesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCRecvBuffSizeInBytesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCRecvBuffSizeInBytesOK creates a FindConfigRPCRecvBuffSizeInBytesOK with default headers values +func NewFindConfigRPCRecvBuffSizeInBytesOK() *FindConfigRPCRecvBuffSizeInBytesOK { + return &FindConfigRPCRecvBuffSizeInBytesOK{} +} + +/* +FindConfigRPCRecvBuffSizeInBytesOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCRecvBuffSizeInBytesOK struct { + Payload int64 +} + +func (o *FindConfigRPCRecvBuffSizeInBytesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRPCRecvBuffSizeInBytesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCRecvBuffSizeInBytesDefault creates a FindConfigRPCRecvBuffSizeInBytesDefault with default headers values +func NewFindConfigRPCRecvBuffSizeInBytesDefault(code int) *FindConfigRPCRecvBuffSizeInBytesDefault { + return &FindConfigRPCRecvBuffSizeInBytesDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCRecvBuffSizeInBytesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCRecvBuffSizeInBytesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc recv buff size in bytes default response +func (o *FindConfigRPCRecvBuffSizeInBytesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCRecvBuffSizeInBytesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCRecvBuffSizeInBytesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCRecvBuffSizeInBytesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_send_buff_size_in_bytes_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_send_buff_size_in_bytes_parameters.go new file mode 100644 index 00000000000..1b4d0a1a9c8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_send_buff_size_in_bytes_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCSendBuffSizeInBytesParams creates a new FindConfigRPCSendBuffSizeInBytesParams object +// with the default values initialized. 
+func NewFindConfigRPCSendBuffSizeInBytesParams() *FindConfigRPCSendBuffSizeInBytesParams { + + return &FindConfigRPCSendBuffSizeInBytesParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCSendBuffSizeInBytesParamsWithTimeout creates a new FindConfigRPCSendBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCSendBuffSizeInBytesParamsWithTimeout(timeout time.Duration) *FindConfigRPCSendBuffSizeInBytesParams { + + return &FindConfigRPCSendBuffSizeInBytesParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCSendBuffSizeInBytesParamsWithContext creates a new FindConfigRPCSendBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCSendBuffSizeInBytesParamsWithContext(ctx context.Context) *FindConfigRPCSendBuffSizeInBytesParams { + + return &FindConfigRPCSendBuffSizeInBytesParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCSendBuffSizeInBytesParamsWithHTTPClient creates a new FindConfigRPCSendBuffSizeInBytesParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCSendBuffSizeInBytesParamsWithHTTPClient(client *http.Client) *FindConfigRPCSendBuffSizeInBytesParams { + + return &FindConfigRPCSendBuffSizeInBytesParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCSendBuffSizeInBytesParams contains all the parameters to send to the API endpoint +for the find config rpc send buff size in bytes operation typically these are written to a http.Request +*/ +type FindConfigRPCSendBuffSizeInBytesParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc send buff size in bytes params +func (o *FindConfigRPCSendBuffSizeInBytesParams) WithTimeout(timeout time.Duration) *FindConfigRPCSendBuffSizeInBytesParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc send buff size in bytes params +func (o *FindConfigRPCSendBuffSizeInBytesParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc send buff size in bytes params +func (o *FindConfigRPCSendBuffSizeInBytesParams) WithContext(ctx context.Context) *FindConfigRPCSendBuffSizeInBytesParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc send buff size in bytes params +func (o *FindConfigRPCSendBuffSizeInBytesParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc send buff size in bytes params +func (o *FindConfigRPCSendBuffSizeInBytesParams) WithHTTPClient(client *http.Client) *FindConfigRPCSendBuffSizeInBytesParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc send buff size in bytes params +func (o *FindConfigRPCSendBuffSizeInBytesParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCSendBuffSizeInBytesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_send_buff_size_in_bytes_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_send_buff_size_in_bytes_responses.go new file mode 100644 index 00000000000..6f046d7f0e1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_send_buff_size_in_bytes_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCSendBuffSizeInBytesReader is a Reader for the FindConfigRPCSendBuffSizeInBytes structure. +type FindConfigRPCSendBuffSizeInBytesReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCSendBuffSizeInBytesReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCSendBuffSizeInBytesOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCSendBuffSizeInBytesDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCSendBuffSizeInBytesOK creates a FindConfigRPCSendBuffSizeInBytesOK with default headers values +func NewFindConfigRPCSendBuffSizeInBytesOK() *FindConfigRPCSendBuffSizeInBytesOK { + return &FindConfigRPCSendBuffSizeInBytesOK{} +} + +/* +FindConfigRPCSendBuffSizeInBytesOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCSendBuffSizeInBytesOK struct { + Payload int64 +} + +func (o *FindConfigRPCSendBuffSizeInBytesOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigRPCSendBuffSizeInBytesOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCSendBuffSizeInBytesDefault creates a FindConfigRPCSendBuffSizeInBytesDefault with default headers values +func NewFindConfigRPCSendBuffSizeInBytesDefault(code int) *FindConfigRPCSendBuffSizeInBytesDefault { + return &FindConfigRPCSendBuffSizeInBytesDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCSendBuffSizeInBytesDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCSendBuffSizeInBytesDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc send buff size in bytes default response +func (o *FindConfigRPCSendBuffSizeInBytesDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCSendBuffSizeInBytesDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCSendBuffSizeInBytesDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCSendBuffSizeInBytesDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_server_type_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_server_type_parameters.go new file mode 100644 index 00000000000..bb6d1ae559f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_server_type_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigRPCServerTypeParams creates a new FindConfigRPCServerTypeParams object +// with the default values initialized. 
+func NewFindConfigRPCServerTypeParams() *FindConfigRPCServerTypeParams { + + return &FindConfigRPCServerTypeParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigRPCServerTypeParamsWithTimeout creates a new FindConfigRPCServerTypeParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigRPCServerTypeParamsWithTimeout(timeout time.Duration) *FindConfigRPCServerTypeParams { + + return &FindConfigRPCServerTypeParams{ + + timeout: timeout, + } +} + +// NewFindConfigRPCServerTypeParamsWithContext creates a new FindConfigRPCServerTypeParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigRPCServerTypeParamsWithContext(ctx context.Context) *FindConfigRPCServerTypeParams { + + return &FindConfigRPCServerTypeParams{ + + Context: ctx, + } +} + +// NewFindConfigRPCServerTypeParamsWithHTTPClient creates a new FindConfigRPCServerTypeParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigRPCServerTypeParamsWithHTTPClient(client *http.Client) *FindConfigRPCServerTypeParams { + + return &FindConfigRPCServerTypeParams{ + HTTPClient: client, + } +} + +/* +FindConfigRPCServerTypeParams contains all the parameters to send to the API endpoint +for the find config rpc server type operation typically these are written to a http.Request +*/ +type FindConfigRPCServerTypeParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config rpc server type params +func (o *FindConfigRPCServerTypeParams) WithTimeout(timeout time.Duration) *FindConfigRPCServerTypeParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config rpc server type params +func (o *FindConfigRPCServerTypeParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config rpc server type params +func (o *FindConfigRPCServerTypeParams) WithContext(ctx context.Context) *FindConfigRPCServerTypeParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config rpc server type params +func (o *FindConfigRPCServerTypeParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config rpc server type params +func (o *FindConfigRPCServerTypeParams) WithHTTPClient(client *http.Client) *FindConfigRPCServerTypeParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config rpc server type params +func (o *FindConfigRPCServerTypeParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigRPCServerTypeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_server_type_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_server_type_responses.go new file mode 100644 index 00000000000..0f645edfd15 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_rpc_server_type_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigRPCServerTypeReader is a Reader for the FindConfigRPCServerType structure. +type FindConfigRPCServerTypeReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigRPCServerTypeReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigRPCServerTypeOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigRPCServerTypeDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigRPCServerTypeOK creates a FindConfigRPCServerTypeOK with default headers values +func NewFindConfigRPCServerTypeOK() *FindConfigRPCServerTypeOK { + return &FindConfigRPCServerTypeOK{} +} + +/* +FindConfigRPCServerTypeOK handles this case with default header values. + +Config value +*/ +type FindConfigRPCServerTypeOK struct { + Payload string +} + +func (o *FindConfigRPCServerTypeOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigRPCServerTypeOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigRPCServerTypeDefault creates a FindConfigRPCServerTypeDefault with default headers values +func NewFindConfigRPCServerTypeDefault(code int) *FindConfigRPCServerTypeDefault { + return &FindConfigRPCServerTypeDefault{ + _statusCode: code, + } +} + +/* +FindConfigRPCServerTypeDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigRPCServerTypeDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config rpc server type default response +func (o *FindConfigRPCServerTypeDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigRPCServerTypeDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigRPCServerTypeDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigRPCServerTypeDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_saved_caches_directory_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_saved_caches_directory_parameters.go new file mode 100644 index 00000000000..4fcb8237f03 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_saved_caches_directory_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigSavedCachesDirectoryParams creates a new FindConfigSavedCachesDirectoryParams object +// with the default values initialized. 
+func NewFindConfigSavedCachesDirectoryParams() *FindConfigSavedCachesDirectoryParams { + + return &FindConfigSavedCachesDirectoryParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigSavedCachesDirectoryParamsWithTimeout creates a new FindConfigSavedCachesDirectoryParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigSavedCachesDirectoryParamsWithTimeout(timeout time.Duration) *FindConfigSavedCachesDirectoryParams { + + return &FindConfigSavedCachesDirectoryParams{ + + timeout: timeout, + } +} + +// NewFindConfigSavedCachesDirectoryParamsWithContext creates a new FindConfigSavedCachesDirectoryParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigSavedCachesDirectoryParamsWithContext(ctx context.Context) *FindConfigSavedCachesDirectoryParams { + + return &FindConfigSavedCachesDirectoryParams{ + + Context: ctx, + } +} + +// NewFindConfigSavedCachesDirectoryParamsWithHTTPClient creates a new FindConfigSavedCachesDirectoryParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigSavedCachesDirectoryParamsWithHTTPClient(client *http.Client) *FindConfigSavedCachesDirectoryParams { + + return &FindConfigSavedCachesDirectoryParams{ + HTTPClient: client, + } +} + +/* +FindConfigSavedCachesDirectoryParams contains all the parameters to send to the API endpoint +for the find config saved caches directory operation typically these are written to a http.Request +*/ +type FindConfigSavedCachesDirectoryParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config saved caches directory params +func (o *FindConfigSavedCachesDirectoryParams) WithTimeout(timeout time.Duration) *FindConfigSavedCachesDirectoryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config saved caches directory params +func (o *FindConfigSavedCachesDirectoryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config saved caches directory params +func (o *FindConfigSavedCachesDirectoryParams) WithContext(ctx context.Context) *FindConfigSavedCachesDirectoryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config saved caches directory params +func (o *FindConfigSavedCachesDirectoryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config saved caches directory params +func (o *FindConfigSavedCachesDirectoryParams) WithHTTPClient(client *http.Client) *FindConfigSavedCachesDirectoryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config saved caches directory params +func (o *FindConfigSavedCachesDirectoryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigSavedCachesDirectoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_saved_caches_directory_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_saved_caches_directory_responses.go new file mode 100644 index 00000000000..6aa3b621dcc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_saved_caches_directory_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigSavedCachesDirectoryReader is a Reader for the FindConfigSavedCachesDirectory structure. +type FindConfigSavedCachesDirectoryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigSavedCachesDirectoryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigSavedCachesDirectoryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigSavedCachesDirectoryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigSavedCachesDirectoryOK creates a FindConfigSavedCachesDirectoryOK with default headers values +func NewFindConfigSavedCachesDirectoryOK() *FindConfigSavedCachesDirectoryOK { + return &FindConfigSavedCachesDirectoryOK{} +} + +/* +FindConfigSavedCachesDirectoryOK handles this case with default header values. + +Config value +*/ +type FindConfigSavedCachesDirectoryOK struct { + Payload string +} + +func (o *FindConfigSavedCachesDirectoryOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigSavedCachesDirectoryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigSavedCachesDirectoryDefault creates a FindConfigSavedCachesDirectoryDefault with default headers values +func NewFindConfigSavedCachesDirectoryDefault(code int) *FindConfigSavedCachesDirectoryDefault { + return &FindConfigSavedCachesDirectoryDefault{ + _statusCode: code, + } +} + +/* +FindConfigSavedCachesDirectoryDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigSavedCachesDirectoryDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config saved caches directory default response +func (o *FindConfigSavedCachesDirectoryDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigSavedCachesDirectoryDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigSavedCachesDirectoryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigSavedCachesDirectoryDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_seed_provider_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_seed_provider_parameters.go new file mode 100644 index 00000000000..fda3b50125e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_seed_provider_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigSeedProviderParams creates a new FindConfigSeedProviderParams object +// with the default values initialized. 
+func NewFindConfigSeedProviderParams() *FindConfigSeedProviderParams { + + return &FindConfigSeedProviderParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigSeedProviderParamsWithTimeout creates a new FindConfigSeedProviderParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigSeedProviderParamsWithTimeout(timeout time.Duration) *FindConfigSeedProviderParams { + + return &FindConfigSeedProviderParams{ + + timeout: timeout, + } +} + +// NewFindConfigSeedProviderParamsWithContext creates a new FindConfigSeedProviderParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigSeedProviderParamsWithContext(ctx context.Context) *FindConfigSeedProviderParams { + + return &FindConfigSeedProviderParams{ + + Context: ctx, + } +} + +// NewFindConfigSeedProviderParamsWithHTTPClient creates a new FindConfigSeedProviderParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigSeedProviderParamsWithHTTPClient(client *http.Client) *FindConfigSeedProviderParams { + + return &FindConfigSeedProviderParams{ + HTTPClient: client, + } +} + +/* +FindConfigSeedProviderParams contains all the parameters to send to the API endpoint +for the find config seed provider operation typically these are written to a http.Request +*/ +type FindConfigSeedProviderParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config seed provider params +func (o *FindConfigSeedProviderParams) WithTimeout(timeout time.Duration) *FindConfigSeedProviderParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config seed provider params +func (o *FindConfigSeedProviderParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config seed provider params +func (o *FindConfigSeedProviderParams) WithContext(ctx context.Context) *FindConfigSeedProviderParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config seed provider params +func (o *FindConfigSeedProviderParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config seed provider params +func (o *FindConfigSeedProviderParams) WithHTTPClient(client *http.Client) *FindConfigSeedProviderParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config seed provider params +func (o *FindConfigSeedProviderParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigSeedProviderParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_seed_provider_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_seed_provider_responses.go new file mode 100644 index 00000000000..a7b4e976e96 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_seed_provider_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigSeedProviderReader is a Reader for the FindConfigSeedProvider structure. +type FindConfigSeedProviderReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigSeedProviderReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigSeedProviderOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigSeedProviderDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigSeedProviderOK creates a FindConfigSeedProviderOK with default headers values +func NewFindConfigSeedProviderOK() *FindConfigSeedProviderOK { + return &FindConfigSeedProviderOK{} +} + +/* +FindConfigSeedProviderOK handles this case with default header values. + +Config value +*/ +type FindConfigSeedProviderOK struct { + Payload []string +} + +func (o *FindConfigSeedProviderOK) GetPayload() []string { + return o.Payload +} + +func (o *FindConfigSeedProviderOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigSeedProviderDefault creates a FindConfigSeedProviderDefault with default headers values +func NewFindConfigSeedProviderDefault(code int) *FindConfigSeedProviderDefault { + return &FindConfigSeedProviderDefault{ + _statusCode: code, + } +} + +/* +FindConfigSeedProviderDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigSeedProviderDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config seed provider default response +func (o *FindConfigSeedProviderDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigSeedProviderDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigSeedProviderDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigSeedProviderDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_server_encryption_options_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_server_encryption_options_parameters.go new file mode 100644 index 00000000000..5104649929b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_server_encryption_options_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigServerEncryptionOptionsParams creates a new FindConfigServerEncryptionOptionsParams object +// with the default values initialized. 
+func NewFindConfigServerEncryptionOptionsParams() *FindConfigServerEncryptionOptionsParams { + + return &FindConfigServerEncryptionOptionsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigServerEncryptionOptionsParamsWithTimeout creates a new FindConfigServerEncryptionOptionsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigServerEncryptionOptionsParamsWithTimeout(timeout time.Duration) *FindConfigServerEncryptionOptionsParams { + + return &FindConfigServerEncryptionOptionsParams{ + + timeout: timeout, + } +} + +// NewFindConfigServerEncryptionOptionsParamsWithContext creates a new FindConfigServerEncryptionOptionsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigServerEncryptionOptionsParamsWithContext(ctx context.Context) *FindConfigServerEncryptionOptionsParams { + + return &FindConfigServerEncryptionOptionsParams{ + + Context: ctx, + } +} + +// NewFindConfigServerEncryptionOptionsParamsWithHTTPClient creates a new FindConfigServerEncryptionOptionsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigServerEncryptionOptionsParamsWithHTTPClient(client *http.Client) *FindConfigServerEncryptionOptionsParams { + + return &FindConfigServerEncryptionOptionsParams{ + HTTPClient: client, + } +} + +/* +FindConfigServerEncryptionOptionsParams contains all the parameters to send to the API endpoint +for the find config server encryption options operation typically these are written to a http.Request +*/ +type FindConfigServerEncryptionOptionsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config server encryption options params +func (o *FindConfigServerEncryptionOptionsParams) WithTimeout(timeout time.Duration) *FindConfigServerEncryptionOptionsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config server encryption options params +func (o *FindConfigServerEncryptionOptionsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config server encryption options params +func (o *FindConfigServerEncryptionOptionsParams) WithContext(ctx context.Context) *FindConfigServerEncryptionOptionsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config server encryption options params +func (o *FindConfigServerEncryptionOptionsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config server encryption options params +func (o *FindConfigServerEncryptionOptionsParams) WithHTTPClient(client *http.Client) *FindConfigServerEncryptionOptionsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config server encryption options params +func (o *FindConfigServerEncryptionOptionsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigServerEncryptionOptionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_server_encryption_options_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_server_encryption_options_responses.go new file mode 100644 index 00000000000..4e3022be844 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_server_encryption_options_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigServerEncryptionOptionsReader is a Reader for the FindConfigServerEncryptionOptions structure. +type FindConfigServerEncryptionOptionsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigServerEncryptionOptionsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigServerEncryptionOptionsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigServerEncryptionOptionsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigServerEncryptionOptionsOK creates a FindConfigServerEncryptionOptionsOK with default headers values +func NewFindConfigServerEncryptionOptionsOK() *FindConfigServerEncryptionOptionsOK { + return &FindConfigServerEncryptionOptionsOK{} +} + +/* +FindConfigServerEncryptionOptionsOK handles this case with default header values. + +Config value +*/ +type FindConfigServerEncryptionOptionsOK struct { + Payload []string +} + +func (o *FindConfigServerEncryptionOptionsOK) GetPayload() []string { + return o.Payload +} + +func (o *FindConfigServerEncryptionOptionsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigServerEncryptionOptionsDefault creates a FindConfigServerEncryptionOptionsDefault with default headers values +func NewFindConfigServerEncryptionOptionsDefault(code int) *FindConfigServerEncryptionOptionsDefault { + return &FindConfigServerEncryptionOptionsDefault{ + _statusCode: code, + } +} + +/* +FindConfigServerEncryptionOptionsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigServerEncryptionOptionsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config server encryption options default response +func (o *FindConfigServerEncryptionOptionsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigServerEncryptionOptionsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigServerEncryptionOptionsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigServerEncryptionOptionsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shadow_round_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shadow_round_ms_parameters.go new file mode 100644 index 00000000000..d5867710442 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shadow_round_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigShadowRoundMsParams creates a new FindConfigShadowRoundMsParams object +// with the default values initialized. 
+func NewFindConfigShadowRoundMsParams() *FindConfigShadowRoundMsParams { + + return &FindConfigShadowRoundMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigShadowRoundMsParamsWithTimeout creates a new FindConfigShadowRoundMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigShadowRoundMsParamsWithTimeout(timeout time.Duration) *FindConfigShadowRoundMsParams { + + return &FindConfigShadowRoundMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigShadowRoundMsParamsWithContext creates a new FindConfigShadowRoundMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigShadowRoundMsParamsWithContext(ctx context.Context) *FindConfigShadowRoundMsParams { + + return &FindConfigShadowRoundMsParams{ + + Context: ctx, + } +} + +// NewFindConfigShadowRoundMsParamsWithHTTPClient creates a new FindConfigShadowRoundMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigShadowRoundMsParamsWithHTTPClient(client *http.Client) *FindConfigShadowRoundMsParams { + + return &FindConfigShadowRoundMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigShadowRoundMsParams contains all the parameters to send to the API endpoint +for the find config shadow round ms operation typically these are written to a http.Request +*/ +type FindConfigShadowRoundMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config shadow round ms params +func (o *FindConfigShadowRoundMsParams) WithTimeout(timeout time.Duration) *FindConfigShadowRoundMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config shadow round ms params +func (o *FindConfigShadowRoundMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config shadow round ms params +func (o *FindConfigShadowRoundMsParams) WithContext(ctx context.Context) *FindConfigShadowRoundMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config shadow round ms params +func (o *FindConfigShadowRoundMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config shadow round ms params +func (o *FindConfigShadowRoundMsParams) WithHTTPClient(client *http.Client) *FindConfigShadowRoundMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config shadow round ms params +func (o *FindConfigShadowRoundMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigShadowRoundMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shadow_round_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shadow_round_ms_responses.go new file mode 100644 index 00000000000..92a4613c265 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shadow_round_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigShadowRoundMsReader is a Reader for the FindConfigShadowRoundMs structure. +type FindConfigShadowRoundMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigShadowRoundMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigShadowRoundMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigShadowRoundMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigShadowRoundMsOK creates a FindConfigShadowRoundMsOK with default headers values +func NewFindConfigShadowRoundMsOK() *FindConfigShadowRoundMsOK { + return &FindConfigShadowRoundMsOK{} +} + +/* +FindConfigShadowRoundMsOK handles this case with default header values. + +Config value +*/ +type FindConfigShadowRoundMsOK struct { + Payload int64 +} + +func (o *FindConfigShadowRoundMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigShadowRoundMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigShadowRoundMsDefault creates a FindConfigShadowRoundMsDefault with default headers values +func NewFindConfigShadowRoundMsDefault(code int) *FindConfigShadowRoundMsDefault { + return &FindConfigShadowRoundMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigShadowRoundMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigShadowRoundMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config shadow round ms default response +func (o *FindConfigShadowRoundMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigShadowRoundMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigShadowRoundMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigShadowRoundMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shutdown_announce_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shutdown_announce_in_ms_parameters.go new file mode 100644 index 00000000000..510f7a41b96 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shutdown_announce_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigShutdownAnnounceInMsParams creates a new FindConfigShutdownAnnounceInMsParams object +// with the default values initialized. 
+func NewFindConfigShutdownAnnounceInMsParams() *FindConfigShutdownAnnounceInMsParams { + + return &FindConfigShutdownAnnounceInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigShutdownAnnounceInMsParamsWithTimeout creates a new FindConfigShutdownAnnounceInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigShutdownAnnounceInMsParamsWithTimeout(timeout time.Duration) *FindConfigShutdownAnnounceInMsParams { + + return &FindConfigShutdownAnnounceInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigShutdownAnnounceInMsParamsWithContext creates a new FindConfigShutdownAnnounceInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigShutdownAnnounceInMsParamsWithContext(ctx context.Context) *FindConfigShutdownAnnounceInMsParams { + + return &FindConfigShutdownAnnounceInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigShutdownAnnounceInMsParamsWithHTTPClient creates a new FindConfigShutdownAnnounceInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigShutdownAnnounceInMsParamsWithHTTPClient(client *http.Client) *FindConfigShutdownAnnounceInMsParams { + + return &FindConfigShutdownAnnounceInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigShutdownAnnounceInMsParams contains all the parameters to send to the API endpoint +for the find config shutdown announce in ms operation typically these are written to a http.Request +*/ +type FindConfigShutdownAnnounceInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config shutdown announce in ms params +func (o *FindConfigShutdownAnnounceInMsParams) WithTimeout(timeout time.Duration) *FindConfigShutdownAnnounceInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config shutdown announce in ms params +func (o *FindConfigShutdownAnnounceInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config shutdown announce in ms params +func (o *FindConfigShutdownAnnounceInMsParams) WithContext(ctx context.Context) *FindConfigShutdownAnnounceInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config shutdown announce in ms params +func (o *FindConfigShutdownAnnounceInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config shutdown announce in ms params +func (o *FindConfigShutdownAnnounceInMsParams) WithHTTPClient(client *http.Client) *FindConfigShutdownAnnounceInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config shutdown announce in ms params +func (o *FindConfigShutdownAnnounceInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigShutdownAnnounceInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shutdown_announce_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shutdown_announce_in_ms_responses.go new file mode 100644 index 00000000000..ff29c8dfb43 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_shutdown_announce_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigShutdownAnnounceInMsReader is a Reader for the FindConfigShutdownAnnounceInMs structure. +type FindConfigShutdownAnnounceInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigShutdownAnnounceInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigShutdownAnnounceInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigShutdownAnnounceInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigShutdownAnnounceInMsOK creates a FindConfigShutdownAnnounceInMsOK with default headers values +func NewFindConfigShutdownAnnounceInMsOK() *FindConfigShutdownAnnounceInMsOK { + return &FindConfigShutdownAnnounceInMsOK{} +} + +/* +FindConfigShutdownAnnounceInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigShutdownAnnounceInMsOK struct { + Payload int64 +} + +func (o *FindConfigShutdownAnnounceInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigShutdownAnnounceInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigShutdownAnnounceInMsDefault creates a FindConfigShutdownAnnounceInMsDefault with default headers values +func NewFindConfigShutdownAnnounceInMsDefault(code int) *FindConfigShutdownAnnounceInMsDefault { + return &FindConfigShutdownAnnounceInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigShutdownAnnounceInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigShutdownAnnounceInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config shutdown announce in ms default response +func (o *FindConfigShutdownAnnounceInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigShutdownAnnounceInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigShutdownAnnounceInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigShutdownAnnounceInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_skip_wait_for_gossip_to_settle_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_skip_wait_for_gossip_to_settle_parameters.go new file mode 100644 index 00000000000..cc8f4aee9d3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_skip_wait_for_gossip_to_settle_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigSkipWaitForGossipToSettleParams creates a new FindConfigSkipWaitForGossipToSettleParams object +// with the default values initialized. 
+func NewFindConfigSkipWaitForGossipToSettleParams() *FindConfigSkipWaitForGossipToSettleParams { + + return &FindConfigSkipWaitForGossipToSettleParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigSkipWaitForGossipToSettleParamsWithTimeout creates a new FindConfigSkipWaitForGossipToSettleParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigSkipWaitForGossipToSettleParamsWithTimeout(timeout time.Duration) *FindConfigSkipWaitForGossipToSettleParams { + + return &FindConfigSkipWaitForGossipToSettleParams{ + + timeout: timeout, + } +} + +// NewFindConfigSkipWaitForGossipToSettleParamsWithContext creates a new FindConfigSkipWaitForGossipToSettleParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigSkipWaitForGossipToSettleParamsWithContext(ctx context.Context) *FindConfigSkipWaitForGossipToSettleParams { + + return &FindConfigSkipWaitForGossipToSettleParams{ + + Context: ctx, + } +} + +// NewFindConfigSkipWaitForGossipToSettleParamsWithHTTPClient creates a new FindConfigSkipWaitForGossipToSettleParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigSkipWaitForGossipToSettleParamsWithHTTPClient(client *http.Client) *FindConfigSkipWaitForGossipToSettleParams { + + return &FindConfigSkipWaitForGossipToSettleParams{ + HTTPClient: client, + } +} + +/* +FindConfigSkipWaitForGossipToSettleParams contains all the parameters to send to the API endpoint +for the find config skip wait for gossip to settle operation typically these are written to a http.Request +*/ +type FindConfigSkipWaitForGossipToSettleParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config skip wait for gossip to settle params +func (o *FindConfigSkipWaitForGossipToSettleParams) WithTimeout(timeout time.Duration) *FindConfigSkipWaitForGossipToSettleParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config skip wait for gossip to settle params +func (o *FindConfigSkipWaitForGossipToSettleParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config skip wait for gossip to settle params +func (o *FindConfigSkipWaitForGossipToSettleParams) WithContext(ctx context.Context) *FindConfigSkipWaitForGossipToSettleParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config skip wait for gossip to settle params +func (o *FindConfigSkipWaitForGossipToSettleParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config skip wait for gossip to settle params +func (o *FindConfigSkipWaitForGossipToSettleParams) WithHTTPClient(client *http.Client) *FindConfigSkipWaitForGossipToSettleParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config skip wait for gossip to settle params +func (o *FindConfigSkipWaitForGossipToSettleParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigSkipWaitForGossipToSettleParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_skip_wait_for_gossip_to_settle_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_skip_wait_for_gossip_to_settle_responses.go new file mode 100644 index 00000000000..693ee3d918e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_skip_wait_for_gossip_to_settle_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigSkipWaitForGossipToSettleReader is a Reader for the FindConfigSkipWaitForGossipToSettle structure. +type FindConfigSkipWaitForGossipToSettleReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigSkipWaitForGossipToSettleReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigSkipWaitForGossipToSettleOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigSkipWaitForGossipToSettleDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigSkipWaitForGossipToSettleOK creates a FindConfigSkipWaitForGossipToSettleOK with default headers values +func NewFindConfigSkipWaitForGossipToSettleOK() *FindConfigSkipWaitForGossipToSettleOK { + return &FindConfigSkipWaitForGossipToSettleOK{} +} + +/* +FindConfigSkipWaitForGossipToSettleOK handles this case with default header values. + +Config value +*/ +type FindConfigSkipWaitForGossipToSettleOK struct { + Payload int64 +} + +func (o *FindConfigSkipWaitForGossipToSettleOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigSkipWaitForGossipToSettleOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigSkipWaitForGossipToSettleDefault creates a FindConfigSkipWaitForGossipToSettleDefault with default headers values +func NewFindConfigSkipWaitForGossipToSettleDefault(code int) *FindConfigSkipWaitForGossipToSettleDefault { + return &FindConfigSkipWaitForGossipToSettleDefault{ + _statusCode: code, + } +} + +/* +FindConfigSkipWaitForGossipToSettleDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigSkipWaitForGossipToSettleDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config skip wait for gossip to settle default response +func (o *FindConfigSkipWaitForGossipToSettleDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigSkipWaitForGossipToSettleDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigSkipWaitForGossipToSettleDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigSkipWaitForGossipToSettleDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_snapshot_before_compaction_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_snapshot_before_compaction_parameters.go new file mode 100644 index 00000000000..131e68cd58c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_snapshot_before_compaction_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigSnapshotBeforeCompactionParams creates a new FindConfigSnapshotBeforeCompactionParams object +// with the default values initialized. 
+func NewFindConfigSnapshotBeforeCompactionParams() *FindConfigSnapshotBeforeCompactionParams { + + return &FindConfigSnapshotBeforeCompactionParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigSnapshotBeforeCompactionParamsWithTimeout creates a new FindConfigSnapshotBeforeCompactionParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigSnapshotBeforeCompactionParamsWithTimeout(timeout time.Duration) *FindConfigSnapshotBeforeCompactionParams { + + return &FindConfigSnapshotBeforeCompactionParams{ + + timeout: timeout, + } +} + +// NewFindConfigSnapshotBeforeCompactionParamsWithContext creates a new FindConfigSnapshotBeforeCompactionParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigSnapshotBeforeCompactionParamsWithContext(ctx context.Context) *FindConfigSnapshotBeforeCompactionParams { + + return &FindConfigSnapshotBeforeCompactionParams{ + + Context: ctx, + } +} + +// NewFindConfigSnapshotBeforeCompactionParamsWithHTTPClient creates a new FindConfigSnapshotBeforeCompactionParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigSnapshotBeforeCompactionParamsWithHTTPClient(client *http.Client) *FindConfigSnapshotBeforeCompactionParams { + + return &FindConfigSnapshotBeforeCompactionParams{ + HTTPClient: client, + } +} + +/* +FindConfigSnapshotBeforeCompactionParams contains all the parameters to send to the API endpoint +for the find config snapshot before compaction operation typically these are written to a http.Request +*/ +type FindConfigSnapshotBeforeCompactionParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config snapshot before compaction params +func (o *FindConfigSnapshotBeforeCompactionParams) WithTimeout(timeout time.Duration) *FindConfigSnapshotBeforeCompactionParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config snapshot before compaction params +func (o *FindConfigSnapshotBeforeCompactionParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config snapshot before compaction params +func (o *FindConfigSnapshotBeforeCompactionParams) WithContext(ctx context.Context) *FindConfigSnapshotBeforeCompactionParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config snapshot before compaction params +func (o *FindConfigSnapshotBeforeCompactionParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config snapshot before compaction params +func (o *FindConfigSnapshotBeforeCompactionParams) WithHTTPClient(client *http.Client) *FindConfigSnapshotBeforeCompactionParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config snapshot before compaction params +func (o *FindConfigSnapshotBeforeCompactionParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigSnapshotBeforeCompactionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_snapshot_before_compaction_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_snapshot_before_compaction_responses.go new file mode 100644 index 00000000000..8dfb4584cf2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_snapshot_before_compaction_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigSnapshotBeforeCompactionReader is a Reader for the FindConfigSnapshotBeforeCompaction structure. +type FindConfigSnapshotBeforeCompactionReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigSnapshotBeforeCompactionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigSnapshotBeforeCompactionOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigSnapshotBeforeCompactionDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigSnapshotBeforeCompactionOK creates a FindConfigSnapshotBeforeCompactionOK with default headers values +func NewFindConfigSnapshotBeforeCompactionOK() *FindConfigSnapshotBeforeCompactionOK { + return &FindConfigSnapshotBeforeCompactionOK{} +} + +/* +FindConfigSnapshotBeforeCompactionOK handles this case with default header values. + +Config value +*/ +type FindConfigSnapshotBeforeCompactionOK struct { + Payload bool +} + +func (o *FindConfigSnapshotBeforeCompactionOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigSnapshotBeforeCompactionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigSnapshotBeforeCompactionDefault creates a FindConfigSnapshotBeforeCompactionDefault with default headers values +func NewFindConfigSnapshotBeforeCompactionDefault(code int) *FindConfigSnapshotBeforeCompactionDefault { + return &FindConfigSnapshotBeforeCompactionDefault{ + _statusCode: code, + } +} + +/* +FindConfigSnapshotBeforeCompactionDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigSnapshotBeforeCompactionDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config snapshot before compaction default response +func (o *FindConfigSnapshotBeforeCompactionDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigSnapshotBeforeCompactionDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigSnapshotBeforeCompactionDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigSnapshotBeforeCompactionDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ssl_storage_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ssl_storage_port_parameters.go new file mode 100644 index 00000000000..56e627ceecc --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ssl_storage_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigSslStoragePortParams creates a new FindConfigSslStoragePortParams object +// with the default values initialized. 
+func NewFindConfigSslStoragePortParams() *FindConfigSslStoragePortParams { + + return &FindConfigSslStoragePortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigSslStoragePortParamsWithTimeout creates a new FindConfigSslStoragePortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigSslStoragePortParamsWithTimeout(timeout time.Duration) *FindConfigSslStoragePortParams { + + return &FindConfigSslStoragePortParams{ + + timeout: timeout, + } +} + +// NewFindConfigSslStoragePortParamsWithContext creates a new FindConfigSslStoragePortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigSslStoragePortParamsWithContext(ctx context.Context) *FindConfigSslStoragePortParams { + + return &FindConfigSslStoragePortParams{ + + Context: ctx, + } +} + +// NewFindConfigSslStoragePortParamsWithHTTPClient creates a new FindConfigSslStoragePortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigSslStoragePortParamsWithHTTPClient(client *http.Client) *FindConfigSslStoragePortParams { + + return &FindConfigSslStoragePortParams{ + HTTPClient: client, + } +} + +/* +FindConfigSslStoragePortParams contains all the parameters to send to the API endpoint +for the find config ssl storage port operation typically these are written to a http.Request +*/ +type FindConfigSslStoragePortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config ssl storage port params +func (o *FindConfigSslStoragePortParams) WithTimeout(timeout time.Duration) *FindConfigSslStoragePortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config ssl storage port params +func (o *FindConfigSslStoragePortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config ssl storage port params +func (o *FindConfigSslStoragePortParams) WithContext(ctx context.Context) *FindConfigSslStoragePortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config ssl storage port params +func (o *FindConfigSslStoragePortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config ssl storage port params +func (o *FindConfigSslStoragePortParams) WithHTTPClient(client *http.Client) *FindConfigSslStoragePortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config ssl storage port params +func (o *FindConfigSslStoragePortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigSslStoragePortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ssl_storage_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ssl_storage_port_responses.go new file mode 100644 index 00000000000..cb5f46028a5 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_ssl_storage_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigSslStoragePortReader is a Reader for the FindConfigSslStoragePort structure. +type FindConfigSslStoragePortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigSslStoragePortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigSslStoragePortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigSslStoragePortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigSslStoragePortOK creates a FindConfigSslStoragePortOK with default headers values +func NewFindConfigSslStoragePortOK() *FindConfigSslStoragePortOK { + return &FindConfigSslStoragePortOK{} +} + +/* +FindConfigSslStoragePortOK handles this case with default header values. + +Config value +*/ +type FindConfigSslStoragePortOK struct { + Payload int64 +} + +func (o *FindConfigSslStoragePortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigSslStoragePortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigSslStoragePortDefault creates a FindConfigSslStoragePortDefault with default headers values +func NewFindConfigSslStoragePortDefault(code int) *FindConfigSslStoragePortDefault { + return &FindConfigSslStoragePortDefault{ + _statusCode: code, + } +} + +/* +FindConfigSslStoragePortDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigSslStoragePortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config ssl storage port default response +func (o *FindConfigSslStoragePortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigSslStoragePortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigSslStoragePortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigSslStoragePortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_preemptive_open_interval_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_preemptive_open_interval_in_mb_parameters.go new file mode 100644 index 00000000000..f37b91eafa2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_preemptive_open_interval_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigSstablePreemptiveOpenIntervalInMbParams creates a new FindConfigSstablePreemptiveOpenIntervalInMbParams object +// with the default values initialized. 
+func NewFindConfigSstablePreemptiveOpenIntervalInMbParams() *FindConfigSstablePreemptiveOpenIntervalInMbParams { + + return &FindConfigSstablePreemptiveOpenIntervalInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigSstablePreemptiveOpenIntervalInMbParamsWithTimeout creates a new FindConfigSstablePreemptiveOpenIntervalInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigSstablePreemptiveOpenIntervalInMbParamsWithTimeout(timeout time.Duration) *FindConfigSstablePreemptiveOpenIntervalInMbParams { + + return &FindConfigSstablePreemptiveOpenIntervalInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigSstablePreemptiveOpenIntervalInMbParamsWithContext creates a new FindConfigSstablePreemptiveOpenIntervalInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigSstablePreemptiveOpenIntervalInMbParamsWithContext(ctx context.Context) *FindConfigSstablePreemptiveOpenIntervalInMbParams { + + return &FindConfigSstablePreemptiveOpenIntervalInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigSstablePreemptiveOpenIntervalInMbParamsWithHTTPClient creates a new FindConfigSstablePreemptiveOpenIntervalInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigSstablePreemptiveOpenIntervalInMbParamsWithHTTPClient(client *http.Client) *FindConfigSstablePreemptiveOpenIntervalInMbParams { + + return &FindConfigSstablePreemptiveOpenIntervalInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigSstablePreemptiveOpenIntervalInMbParams contains all the parameters to send to the API endpoint +for the find config sstable preemptive open interval in mb operation typically these are written to a http.Request +*/ +type FindConfigSstablePreemptiveOpenIntervalInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config sstable preemptive open interval in mb params +func (o *FindConfigSstablePreemptiveOpenIntervalInMbParams) WithTimeout(timeout time.Duration) *FindConfigSstablePreemptiveOpenIntervalInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config sstable preemptive open interval in mb params +func (o *FindConfigSstablePreemptiveOpenIntervalInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config sstable preemptive open interval in mb params +func (o *FindConfigSstablePreemptiveOpenIntervalInMbParams) WithContext(ctx context.Context) *FindConfigSstablePreemptiveOpenIntervalInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config sstable preemptive open interval in mb params +func (o *FindConfigSstablePreemptiveOpenIntervalInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config sstable preemptive open interval in mb params +func (o *FindConfigSstablePreemptiveOpenIntervalInMbParams) WithHTTPClient(client *http.Client) *FindConfigSstablePreemptiveOpenIntervalInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config sstable preemptive open interval in mb params +func (o *FindConfigSstablePreemptiveOpenIntervalInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest 
writes these params to a swagger request +func (o *FindConfigSstablePreemptiveOpenIntervalInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_preemptive_open_interval_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_preemptive_open_interval_in_mb_responses.go new file mode 100644 index 00000000000..1b4caf8665c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_preemptive_open_interval_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigSstablePreemptiveOpenIntervalInMbReader is a Reader for the FindConfigSstablePreemptiveOpenIntervalInMb structure. +type FindConfigSstablePreemptiveOpenIntervalInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigSstablePreemptiveOpenIntervalInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigSstablePreemptiveOpenIntervalInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigSstablePreemptiveOpenIntervalInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigSstablePreemptiveOpenIntervalInMbOK creates a FindConfigSstablePreemptiveOpenIntervalInMbOK with default headers values +func NewFindConfigSstablePreemptiveOpenIntervalInMbOK() *FindConfigSstablePreemptiveOpenIntervalInMbOK { + return &FindConfigSstablePreemptiveOpenIntervalInMbOK{} +} + +/* +FindConfigSstablePreemptiveOpenIntervalInMbOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigSstablePreemptiveOpenIntervalInMbOK struct { + Payload int64 +} + +func (o *FindConfigSstablePreemptiveOpenIntervalInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigSstablePreemptiveOpenIntervalInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigSstablePreemptiveOpenIntervalInMbDefault creates a FindConfigSstablePreemptiveOpenIntervalInMbDefault with default headers values +func NewFindConfigSstablePreemptiveOpenIntervalInMbDefault(code int) *FindConfigSstablePreemptiveOpenIntervalInMbDefault { + return &FindConfigSstablePreemptiveOpenIntervalInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigSstablePreemptiveOpenIntervalInMbDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigSstablePreemptiveOpenIntervalInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config sstable preemptive open interval in mb default response +func (o *FindConfigSstablePreemptiveOpenIntervalInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigSstablePreemptiveOpenIntervalInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigSstablePreemptiveOpenIntervalInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigSstablePreemptiveOpenIntervalInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_summary_ratio_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_summary_ratio_parameters.go new file mode 100644 index 00000000000..4d5b9333d3a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_summary_ratio_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigSstableSummaryRatioParams creates a new FindConfigSstableSummaryRatioParams object +// with the default values initialized. 
+func NewFindConfigSstableSummaryRatioParams() *FindConfigSstableSummaryRatioParams { + + return &FindConfigSstableSummaryRatioParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigSstableSummaryRatioParamsWithTimeout creates a new FindConfigSstableSummaryRatioParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigSstableSummaryRatioParamsWithTimeout(timeout time.Duration) *FindConfigSstableSummaryRatioParams { + + return &FindConfigSstableSummaryRatioParams{ + + timeout: timeout, + } +} + +// NewFindConfigSstableSummaryRatioParamsWithContext creates a new FindConfigSstableSummaryRatioParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigSstableSummaryRatioParamsWithContext(ctx context.Context) *FindConfigSstableSummaryRatioParams { + + return &FindConfigSstableSummaryRatioParams{ + + Context: ctx, + } +} + +// NewFindConfigSstableSummaryRatioParamsWithHTTPClient creates a new FindConfigSstableSummaryRatioParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigSstableSummaryRatioParamsWithHTTPClient(client *http.Client) *FindConfigSstableSummaryRatioParams { + + return &FindConfigSstableSummaryRatioParams{ + HTTPClient: client, + } +} + +/* +FindConfigSstableSummaryRatioParams contains all the parameters to send to the API endpoint +for the find config sstable summary ratio operation typically these are written to a http.Request +*/ +type FindConfigSstableSummaryRatioParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config sstable summary ratio params +func (o *FindConfigSstableSummaryRatioParams) WithTimeout(timeout time.Duration) *FindConfigSstableSummaryRatioParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config sstable summary ratio params +func (o *FindConfigSstableSummaryRatioParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config sstable summary ratio params +func (o *FindConfigSstableSummaryRatioParams) WithContext(ctx context.Context) *FindConfigSstableSummaryRatioParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config sstable summary ratio params +func (o *FindConfigSstableSummaryRatioParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config sstable summary ratio params +func (o *FindConfigSstableSummaryRatioParams) WithHTTPClient(client *http.Client) *FindConfigSstableSummaryRatioParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config sstable summary ratio params +func (o *FindConfigSstableSummaryRatioParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigSstableSummaryRatioParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_summary_ratio_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_summary_ratio_responses.go new file mode 100644 index 00000000000..07bbad61553 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_sstable_summary_ratio_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigSstableSummaryRatioReader is a Reader for the FindConfigSstableSummaryRatio structure. +type FindConfigSstableSummaryRatioReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigSstableSummaryRatioReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigSstableSummaryRatioOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigSstableSummaryRatioDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigSstableSummaryRatioOK creates a FindConfigSstableSummaryRatioOK with default headers values +func NewFindConfigSstableSummaryRatioOK() *FindConfigSstableSummaryRatioOK { + return &FindConfigSstableSummaryRatioOK{} +} + +/* +FindConfigSstableSummaryRatioOK handles this case with default header values. + +Config value +*/ +type FindConfigSstableSummaryRatioOK struct { + Payload float64 +} + +func (o *FindConfigSstableSummaryRatioOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigSstableSummaryRatioOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigSstableSummaryRatioDefault creates a FindConfigSstableSummaryRatioDefault with default headers values +func NewFindConfigSstableSummaryRatioDefault(code int) *FindConfigSstableSummaryRatioDefault { + return &FindConfigSstableSummaryRatioDefault{ + _statusCode: code, + } +} + +/* +FindConfigSstableSummaryRatioDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigSstableSummaryRatioDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config sstable summary ratio default response +func (o *FindConfigSstableSummaryRatioDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigSstableSummaryRatioDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigSstableSummaryRatioDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigSstableSummaryRatioDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_native_transport_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_native_transport_parameters.go new file mode 100644 index 00000000000..4132b7ac1b7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_native_transport_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigStartNativeTransportParams creates a new FindConfigStartNativeTransportParams object +// with the default values initialized. 
+func NewFindConfigStartNativeTransportParams() *FindConfigStartNativeTransportParams { + + return &FindConfigStartNativeTransportParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigStartNativeTransportParamsWithTimeout creates a new FindConfigStartNativeTransportParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigStartNativeTransportParamsWithTimeout(timeout time.Duration) *FindConfigStartNativeTransportParams { + + return &FindConfigStartNativeTransportParams{ + + timeout: timeout, + } +} + +// NewFindConfigStartNativeTransportParamsWithContext creates a new FindConfigStartNativeTransportParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigStartNativeTransportParamsWithContext(ctx context.Context) *FindConfigStartNativeTransportParams { + + return &FindConfigStartNativeTransportParams{ + + Context: ctx, + } +} + +// NewFindConfigStartNativeTransportParamsWithHTTPClient creates a new FindConfigStartNativeTransportParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigStartNativeTransportParamsWithHTTPClient(client *http.Client) *FindConfigStartNativeTransportParams { + + return &FindConfigStartNativeTransportParams{ + HTTPClient: client, + } +} + +/* +FindConfigStartNativeTransportParams contains all the parameters to send to the API endpoint +for the find config start native transport operation typically these are written to a http.Request +*/ +type FindConfigStartNativeTransportParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config start native transport params +func (o *FindConfigStartNativeTransportParams) WithTimeout(timeout time.Duration) *FindConfigStartNativeTransportParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config start native transport params +func (o *FindConfigStartNativeTransportParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config start native transport params +func (o *FindConfigStartNativeTransportParams) WithContext(ctx context.Context) *FindConfigStartNativeTransportParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config start native transport params +func (o *FindConfigStartNativeTransportParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config start native transport params +func (o *FindConfigStartNativeTransportParams) WithHTTPClient(client *http.Client) *FindConfigStartNativeTransportParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config start native transport params +func (o *FindConfigStartNativeTransportParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigStartNativeTransportParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_native_transport_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_native_transport_responses.go new file mode 100644 index 00000000000..ab83805185c --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_native_transport_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigStartNativeTransportReader is a Reader for the FindConfigStartNativeTransport structure. +type FindConfigStartNativeTransportReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigStartNativeTransportReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigStartNativeTransportOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigStartNativeTransportDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigStartNativeTransportOK creates a FindConfigStartNativeTransportOK with default headers values +func NewFindConfigStartNativeTransportOK() *FindConfigStartNativeTransportOK { + return &FindConfigStartNativeTransportOK{} +} + +/* +FindConfigStartNativeTransportOK handles this case with default header values. + +Config value +*/ +type FindConfigStartNativeTransportOK struct { + Payload bool +} + +func (o *FindConfigStartNativeTransportOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigStartNativeTransportOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigStartNativeTransportDefault creates a FindConfigStartNativeTransportDefault with default headers values +func NewFindConfigStartNativeTransportDefault(code int) *FindConfigStartNativeTransportDefault { + return &FindConfigStartNativeTransportDefault{ + _statusCode: code, + } +} + +/* +FindConfigStartNativeTransportDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigStartNativeTransportDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config start native transport default response +func (o *FindConfigStartNativeTransportDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigStartNativeTransportDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigStartNativeTransportDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigStartNativeTransportDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_rpc_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_rpc_parameters.go new file mode 100644 index 00000000000..3a429c5dde3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_rpc_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigStartRPCParams creates a new FindConfigStartRPCParams object +// with the default values initialized. 
+func NewFindConfigStartRPCParams() *FindConfigStartRPCParams { + + return &FindConfigStartRPCParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigStartRPCParamsWithTimeout creates a new FindConfigStartRPCParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigStartRPCParamsWithTimeout(timeout time.Duration) *FindConfigStartRPCParams { + + return &FindConfigStartRPCParams{ + + timeout: timeout, + } +} + +// NewFindConfigStartRPCParamsWithContext creates a new FindConfigStartRPCParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigStartRPCParamsWithContext(ctx context.Context) *FindConfigStartRPCParams { + + return &FindConfigStartRPCParams{ + + Context: ctx, + } +} + +// NewFindConfigStartRPCParamsWithHTTPClient creates a new FindConfigStartRPCParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigStartRPCParamsWithHTTPClient(client *http.Client) *FindConfigStartRPCParams { + + return &FindConfigStartRPCParams{ + HTTPClient: client, + } +} + +/* +FindConfigStartRPCParams contains all the parameters to send to the API endpoint +for the find config start rpc operation typically these are written to a http.Request +*/ +type FindConfigStartRPCParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config start rpc params +func (o *FindConfigStartRPCParams) WithTimeout(timeout time.Duration) *FindConfigStartRPCParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config start rpc params +func (o *FindConfigStartRPCParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config start rpc params +func (o *FindConfigStartRPCParams) WithContext(ctx context.Context) *FindConfigStartRPCParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config start rpc params +func (o *FindConfigStartRPCParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config start rpc params +func (o *FindConfigStartRPCParams) WithHTTPClient(client *http.Client) *FindConfigStartRPCParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config start rpc params +func (o *FindConfigStartRPCParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigStartRPCParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_rpc_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_rpc_responses.go new file mode 100644 index 00000000000..7b41284a6ac --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_start_rpc_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigStartRPCReader is a Reader for the FindConfigStartRPC structure. +type FindConfigStartRPCReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigStartRPCReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigStartRPCOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigStartRPCDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigStartRPCOK creates a FindConfigStartRPCOK with default headers values +func NewFindConfigStartRPCOK() *FindConfigStartRPCOK { + return &FindConfigStartRPCOK{} +} + +/* +FindConfigStartRPCOK handles this case with default header values. + +Config value +*/ +type FindConfigStartRPCOK struct { + Payload bool +} + +func (o *FindConfigStartRPCOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigStartRPCOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigStartRPCDefault creates a FindConfigStartRPCDefault with default headers values +func NewFindConfigStartRPCDefault(code int) *FindConfigStartRPCDefault { + return &FindConfigStartRPCDefault{ + _statusCode: code, + } +} + +/* +FindConfigStartRPCDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigStartRPCDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config start rpc default response +func (o *FindConfigStartRPCDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigStartRPCDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigStartRPCDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigStartRPCDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_storage_port_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_storage_port_parameters.go new file mode 100644 index 00000000000..657d6289296 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_storage_port_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigStoragePortParams creates a new FindConfigStoragePortParams object +// with the default values initialized. +func NewFindConfigStoragePortParams() *FindConfigStoragePortParams { + + return &FindConfigStoragePortParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigStoragePortParamsWithTimeout creates a new FindConfigStoragePortParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigStoragePortParamsWithTimeout(timeout time.Duration) *FindConfigStoragePortParams { + + return &FindConfigStoragePortParams{ + + timeout: timeout, + } +} + +// NewFindConfigStoragePortParamsWithContext creates a new FindConfigStoragePortParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigStoragePortParamsWithContext(ctx context.Context) *FindConfigStoragePortParams { + + return &FindConfigStoragePortParams{ + + Context: ctx, + } +} + +// NewFindConfigStoragePortParamsWithHTTPClient creates a new FindConfigStoragePortParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigStoragePortParamsWithHTTPClient(client *http.Client) *FindConfigStoragePortParams { + + return &FindConfigStoragePortParams{ + HTTPClient: client, + } +} + +/* +FindConfigStoragePortParams contains all the parameters to send to the API endpoint +for the find config storage port operation typically these are written to a http.Request +*/ +type FindConfigStoragePortParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config storage port params +func (o *FindConfigStoragePortParams) WithTimeout(timeout time.Duration) *FindConfigStoragePortParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config storage port params +func (o *FindConfigStoragePortParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config storage port params +func (o *FindConfigStoragePortParams) WithContext(ctx context.Context) *FindConfigStoragePortParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config storage port params +func (o *FindConfigStoragePortParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config storage port params +func (o *FindConfigStoragePortParams) WithHTTPClient(client *http.Client) *FindConfigStoragePortParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config storage port params +func (o *FindConfigStoragePortParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigStoragePortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_storage_port_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_storage_port_responses.go new file mode 100644 index 00000000000..d5802d3d90f --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_storage_port_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigStoragePortReader is a Reader for the FindConfigStoragePort structure. +type FindConfigStoragePortReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigStoragePortReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigStoragePortOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigStoragePortDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigStoragePortOK creates a FindConfigStoragePortOK with default headers values +func NewFindConfigStoragePortOK() *FindConfigStoragePortOK { + return &FindConfigStoragePortOK{} +} + +/* +FindConfigStoragePortOK handles this case with default header values. + +Config value +*/ +type FindConfigStoragePortOK struct { + Payload int64 +} + +func (o *FindConfigStoragePortOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigStoragePortOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigStoragePortDefault creates a FindConfigStoragePortDefault with default headers values +func NewFindConfigStoragePortDefault(code int) *FindConfigStoragePortDefault { + return &FindConfigStoragePortDefault{ + _statusCode: code, + } +} + +/* +FindConfigStoragePortDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigStoragePortDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config storage port default response +func (o *FindConfigStoragePortDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigStoragePortDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigStoragePortDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigStoragePortDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_stream_throughput_outbound_megabits_per_sec_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_stream_throughput_outbound_megabits_per_sec_parameters.go new file mode 100644 index 00000000000..c01b2b73ade --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_stream_throughput_outbound_megabits_per_sec_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigStreamThroughputOutboundMegabitsPerSecParams creates a new FindConfigStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized. 
+func NewFindConfigStreamThroughputOutboundMegabitsPerSecParams() *FindConfigStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigStreamThroughputOutboundMegabitsPerSecParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigStreamThroughputOutboundMegabitsPerSecParamsWithTimeout creates a new FindConfigStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigStreamThroughputOutboundMegabitsPerSecParamsWithTimeout(timeout time.Duration) *FindConfigStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigStreamThroughputOutboundMegabitsPerSecParams{ + + timeout: timeout, + } +} + +// NewFindConfigStreamThroughputOutboundMegabitsPerSecParamsWithContext creates a new FindConfigStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigStreamThroughputOutboundMegabitsPerSecParamsWithContext(ctx context.Context) *FindConfigStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigStreamThroughputOutboundMegabitsPerSecParams{ + + Context: ctx, + } +} + +// NewFindConfigStreamThroughputOutboundMegabitsPerSecParamsWithHTTPClient creates a new FindConfigStreamThroughputOutboundMegabitsPerSecParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigStreamThroughputOutboundMegabitsPerSecParamsWithHTTPClient(client *http.Client) *FindConfigStreamThroughputOutboundMegabitsPerSecParams { + + return &FindConfigStreamThroughputOutboundMegabitsPerSecParams{ + HTTPClient: client, + } +} + +/* +FindConfigStreamThroughputOutboundMegabitsPerSecParams contains all the parameters to send to the API endpoint +for the find config stream throughput outbound megabits per sec operation typically these are written to a http.Request +*/ +type FindConfigStreamThroughputOutboundMegabitsPerSecParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config stream throughput outbound megabits per sec params +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecParams) WithTimeout(timeout time.Duration) *FindConfigStreamThroughputOutboundMegabitsPerSecParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config stream throughput outbound megabits per sec params +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config stream throughput outbound megabits per sec params +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecParams) WithContext(ctx context.Context) *FindConfigStreamThroughputOutboundMegabitsPerSecParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config stream throughput outbound megabits per sec params +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config stream throughput outbound megabits per sec params +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecParams) WithHTTPClient(client *http.Client) *FindConfigStreamThroughputOutboundMegabitsPerSecParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config stream throughput 
outbound megabits per sec params +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_stream_throughput_outbound_megabits_per_sec_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_stream_throughput_outbound_megabits_per_sec_responses.go new file mode 100644 index 00000000000..9cbf6ea4cce --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_stream_throughput_outbound_megabits_per_sec_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigStreamThroughputOutboundMegabitsPerSecReader is a Reader for the FindConfigStreamThroughputOutboundMegabitsPerSec structure. +type FindConfigStreamThroughputOutboundMegabitsPerSecReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigStreamThroughputOutboundMegabitsPerSecOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigStreamThroughputOutboundMegabitsPerSecDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigStreamThroughputOutboundMegabitsPerSecOK creates a FindConfigStreamThroughputOutboundMegabitsPerSecOK with default headers values +func NewFindConfigStreamThroughputOutboundMegabitsPerSecOK() *FindConfigStreamThroughputOutboundMegabitsPerSecOK { + return &FindConfigStreamThroughputOutboundMegabitsPerSecOK{} +} + +/* +FindConfigStreamThroughputOutboundMegabitsPerSecOK handles this case with default header values. 
+ +Config value +*/ +type FindConfigStreamThroughputOutboundMegabitsPerSecOK struct { + Payload int64 +} + +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigStreamThroughputOutboundMegabitsPerSecDefault creates a FindConfigStreamThroughputOutboundMegabitsPerSecDefault with default headers values +func NewFindConfigStreamThroughputOutboundMegabitsPerSecDefault(code int) *FindConfigStreamThroughputOutboundMegabitsPerSecDefault { + return &FindConfigStreamThroughputOutboundMegabitsPerSecDefault{ + _statusCode: code, + } +} + +/* +FindConfigStreamThroughputOutboundMegabitsPerSecDefault handles this case with default header values. + +unexpected error +*/ +type FindConfigStreamThroughputOutboundMegabitsPerSecDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config stream throughput outbound megabits per sec default response +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigStreamThroughputOutboundMegabitsPerSecDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_streaming_socket_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_streaming_socket_timeout_in_ms_parameters.go new file mode 100644 index 00000000000..85c3332ba83 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_streaming_socket_timeout_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigStreamingSocketTimeoutInMsParams creates a new FindConfigStreamingSocketTimeoutInMsParams object +// with the default values initialized. 
+func NewFindConfigStreamingSocketTimeoutInMsParams() *FindConfigStreamingSocketTimeoutInMsParams { + + return &FindConfigStreamingSocketTimeoutInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigStreamingSocketTimeoutInMsParamsWithTimeout creates a new FindConfigStreamingSocketTimeoutInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigStreamingSocketTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigStreamingSocketTimeoutInMsParams { + + return &FindConfigStreamingSocketTimeoutInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigStreamingSocketTimeoutInMsParamsWithContext creates a new FindConfigStreamingSocketTimeoutInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigStreamingSocketTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigStreamingSocketTimeoutInMsParams { + + return &FindConfigStreamingSocketTimeoutInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigStreamingSocketTimeoutInMsParamsWithHTTPClient creates a new FindConfigStreamingSocketTimeoutInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigStreamingSocketTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigStreamingSocketTimeoutInMsParams { + + return &FindConfigStreamingSocketTimeoutInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigStreamingSocketTimeoutInMsParams contains all the parameters to send to the API endpoint +for the find config streaming socket timeout in ms operation typically these are written to a http.Request +*/ +type FindConfigStreamingSocketTimeoutInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config streaming socket timeout in ms params +func (o *FindConfigStreamingSocketTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigStreamingSocketTimeoutInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config streaming socket timeout in ms params +func (o *FindConfigStreamingSocketTimeoutInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config streaming socket timeout in ms params +func (o *FindConfigStreamingSocketTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigStreamingSocketTimeoutInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config streaming socket timeout in ms params +func (o *FindConfigStreamingSocketTimeoutInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config streaming socket timeout in ms params +func (o *FindConfigStreamingSocketTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigStreamingSocketTimeoutInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config streaming socket timeout in ms params +func (o *FindConfigStreamingSocketTimeoutInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigStreamingSocketTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if 
len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_streaming_socket_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_streaming_socket_timeout_in_ms_responses.go new file mode 100644 index 00000000000..ef02996ffa1 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_streaming_socket_timeout_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigStreamingSocketTimeoutInMsReader is a Reader for the FindConfigStreamingSocketTimeoutInMs structure. +type FindConfigStreamingSocketTimeoutInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigStreamingSocketTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigStreamingSocketTimeoutInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigStreamingSocketTimeoutInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigStreamingSocketTimeoutInMsOK creates a FindConfigStreamingSocketTimeoutInMsOK with default headers values +func NewFindConfigStreamingSocketTimeoutInMsOK() *FindConfigStreamingSocketTimeoutInMsOK { + return &FindConfigStreamingSocketTimeoutInMsOK{} +} + +/* +FindConfigStreamingSocketTimeoutInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigStreamingSocketTimeoutInMsOK struct { + Payload int64 +} + +func (o *FindConfigStreamingSocketTimeoutInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigStreamingSocketTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigStreamingSocketTimeoutInMsDefault creates a FindConfigStreamingSocketTimeoutInMsDefault with default headers values +func NewFindConfigStreamingSocketTimeoutInMsDefault(code int) *FindConfigStreamingSocketTimeoutInMsDefault { + return &FindConfigStreamingSocketTimeoutInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigStreamingSocketTimeoutInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigStreamingSocketTimeoutInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config streaming socket timeout in ms default response +func (o *FindConfigStreamingSocketTimeoutInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigStreamingSocketTimeoutInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigStreamingSocketTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigStreamingSocketTimeoutInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_framed_transport_size_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_framed_transport_size_in_mb_parameters.go new file mode 100644 index 00000000000..9d4177a2c15 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_framed_transport_size_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigThriftFramedTransportSizeInMbParams creates a new FindConfigThriftFramedTransportSizeInMbParams object +// with the default values initialized. 
+func NewFindConfigThriftFramedTransportSizeInMbParams() *FindConfigThriftFramedTransportSizeInMbParams { + + return &FindConfigThriftFramedTransportSizeInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigThriftFramedTransportSizeInMbParamsWithTimeout creates a new FindConfigThriftFramedTransportSizeInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigThriftFramedTransportSizeInMbParamsWithTimeout(timeout time.Duration) *FindConfigThriftFramedTransportSizeInMbParams { + + return &FindConfigThriftFramedTransportSizeInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigThriftFramedTransportSizeInMbParamsWithContext creates a new FindConfigThriftFramedTransportSizeInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigThriftFramedTransportSizeInMbParamsWithContext(ctx context.Context) *FindConfigThriftFramedTransportSizeInMbParams { + + return &FindConfigThriftFramedTransportSizeInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigThriftFramedTransportSizeInMbParamsWithHTTPClient creates a new FindConfigThriftFramedTransportSizeInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigThriftFramedTransportSizeInMbParamsWithHTTPClient(client *http.Client) *FindConfigThriftFramedTransportSizeInMbParams { + + return &FindConfigThriftFramedTransportSizeInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigThriftFramedTransportSizeInMbParams contains all the parameters to send to the API endpoint +for the find config thrift framed transport size in mb operation typically these are written to a http.Request +*/ +type FindConfigThriftFramedTransportSizeInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config thrift framed transport size in mb params +func (o *FindConfigThriftFramedTransportSizeInMbParams) WithTimeout(timeout time.Duration) *FindConfigThriftFramedTransportSizeInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config thrift framed transport size in mb params +func (o *FindConfigThriftFramedTransportSizeInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config thrift framed transport size in mb params +func (o *FindConfigThriftFramedTransportSizeInMbParams) WithContext(ctx context.Context) *FindConfigThriftFramedTransportSizeInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config thrift framed transport size in mb params +func (o *FindConfigThriftFramedTransportSizeInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config thrift framed transport size in mb params +func (o *FindConfigThriftFramedTransportSizeInMbParams) WithHTTPClient(client *http.Client) *FindConfigThriftFramedTransportSizeInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config thrift framed transport size in mb params +func (o *FindConfigThriftFramedTransportSizeInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigThriftFramedTransportSizeInMbParams) WriteToRequest(r runtime.ClientRequest, reg 
strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_framed_transport_size_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_framed_transport_size_in_mb_responses.go new file mode 100644 index 00000000000..2ab118da162 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_framed_transport_size_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigThriftFramedTransportSizeInMbReader is a Reader for the FindConfigThriftFramedTransportSizeInMb structure. +type FindConfigThriftFramedTransportSizeInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigThriftFramedTransportSizeInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigThriftFramedTransportSizeInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigThriftFramedTransportSizeInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigThriftFramedTransportSizeInMbOK creates a FindConfigThriftFramedTransportSizeInMbOK with default headers values +func NewFindConfigThriftFramedTransportSizeInMbOK() *FindConfigThriftFramedTransportSizeInMbOK { + return &FindConfigThriftFramedTransportSizeInMbOK{} +} + +/* +FindConfigThriftFramedTransportSizeInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigThriftFramedTransportSizeInMbOK struct { + Payload int64 +} + +func (o *FindConfigThriftFramedTransportSizeInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigThriftFramedTransportSizeInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigThriftFramedTransportSizeInMbDefault creates a FindConfigThriftFramedTransportSizeInMbDefault with default headers values +func NewFindConfigThriftFramedTransportSizeInMbDefault(code int) *FindConfigThriftFramedTransportSizeInMbDefault { + return &FindConfigThriftFramedTransportSizeInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigThriftFramedTransportSizeInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigThriftFramedTransportSizeInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config thrift framed transport size in mb default response +func (o *FindConfigThriftFramedTransportSizeInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigThriftFramedTransportSizeInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigThriftFramedTransportSizeInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigThriftFramedTransportSizeInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_max_message_length_in_mb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_max_message_length_in_mb_parameters.go new file mode 100644 index 00000000000..945fa0fedc7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_max_message_length_in_mb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigThriftMaxMessageLengthInMbParams creates a new FindConfigThriftMaxMessageLengthInMbParams object +// with the default values initialized. 
+func NewFindConfigThriftMaxMessageLengthInMbParams() *FindConfigThriftMaxMessageLengthInMbParams { + + return &FindConfigThriftMaxMessageLengthInMbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigThriftMaxMessageLengthInMbParamsWithTimeout creates a new FindConfigThriftMaxMessageLengthInMbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigThriftMaxMessageLengthInMbParamsWithTimeout(timeout time.Duration) *FindConfigThriftMaxMessageLengthInMbParams { + + return &FindConfigThriftMaxMessageLengthInMbParams{ + + timeout: timeout, + } +} + +// NewFindConfigThriftMaxMessageLengthInMbParamsWithContext creates a new FindConfigThriftMaxMessageLengthInMbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigThriftMaxMessageLengthInMbParamsWithContext(ctx context.Context) *FindConfigThriftMaxMessageLengthInMbParams { + + return &FindConfigThriftMaxMessageLengthInMbParams{ + + Context: ctx, + } +} + +// NewFindConfigThriftMaxMessageLengthInMbParamsWithHTTPClient creates a new FindConfigThriftMaxMessageLengthInMbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigThriftMaxMessageLengthInMbParamsWithHTTPClient(client *http.Client) *FindConfigThriftMaxMessageLengthInMbParams { + + return &FindConfigThriftMaxMessageLengthInMbParams{ + HTTPClient: client, + } +} + +/* +FindConfigThriftMaxMessageLengthInMbParams contains all the parameters to send to the API endpoint +for the find config thrift max message length in mb operation typically these are written to a http.Request +*/ +type FindConfigThriftMaxMessageLengthInMbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config thrift max message length in mb params +func (o *FindConfigThriftMaxMessageLengthInMbParams) WithTimeout(timeout time.Duration) *FindConfigThriftMaxMessageLengthInMbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config thrift max message length in mb params +func (o *FindConfigThriftMaxMessageLengthInMbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config thrift max message length in mb params +func (o *FindConfigThriftMaxMessageLengthInMbParams) WithContext(ctx context.Context) *FindConfigThriftMaxMessageLengthInMbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config thrift max message length in mb params +func (o *FindConfigThriftMaxMessageLengthInMbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config thrift max message length in mb params +func (o *FindConfigThriftMaxMessageLengthInMbParams) WithHTTPClient(client *http.Client) *FindConfigThriftMaxMessageLengthInMbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config thrift max message length in mb params +func (o *FindConfigThriftMaxMessageLengthInMbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigThriftMaxMessageLengthInMbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + 
if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_max_message_length_in_mb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_max_message_length_in_mb_responses.go new file mode 100644 index 00000000000..2f0f2f111e7 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_thrift_max_message_length_in_mb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigThriftMaxMessageLengthInMbReader is a Reader for the FindConfigThriftMaxMessageLengthInMb structure. +type FindConfigThriftMaxMessageLengthInMbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigThriftMaxMessageLengthInMbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigThriftMaxMessageLengthInMbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigThriftMaxMessageLengthInMbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigThriftMaxMessageLengthInMbOK creates a FindConfigThriftMaxMessageLengthInMbOK with default headers values +func NewFindConfigThriftMaxMessageLengthInMbOK() *FindConfigThriftMaxMessageLengthInMbOK { + return &FindConfigThriftMaxMessageLengthInMbOK{} +} + +/* +FindConfigThriftMaxMessageLengthInMbOK handles this case with default header values. + +Config value +*/ +type FindConfigThriftMaxMessageLengthInMbOK struct { + Payload int64 +} + +func (o *FindConfigThriftMaxMessageLengthInMbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigThriftMaxMessageLengthInMbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigThriftMaxMessageLengthInMbDefault creates a FindConfigThriftMaxMessageLengthInMbDefault with default headers values +func NewFindConfigThriftMaxMessageLengthInMbDefault(code int) *FindConfigThriftMaxMessageLengthInMbDefault { + return &FindConfigThriftMaxMessageLengthInMbDefault{ + _statusCode: code, + } +} + +/* +FindConfigThriftMaxMessageLengthInMbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigThriftMaxMessageLengthInMbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config thrift max message length in mb default response +func (o *FindConfigThriftMaxMessageLengthInMbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigThriftMaxMessageLengthInMbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigThriftMaxMessageLengthInMbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigThriftMaxMessageLengthInMbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_failure_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_failure_threshold_parameters.go new file mode 100644 index 00000000000..3a61b34a0c2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_failure_threshold_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigTombstoneFailureThresholdParams creates a new FindConfigTombstoneFailureThresholdParams object +// with the default values initialized. 
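+//
+// A minimal usage sketch (illustrative, not generated output). It assumes the
+// package's generated Client, cfg := New(transport, strfmt.Default), exposes a
+// FindConfigTombstoneFailureThreshold method taking these params, as
+// go-swagger clients conventionally do:
+//
+//	params := NewFindConfigTombstoneFailureThresholdParams().
+//		WithTimeout(10 * time.Second)
+//	resp, err := cfg.FindConfigTombstoneFailureThreshold(params)
+//	if err != nil {
+//		return err
+//	}
+//	threshold := resp.GetPayload() // int64 value of tombstone_failure_threshold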
+func NewFindConfigTombstoneFailureThresholdParams() *FindConfigTombstoneFailureThresholdParams { + + return &FindConfigTombstoneFailureThresholdParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigTombstoneFailureThresholdParamsWithTimeout creates a new FindConfigTombstoneFailureThresholdParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigTombstoneFailureThresholdParamsWithTimeout(timeout time.Duration) *FindConfigTombstoneFailureThresholdParams { + + return &FindConfigTombstoneFailureThresholdParams{ + + timeout: timeout, + } +} + +// NewFindConfigTombstoneFailureThresholdParamsWithContext creates a new FindConfigTombstoneFailureThresholdParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigTombstoneFailureThresholdParamsWithContext(ctx context.Context) *FindConfigTombstoneFailureThresholdParams { + + return &FindConfigTombstoneFailureThresholdParams{ + + Context: ctx, + } +} + +// NewFindConfigTombstoneFailureThresholdParamsWithHTTPClient creates a new FindConfigTombstoneFailureThresholdParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigTombstoneFailureThresholdParamsWithHTTPClient(client *http.Client) *FindConfigTombstoneFailureThresholdParams { + + return &FindConfigTombstoneFailureThresholdParams{ + HTTPClient: client, + } +} + +/* +FindConfigTombstoneFailureThresholdParams contains all the parameters to send to the API endpoint +for the find config tombstone failure threshold operation typically these are written to a http.Request +*/ +type FindConfigTombstoneFailureThresholdParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config tombstone failure threshold params +func (o *FindConfigTombstoneFailureThresholdParams) WithTimeout(timeout time.Duration) *FindConfigTombstoneFailureThresholdParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config tombstone failure threshold params +func (o *FindConfigTombstoneFailureThresholdParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config tombstone failure threshold params +func (o *FindConfigTombstoneFailureThresholdParams) WithContext(ctx context.Context) *FindConfigTombstoneFailureThresholdParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config tombstone failure threshold params +func (o *FindConfigTombstoneFailureThresholdParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config tombstone failure threshold params +func (o *FindConfigTombstoneFailureThresholdParams) WithHTTPClient(client *http.Client) *FindConfigTombstoneFailureThresholdParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config tombstone failure threshold params +func (o *FindConfigTombstoneFailureThresholdParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigTombstoneFailureThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return 
errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_failure_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_failure_threshold_responses.go new file mode 100644 index 00000000000..0213b8fbeb8 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_failure_threshold_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigTombstoneFailureThresholdReader is a Reader for the FindConfigTombstoneFailureThreshold structure. +type FindConfigTombstoneFailureThresholdReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigTombstoneFailureThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigTombstoneFailureThresholdOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigTombstoneFailureThresholdDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigTombstoneFailureThresholdOK creates a FindConfigTombstoneFailureThresholdOK with default headers values +func NewFindConfigTombstoneFailureThresholdOK() *FindConfigTombstoneFailureThresholdOK { + return &FindConfigTombstoneFailureThresholdOK{} +} + +/* +FindConfigTombstoneFailureThresholdOK handles this case with default header values. + +Config value +*/ +type FindConfigTombstoneFailureThresholdOK struct { + Payload int64 +} + +func (o *FindConfigTombstoneFailureThresholdOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigTombstoneFailureThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigTombstoneFailureThresholdDefault creates a FindConfigTombstoneFailureThresholdDefault with default headers values +func NewFindConfigTombstoneFailureThresholdDefault(code int) *FindConfigTombstoneFailureThresholdDefault { + return &FindConfigTombstoneFailureThresholdDefault{ + _statusCode: code, + } +} + +/* +FindConfigTombstoneFailureThresholdDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigTombstoneFailureThresholdDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config tombstone failure threshold default response +func (o *FindConfigTombstoneFailureThresholdDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigTombstoneFailureThresholdDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigTombstoneFailureThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigTombstoneFailureThresholdDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_warn_threshold_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_warn_threshold_parameters.go new file mode 100644 index 00000000000..31346beb44a --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_warn_threshold_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigTombstoneWarnThresholdParams creates a new FindConfigTombstoneWarnThresholdParams object +// with the default values initialized. 
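+//
+// Cancellation sketch (illustrative; cfg is the same assumed generated Client):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	params := NewFindConfigTombstoneWarnThresholdParamsWithContext(ctx)
+//	resp, err := cfg.FindConfigTombstoneWarnThreshold(params)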
+func NewFindConfigTombstoneWarnThresholdParams() *FindConfigTombstoneWarnThresholdParams { + + return &FindConfigTombstoneWarnThresholdParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigTombstoneWarnThresholdParamsWithTimeout creates a new FindConfigTombstoneWarnThresholdParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigTombstoneWarnThresholdParamsWithTimeout(timeout time.Duration) *FindConfigTombstoneWarnThresholdParams { + + return &FindConfigTombstoneWarnThresholdParams{ + + timeout: timeout, + } +} + +// NewFindConfigTombstoneWarnThresholdParamsWithContext creates a new FindConfigTombstoneWarnThresholdParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigTombstoneWarnThresholdParamsWithContext(ctx context.Context) *FindConfigTombstoneWarnThresholdParams { + + return &FindConfigTombstoneWarnThresholdParams{ + + Context: ctx, + } +} + +// NewFindConfigTombstoneWarnThresholdParamsWithHTTPClient creates a new FindConfigTombstoneWarnThresholdParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigTombstoneWarnThresholdParamsWithHTTPClient(client *http.Client) *FindConfigTombstoneWarnThresholdParams { + + return &FindConfigTombstoneWarnThresholdParams{ + HTTPClient: client, + } +} + +/* +FindConfigTombstoneWarnThresholdParams contains all the parameters to send to the API endpoint +for the find config tombstone warn threshold operation typically these are written to a http.Request +*/ +type FindConfigTombstoneWarnThresholdParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config tombstone warn threshold params +func (o *FindConfigTombstoneWarnThresholdParams) WithTimeout(timeout time.Duration) *FindConfigTombstoneWarnThresholdParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config tombstone warn threshold params +func (o *FindConfigTombstoneWarnThresholdParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config tombstone warn threshold params +func (o *FindConfigTombstoneWarnThresholdParams) WithContext(ctx context.Context) *FindConfigTombstoneWarnThresholdParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config tombstone warn threshold params +func (o *FindConfigTombstoneWarnThresholdParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config tombstone warn threshold params +func (o *FindConfigTombstoneWarnThresholdParams) WithHTTPClient(client *http.Client) *FindConfigTombstoneWarnThresholdParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config tombstone warn threshold params +func (o *FindConfigTombstoneWarnThresholdParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigTombstoneWarnThresholdParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_warn_threshold_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_warn_threshold_responses.go new file mode 100644 index 00000000000..d0967467d05 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_tombstone_warn_threshold_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigTombstoneWarnThresholdReader is a Reader for the FindConfigTombstoneWarnThreshold structure. +type FindConfigTombstoneWarnThresholdReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigTombstoneWarnThresholdReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigTombstoneWarnThresholdOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigTombstoneWarnThresholdDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigTombstoneWarnThresholdOK creates a FindConfigTombstoneWarnThresholdOK with default headers values +func NewFindConfigTombstoneWarnThresholdOK() *FindConfigTombstoneWarnThresholdOK { + return &FindConfigTombstoneWarnThresholdOK{} +} + +/* +FindConfigTombstoneWarnThresholdOK handles this case with default header values. + +Config value +*/ +type FindConfigTombstoneWarnThresholdOK struct { + Payload int64 +} + +func (o *FindConfigTombstoneWarnThresholdOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigTombstoneWarnThresholdOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigTombstoneWarnThresholdDefault creates a FindConfigTombstoneWarnThresholdDefault with default headers values +func NewFindConfigTombstoneWarnThresholdDefault(code int) *FindConfigTombstoneWarnThresholdDefault { + return &FindConfigTombstoneWarnThresholdDefault{ + _statusCode: code, + } +} + +/* +FindConfigTombstoneWarnThresholdDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigTombstoneWarnThresholdDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config tombstone warn threshold default response +func (o *FindConfigTombstoneWarnThresholdDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigTombstoneWarnThresholdDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigTombstoneWarnThresholdDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigTombstoneWarnThresholdDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_interval_in_kb_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_interval_in_kb_parameters.go new file mode 100644 index 00000000000..b38bce775a0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_interval_in_kb_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigTrickleFsyncIntervalInKbParams creates a new FindConfigTrickleFsyncIntervalInKbParams object +// with the default values initialized. 
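+//
+// Custom transport sketch (illustrative): supplying your own *http.Client,
+// for example to reuse a tuned connection pool; the hc name is an assumption:
+//
+//	hc := &http.Client{Timeout: 30 * time.Second}
+//	params := NewFindConfigTrickleFsyncIntervalInKbParamsWithHTTPClient(hc)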
+func NewFindConfigTrickleFsyncIntervalInKbParams() *FindConfigTrickleFsyncIntervalInKbParams { + + return &FindConfigTrickleFsyncIntervalInKbParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigTrickleFsyncIntervalInKbParamsWithTimeout creates a new FindConfigTrickleFsyncIntervalInKbParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigTrickleFsyncIntervalInKbParamsWithTimeout(timeout time.Duration) *FindConfigTrickleFsyncIntervalInKbParams { + + return &FindConfigTrickleFsyncIntervalInKbParams{ + + timeout: timeout, + } +} + +// NewFindConfigTrickleFsyncIntervalInKbParamsWithContext creates a new FindConfigTrickleFsyncIntervalInKbParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigTrickleFsyncIntervalInKbParamsWithContext(ctx context.Context) *FindConfigTrickleFsyncIntervalInKbParams { + + return &FindConfigTrickleFsyncIntervalInKbParams{ + + Context: ctx, + } +} + +// NewFindConfigTrickleFsyncIntervalInKbParamsWithHTTPClient creates a new FindConfigTrickleFsyncIntervalInKbParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigTrickleFsyncIntervalInKbParamsWithHTTPClient(client *http.Client) *FindConfigTrickleFsyncIntervalInKbParams { + + return &FindConfigTrickleFsyncIntervalInKbParams{ + HTTPClient: client, + } +} + +/* +FindConfigTrickleFsyncIntervalInKbParams contains all the parameters to send to the API endpoint +for the find config trickle fsync interval in kb operation typically these are written to a http.Request +*/ +type FindConfigTrickleFsyncIntervalInKbParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config trickle fsync interval in kb params +func (o *FindConfigTrickleFsyncIntervalInKbParams) WithTimeout(timeout time.Duration) *FindConfigTrickleFsyncIntervalInKbParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config trickle fsync interval in kb params +func (o *FindConfigTrickleFsyncIntervalInKbParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config trickle fsync interval in kb params +func (o *FindConfigTrickleFsyncIntervalInKbParams) WithContext(ctx context.Context) *FindConfigTrickleFsyncIntervalInKbParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config trickle fsync interval in kb params +func (o *FindConfigTrickleFsyncIntervalInKbParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config trickle fsync interval in kb params +func (o *FindConfigTrickleFsyncIntervalInKbParams) WithHTTPClient(client *http.Client) *FindConfigTrickleFsyncIntervalInKbParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config trickle fsync interval in kb params +func (o *FindConfigTrickleFsyncIntervalInKbParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigTrickleFsyncIntervalInKbParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_interval_in_kb_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_interval_in_kb_responses.go new file mode 100644 index 00000000000..d4e3db63363 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_interval_in_kb_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigTrickleFsyncIntervalInKbReader is a Reader for the FindConfigTrickleFsyncIntervalInKb structure. +type FindConfigTrickleFsyncIntervalInKbReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigTrickleFsyncIntervalInKbReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigTrickleFsyncIntervalInKbOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigTrickleFsyncIntervalInKbDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigTrickleFsyncIntervalInKbOK creates a FindConfigTrickleFsyncIntervalInKbOK with default headers values +func NewFindConfigTrickleFsyncIntervalInKbOK() *FindConfigTrickleFsyncIntervalInKbOK { + return &FindConfigTrickleFsyncIntervalInKbOK{} +} + +/* +FindConfigTrickleFsyncIntervalInKbOK handles this case with default header values. + +Config value +*/ +type FindConfigTrickleFsyncIntervalInKbOK struct { + Payload int64 +} + +func (o *FindConfigTrickleFsyncIntervalInKbOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigTrickleFsyncIntervalInKbOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigTrickleFsyncIntervalInKbDefault creates a FindConfigTrickleFsyncIntervalInKbDefault with default headers values +func NewFindConfigTrickleFsyncIntervalInKbDefault(code int) *FindConfigTrickleFsyncIntervalInKbDefault { + return &FindConfigTrickleFsyncIntervalInKbDefault{ + _statusCode: code, + } +} + +/* +FindConfigTrickleFsyncIntervalInKbDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigTrickleFsyncIntervalInKbDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config trickle fsync interval in kb default response +func (o *FindConfigTrickleFsyncIntervalInKbDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigTrickleFsyncIntervalInKbDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigTrickleFsyncIntervalInKbDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigTrickleFsyncIntervalInKbDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_parameters.go new file mode 100644 index 00000000000..c26d03a583e --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigTrickleFsyncParams creates a new FindConfigTrickleFsyncParams object +// with the default values initialized. 
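+//
+// Usage sketch (illustrative; cfg is the assumed generated Client). Unlike the
+// integer endpoints above, this operation's OK payload is a bool:
+//
+//	resp, err := cfg.FindConfigTrickleFsync(NewFindConfigTrickleFsyncParams())
+//	if err == nil && resp.GetPayload() {
+//		// trickle_fsync is enabled on the node
+//	}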
+func NewFindConfigTrickleFsyncParams() *FindConfigTrickleFsyncParams { + + return &FindConfigTrickleFsyncParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigTrickleFsyncParamsWithTimeout creates a new FindConfigTrickleFsyncParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigTrickleFsyncParamsWithTimeout(timeout time.Duration) *FindConfigTrickleFsyncParams { + + return &FindConfigTrickleFsyncParams{ + + timeout: timeout, + } +} + +// NewFindConfigTrickleFsyncParamsWithContext creates a new FindConfigTrickleFsyncParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigTrickleFsyncParamsWithContext(ctx context.Context) *FindConfigTrickleFsyncParams { + + return &FindConfigTrickleFsyncParams{ + + Context: ctx, + } +} + +// NewFindConfigTrickleFsyncParamsWithHTTPClient creates a new FindConfigTrickleFsyncParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigTrickleFsyncParamsWithHTTPClient(client *http.Client) *FindConfigTrickleFsyncParams { + + return &FindConfigTrickleFsyncParams{ + HTTPClient: client, + } +} + +/* +FindConfigTrickleFsyncParams contains all the parameters to send to the API endpoint +for the find config trickle fsync operation typically these are written to a http.Request +*/ +type FindConfigTrickleFsyncParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config trickle fsync params +func (o *FindConfigTrickleFsyncParams) WithTimeout(timeout time.Duration) *FindConfigTrickleFsyncParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config trickle fsync params +func (o *FindConfigTrickleFsyncParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config trickle fsync params +func (o *FindConfigTrickleFsyncParams) WithContext(ctx context.Context) *FindConfigTrickleFsyncParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config trickle fsync params +func (o *FindConfigTrickleFsyncParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config trickle fsync params +func (o *FindConfigTrickleFsyncParams) WithHTTPClient(client *http.Client) *FindConfigTrickleFsyncParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config trickle fsync params +func (o *FindConfigTrickleFsyncParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigTrickleFsyncParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_responses.go new file mode 100644 index 00000000000..a8d777f30c0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_trickle_fsync_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigTrickleFsyncReader is a Reader for the FindConfigTrickleFsync structure. +type FindConfigTrickleFsyncReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigTrickleFsyncReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigTrickleFsyncOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigTrickleFsyncDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigTrickleFsyncOK creates a FindConfigTrickleFsyncOK with default headers values +func NewFindConfigTrickleFsyncOK() *FindConfigTrickleFsyncOK { + return &FindConfigTrickleFsyncOK{} +} + +/* +FindConfigTrickleFsyncOK handles this case with default header values. + +Config value +*/ +type FindConfigTrickleFsyncOK struct { + Payload bool +} + +func (o *FindConfigTrickleFsyncOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigTrickleFsyncOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigTrickleFsyncDefault creates a FindConfigTrickleFsyncDefault with default headers values +func NewFindConfigTrickleFsyncDefault(code int) *FindConfigTrickleFsyncDefault { + return &FindConfigTrickleFsyncDefault{ + _statusCode: code, + } +} + +/* +FindConfigTrickleFsyncDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigTrickleFsyncDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config trickle fsync default response +func (o *FindConfigTrickleFsyncDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigTrickleFsyncDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigTrickleFsyncDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigTrickleFsyncDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_truncate_request_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_truncate_request_timeout_in_ms_parameters.go new file mode 100644 index 00000000000..a490f23a3c0 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_truncate_request_timeout_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigTruncateRequestTimeoutInMsParams creates a new FindConfigTruncateRequestTimeoutInMsParams object +// with the default values initialized. 
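+//
+// Error-handling sketch (illustrative): a non-200 status surfaces as this
+// operation's Default type, which implements error, so the status code and
+// models.ErrorModel payload can be recovered with a type assertion:
+//
+//	_, err := cfg.FindConfigTruncateRequestTimeoutInMs(NewFindConfigTruncateRequestTimeoutInMsParams())
+//	if d, ok := err.(*FindConfigTruncateRequestTimeoutInMsDefault); ok {
+//		log.Printf("HTTP %d: %s", d.Code(), d.GetPayload().Message)
+//	}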
+func NewFindConfigTruncateRequestTimeoutInMsParams() *FindConfigTruncateRequestTimeoutInMsParams { + + return &FindConfigTruncateRequestTimeoutInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigTruncateRequestTimeoutInMsParamsWithTimeout creates a new FindConfigTruncateRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigTruncateRequestTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigTruncateRequestTimeoutInMsParams { + + return &FindConfigTruncateRequestTimeoutInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigTruncateRequestTimeoutInMsParamsWithContext creates a new FindConfigTruncateRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigTruncateRequestTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigTruncateRequestTimeoutInMsParams { + + return &FindConfigTruncateRequestTimeoutInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigTruncateRequestTimeoutInMsParamsWithHTTPClient creates a new FindConfigTruncateRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigTruncateRequestTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigTruncateRequestTimeoutInMsParams { + + return &FindConfigTruncateRequestTimeoutInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigTruncateRequestTimeoutInMsParams contains all the parameters to send to the API endpoint +for the find config truncate request timeout in ms operation typically these are written to a http.Request +*/ +type FindConfigTruncateRequestTimeoutInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config truncate request timeout in ms params +func (o *FindConfigTruncateRequestTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigTruncateRequestTimeoutInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config truncate request timeout in ms params +func (o *FindConfigTruncateRequestTimeoutInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config truncate request timeout in ms params +func (o *FindConfigTruncateRequestTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigTruncateRequestTimeoutInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config truncate request timeout in ms params +func (o *FindConfigTruncateRequestTimeoutInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config truncate request timeout in ms params +func (o *FindConfigTruncateRequestTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigTruncateRequestTimeoutInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config truncate request timeout in ms params +func (o *FindConfigTruncateRequestTimeoutInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigTruncateRequestTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if 
len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_truncate_request_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_truncate_request_timeout_in_ms_responses.go new file mode 100644 index 00000000000..b26a7938e69 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_truncate_request_timeout_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigTruncateRequestTimeoutInMsReader is a Reader for the FindConfigTruncateRequestTimeoutInMs structure. +type FindConfigTruncateRequestTimeoutInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigTruncateRequestTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigTruncateRequestTimeoutInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigTruncateRequestTimeoutInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigTruncateRequestTimeoutInMsOK creates a FindConfigTruncateRequestTimeoutInMsOK with default headers values +func NewFindConfigTruncateRequestTimeoutInMsOK() *FindConfigTruncateRequestTimeoutInMsOK { + return &FindConfigTruncateRequestTimeoutInMsOK{} +} + +/* +FindConfigTruncateRequestTimeoutInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigTruncateRequestTimeoutInMsOK struct { + Payload int64 +} + +func (o *FindConfigTruncateRequestTimeoutInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigTruncateRequestTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigTruncateRequestTimeoutInMsDefault creates a FindConfigTruncateRequestTimeoutInMsDefault with default headers values +func NewFindConfigTruncateRequestTimeoutInMsDefault(code int) *FindConfigTruncateRequestTimeoutInMsDefault { + return &FindConfigTruncateRequestTimeoutInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigTruncateRequestTimeoutInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigTruncateRequestTimeoutInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config truncate request timeout in ms default response +func (o *FindConfigTruncateRequestTimeoutInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigTruncateRequestTimeoutInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigTruncateRequestTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigTruncateRequestTimeoutInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_uuid_sstable_identifiers_enabled_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_uuid_sstable_identifiers_enabled_parameters.go new file mode 100644 index 00000000000..f1eb2172a1b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_uuid_sstable_identifiers_enabled_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigUUIDSstableIdentifiersEnabledParams creates a new FindConfigUUIDSstableIdentifiersEnabledParams object +// with the default values initialized. 
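+//
+// The With* setters return the receiver, so params chain fluently
+// (illustrative sketch; customClient stands for any *http.Client you supply):
+//
+//	params := NewFindConfigUUIDSstableIdentifiersEnabledParams().
+//		WithTimeout(15 * time.Second).
+//		WithHTTPClient(customClient)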
+func NewFindConfigUUIDSstableIdentifiersEnabledParams() *FindConfigUUIDSstableIdentifiersEnabledParams { + + return &FindConfigUUIDSstableIdentifiersEnabledParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigUUIDSstableIdentifiersEnabledParamsWithTimeout creates a new FindConfigUUIDSstableIdentifiersEnabledParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigUUIDSstableIdentifiersEnabledParamsWithTimeout(timeout time.Duration) *FindConfigUUIDSstableIdentifiersEnabledParams { + + return &FindConfigUUIDSstableIdentifiersEnabledParams{ + + timeout: timeout, + } +} + +// NewFindConfigUUIDSstableIdentifiersEnabledParamsWithContext creates a new FindConfigUUIDSstableIdentifiersEnabledParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigUUIDSstableIdentifiersEnabledParamsWithContext(ctx context.Context) *FindConfigUUIDSstableIdentifiersEnabledParams { + + return &FindConfigUUIDSstableIdentifiersEnabledParams{ + + Context: ctx, + } +} + +// NewFindConfigUUIDSstableIdentifiersEnabledParamsWithHTTPClient creates a new FindConfigUUIDSstableIdentifiersEnabledParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigUUIDSstableIdentifiersEnabledParamsWithHTTPClient(client *http.Client) *FindConfigUUIDSstableIdentifiersEnabledParams { + + return &FindConfigUUIDSstableIdentifiersEnabledParams{ + HTTPClient: client, + } +} + +/* +FindConfigUUIDSstableIdentifiersEnabledParams contains all the parameters to send to the API endpoint +for the find config uuid sstable identifiers enabled operation typically these are written to a http.Request +*/ +type FindConfigUUIDSstableIdentifiersEnabledParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config uuid sstable identifiers enabled params +func (o *FindConfigUUIDSstableIdentifiersEnabledParams) WithTimeout(timeout time.Duration) *FindConfigUUIDSstableIdentifiersEnabledParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config uuid sstable identifiers enabled params +func (o *FindConfigUUIDSstableIdentifiersEnabledParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config uuid sstable identifiers enabled params +func (o *FindConfigUUIDSstableIdentifiersEnabledParams) WithContext(ctx context.Context) *FindConfigUUIDSstableIdentifiersEnabledParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config uuid sstable identifiers enabled params +func (o *FindConfigUUIDSstableIdentifiersEnabledParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config uuid sstable identifiers enabled params +func (o *FindConfigUUIDSstableIdentifiersEnabledParams) WithHTTPClient(client *http.Client) *FindConfigUUIDSstableIdentifiersEnabledParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config uuid sstable identifiers enabled params +func (o *FindConfigUUIDSstableIdentifiersEnabledParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigUUIDSstableIdentifiersEnabledParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) 
error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_uuid_sstable_identifiers_enabled_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_uuid_sstable_identifiers_enabled_responses.go new file mode 100644 index 00000000000..86ddd1a202b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_uuid_sstable_identifiers_enabled_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigUUIDSstableIdentifiersEnabledReader is a Reader for the FindConfigUUIDSstableIdentifiersEnabled structure. +type FindConfigUUIDSstableIdentifiersEnabledReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigUUIDSstableIdentifiersEnabledReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigUUIDSstableIdentifiersEnabledOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigUUIDSstableIdentifiersEnabledDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigUUIDSstableIdentifiersEnabledOK creates a FindConfigUUIDSstableIdentifiersEnabledOK with default headers values +func NewFindConfigUUIDSstableIdentifiersEnabledOK() *FindConfigUUIDSstableIdentifiersEnabledOK { + return &FindConfigUUIDSstableIdentifiersEnabledOK{} +} + +/* +FindConfigUUIDSstableIdentifiersEnabledOK handles this case with default header values. + +Config value +*/ +type FindConfigUUIDSstableIdentifiersEnabledOK struct { + Payload bool +} + +func (o *FindConfigUUIDSstableIdentifiersEnabledOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigUUIDSstableIdentifiersEnabledOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigUUIDSstableIdentifiersEnabledDefault creates a FindConfigUUIDSstableIdentifiersEnabledDefault with default headers values +func NewFindConfigUUIDSstableIdentifiersEnabledDefault(code int) *FindConfigUUIDSstableIdentifiersEnabledDefault { + return &FindConfigUUIDSstableIdentifiersEnabledDefault{ + _statusCode: code, + } +} + +/* +FindConfigUUIDSstableIdentifiersEnabledDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigUUIDSstableIdentifiersEnabledDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config uuid sstable identifiers enabled default response +func (o *FindConfigUUIDSstableIdentifiersEnabledDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigUUIDSstableIdentifiersEnabledDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigUUIDSstableIdentifiersEnabledDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigUUIDSstableIdentifiersEnabledDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_building_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_building_parameters.go new file mode 100644 index 00000000000..d07e30d3830 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_building_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigViewBuildingParams creates a new FindConfigViewBuildingParams object +// with the default values initialized. 
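+//
+// Dispatch sketch (illustrative): the package's Reader maps HTTP 200 to the
+// OK type and every other status to the Default type, which callers receive
+// as the returned error for non-2xx statuses:
+//
+//	resp, err := cfg.FindConfigViewBuilding(NewFindConfigViewBuildingParams())
+//	// resp: *FindConfigViewBuildingOK (bool payload) on HTTP 200
+//	// err:  *FindConfigViewBuildingDefault for non-2xx statuses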
+func NewFindConfigViewBuildingParams() *FindConfigViewBuildingParams { + + return &FindConfigViewBuildingParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigViewBuildingParamsWithTimeout creates a new FindConfigViewBuildingParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigViewBuildingParamsWithTimeout(timeout time.Duration) *FindConfigViewBuildingParams { + + return &FindConfigViewBuildingParams{ + + timeout: timeout, + } +} + +// NewFindConfigViewBuildingParamsWithContext creates a new FindConfigViewBuildingParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigViewBuildingParamsWithContext(ctx context.Context) *FindConfigViewBuildingParams { + + return &FindConfigViewBuildingParams{ + + Context: ctx, + } +} + +// NewFindConfigViewBuildingParamsWithHTTPClient creates a new FindConfigViewBuildingParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigViewBuildingParamsWithHTTPClient(client *http.Client) *FindConfigViewBuildingParams { + + return &FindConfigViewBuildingParams{ + HTTPClient: client, + } +} + +/* +FindConfigViewBuildingParams contains all the parameters to send to the API endpoint +for the find config view building operation typically these are written to a http.Request +*/ +type FindConfigViewBuildingParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config view building params +func (o *FindConfigViewBuildingParams) WithTimeout(timeout time.Duration) *FindConfigViewBuildingParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config view building params +func (o *FindConfigViewBuildingParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config view building params +func (o *FindConfigViewBuildingParams) WithContext(ctx context.Context) *FindConfigViewBuildingParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config view building params +func (o *FindConfigViewBuildingParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config view building params +func (o *FindConfigViewBuildingParams) WithHTTPClient(client *http.Client) *FindConfigViewBuildingParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config view building params +func (o *FindConfigViewBuildingParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigViewBuildingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_building_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_building_responses.go new file mode 100644 index 00000000000..d5959f3dcb4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_building_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigViewBuildingReader is a Reader for the FindConfigViewBuilding structure. +type FindConfigViewBuildingReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigViewBuildingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigViewBuildingOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigViewBuildingDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigViewBuildingOK creates a FindConfigViewBuildingOK with default headers values +func NewFindConfigViewBuildingOK() *FindConfigViewBuildingOK { + return &FindConfigViewBuildingOK{} +} + +/* +FindConfigViewBuildingOK handles this case with default header values. + +Config value +*/ +type FindConfigViewBuildingOK struct { + Payload bool +} + +func (o *FindConfigViewBuildingOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigViewBuildingOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigViewBuildingDefault creates a FindConfigViewBuildingDefault with default headers values +func NewFindConfigViewBuildingDefault(code int) *FindConfigViewBuildingDefault { + return &FindConfigViewBuildingDefault{ + _statusCode: code, + } +} + +/* +FindConfigViewBuildingDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigViewBuildingDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config view building default response +func (o *FindConfigViewBuildingDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigViewBuildingDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigViewBuildingDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigViewBuildingDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_hints_directory_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_hints_directory_parameters.go new file mode 100644 index 00000000000..2c93737f8e2 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_hints_directory_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigViewHintsDirectoryParams creates a new FindConfigViewHintsDirectoryParams object +// with the default values initialized. 
+func NewFindConfigViewHintsDirectoryParams() *FindConfigViewHintsDirectoryParams { + + return &FindConfigViewHintsDirectoryParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigViewHintsDirectoryParamsWithTimeout creates a new FindConfigViewHintsDirectoryParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigViewHintsDirectoryParamsWithTimeout(timeout time.Duration) *FindConfigViewHintsDirectoryParams { + + return &FindConfigViewHintsDirectoryParams{ + + timeout: timeout, + } +} + +// NewFindConfigViewHintsDirectoryParamsWithContext creates a new FindConfigViewHintsDirectoryParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigViewHintsDirectoryParamsWithContext(ctx context.Context) *FindConfigViewHintsDirectoryParams { + + return &FindConfigViewHintsDirectoryParams{ + + Context: ctx, + } +} + +// NewFindConfigViewHintsDirectoryParamsWithHTTPClient creates a new FindConfigViewHintsDirectoryParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigViewHintsDirectoryParamsWithHTTPClient(client *http.Client) *FindConfigViewHintsDirectoryParams { + + return &FindConfigViewHintsDirectoryParams{ + HTTPClient: client, + } +} + +/* +FindConfigViewHintsDirectoryParams contains all the parameters to send to the API endpoint +for the find config view hints directory operation typically these are written to a http.Request +*/ +type FindConfigViewHintsDirectoryParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config view hints directory params +func (o *FindConfigViewHintsDirectoryParams) WithTimeout(timeout time.Duration) *FindConfigViewHintsDirectoryParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config view hints directory params +func (o *FindConfigViewHintsDirectoryParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config view hints directory params +func (o *FindConfigViewHintsDirectoryParams) WithContext(ctx context.Context) *FindConfigViewHintsDirectoryParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config view hints directory params +func (o *FindConfigViewHintsDirectoryParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config view hints directory params +func (o *FindConfigViewHintsDirectoryParams) WithHTTPClient(client *http.Client) *FindConfigViewHintsDirectoryParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config view hints directory params +func (o *FindConfigViewHintsDirectoryParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigViewHintsDirectoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_hints_directory_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_hints_directory_responses.go new file mode 100644 index 00000000000..1251a591d96 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_view_hints_directory_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigViewHintsDirectoryReader is a Reader for the FindConfigViewHintsDirectory structure. +type FindConfigViewHintsDirectoryReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigViewHintsDirectoryReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigViewHintsDirectoryOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigViewHintsDirectoryDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigViewHintsDirectoryOK creates a FindConfigViewHintsDirectoryOK with default headers values +func NewFindConfigViewHintsDirectoryOK() *FindConfigViewHintsDirectoryOK { + return &FindConfigViewHintsDirectoryOK{} +} + +/* +FindConfigViewHintsDirectoryOK handles this case with default header values. + +Config value +*/ +type FindConfigViewHintsDirectoryOK struct { + Payload string +} + +func (o *FindConfigViewHintsDirectoryOK) GetPayload() string { + return o.Payload +} + +func (o *FindConfigViewHintsDirectoryOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigViewHintsDirectoryDefault creates a FindConfigViewHintsDirectoryDefault with default headers values +func NewFindConfigViewHintsDirectoryDefault(code int) *FindConfigViewHintsDirectoryDefault { + return &FindConfigViewHintsDirectoryDefault{ + _statusCode: code, + } +} + +/* +FindConfigViewHintsDirectoryDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigViewHintsDirectoryDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config view hints directory default response +func (o *FindConfigViewHintsDirectoryDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigViewHintsDirectoryDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigViewHintsDirectoryDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigViewHintsDirectoryDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_virtual_dirty_soft_limit_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_virtual_dirty_soft_limit_parameters.go new file mode 100644 index 00000000000..e2e87331838 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_virtual_dirty_soft_limit_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigVirtualDirtySoftLimitParams creates a new FindConfigVirtualDirtySoftLimitParams object +// with the default values initialized. 
+func NewFindConfigVirtualDirtySoftLimitParams() *FindConfigVirtualDirtySoftLimitParams { + + return &FindConfigVirtualDirtySoftLimitParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigVirtualDirtySoftLimitParamsWithTimeout creates a new FindConfigVirtualDirtySoftLimitParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigVirtualDirtySoftLimitParamsWithTimeout(timeout time.Duration) *FindConfigVirtualDirtySoftLimitParams { + + return &FindConfigVirtualDirtySoftLimitParams{ + + timeout: timeout, + } +} + +// NewFindConfigVirtualDirtySoftLimitParamsWithContext creates a new FindConfigVirtualDirtySoftLimitParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigVirtualDirtySoftLimitParamsWithContext(ctx context.Context) *FindConfigVirtualDirtySoftLimitParams { + + return &FindConfigVirtualDirtySoftLimitParams{ + + Context: ctx, + } +} + +// NewFindConfigVirtualDirtySoftLimitParamsWithHTTPClient creates a new FindConfigVirtualDirtySoftLimitParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigVirtualDirtySoftLimitParamsWithHTTPClient(client *http.Client) *FindConfigVirtualDirtySoftLimitParams { + + return &FindConfigVirtualDirtySoftLimitParams{ + HTTPClient: client, + } +} + +/* +FindConfigVirtualDirtySoftLimitParams contains all the parameters to send to the API endpoint +for the find config virtual dirty soft limit operation typically these are written to a http.Request +*/ +type FindConfigVirtualDirtySoftLimitParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config virtual dirty soft limit params +func (o *FindConfigVirtualDirtySoftLimitParams) WithTimeout(timeout time.Duration) *FindConfigVirtualDirtySoftLimitParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config virtual dirty soft limit params +func (o *FindConfigVirtualDirtySoftLimitParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config virtual dirty soft limit params +func (o *FindConfigVirtualDirtySoftLimitParams) WithContext(ctx context.Context) *FindConfigVirtualDirtySoftLimitParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config virtual dirty soft limit params +func (o *FindConfigVirtualDirtySoftLimitParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config virtual dirty soft limit params +func (o *FindConfigVirtualDirtySoftLimitParams) WithHTTPClient(client *http.Client) *FindConfigVirtualDirtySoftLimitParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config virtual dirty soft limit params +func (o *FindConfigVirtualDirtySoftLimitParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigVirtualDirtySoftLimitParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_virtual_dirty_soft_limit_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_virtual_dirty_soft_limit_responses.go new file mode 100644 index 00000000000..e72e94337ed --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_virtual_dirty_soft_limit_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigVirtualDirtySoftLimitReader is a Reader for the FindConfigVirtualDirtySoftLimit structure. +type FindConfigVirtualDirtySoftLimitReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigVirtualDirtySoftLimitReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigVirtualDirtySoftLimitOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigVirtualDirtySoftLimitDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigVirtualDirtySoftLimitOK creates a FindConfigVirtualDirtySoftLimitOK with default headers values +func NewFindConfigVirtualDirtySoftLimitOK() *FindConfigVirtualDirtySoftLimitOK { + return &FindConfigVirtualDirtySoftLimitOK{} +} + +/* +FindConfigVirtualDirtySoftLimitOK handles this case with default header values. + +Config value +*/ +type FindConfigVirtualDirtySoftLimitOK struct { + Payload float64 +} + +func (o *FindConfigVirtualDirtySoftLimitOK) GetPayload() float64 { + return o.Payload +} + +func (o *FindConfigVirtualDirtySoftLimitOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigVirtualDirtySoftLimitDefault creates a FindConfigVirtualDirtySoftLimitDefault with default headers values +func NewFindConfigVirtualDirtySoftLimitDefault(code int) *FindConfigVirtualDirtySoftLimitDefault { + return &FindConfigVirtualDirtySoftLimitDefault{ + _statusCode: code, + } +} + +/* +FindConfigVirtualDirtySoftLimitDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigVirtualDirtySoftLimitDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config virtual dirty soft limit default response +func (o *FindConfigVirtualDirtySoftLimitDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigVirtualDirtySoftLimitDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigVirtualDirtySoftLimitDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigVirtualDirtySoftLimitDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_volatile_system_keyspace_for_testing_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_volatile_system_keyspace_for_testing_parameters.go new file mode 100644 index 00000000000..e114f669d42 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_volatile_system_keyspace_for_testing_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigVolatileSystemKeyspaceForTestingParams creates a new FindConfigVolatileSystemKeyspaceForTestingParams object +// with the default values initialized. 
+func NewFindConfigVolatileSystemKeyspaceForTestingParams() *FindConfigVolatileSystemKeyspaceForTestingParams { + + return &FindConfigVolatileSystemKeyspaceForTestingParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigVolatileSystemKeyspaceForTestingParamsWithTimeout creates a new FindConfigVolatileSystemKeyspaceForTestingParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigVolatileSystemKeyspaceForTestingParamsWithTimeout(timeout time.Duration) *FindConfigVolatileSystemKeyspaceForTestingParams { + + return &FindConfigVolatileSystemKeyspaceForTestingParams{ + + timeout: timeout, + } +} + +// NewFindConfigVolatileSystemKeyspaceForTestingParamsWithContext creates a new FindConfigVolatileSystemKeyspaceForTestingParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigVolatileSystemKeyspaceForTestingParamsWithContext(ctx context.Context) *FindConfigVolatileSystemKeyspaceForTestingParams { + + return &FindConfigVolatileSystemKeyspaceForTestingParams{ + + Context: ctx, + } +} + +// NewFindConfigVolatileSystemKeyspaceForTestingParamsWithHTTPClient creates a new FindConfigVolatileSystemKeyspaceForTestingParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigVolatileSystemKeyspaceForTestingParamsWithHTTPClient(client *http.Client) *FindConfigVolatileSystemKeyspaceForTestingParams { + + return &FindConfigVolatileSystemKeyspaceForTestingParams{ + HTTPClient: client, + } +} + +/* +FindConfigVolatileSystemKeyspaceForTestingParams contains all the parameters to send to the API endpoint +for the find config volatile system keyspace for testing operation typically these are written to a http.Request +*/ +type FindConfigVolatileSystemKeyspaceForTestingParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config volatile system keyspace for testing params +func (o *FindConfigVolatileSystemKeyspaceForTestingParams) WithTimeout(timeout time.Duration) *FindConfigVolatileSystemKeyspaceForTestingParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config volatile system keyspace for testing params +func (o *FindConfigVolatileSystemKeyspaceForTestingParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config volatile system keyspace for testing params +func (o *FindConfigVolatileSystemKeyspaceForTestingParams) WithContext(ctx context.Context) *FindConfigVolatileSystemKeyspaceForTestingParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config volatile system keyspace for testing params +func (o *FindConfigVolatileSystemKeyspaceForTestingParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config volatile system keyspace for testing params +func (o *FindConfigVolatileSystemKeyspaceForTestingParams) WithHTTPClient(client *http.Client) *FindConfigVolatileSystemKeyspaceForTestingParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config volatile system keyspace for testing params +func (o *FindConfigVolatileSystemKeyspaceForTestingParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request 
+func (o *FindConfigVolatileSystemKeyspaceForTestingParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_volatile_system_keyspace_for_testing_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_volatile_system_keyspace_for_testing_responses.go new file mode 100644 index 00000000000..7a6f2ed540b --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_volatile_system_keyspace_for_testing_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigVolatileSystemKeyspaceForTestingReader is a Reader for the FindConfigVolatileSystemKeyspaceForTesting structure. +type FindConfigVolatileSystemKeyspaceForTestingReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigVolatileSystemKeyspaceForTestingReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigVolatileSystemKeyspaceForTestingOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigVolatileSystemKeyspaceForTestingDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigVolatileSystemKeyspaceForTestingOK creates a FindConfigVolatileSystemKeyspaceForTestingOK with default headers values +func NewFindConfigVolatileSystemKeyspaceForTestingOK() *FindConfigVolatileSystemKeyspaceForTestingOK { + return &FindConfigVolatileSystemKeyspaceForTestingOK{} +} + +/* +FindConfigVolatileSystemKeyspaceForTestingOK handles this case with default header values. + +Config value +*/ +type FindConfigVolatileSystemKeyspaceForTestingOK struct { + Payload bool +} + +func (o *FindConfigVolatileSystemKeyspaceForTestingOK) GetPayload() bool { + return o.Payload +} + +func (o *FindConfigVolatileSystemKeyspaceForTestingOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigVolatileSystemKeyspaceForTestingDefault creates a FindConfigVolatileSystemKeyspaceForTestingDefault with default headers values +func NewFindConfigVolatileSystemKeyspaceForTestingDefault(code int) *FindConfigVolatileSystemKeyspaceForTestingDefault { + return &FindConfigVolatileSystemKeyspaceForTestingDefault{ + _statusCode: code, + } +} + +/* +FindConfigVolatileSystemKeyspaceForTestingDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigVolatileSystemKeyspaceForTestingDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config volatile system keyspace for testing default response +func (o *FindConfigVolatileSystemKeyspaceForTestingDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigVolatileSystemKeyspaceForTestingDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigVolatileSystemKeyspaceForTestingDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigVolatileSystemKeyspaceForTestingDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_write_request_timeout_in_ms_parameters.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_write_request_timeout_in_ms_parameters.go new file mode 100644 index 00000000000..9eed34ac393 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_write_request_timeout_in_ms_parameters.go @@ -0,0 +1,113 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewFindConfigWriteRequestTimeoutInMsParams creates a new FindConfigWriteRequestTimeoutInMsParams object +// with the default values initialized. 
+func NewFindConfigWriteRequestTimeoutInMsParams() *FindConfigWriteRequestTimeoutInMsParams { + + return &FindConfigWriteRequestTimeoutInMsParams{ + + timeout: cr.DefaultTimeout, + } +} + +// NewFindConfigWriteRequestTimeoutInMsParamsWithTimeout creates a new FindConfigWriteRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a timeout on a request +func NewFindConfigWriteRequestTimeoutInMsParamsWithTimeout(timeout time.Duration) *FindConfigWriteRequestTimeoutInMsParams { + + return &FindConfigWriteRequestTimeoutInMsParams{ + + timeout: timeout, + } +} + +// NewFindConfigWriteRequestTimeoutInMsParamsWithContext creates a new FindConfigWriteRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a context for a request +func NewFindConfigWriteRequestTimeoutInMsParamsWithContext(ctx context.Context) *FindConfigWriteRequestTimeoutInMsParams { + + return &FindConfigWriteRequestTimeoutInMsParams{ + + Context: ctx, + } +} + +// NewFindConfigWriteRequestTimeoutInMsParamsWithHTTPClient creates a new FindConfigWriteRequestTimeoutInMsParams object +// with the default values initialized, and the ability to set a custom HTTPClient for a request +func NewFindConfigWriteRequestTimeoutInMsParamsWithHTTPClient(client *http.Client) *FindConfigWriteRequestTimeoutInMsParams { + + return &FindConfigWriteRequestTimeoutInMsParams{ + HTTPClient: client, + } +} + +/* +FindConfigWriteRequestTimeoutInMsParams contains all the parameters to send to the API endpoint +for the find config write request timeout in ms operation typically these are written to a http.Request +*/ +type FindConfigWriteRequestTimeoutInMsParams struct { + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithTimeout adds the timeout to the find config write request timeout in ms params +func (o *FindConfigWriteRequestTimeoutInMsParams) WithTimeout(timeout time.Duration) *FindConfigWriteRequestTimeoutInMsParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the find config write request timeout in ms params +func (o *FindConfigWriteRequestTimeoutInMsParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the find config write request timeout in ms params +func (o *FindConfigWriteRequestTimeoutInMsParams) WithContext(ctx context.Context) *FindConfigWriteRequestTimeoutInMsParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the find config write request timeout in ms params +func (o *FindConfigWriteRequestTimeoutInMsParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the find config write request timeout in ms params +func (o *FindConfigWriteRequestTimeoutInMsParams) WithHTTPClient(client *http.Client) *FindConfigWriteRequestTimeoutInMsParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the find config write request timeout in ms params +func (o *FindConfigWriteRequestTimeoutInMsParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WriteToRequest writes these params to a swagger request +func (o *FindConfigWriteRequestTimeoutInMsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_write_request_timeout_in_ms_responses.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_write_request_timeout_in_ms_responses.go new file mode 100644 index 00000000000..72dcdaac2c4 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config/find_config_write_request_timeout_in_ms_responses.go @@ -0,0 +1,114 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package config + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + "strings" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models" +) + +// FindConfigWriteRequestTimeoutInMsReader is a Reader for the FindConfigWriteRequestTimeoutInMs structure. +type FindConfigWriteRequestTimeoutInMsReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. +func (o *FindConfigWriteRequestTimeoutInMsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewFindConfigWriteRequestTimeoutInMsOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + default: + result := NewFindConfigWriteRequestTimeoutInMsDefault(response.Code()) + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + if response.Code()/100 == 2 { + return result, nil + } + return nil, result + } +} + +// NewFindConfigWriteRequestTimeoutInMsOK creates a FindConfigWriteRequestTimeoutInMsOK with default headers values +func NewFindConfigWriteRequestTimeoutInMsOK() *FindConfigWriteRequestTimeoutInMsOK { + return &FindConfigWriteRequestTimeoutInMsOK{} +} + +/* +FindConfigWriteRequestTimeoutInMsOK handles this case with default header values. + +Config value +*/ +type FindConfigWriteRequestTimeoutInMsOK struct { + Payload int64 +} + +func (o *FindConfigWriteRequestTimeoutInMsOK) GetPayload() int64 { + return o.Payload +} + +func (o *FindConfigWriteRequestTimeoutInMsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewFindConfigWriteRequestTimeoutInMsDefault creates a FindConfigWriteRequestTimeoutInMsDefault with default headers values +func NewFindConfigWriteRequestTimeoutInMsDefault(code int) *FindConfigWriteRequestTimeoutInMsDefault { + return &FindConfigWriteRequestTimeoutInMsDefault{ + _statusCode: code, + } +} + +/* +FindConfigWriteRequestTimeoutInMsDefault handles this case with default header values. 
+ +unexpected error +*/ +type FindConfigWriteRequestTimeoutInMsDefault struct { + _statusCode int + + Payload *models.ErrorModel +} + +// Code gets the status code for the find config write request timeout in ms default response +func (o *FindConfigWriteRequestTimeoutInMsDefault) Code() int { + return o._statusCode +} + +func (o *FindConfigWriteRequestTimeoutInMsDefault) GetPayload() *models.ErrorModel { + return o.Payload +} + +func (o *FindConfigWriteRequestTimeoutInMsDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(models.ErrorModel) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +func (o *FindConfigWriteRequestTimeoutInMsDefault) Error() string { + return fmt.Sprintf("agent [HTTP %d] %s", o._statusCode, strings.TrimRight(o.Payload.Message, ".")) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/scylla_v2_client.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/scylla_v2_client.go new file mode 100644 index 00000000000..54bf9ff84e6 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/scylla_v2_client.go @@ -0,0 +1,112 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package client + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/runtime" + httptransport "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config" +) + +// Default scylla v2 HTTP client. +var Default = NewHTTPClient(nil) + +const ( + // DefaultHost is the default Host + // found in Meta (info) section of spec file + DefaultHost string = "scylla-manager.magic.host" + // DefaultBasePath is the default BasePath + // found in Meta (info) section of spec file + DefaultBasePath string = "/v2" +) + +// DefaultSchemes are the default schemes found in Meta (info) section of spec file +var DefaultSchemes = []string{"http"} + +// NewHTTPClient creates a new scylla v2 HTTP client. +func NewHTTPClient(formats strfmt.Registry) *ScyllaV2 { + return NewHTTPClientWithConfig(formats, nil) +} + +// NewHTTPClientWithConfig creates a new scylla v2 HTTP client, +// using a customizable transport config. +func NewHTTPClientWithConfig(formats strfmt.Registry, cfg *TransportConfig) *ScyllaV2 { + // ensure nullable parameters have default + if cfg == nil { + cfg = DefaultTransportConfig() + } + + // create transport and client + transport := httptransport.New(cfg.Host, cfg.BasePath, cfg.Schemes) + return New(transport, formats) +} + +// New creates a new scylla v2 client +func New(transport runtime.ClientTransport, formats strfmt.Registry) *ScyllaV2 { + // ensure nullable parameters have default + if formats == nil { + formats = strfmt.Default + } + + cli := new(ScyllaV2) + cli.Transport = transport + cli.Config = config.New(transport, formats) + return cli +} + +// DefaultTransportConfig creates a TransportConfig with the +// default settings taken from the meta section of the spec file. 
+func DefaultTransportConfig() *TransportConfig { + return &TransportConfig{ + Host: DefaultHost, + BasePath: DefaultBasePath, + Schemes: DefaultSchemes, + } +} + +// TransportConfig contains the transport related info, +// found in the meta section of the spec file. +type TransportConfig struct { + Host string + BasePath string + Schemes []string +} + +// WithHost overrides the default host, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithHost(host string) *TransportConfig { + cfg.Host = host + return cfg +} + +// WithBasePath overrides the default basePath, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithBasePath(basePath string) *TransportConfig { + cfg.BasePath = basePath + return cfg +} + +// WithSchemes overrides the default schemes, +// provided by the meta section of the spec file. +func (cfg *TransportConfig) WithSchemes(schemes []string) *TransportConfig { + cfg.Schemes = schemes + return cfg +} + +// ScyllaV2 is a client for scylla v2 +type ScyllaV2 struct { + Config config.ClientService + + Transport runtime.ClientTransport +} + +// SetTransport changes the transport on the client and all its subresources +func (c *ScyllaV2) SetTransport(transport runtime.ClientTransport) { + c.Transport = transport + c.Config.SetTransport(transport) +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models/client_encryption_options.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models/client_encryption_options.go new file mode 100644 index 00000000000..5b6dc3bc8a3 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models/client_encryption_options.go @@ -0,0 +1,52 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ClientEncryptionOptions client encryption options +// +// swagger:model ClientEncryptionOptions +type ClientEncryptionOptions struct { + + // certificate + Certificate string `json:"certificate,omitempty"` + + // enabled + Enabled string `json:"enabled,omitempty"` + + // keyfile + Keyfile string `json:"keyfile,omitempty"` + + // require client auth + RequireClientAuth string `json:"require_client_auth,omitempty"` +} + +// Validate validates this client encryption options +func (m *ClientEncryptionOptions) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ClientEncryptionOptions) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ClientEncryptionOptions) UnmarshalBinary(b []byte) error { + var res ClientEncryptionOptions + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models/error_model.go b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models/error_model.go new file mode 100644 index 00000000000..9887571ce72 --- /dev/null +++ b/vendor/github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models/error_model.go @@ -0,0 +1,46 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package models + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ErrorModel error model +// +// swagger:model ErrorModel +type ErrorModel struct { + + // code + Code int64 `json:"code,omitempty"` + + // message + Message string `json:"message,omitempty"` +} + +// Validate validates this error model +func (m *ErrorModel) Validate(formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ErrorModel) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ErrorModel) UnmarshalBinary(b []byte) error { + var res ErrorModel + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go index 4f7b42488aa..915d5090dde 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -34,8 +34,6 @@ const ( DiffInsert Operation = 1 // DiffEqual item represents an equal diff. DiffEqual Operation = 0 - //IndexSeparator is used to seperate the array indexes in an index string - IndexSeparator = "," ) // Diff represents one diff operation @@ -406,14 +404,11 @@ func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { hydrated := make([]Diff, 0, len(diffs)) for _, aDiff := range diffs { - chars := strings.Split(aDiff.Text, IndexSeparator) - text := make([]string, len(chars)) + runes := []rune(aDiff.Text) + text := make([]string, len(runes)) - for i, r := range chars { - i1, err := strconv.Atoi(r) - if err == nil { - text[i] = lineArray[i1] - } + for i, r := range runes { + text[i] = lineArray[runeToInt(r)] } aDiff.Text = strings.Join(text, "") diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go index 44c43595478..eb727bb5948 100644 --- a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -9,11 +9,16 @@ package diffmatchpatch import ( - "strconv" + "fmt" "strings" "unicode/utf8" ) +const UNICODE_INVALID_RANGE_START = 0xD800 +const UNICODE_INVALID_RANGE_END = 0xDFFF +const UNICODE_INVALID_RANGE_DELTA = UNICODE_INVALID_RANGE_END - UNICODE_INVALID_RANGE_START + 1 +const UNICODE_RANGE_MAX = 0x10FFFF + // unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. // In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. var unescaper = strings.NewReplacer( @@ -93,14 +98,93 @@ func intArrayToString(ns []uint32) string { return "" } - indexSeparator := IndexSeparator[0] - - // Appr. 3 chars per num plus the comma. 
-	b := []byte{}
+	b := []rune{}
 	for _, n := range ns {
-		b = strconv.AppendInt(b, int64(n), 10)
-		b = append(b, indexSeparator)
+		b = append(b, intToRune(n))
 	}
-	b = b[:len(b)-1]
 
 	return string(b)
 }
+
+// These constants define the number of bits representable
+// in 1,2,3,4 byte utf8 sequences, respectively.
+const ONE_BYTE_BITS = 7
+const TWO_BYTE_BITS = 11
+const THREE_BYTE_BITS = 16
+const FOUR_BYTE_BITS = 21
+
+// Helper for getting a sequence of bits from an integer.
+func getBits(i uint32, cnt byte, from byte) byte {
+	return byte((i >> from) & ((1 << cnt) - 1))
+}
+
+// Converts an integer in the range 0~1112060 into a rune.
+// Based on the ranges table in https://en.wikipedia.org/wiki/UTF-8
+func intToRune(i uint32) rune {
+	if i < (1 << ONE_BYTE_BITS) {
+		return rune(i)
+	}
+
+	if i < (1 << TWO_BYTE_BITS) {
+		r, size := utf8.DecodeRune([]byte{0b11000000 | getBits(i, 5, 6), 0b10000000 | getBits(i, 6, 0)})
+		if size != 2 || r == utf8.RuneError {
+			panic(fmt.Sprintf("Error encoding an int %d with size 2, got rune %v and size %d", i, r, size))
+		}
+		return r
+	}
+
+	// Last -3 here needed because for some reason 3rd to last codepoint 65533 in this range
+	// was returning utf8.RuneError during encoding.
+	if i < ((1 << THREE_BYTE_BITS) - UNICODE_INVALID_RANGE_DELTA - 3) {
+		if i >= UNICODE_INVALID_RANGE_START {
+			i += UNICODE_INVALID_RANGE_DELTA
+		}
+
+		r, size := utf8.DecodeRune([]byte{0b11100000 | getBits(i, 4, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)})
+		if size != 3 || r == utf8.RuneError {
+			panic(fmt.Sprintf("Error encoding an int %d with size 3, got rune %v and size %d", i, r, size))
+		}
+		return r
+	}
+
+	if i < (1 << FOUR_BYTE_BITS) {
+		// Every value reaching this branch already sits above the surrogate
+		// block, so the shift is unconditional; runeToInt undoes it for
+		// size-4 runes below.
+		i += UNICODE_INVALID_RANGE_DELTA + 3
+
+		r, size := utf8.DecodeRune([]byte{0b11110000 | getBits(i, 3, 18), 0b10000000 | getBits(i, 6, 12), 0b10000000 | getBits(i, 6, 6), 0b10000000 | getBits(i, 6, 0)})
+		if size != 4 || r == utf8.RuneError {
+			panic(fmt.Sprintf("Error encoding an int %d with size 4, got rune %v and size %d", i, r, size))
+		}
+		return r
+	}
+
+	panic(fmt.Sprintf("Unable to encode int %d as a rune", i))
+}
+
+// Decodes a rune produced by intToRune back into the integer it encodes,
+// undoing the surrogate-range shifts applied during encoding.
+func runeToInt(r rune) uint32 {
+	i := uint32(r)
+	if i < (1 << ONE_BYTE_BITS) {
+		return i
+	}
+
+	bytes := make([]byte, 4)
+	size := utf8.EncodeRune(bytes, r)
+
+	if size == 2 {
+		return uint32(bytes[0]&0b11111)<<6 | uint32(bytes[1]&0b111111)
+	}
+
+	if size == 3 {
+		result := uint32(bytes[0]&0b1111)<<12 | uint32(bytes[1]&0b111111)<<6 | uint32(bytes[2]&0b111111)
+
+		if result >= UNICODE_INVALID_RANGE_END {
+			return result - UNICODE_INVALID_RANGE_DELTA
+		}
+
+		return result
+	}
+
+	if size == 4 {
+		result := uint32(bytes[0]&0b111)<<18 | uint32(bytes[1]&0b111111)<<12 | uint32(bytes[2]&0b111111)<<6 | uint32(bytes[3]&0b111111)
+		return result - UNICODE_INVALID_RANGE_DELTA - 3
+	}
+
+	panic(fmt.Sprintf("Unexpected state decoding rune=%v size=%d", r, size))
+}
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
new file mode 100644
index 00000000000..1fb13abebe7
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -0,0 +1,4 @@
+logrus
+vendor
+
+.idea/
diff --git a/vendor/github.com/sirupsen/logrus/.golangci.yml b/vendor/github.com/sirupsen/logrus/.golangci.yml
new file mode 100644
index 00000000000..65dc2850377
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.golangci.yml
@@ -0,0 +1,40 @@
+run:
+  # do not run on test files yet
+  tests: false
+
+# all available settings of specific linters
+linters-settings:
+  errcheck:
+    # report about not checking of errors in type assertions: `a := b.(MyStruct)`;
+    # default is false: such cases aren't reported by default.
+    check-blank: false
+
+  lll:
+    line-length: 100
+    tab-width: 4
+
+  prealloc:
+    simple: false
+    range-loops: false
+    for-loops: false
+
+  whitespace:
+    multi-if: false   # Enforces newlines (or comments) after every multi-line if statement
+    multi-func: false # Enforces newlines (or comments) after every multi-line function signature
+
+linters:
+  enable:
+    - megacheck
+    - govet
+  disable:
+    - maligned
+    - prealloc
+  disable-all: false
+  presets:
+    - bugs
+    - unused
+  fast: false
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
new file mode 100644
index 00000000000..c1dbd5a3a3e
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+go_import_path: github.com/sirupsen/logrus
+git:
+  depth: 1
+env:
+  - GO111MODULE=on
+go: 1.15.x
+os: linux
+install:
+  - ./travis/install.sh
+script:
+  - cd ci
+  - go run mage.go -v -w ../ crossBuild
+  - go run mage.go -v -w ../ lint
+  - go run mage.go -v -w ../ test
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 00000000000..7567f612898
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,259 @@
+# 1.8.1
+Code quality:
+  * move magefile into its own subdir/submodule to remove the magefile dependency on logrus consumers
+  * improve timestamp format documentation
+
+Fixes:
+  * fix race condition on logger hooks
+
+
+# 1.8.0
+
+Correct versioning number replacing v1.7.1.
+
+# 1.7.1
+
+Beware: this release has introduced a new public API and its semver is therefore incorrect.
+
+Code quality:
+  * use go 1.15 in travis
+  * use magefile as task runner
+
+Fixes:
+  * small fixes for the new go 1.13 error formatting system
+  * fix for a long-standing race condition with mutating data hooks
+
+Features:
+  * build support for zos
+
+# 1.7.0
+Fixes:
+  * the dependency on a windows terminal library has been removed
+
+Features:
+  * a new buffer pool management API has been added
+  * a set of `Fn()` functions has been added
+
+# 1.6.0
+Fixes:
+  * end of line cleanup
+  * revert the entry concurrency bug fix which leads to deadlock under some circumstances
+  * update dependency on go-windows-terminal-sequences to fix a crash with go 1.14
+
+Features:
+  * add an option to the `TextFormatter` to completely disable field quoting
+
+# 1.5.0
+Code quality:
+  * add golangci linter run on travis
+
+Fixes:
+  * add mutex for hooks concurrent access on `Entry` data
+  * caller function field for go1.14
+  * fix build issue for gopherjs target
+
+Feature:
+  * add a hooks/writer sub-package whose goal is to split output on different streams depending on the trace level
+  * add a `DisableHTMLEscape` option in the `JSONFormatter`
+  * add `ForceQuote` and `PadLevelText` options in the `TextFormatter`
+
+# 1.4.2
+  * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+  * Enhance TextFormatter to not print caller information when it is empty (#944)
+  * Remove dependency on golang.org/x/crypto (#932, #943)
+
+Fixes:
+  * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+  * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+  * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter` (#909, #911)
+  * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g.
in hooks (#919).
+
+Fixes:
+  * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+  * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+  * Fix infinite recursion on unknown `Level.String()` (#907)
+  * Fix race condition in `getCaller` (#916).
+
+
+# 1.3.0
+This new release introduces:
+  * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+  * Building prometheus node_exporter on AIX (#840)
+  * Race condition in TextFormatter (#468)
+  * Travis CI import path (#868)
+  * Remove coloured output on Windows (#862)
+  * Pointer to func as field in JSONFormatter (#870)
+  * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+  * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+  * A new trace level named `Trace` whose level is below `Debug`
+  * A configurable exit function to be called upon a Fatal trace
+  * The `Level` object now implements the `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+  * fix the build break on Solaris
+  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+  * several fixes:
+    * a fix for a race condition on entry formatting
+    * proper cleanup of previously used entries before putting them back in the pool
+    * the extra new line at the end of messages in the text formatter has been removed
+  * a new global public API to check if a level is activated: IsLevelEnabled
+  * the following methods have been added to the Logger object
+    * IsLevelEnabled
+    * SetFormatter
+    * SetOutput
+    * ReplaceHooks
+  * introduction of go modules
+  * an indent configuration for the json formatter
+  * output colour support for windows
+  * the field sort function is now configurable for the text formatter
+  * CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter
+
+# 1.0.6
+
+This new release introduces:
+  * a new api WithTime which allows you to easily force the time of the log entry,
+    which is mostly useful for logger wrappers
+  * a fix reverting the immutability of the entry given as parameter to the hooks
+  * a new configuration field of the json formatter in order to put all the fields
+    in a nested dictionary
+  * a new SetOutput method in the Logger
+  * a new configuration of the text formatter to configure the name of the default keys
+  * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 
0.11.0 + +* performance: Use bufferpool to allocate (#370) +* terminal: terminal detection for app-engine (#343) +* feature: exit handler (#375) + +# 0.10.0 + +* feature: Add a test hook (#180) +* feature: `ParseLevel` is now case-insensitive (#326) +* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) +* performance: avoid re-allocations on `WithFields` (#335) + +# 0.9.0 + +* logrus/text_formatter: don't emit empty msg +* logrus/hooks/airbrake: move out of main repository +* logrus/hooks/sentry: move out of main repository +* logrus/hooks/papertrail: move out of main repository +* logrus/hooks/bugsnag: move out of main repository +* logrus/core: run tests with `-race` +* logrus/core: detect TTY based on `stderr` +* logrus/core: support `WithError` on logger +* logrus/core: Solaris support + +# 0.8.7 + +* logrus/core: fix possible race (#216) +* logrus/doc: small typo fixes and doc improvements + + +# 0.8.6 + +* hooks/raven: allow passing an initialized client + +# 0.8.5 + +* logrus/core: revert #208 + +# 0.8.4 + +* formatter/text: fix data race (#218) + +# 0.8.3 + +* logrus/core: fix entry log level (#208) +* logrus/core: improve performance of text formatter by 40% +* logrus/core: expose `LevelHooks` type +* logrus/core: add support for DragonflyBSD and NetBSD +* formatter/text: print structs more verbosely + +# 0.8.2 + +* logrus: fix more Fatal family functions + +# 0.8.1 + +* logrus: fix not exiting on `Fatalf` and `Fatalln` + +# 0.8.0 + +* logrus: defaults to stderr instead of stdout +* hooks/sentry: add special field for `*http.Request` +* formatter/text: ignore Windows for colors + +# 0.7.3 + +* formatter/\*: allow configuration of timestamp layout + +# 0.7.2 + +* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE new file mode 100644 index 00000000000..f090cb42f37 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Simon Eskildsen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md new file mode 100644 index 00000000000..d1d4a85fd75 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -0,0 +1,515 @@ +# Logrus :walrus: [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus) + +Logrus is a structured logger for Go (golang), completely API compatible with +the standard library logger. + +**Logrus is in maintenance-mode.** We will not be introducing new features. It's +simply too hard to do in a way that won't break many people's projects, which is +the last thing you want from your logging library (again...). + +This does not mean Logrus is dead. Logrus will continue to be maintained for +security, (backwards compatible) bug fixes, and performance (where we are +limited by the interface). + +I believe Logrus' biggest contribution is to have played a part in today's +widespread use of structured logging in Golang. There doesn't seem to be a +reason to do a major, breaking iteration into Logrus V2, since the fantastic Go +community has built those independently. Many excellent alternatives have sprung +up. Logrus would look like those, had it been re-designed with what we know +about structured logging in Go today. Check out, for example, +[Zerolog][zerolog], [Zap][zap], and [Apex][apex]. + +[zerolog]: https://github.com/rs/zerolog +[zap]: https://github.com/uber-go/zap +[apex]: https://github.com/apex/log + +**Seeing weird case-sensitive problems?** In the past it was possible to +import Logrus as both upper- and lower-case. Due to the Go package environment, +this caused issues in the community and we needed a standard. Some environments +experienced problems with the upper-case variant, so the lower-case was decided. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't should be changed. + +To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). +For an in-depth explanation of the casing issue, see [this +comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
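+ +For illustration only (a minimal, hypothetical snippet), a conforming import block looks like this: + +```go +import ( + log "github.com/sirupsen/logrus" // always the lower-case path +) +```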
+ +Nicely color-coded in development (when a TTY is attached, otherwise just +plain text): + +![Colored](http://i.imgur.com/PY7qMwd.png) + +With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash +or Splunk: + +```text +{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the +ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} + +{"level":"warning","msg":"The group's number increased tremendously!", +"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"A giant walrus appears!", +"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} + +{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", +"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} + +{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, +"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} +``` + +With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not +attached, the output is compatible with the +[logfmt](http://godoc.org/github.com/kr/logfmt) format: + +```text +time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 +time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 +time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true +time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 +time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 +time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true +``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead: the cost will depend on the version of Go, but was +between 20 and 40% in recent tests with Go 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + + +#### Case-sensitivity + +The organization's name was changed to lower-case, and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. + +#### Example + +The simplest way to use Logrus is the package-level exported logger: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + }).Info("A walrus appears") +} +``` + +Note that it's completely API-compatible with the stdlib logger, so you can +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` +and you'll now have the flexibility of Logrus.
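+ +As a quick illustration (a hypothetical sketch, not part of the original README), stdlib-style calls keep working unchanged after the import swap: + +```go +package main + +import ( + log "github.com/sirupsen/logrus" +) + +func main() { + // Stdlib-style calls work as before, now with levels and structured output. + log.Println("service starting") + log.Printf("listening on %s", ":8080") +} +``` +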
You can customize it all you +want: + +```go +package main + +import ( + "os" + log "github.com/sirupsen/logrus" +) + +func init() { + // Log as JSON instead of the default ASCII formatter. + log.SetFormatter(&log.JSONFormatter{}) + + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) + + // Only log the warning severity or above. + log.SetLevel(log.WarnLevel) +} + +func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") + + log.WithFields(log.Fields{ + "omg": true, + "number": 122, + }).Warn("The group's number increased tremendously!") + + log.WithFields(log.Fields{ + "omg": true, + "number": 100, + }).Fatal("The ice breaks!") + + // A common pattern is to re-use fields between logging statements by re-using + // the logrus.Entry returned from WithFields() + contextLogger := log.WithFields(log.Fields{ + "common": "this is a common field", + "other": "I also should be logged always", + }) + + contextLogger.Info("I'll be logged with common and other field") + contextLogger.Info("Me too") +} +``` + +For more advanced usage such as logging to multiple locations from the same +application, you can also create an instance of the `logrus` Logger: + +```go +package main + +import ( + "os" + "github.com/sirupsen/logrus" +) + +// Create a new instance of the logger. You can have any number of instances. +var log = logrus.New() + +func main() { + // The API for setting attributes is a little different from the package-level + // exported logger. See Godoc. + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } + + log.WithFields(logrus.Fields{ + "animal": "walrus", + "size": 10, + }).Info("A group of walrus emerges from the ocean") +} +``` + +#### Fields + +Logrus encourages careful, structured logging through logging fields instead of +long, unparseable error messages. For example, instead of: `log.Fatalf("Failed +to send event %s to topic %s with key %d")`, you should log the much more +discoverable: + +```go +log.WithFields(log.Fields{ + "event": event, + "topic": topic, + "key": key, +}).Fatal("Failed to send event") +``` + +We've found this API forces you to think about logging in a way that produces +much more useful logging messages. We've been in countless situations where just +a single added field to a log statement that was already there would've saved us +hours. The `WithFields` call is optional. + +In general, with Logrus, using any of the `printf`-family functions should be +seen as a hint that you should add a field; however, you can still use the +`printf`-family functions with Logrus. + +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request.
Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") // will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + +#### Hooks + +You can add hooks for logging levels. For example, to send errors to an exception +tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to +multiple places simultaneously, e.g. syslog. + +Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in +`init`: + +```go +import ( + log "github.com/sirupsen/logrus" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" + "log/syslog" +) + +func init() { + + // Use the Airbrake hook to report errors that have Error severity or above to + // an exception tracker. You can create custom hooks, see the Hooks section. + log.AddHook(airbrake.NewHook(123, "xyz", "production")) + + hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") + if err != nil { + log.Error("Unable to connect to local syslog daemon") + } else { + log.AddHook(hook) + } +} +``` +Note: The syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, see the [syslog hook README](hooks/syslog/README.md). + +A list of currently known service hooks can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks). + + +#### Level logging + +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. + +```go +log.Trace("Something very low level.") +log.Debug("Useful debugging information.") +log.Info("Something noteworthy happened!") +log.Warn("You should probably take a look at this.") +log.Error("Something failed but I'm not quitting.") +// Calls os.Exit(1) after logging +log.Fatal("Bye.") +// Calls panic() after logging +log.Panic("I'm bailing.") +``` + +You can set the logging level on a `Logger`; it will then only log entries with +that severity or anything above it: + +```go +// Will log anything that is info or above (warn, error, fatal, panic). Default. +log.SetLevel(log.InfoLevel) +``` + +It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose +environment if your application has that. + +Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging). + +#### Entries + +Besides the fields added with `WithField` or `WithFields`, some fields are +automatically added to all logging events: + +1. `time`. The timestamp when the entry was created. +2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after + the `WithFields` call. E.g. `Failed to send event.` +3. `level`. The logging level. E.g. `info`. + +#### Environments + +Logrus has no notion of environment. + +If you wish for hooks and formatters to only be used in specific environments, +you should handle that yourself.
For example, if your application has a global +variable `Environment`, which is a string representation of the environment, you +could do: + +```go +import ( + log "github.com/sirupsen/logrus" +) + +func init() { + // do something here to set environment depending on an environment variable + // or command-line flag + if Environment == "production" { + log.SetFormatter(&log.JSONFormatter{}) + } else { + // The TextFormatter is default, you don't actually have to do this. + log.SetFormatter(&log.TextFormatter{}) + } +} +``` + +This configuration is how `logrus` was intended to be used, but JSON in +production is mostly only useful if you do log aggregation with tools like +Splunk or Logstash. + +#### Formatters + +The built-in logging formatters are: + +* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise + without colors. + * *Note:* to force colored output when there is no TTY, set the `ForceColors` + field to `true`. To force no colored output even if there is a TTY set the + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. + * When outputting to a TTY, it's often helpful to visually scan down a column where all the levels are the same width. Setting the `PadLevelText` field to `true` enables this behavior, by adding padding to the level text. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). +* `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). + +Third-party logging formatters: + +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply with Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). +* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the Power of Zalgo. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. +* [`powerful-logrus-formatter`](https://github.com/zput/zxcTool). Gets the file name, line number and calling function's name when printing a log; saves logs to files. +* [`caption-json-formatter`](https://github.com/nolleh/caption_json_formatter). Logrus's message JSON formatter with a human-readable caption added. + +You can define your formatter by implementing the `Formatter` interface, +requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a +`Fields` type (`map[string]interface{}`) with all your fields as well as the +default ones (see Entries section above): + +```go +type MyJSONFormatter struct { +} + +// Register it elsewhere, e.g. in init(): +// log.SetFormatter(new(MyJSONFormatter)) + +func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { + // Note this doesn't include Time, Level and Message which are available on + // the Entry. Consult `godoc` for information about those fields or read the + // source of the official loggers.
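+ // (A sketch: this keeps only the user-set fields. To include the defaults, + // one could copy entry.Time, entry.Level and entry.Message into the map + // alongside entry.Data before marshaling.)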
+ serialized, err := json.Marshal(entry.Data) + if err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + return append(serialized, '\n'), nil +} +``` + +#### Logger as an `io.Writer` + +Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. + +```go +w := logger.Writer() +defer w.Close() + +srv := http.Server{ + // create a stdlib log.Logger that writes to + // logrus.Logger. + ErrorLog: log.New(w, "", 0), +} +``` + +Each line written to that writer will be printed the usual way, using formatters +and hooks. The level for those entries is `info`. + +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + +#### Rotation + +Log rotation is not provided with Logrus. Log rotation should be done by an +external program (like `logrotate(8)`) that can compress and delete old log +entries. It should not be a feature of the application-level logger. + +#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers; you can set a logger's level, hook and formatter via a config file, and the logger will be generated with different configs in different environments.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus to wrap with spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | + +#### Testing + +Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: + +* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook +* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): + +```go +import ( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSomething(t *testing.T) { + logger, hook := test.NewNullLogger() + logger.Error("Hello error") + + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Hello error", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} +``` + +#### Fatal handlers + +Logrus can register one or more functions that will be called when any `fatal` +level message is logged. The registered handlers will be executed before +logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need +to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted. + +``` +... +handler := func() { + // gracefully shutdown something... +} +logrus.RegisterExitHandler(handler) +... +``` + +#### Thread safety + +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. +If you are sure such locking is not needed, you can call `logger.SetNoLock()` to disable the locking.
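+ +A minimal sketch (assuming the conditions listed below hold for your setup): + +```go +logger := logrus.New() +// Safe only when hooks and writes to logger.Out are already thread-safe. +logger.SetNoLock() +```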
+ +Situations where locking is not needed include: + +* You have no hooks registered, or hook calling is already thread-safe. + +* Writing to logger.Out is already thread-safe, for example: + + 1) logger.Out is protected by locks. + + 2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing) + + (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/) diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go new file mode 100644 index 00000000000..8fd189e1cca --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -0,0 +1,76 @@ +package logrus + +// The following code was sourced and modified from the +// https://github.com/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers; +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to shut down gracefully. An example use case could be +// closing database connections, or sending an alert that the application is +// closing. +func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers; +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to shut down gracefully. An example use case could be +// closing database connections, or sending an alert that the application is +// closing.
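+// +// Because DeferExitHandler prepends, handlers registered with it run before +// previously registered ones, mirroring defer's last-in, first-out order.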
+func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/vendor/github.com/sirupsen/logrus/appveyor.yml b/vendor/github.com/sirupsen/logrus/appveyor.yml new file mode 100644 index 00000000000..df9d65c3a5b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/appveyor.yml @@ -0,0 +1,14 @@ +version: "{build}" +platform: x64 +clone_folder: c:\gopath\src\github.com\sirupsen\logrus +environment: + GOPATH: c:\gopath +branches: + only: + - master +install: + - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - go version +build_script: + - go get -t + - go test diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go new file mode 100644 index 00000000000..c7787f77cbf --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go @@ -0,0 +1,43 @@ +package logrus + +import ( + "bytes" + "sync" +) + +var ( + bufferPool BufferPool +) + +type BufferPool interface { + Put(*bytes.Buffer) + Get() *bytes.Buffer +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +// SetBufferPool allows replacing the default logrus buffer pool +// to better meet the specific needs of an application. +func SetBufferPool(bp BufferPool) { + bufferPool = bp +} + +func init() { + SetBufferPool(&defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, + }) +} diff --git a/vendor/github.com/sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go new file mode 100644 index 00000000000..da67aba06de --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is the package-level exported logger: + + package main + + import ( + log "github.com/sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go new file mode 100644 index 00000000000..71cdbbc35d2 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -0,0 +1,442 @@ +package logrus + +import ( + "bytes" + "context" + "fmt" + "os" + "reflect" + "runtime" + "strings" + "sync" + "time" +) + +var ( + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) + +func init() { + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it.
These objects can be +// reused and passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. + Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic + // This field will be set on entry firing and the value will be equal to the one in Logger struct field. + Level Level + + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), a Buffer may be set to entry + Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), + } +} + +func (entry *Entry) Dup() *Entry { + data := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err} +} + +// Returns the bytes representation of this entry from the formatter. +func (entry *Entry) Bytes() ([]byte, error) { + return entry.Logger.Formatter.Format(entry) +} + +// Returns the string representation of this entry from the formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Bytes() + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: entry.Time, err: entry.err, Context: ctx} +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. +func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + fieldErr := entry.err + for k, v := range fields { + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch { + case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func: + isErrField = true + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } + } + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry.
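+// The receiver is left unchanged; a copy carrying the fixed time is returned.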
+func (entry *Entry) WithTime(t time.Time) *Entry { + dataCopy := make(Fields, len(entry.Data)) + for k, v := range entry.Data { + dataCopy[k] = v + } + return &Entry{Logger: entry.Logger, Data: dataCopy, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be a better way... +func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, maximumCallerDepth) + _ = runtime.Callers(0, pcs) + + // dynamically get the package name and the minimum caller depth + for i := 0; i < maximumCallerDepth; i++ { + funcName := runtime.FuncForPC(pcs[i]).Name() + if strings.Contains(funcName, "getCaller") { + logrusPackage = getPackageName(funcName) + break + } + } + + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f //nolint:scopelint + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil +} + +func (entry *Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + + newEntry := entry.Dup() + + if newEntry.Time.IsZero() { + newEntry.Time = time.Now() + } + + newEntry.Level = level + newEntry.Message = msg + + newEntry.Logger.mu.Lock() + reportCaller := newEntry.Logger.ReportCaller + bufPool := newEntry.getBufferPool() + newEntry.Logger.mu.Unlock() + + if reportCaller { + newEntry.Caller = getCaller() + } + + newEntry.fireHooks() + buffer = bufPool.Get() + defer func() { + newEntry.Buffer = nil + buffer.Reset() + bufPool.Put(buffer) + }() + buffer.Reset() + newEntry.Buffer = buffer + + newEntry.write() + + newEntry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here.
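+	// (PanicLevel has the lowest numeric Level value, so this condition +	// matches only the Panic family of calls.)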
+ if level <= PanicLevel { + panic(newEntry) + } +} + +func (entry *Entry) getBufferPool() (pool BufferPool) { + if entry.Logger.BufferPool != nil { + return entry.Logger.BufferPool + } + return bufferPool +} + +func (entry *Entry) fireHooks() { + var tmpHooks LevelHooks + entry.Logger.mu.Lock() + tmpHooks = make(LevelHooks, len(entry.Logger.Hooks)) + for k, v := range entry.Logger.Hooks { + tmpHooks[k] = v + } + entry.Logger.mu.Unlock() + + err := tmpHooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + return + } + if _, err := entry.Logger.Out.Write(serialized); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Entry.Panic or Entry.Fatal should be used instead. +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) +} + +func (entry *Entry) Info(args ...interface{}) { + entry.Log(InfoLevel, args...) +} + +func (entry *Entry) Warn(args ...interface{}) { + entry.Log(WarnLevel, args...) +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + entry.Log(ErrorLevel, args...) +} + +func (entry *Entry) Fatal(args ...interface{}) { + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + entry.Log(PanicLevel, args...) +} + +// Entry Printf family functions + +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + entry.Logf(InfoLevel, format, args...) +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + entry.Logf(WarnLevel, format, args...) +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) +} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + entry.Logf(ErrorLevel, format, args...) +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + entry.Logf(PanicLevel, format, args...) 
+} + +// Entry Println family functions + +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + +func (entry *Entry) Infoln(args ...interface{}) { + entry.Logln(InfoLevel, args...) +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + entry.Logln(WarnLevel, args...) +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + entry.Logln(ErrorLevel, args...) +} + +func (entry *Entry) Fatalln(args ...interface{}) { + entry.Logln(FatalLevel, args...) + entry.Logger.Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + entry.Logln(PanicLevel, args...) +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) + return msg[:len(msg)-1] +} diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go new file mode 100644 index 00000000000..017c30ce678 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -0,0 +1,270 @@ +package logrus + +import ( + "context" + "io" + "time" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.SetOutput(out) +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.SetLevel(level) +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.AddHook(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. 
+func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// WithTime creates an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// TraceFn logs a message from a func at level Trace on the standard logger. +func TraceFn(fn LogFunction) { + std.TraceFn(fn) +} + +// DebugFn logs a message from a func at level Debug on the standard logger. +func DebugFn(fn LogFunction) { + std.DebugFn(fn) +} + +// PrintFn logs a message from a func at level Info on the standard logger. +func PrintFn(fn LogFunction) { + std.PrintFn(fn) +} + +// InfoFn logs a message from a func at level Info on the standard logger. +func InfoFn(fn LogFunction) { + std.InfoFn(fn) +} + +// WarnFn logs a message from a func at level Warn on the standard logger. +func WarnFn(fn LogFunction) { + std.WarnFn(fn) +} + +// WarningFn logs a message from a func at level Warn on the standard logger. +func WarningFn(fn LogFunction) { + std.WarningFn(fn) +} + +// ErrorFn logs a message from a func at level Error on the standard logger. +func ErrorFn(fn LogFunction) { + std.ErrorFn(fn) +} + +// PanicFn logs a message from a func at level Panic on the standard logger. +func PanicFn(fn LogFunction) { + std.PanicFn(fn) +} + +// FatalFn logs a message from a func at level Fatal on the standard logger then the process will exit with status set to 1. +func FatalFn(fn LogFunction) { + std.FatalFn(fn) +} + +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. 
+func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) +} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go new file mode 100644 index 00000000000..408883773eb --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -0,0 +1,78 @@ +package logrus + +import "time" + +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when +// dumping it. 
If this code wasn't there, doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// would just silently drop the user-provided level. Instead, with this code +// it'll be logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) + } + + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) + } + + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. + if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } + } +} diff --git a/vendor/github.com/sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go new file mode 100644 index 00000000000..3f151cdc392 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers; you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 00000000000..c96dc5636bf --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,128 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "fmt" + "runtime" +) + +type fieldKey string + +// FieldMap allows customization of the key names for default fields. +type FieldMap map[fieldKey]string + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +// JSONFormatter formats logs into parsable json +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps.
+ // The format to use is the same as for time.Format or time.Parse from the standard + // library. + // The standard library already provides a set of predefined formats. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // DisableHTMLEscape allows disabling html escaping in output + DisableHTMLEscape bool + + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", + // }, + // } + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned values is the empty string, the + // corresponding key will be removed from the json fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool +} + +// Format renders a single log entry +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+4) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } + } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + encoder.SetEscapeHTML(!f.DisableHTMLEscape) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err) + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go new file mode 100644 index 00000000000..5ff0aef6d3f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -0,0 +1,417 @@ +package logrus + +import ( + "context" + "io" + "os" + "sync" + "sync/atomic" + "time" +) + +// LogFunction For big messages, it can be more efficient to pass a function +// and only call it if the log level is actually enabled rather than +// generating the log message and then checking if the level is enabled +type LogFunction func()
[]interface{} + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventurous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc + // The buffer pool used to format the log. If it is nil, the default global + // buffer pool will be used. + BufferPool BufferPool +} + +type exitFunc func(int) + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &logrus.Logger{ +// Out: os.Stderr, +// Formatter: new(logrus.TextFormatter), +// Hooks: make(logrus.LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} + logger.entryPool.Put(entry) +} + +// WithField allocates a new entry and adds a field to it. +// Debug, Print, Info, Warn, Error, Fatal or Panic must be then applied to +// this new returned entry. +// If you want multiple fields, use `WithFields`. +func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. 
+func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logf(level, format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + logger.Logf(InfoLevel, format, args...) +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + logger.Logf(WarnLevel, format, args...) +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + logger.Warnf(format, args...) +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + logger.Logf(ErrorLevel, format, args...) +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + logger.Logf(PanicLevel, format, args...) +} + +// Log will log a message at the level given as parameter. +// Warning: using Log at Panic or Fatal level will not respectively Panic nor Exit. +// For this behaviour Logger.Panic or Logger.Fatal should be used instead. +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) LogFn(level Level, fn LogFunction) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Log(level, fn()...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + +func (logger *Logger) Debug(args ...interface{}) { + logger.Log(DebugLevel, args...) +} + +func (logger *Logger) Info(args ...interface{}) { + logger.Log(InfoLevel, args...) +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Print(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + logger.Log(WarnLevel, args...) +} + +func (logger *Logger) Warning(args ...interface{}) { + logger.Warn(args...) +} + +func (logger *Logger) Error(args ...interface{}) { + logger.Log(ErrorLevel, args...) +} + +func (logger *Logger) Fatal(args ...interface{}) { + logger.Log(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + logger.Log(PanicLevel, args...) 
+} + +func (logger *Logger) TraceFn(fn LogFunction) { + logger.LogFn(TraceLevel, fn) +} + +func (logger *Logger) DebugFn(fn LogFunction) { + logger.LogFn(DebugLevel, fn) +} + +func (logger *Logger) InfoFn(fn LogFunction) { + logger.LogFn(InfoLevel, fn) +} + +func (logger *Logger) PrintFn(fn LogFunction) { + entry := logger.newEntry() + entry.Print(fn()...) + logger.releaseEntry(entry) +} + +func (logger *Logger) WarnFn(fn LogFunction) { + logger.LogFn(WarnLevel, fn) +} + +func (logger *Logger) WarningFn(fn LogFunction) { + logger.WarnFn(fn) +} + +func (logger *Logger) ErrorFn(fn LogFunction) { + logger.LogFn(ErrorLevel, fn) +} + +func (logger *Logger) FatalFn(fn LogFunction) { + logger.LogFn(FatalLevel, fn) + logger.Exit(1) +} + +func (logger *Logger) PanicFn(fn LogFunction) { + logger.LogFn(PanicLevel, fn) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { + entry := logger.newEntry() + entry.Logln(level, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) +} + +func (logger *Logger) Debugln(args ...interface{}) { + logger.Logln(DebugLevel, args...) +} + +func (logger *Logger) Infoln(args ...interface{}) { + logger.Logln(InfoLevel, args...) +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + logger.Logln(WarnLevel, args...) +} + +func (logger *Logger) Warningln(args ...interface{}) { + logger.Warnln(args...) +} + +func (logger *Logger) Errorln(args ...interface{}) { + logger.Logln(ErrorLevel, args...) +} + +func (logger *Logger) Fatalln(args ...interface{}) { + logger.Logln(FatalLevel, args...) + logger.Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit + } + logger.ExitFunc(code) +} + +// When a file is opened in append mode, it's safe to +// write concurrently to it (within 4k messages on Linux). +// In these cases the user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +// SetLevel sets the logger level. +func (logger *Logger) SetLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output.
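+// An illustrative sketch (the file name is a placeholder): +// +// f, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644) +// if err == nil { +// logger.SetOutput(f) +// }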
+func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} + +// SetBufferPool sets the logger buffer pool. +func (logger *Logger) SetBufferPool(pool BufferPool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.BufferPool = pool +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go new file mode 100644 index 00000000000..2f16224cb9f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -0,0 +1,186 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint32 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". +func (level Level) String() string { + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" + } +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + case "trace": + return TraceLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = l + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, + TraceLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. 
Usually only enabled when debugging. Very verbose logging. + DebugLevel + // TraceLevel level. Designates finer-grained informational events than Debug. + TraceLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface; this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous; it is +// here for consistency. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 00000000000..2403de98192 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 00000000000..499789984d2 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go new file mode 100644 index 00000000000..ebdae3ec626 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go @@ -0,0 +1,7 @@ +// +build js + +package logrus + +func isTerminal(fd int) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 00000000000..97af92c68ea --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 00000000000..3293fb3caad --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 00000000000..f6710b3bd0b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
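+// (An added note: on Solaris the TCGETA termio ioctl succeeds only on terminal +// devices, so a nil error from unix.IoctlGetTermio implies the descriptor is a TTY.)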
+func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 00000000000..04748b8515f --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix zos +// +build !js + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 00000000000..2879eb50ea6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,27 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/windows" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + handle := windows.Handle(v.Fd()) + var mode uint32 + if err := windows.GetConsoleMode(handle, &mode); err != nil { + return false + } + mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + if err := windows.SetConsoleMode(handle, mode); err != nil { + return false + } + return true + } + return false +} diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go new file mode 100644 index 00000000000..be2c6efe5ed --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -0,0 +1,339 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + red = 31 + yellow = 33 + blue = 36 + gray = 37 +) + +var baseTimestamp time.Time + +func init() { + baseTimestamp = time.Now() +} + +// TextFormatter formats logs into text +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Force quoting of all values + ForceQuote bool + + // DisableQuote disables quoting for all values. + // DisableQuote will have a lower priority than ForceQuote. + // If both of them are set to true, quoting will be forced on all values. + DisableQuote bool + + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + + // Disable timestamp logging. Useful when output is redirected to a logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed. + // The format to use is the same as for time.Format or time.Parse from the standard + // library. + // The standard library already provides a set of predefined formats. + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. + DisableSorting bool + + // The keys sorting function; when uninitialized, sort.Strings is used. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters.
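+ // With truncation enabled (the default), e.g. "WARNING" is printed as "WARN".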
+ DisableLevelTruncation bool + + // PadLevelText adds padding to the level text so that all levels output at the same length. + // PadLevelText is a superset of the DisableLevelTruncation option + PadLevelText bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // Whether the logger's out is to a terminal + isTerminal bool + + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned values is the empty string, the + // corresponding key will be removed from fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once + + // The max length of the level text, generated dynamically on init + levelTextMaxLength int +} + +func (f *TextFormatter) init(entry *Entry) { + if entry.Logger != nil { + f.isTerminal = checkIfTerminal(entry.Logger.Out) + } + // Get the max length of the level text + for _, level := range AllLevels { + levelTextLength := utf8.RuneCount([]byte(level.String())) + if levelTextLength > f.levelTextMaxLength { + f.levelTextMaxLength = levelTextLength + } + } +} + +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + switch force, ok := os.LookupEnv("CLICOLOR_FORCE"); { + case ok && force != "0": + isColored = true + case ok && force == "0", os.Getenv("CLICOLOR") == "0": + isColored = false + } + } + + return isColored && !f.DisableColors +} + +// Format renders a single log entry +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) + } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...)
+ } + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + f.terminalInitOnce.Do(func() { f.init(entry) }) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = defaultTimestampFormat + } + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) + } else { + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel, TraceLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + case InfoLevel: + levelColor = blue + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation && !f.PadLevelText { + levelText = levelText[0:4] + } + if f.PadLevelText { + // Generates the format string used in the next line, for example "%-6s" or "%-7s". + // Based on the max level text length. + formatString := "%-" + strconv.Itoa(f.levelTextMaxLength) + "s" + // Formats the level text by appending spaces up to the max length, for example: + // - "INFO " + // - "WARNING" + levelText = fmt.Sprintf(formatString, levelText) + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } + + switch { + case f.DisableTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) + case !f.FullTimestamp: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) + default: + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) + } + for _, k := range keys { + v := data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func (f *TextFormatter) needsQuoting(text string) bool { + if f.ForceQuote { + return true + } + if f.QuoteEmptyFields && len(text) == 0 { + return true + } + if f.DisableQuote { + return false + } + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.' 
|| ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + if b.Len() > 0 { + b.WriteByte(' ') + } + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + stringVal, ok := value.(string) + if !ok { + stringVal = fmt.Sprint(value) + } + + if !f.needsQuoting(stringVal) { + b.WriteString(stringVal) + } else { + b.WriteString(fmt.Sprintf("%q", stringVal)) + } +} diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go new file mode 100644 index 00000000000..074fd4b8bd7 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -0,0 +1,102 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" + "strings" +) + +// Writer at INFO level. See WriterLevel for details. +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that can be used to write arbitrary text to +// the logger at the given log level. Each line written to the writer will be +// printed in the usual way using formatters and hooks. The writer is part of an +// io.Pipe and it is the caller's responsibility to close the writer when done. +// This can be used to override the standard library logger easily. +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +// Writer returns an io.Writer that writes to the logger at the info log level +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +// WriterLevel returns an io.Writer that writes to the logger at the given log level +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + + // Determine which log function to use based on the specified log level + switch level { + case TraceLevel: + printFunc = entry.Trace + case DebugLevel: + printFunc = entry.Debug + case InfoLevel: + printFunc = entry.Info + case WarnLevel: + printFunc = entry.Warn + case ErrorLevel: + printFunc = entry.Error + case FatalLevel: + printFunc = entry.Fatal + case PanicLevel: + printFunc = entry.Panic + default: + printFunc = entry.Print + } + + // Start a new goroutine to scan the input and write it to the logger using the specified print function. + // It splits the input into chunks of up to 64KB to avoid buffer overflows.
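+ // For example, a caller could route the standard library logger through + // logrus (an illustrative sketch): + // + // w := logger.WriterLevel(logrus.ErrorLevel) + // log.SetOutput(w) // stdlib "log" package + // defer w.Close()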
+ go entry.writerScanner(reader, printFunc) + + // Set a finalizer function to close the writer when it is garbage collected + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +// writerScanner scans the input from the reader and writes it to the logger +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + + // Set the buffer size to the maximum token size to avoid buffer overflows + scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize) + + // Define a split function to split the input into chunks of up to 64KB + chunkSize := bufio.MaxScanTokenSize // 64KB + splitFunc := func(data []byte, atEOF bool) (int, []byte, error) { + if len(data) >= chunkSize { + return chunkSize, data[:chunkSize], nil + } + + return bufio.ScanLines(data, atEOF) + } + + // Use the custom split function to split the input + scanner.Split(splitFunc) + + // Scan the input and write it to the logger using the specified print function + for scanner.Scan() { + printFunc(strings.TrimRight(scanner.Text(), "\r\n")) + } + + // If there was an error while scanning the input, log an error + if err := scanner.Err(); err != nil { + entry.Errorf("Error while reading from Writer: %s", err) + } + + // Close the reader when we are done + reader.Close() +} + +// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/skeema/knownhosts/NOTICE b/vendor/github.com/skeema/knownhosts/NOTICE index 619a5a7ea77..a92cb34d674 100644 --- a/vendor/github.com/skeema/knownhosts/NOTICE +++ b/vendor/github.com/skeema/knownhosts/NOTICE @@ -1,4 +1,4 @@ -Copyright 2023 Skeema LLC and the Skeema Knownhosts authors +Copyright 2024 Skeema LLC and the Skeema Knownhosts authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/skeema/knownhosts/README.md b/vendor/github.com/skeema/knownhosts/README.md index 85339bc03b0..36b847614ca 100644 --- a/vendor/github.com/skeema/knownhosts/README.md +++ b/vendor/github.com/skeema/knownhosts/README.md @@ -100,7 +100,7 @@ config := &ssh.ClientConfig{ ## License -**Source code copyright 2023 Skeema LLC and the Skeema Knownhosts authors** +**Source code copyright 2024 Skeema LLC and the Skeema Knownhosts authors** ```text Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/vendor/github.com/skeema/knownhosts/knownhosts.go b/vendor/github.com/skeema/knownhosts/knownhosts.go index c2fb5160576..4dad7771b88 100644 --- a/vendor/github.com/skeema/knownhosts/knownhosts.go +++ b/vendor/github.com/skeema/knownhosts/knownhosts.go @@ -76,13 +76,23 @@ func (hkcb HostKeyCallback) HostKeyAlgorithms(hostWithPort string) (algos []stri // example by https://github.com/golang/crypto/pull/254. hostKeys := hkcb.HostKeys(hostWithPort) seen := make(map[string]struct{}, len(hostKeys)) - for _, key := range hostKeys { - typ := key.Type() + addAlgo := func(typ string) { if _, already := seen[typ]; !already { algos = append(algos, typ) seen[typ] = struct{}{} } } + for _, key := range hostKeys { + typ := key.Type() + if typ == ssh.KeyAlgoRSA { + // KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, + // not public key formats, so they can't appear as a PublicKey.Type. + // The corresponding PublicKey.Type is KeyAlgoRSA.
See RFC 8332, Section 2. + addAlgo(ssh.KeyAlgoRSASHA512) + addAlgo(ssh.KeyAlgoRSASHA256) + } + addAlgo(typ) + } return algos } diff --git a/vendor/github.com/skratchdot/open-golang/LICENSE b/vendor/github.com/skratchdot/open-golang/LICENSE new file mode 100644 index 00000000000..afd04c82161 --- /dev/null +++ b/vendor/github.com/skratchdot/open-golang/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 skratchdot + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/skratchdot/open-golang/open/exec.go b/vendor/github.com/skratchdot/open-golang/open/exec.go new file mode 100644 index 00000000000..1b0e713685c --- /dev/null +++ b/vendor/github.com/skratchdot/open-golang/open/exec.go @@ -0,0 +1,18 @@ +// +build !windows,!darwin + +package open + +import ( + "os/exec" +) + +// http://sources.debian.net/src/xdg-utils/1.1.0~rc1%2Bgit20111210-7.1/scripts/xdg-open/ +// http://sources.debian.net/src/xdg-utils/1.1.0~rc1%2Bgit20111210-7.1/scripts/xdg-mime/ + +func open(input string) *exec.Cmd { + return exec.Command("xdg-open", input) +} + +func openWith(input string, appName string) *exec.Cmd { + return exec.Command(appName, input) +} diff --git a/vendor/github.com/skratchdot/open-golang/open/exec_darwin.go b/vendor/github.com/skratchdot/open-golang/open/exec_darwin.go new file mode 100644 index 00000000000..16160e6f043 --- /dev/null +++ b/vendor/github.com/skratchdot/open-golang/open/exec_darwin.go @@ -0,0 +1,15 @@ +// +build darwin + +package open + +import ( + "os/exec" +) + +func open(input string) *exec.Cmd { + return exec.Command("open", input) +} + +func openWith(input string, appName string) *exec.Cmd { + return exec.Command("open", "-a", appName, input) +} diff --git a/vendor/github.com/skratchdot/open-golang/open/exec_windows.go b/vendor/github.com/skratchdot/open-golang/open/exec_windows.go new file mode 100644 index 00000000000..6e46c005427 --- /dev/null +++ b/vendor/github.com/skratchdot/open-golang/open/exec_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package open + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + // "syscall" +) + +var ( + cmd = "url.dll,FileProtocolHandler" + runDll32 = filepath.Join(os.Getenv("SYSTEMROOT"), "System32", "rundll32.exe") +) + +func cleaninput(input string) string { + r := strings.NewReplacer("&", "^&") + return r.Replace(input) +} + +func open(input string) *exec.Cmd { + cmd := exec.Command(runDll32, cmd, input) + //cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} + return cmd +} + +func openWith(input string, appName string) 
*exec.Cmd { + cmd := exec.Command("cmd", "/C", "start", "", appName, cleaninput(input)) + //cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true} + return cmd +} diff --git a/vendor/github.com/skratchdot/open-golang/open/open.go b/vendor/github.com/skratchdot/open-golang/open/open.go new file mode 100644 index 00000000000..b1f648ff51e --- /dev/null +++ b/vendor/github.com/skratchdot/open-golang/open/open.go @@ -0,0 +1,50 @@ +/* + + Open a file, directory, or URI using the OS's default + application for that object type. Optionally, you can + specify an application to use. + + This is a proxy for the following commands: + + OSX: "open" + Windows: "start" + Linux/Other: "xdg-open" + + This is a golang port of the node.js module: https://github.com/pwnall/node-open + +*/ +package open + +/* + Open a file, directory, or URI using the OS's default + application for that object type. Wait for the open + command to complete. +*/ +func Run(input string) error { + return open(input).Run() +} + +/* + Open a file, directory, or URI using the OS's default + application for that object type. Don't wait for the + open command to complete. +*/ +func Start(input string) error { + return open(input).Start() +} + +/* + Open a file, directory, or URI using the specified application. + Wait for the open command to complete. +*/ +func RunWith(input string, appName string) error { + return openWith(input, appName).Run() +} + +/* + Open a file, directory, or URI using the specified application. + Don't wait for the open command to complete. +*/ +func StartWith(input string, appName string) error { + return openWith(input, appName).Start() +} diff --git a/vendor/go.opencensus.io/.gitignore b/vendor/go.opencensus.io/.gitignore new file mode 100644 index 00000000000..74a6db472e5 --- /dev/null +++ b/vendor/go.opencensus.io/.gitignore @@ -0,0 +1,9 @@ +/.idea/ + +# go.opencensus.io/exporter/aws +/exporter/aws/ + +# Exclude vendor, use dep ensure after checkout: +/vendor/github.com/ +/vendor/golang.org/ +/vendor/google.golang.org/ diff --git a/vendor/go.opencensus.io/AUTHORS b/vendor/go.opencensus.io/AUTHORS new file mode 100644 index 00000000000..e491a9e7f78 --- /dev/null +++ b/vendor/go.opencensus.io/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/go.opencensus.io/CONTRIBUTING.md b/vendor/go.opencensus.io/CONTRIBUTING.md new file mode 100644 index 00000000000..1ba3962c8bf --- /dev/null +++ b/vendor/go.opencensus.io/CONTRIBUTING.md @@ -0,0 +1,63 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution, +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests. 
+ +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ + +## Instructions + +Fork the repo, then check out the upstream repo into your GOPATH: + +``` +$ go get -d go.opencensus.io +``` + +Add your fork as an origin: + +``` +cd $(go env GOPATH)/src/go.opencensus.io +git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git +``` + +Run tests: + +``` +$ make install-tools # Only first time. +$ make +``` + +Check out a new branch, make modifications, and push the branch to your fork: + +``` +$ git checkout -b feature +# edit files +$ git commit +$ git push fork feature +``` + +Open a pull request against the main opencensus-go repo. + +## General Notes +This project uses AppVeyor and Travis for CI. + +The dependencies are managed with `go mod`. If you work with the sources under your +`$GOPATH`, you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/vendor/go.opencensus.io/LICENSE b/vendor/go.opencensus.io/LICENSE new file mode 100644 index 00000000000..7a4a3ea2424 --- /dev/null +++ b/vendor/go.opencensus.io/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile new file mode 100644 index 00000000000..d896edc9968 --- /dev/null +++ b/vendor/go.opencensus.io/Makefile @@ -0,0 +1,97 @@ +# TODO: Fix this on windows. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './vendor/*' \ + -not -path '*/gen-go/*' \ + -type f | sort) +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +GOTEST_OPT?=-v -race -timeout 30s +GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic +GOTEST=go test +GOIMPORTS=goimports +GOLINT=golint +GOVET=go vet +EMBEDMD=embedmd +# TODO decide if we need to change these names. +TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" +TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" +README_FILES := $(shell find . 
-name '*README.md' | sort | tr '\n' ' ') + +.DEFAULT_GOAL := imports-lint-vet-embedmd-test + +.PHONY: imports-lint-vet-embedmd-test +imports-lint-vet-embedmd-test: imports lint vet embedmd test + +# TODO enable test-with-coverage in travis +.PHONY: travis-ci +travis-ci: imports lint vet embedmd test test-386 + +all-pkgs: + @echo $(ALL_PKGS) | tr ' ' '\n' | sort + +all-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) + +.PHONY: test-386 +test-386: + GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) + +.PHONY: test-with-coverage +test-with-coverage: + $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) + +.PHONY: imports +imports: + @IMPORTSOUT=`$(GOIMPORTS) -l $(ALL_SRC) 2>&1`; \ + if [ "$$IMPORTSOUT" ]; then \ + echo "$(GOIMPORTS) FAILED => goimports the following files:\n"; \ + echo "$$IMPORTSOUT\n"; \ + exit 1; \ + else \ + echo "Imports finished successfully"; \ + fi + +.PHONY: lint +lint: + @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ + if [ "$$LINTOUT" ]; then \ + echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ + echo "$$LINTOUT\n"; \ + exit 1; \ + else \ + echo "Lint finished successfully"; \ + fi + +.PHONY: vet +vet: + # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" + @VETOUT=`$(GOVET) ./... | grep -v "go: downloading" 2>&1`; \ + if [ "$$VETOUT" ]; then \ + echo "$(GOVET) FAILED => go vet the following files:\n"; \ + echo "$$VETOUT\n"; \ + exit 1; \ + else \ + echo "Vet finished successfully"; \ + fi + +.PHONY: embedmd +embedmd: + @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ + if [ "$$EMBEDMDOUT" ]; then \ + echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ + echo "$$EMBEDMDOUT\n"; \ + exit 1; \ + else \ + echo "Embedmd finished successfully"; \ + fi + +.PHONY: install-tools +install-tools: + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/README.md b/vendor/go.opencensus.io/README.md new file mode 100644 index 00000000000..1d7e837116f --- /dev/null +++ b/vendor/go.opencensus.io/README.md @@ -0,0 +1,267 @@ +# OpenCensus Libraries for Go + +[![Build Status][travis-image]][travis-url] +[![Windows Build Status][appveyor-image]][appveyor-url] +[![GoDoc][godoc-image]][godoc-url] +[![Gitter chat][gitter-image]][gitter-url] + +OpenCensus Go is a Go implementation of OpenCensus, a toolkit for +collecting application performance and behavior monitoring data. +Currently it consists of three major components: tags, stats and tracing. + +#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). + +## Installation + +``` +$ go get -u go.opencensus.io +``` + +The API of this project is still evolving; see the [Deprecation Policy](#deprecation-policy). +The use of vendoring or a dependency management tool is recommended. + +## Prerequisites + +OpenCensus Go libraries require Go 1.8 or later.
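+ +With Go modules, the dependency can also be pinned in `go.mod` (the version shown below is illustrative): + +``` +require go.opencensus.io v0.24.0 +```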
+ +## Getting Started + +The easiest way to get started using OpenCensus in your application is to use an existing +integration with your RPC framework: + +* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) +* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) +* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) +* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) +* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) +* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) +* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) +* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) +* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) +* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) + +If you're using a framework not listed here, you could either implement your own middleware for your +framework or use [custom stats](#stats) and [spans](#spans) directly in your application. + +## Exporters + +OpenCensus can export instrumentation data to various backends. +OpenCensus has exporter implementations for the following, users +can implement their own exporters by implementing the exporter interfaces +([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), +[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): + +* [Prometheus][exporter-prom] for stats +* [OpenZipkin][exporter-zipkin] for traces +* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces +* [Jaeger][exporter-jaeger] for traces +* [AWS X-Ray][exporter-xray] for traces +* [Datadog][exporter-datadog] for stats and traces +* [Graphite][exporter-graphite] for stats +* [Honeycomb][exporter-honeycomb] for traces +* [New Relic][exporter-newrelic] for stats and traces + +## Overview + +![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) + +In a microservices environment, a user request may go through +multiple services until there is a response. OpenCensus allows +you to instrument your services and collect diagnostics data all +through your services end-to-end. + +## Tags + +Tags represent propagated key-value pairs. They are propagated using `context.Context` +in the same process or can be encoded to be transmitted on the wire. Usually, this will +be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` +for gRPC. + +Package `tag` allows adding or modifying tags in the current context. + +[embedmd]:# (internal/readme/tags.go new) +```go +ctx, err := tag.New(ctx, + tag.Insert(osKey, "macOS-10.12.5"), + tag.Upsert(userIDKey, "cde36753ed"), +) +if err != nil { + log.Fatal(err) +} +``` + +## Stats + +OpenCensus is a low-overhead framework even if instrumentation is always enabled. +In order to be so, it is optimized to make recording of data points fast +and separate from the data aggregation. + +OpenCensus stats collection happens in two stages: + +* Definition of measures and recording of data points +* Definition of views and aggregation of the recorded data + +### Recording + +Measurements are data points associated with a measure. +Recording implicitly tags the set of Measurements with the tags from the +provided context: + +[embedmd]:# (internal/readme/stats.go record) +```go +stats.Record(ctx, videoSize.M(102478)) +``` + +### Views + +Views are how Measures are aggregated. You can think of them as queries over the +set of recorded data points (measurements). 
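+ +For reference, a measure such as `videoSize` used above might be defined roughly as follows (the name, description and unit are illustrative): + +```go +videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes) +```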
+### Views
+
+Views are how Measures are aggregated. You can think of them as queries over the
+set of recorded data points (measurements).
+
+Views have two parts: the tags to group by and the aggregation type used.
+
+Currently three types of aggregations are supported:
+
+* CountAggregation is used to count the number of times a sample was recorded.
+* DistributionAggregation is used to provide a histogram of the values of the samples.
+* SumAggregation is used to sum up all sample values.
+
+[embedmd]:# (internal/readme/stats.go aggs)
+```go
+distAgg := view.Distribution(1<<32, 2<<32, 3<<32)
+countAgg := view.Count()
+sumAgg := view.Sum()
+```
+
+Here we create a view with the DistributionAggregation over our measure.
+
+[embedmd]:# (internal/readme/stats.go view)
+```go
+if err := view.Register(&view.View{
+	Name:        "example.com/video_size_distribution",
+	Description: "distribution of processed video size over time",
+	Measure:     videoSize,
+	Aggregation: view.Distribution(1<<32, 2<<32, 3<<32),
+}); err != nil {
+	log.Fatalf("Failed to register view: %v", err)
+}
+```
+
+Register begins collecting data for the view. Registered views' data will be
+exported via the registered exporters.
+
+## Traces
+
+A distributed trace tracks the progression of a single user request as
+it is handled by the services and processes that make up an application.
+Each step is called a span in the trace. Spans include metadata about the step,
+most importantly the time spent in the step, called the span’s latency.
+
+Below you see a trace and several spans underneath it.
+
+![Traces and spans](https://i.imgur.com/7hZwRVj.png)
+
+### Spans
+
+A span is the unit step in a trace. Each span has a name, latency, status and
+additional metadata.
+
+Below we are starting a span for a cache read and ending it
+when we are done:
+
+[embedmd]:# (internal/readme/trace.go startend)
+```go
+ctx, span := trace.StartSpan(ctx, "cache.Get")
+defer span.End()
+
+// Do work to get from cache.
+```
+
+### Propagation
+
+Spans can have parents or can be root spans if they don't have any parents.
+The current span is propagated in-process and across the network to allow associating
+new child spans with the parent.
+
+In the same process, `context.Context` is used to propagate spans.
+`trace.StartSpan` creates a new span as a root if the current context
+doesn't contain a span. Otherwise, it creates a child of the span that is
+already in the current context. The returned context can be used to keep
+propagating the newly created span in the current context.
+
+[embedmd]:# (internal/readme/trace.go startend)
+```go
+ctx, span := trace.StartSpan(ctx, "cache.Get")
+defer span.End()
+
+// Do work to get from cache.
+```
+
+Across the network, OpenCensus provides different propagation
+methods for different protocols.
+
+* gRPC integrations use OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation).
+* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation)
+  by default but can be configured to use a custom propagation method by setting another
+  [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat).
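
For instance, the HTTP client transport's propagation format can be set explicitly (a hedged sketch; B3 is already the default, shown here only to make the knob visible):

```go
import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

// client traces outgoing requests and propagates span context as B3 headers.
var client = &http.Client{
	Transport: &ochttp.Transport{Propagation: &b3.HTTPFormat{}},
}
```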
+
+## Execution Tracer
+
+With Go 1.11, OpenCensus Go will support integration with the Go execution tracer.
+See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68)
+for an example of their mutual use.
+
+## Profiles
+
+OpenCensus tags can be applied as profiler labels
+for users who are on Go 1.9 and above.
+
+[embedmd]:# (internal/readme/tags.go profiler)
+```go
+ctx, err = tag.New(ctx,
+	tag.Insert(osKey, "macOS-10.12.5"),
+	tag.Insert(userIDKey, "fff0989878"),
+)
+if err != nil {
+	log.Fatal(err)
+}
+tag.Do(ctx, func(ctx context.Context) {
+	// Do work.
+	// When profiling is on, samples will be
+	// recorded with the key/values from the tag map.
+})
+```
+
+A screenshot of the CPU profile from the program above:
+
+![CPU profile](https://i.imgur.com/jBKjlkw.png)
+
+## Deprecation Policy
+
+Before version 1.0.0, the following deprecation policy will be observed:
+
+No backwards-incompatible changes will be made except for the removal of symbols that have
+been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release
+removing the *Deprecated* functionality will be made no sooner than 28 days after the first
+release in which the functionality was marked *Deprecated*.
+
+[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master
+[travis-url]: https://travis-ci.org/census-instrumentation/opencensus-go
+[appveyor-image]: https://ci.appveyor.com/api/projects/status/vgtt29ps1783ig38?svg=true
+[appveyor-url]: https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/branch/master
+[godoc-image]: https://godoc.org/go.opencensus.io?status.svg
+[godoc-url]: https://godoc.org/go.opencensus.io
+[gitter-image]: https://badges.gitter.im/census-instrumentation/lobby.svg
+[gitter-url]: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
+
+[new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap
+[new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace
+
+[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus
+[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver
+[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin
+[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger
+[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws
+[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog
+[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite
+[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter
+[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go
diff --git a/vendor/go.opencensus.io/appveyor.yml b/vendor/go.opencensus.io/appveyor.yml
new file mode 100644
index 00000000000..d08f0edaff9
--- /dev/null
+++ b/vendor/go.opencensus.io/appveyor.yml
@@ -0,0 +1,24 @@
+version: "{build}"
+
+platform: x64
+
+clone_folder: c:\gopath\src\go.opencensus.io
+
+environment:
+  GOPATH: 'c:\gopath'
+  GO111MODULE: 'on'
+  CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613
+
+stack: go 1.11
+
+before_test:
+  - go version
+  - go env
+
+build: false
+deploy: false
+
+test_script:
+  - cd %APPVEYOR_BUILD_FOLDER%
+  - go build -v .\...
+  - go test -v .\... # No -race because cgo is disabled
diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go
new file mode 100644
index 00000000000..81dc7183ec3
--- /dev/null
+++ b/vendor/go.opencensus.io/internal/internal.go
@@ -0,0 +1,37 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal // import "go.opencensus.io/internal"
+
+import (
+	"fmt"
+	"time"
+
+	opencensus "go.opencensus.io"
+)
+
+// UserAgent is the user agent to be added to the outgoing
+// requests from the exporters.
+var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version())
+
+// MonotonicEndTime returns the end time at present
+// but offset from start, monotonically.
+//
+// The monotonic clock is used in subtractions hence
+// the duration since start added back to start gives
+// end as a monotonic time.
+// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func MonotonicEndTime(start time.Time) time.Time {
+	return start.Add(time.Since(start))
+}
diff --git a/vendor/go.opencensus.io/internal/sanitize.go b/vendor/go.opencensus.io/internal/sanitize.go
new file mode 100644
index 00000000000..de8ccf236c4
--- /dev/null
+++ b/vendor/go.opencensus.io/internal/sanitize.go
@@ -0,0 +1,50 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"strings"
+	"unicode"
+)
+
+const labelKeySizeLimit = 100
+
+// Sanitize returns a string that is truncated to 100 characters if it's too
+// long, and replaces non-alphanumeric characters with underscores.
+func Sanitize(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	if len(s) > labelKeySizeLimit {
+		s = s[:labelKeySizeLimit]
+	}
+	s = strings.Map(sanitizeRune, s)
+	if unicode.IsDigit(rune(s[0])) {
+		s = "key_" + s
+	}
+	if s[0] == '_' {
+		s = "key" + s
+	}
+	return s
+}
+
+// sanitizeRune converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) {
+		return r
+	}
+	// Everything else turns into an underscore.
+	return '_'
+}
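
Note that `internal.Sanitize` is not importable from outside the module (it lives under `internal/`); the following is only an illustration of its behavior, assuming the rules above:

```go
internal.Sanitize("video size") // "video_size"  (space replaced by underscore)
internal.Sanitize("9lives")     // "key_9lives"  (leading digit gets a prefix)
internal.Sanitize("_private")   // "key_private" (leading underscore gets a prefix)
```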
diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
new file mode 100644
index 00000000000..41b2c3fc038
--- /dev/null
+++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go
@@ -0,0 +1,75 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// Package tagencoding contains the tag encoding
+// used internally by the stats collector.
+package tagencoding // import "go.opencensus.io/internal/tagencoding"
+
+// Values represents the encoded buffer for the values.
+type Values struct {
+	Buffer     []byte
+	WriteIndex int
+	ReadIndex  int
+}
+
+func (vb *Values) growIfRequired(expected int) {
+	if len(vb.Buffer)-vb.WriteIndex < expected {
+		tmp := make([]byte, 2*(len(vb.Buffer)+1)+expected)
+		copy(tmp, vb.Buffer)
+		vb.Buffer = tmp
+	}
+}
+
+// WriteValue is the helper method to encode Values from map[Key][]byte.
+func (vb *Values) WriteValue(v []byte) {
+	length := len(v) & 0xff
+	vb.growIfRequired(1 + length)
+
+	// writing length of v
+	vb.Buffer[vb.WriteIndex] = byte(length)
+	vb.WriteIndex++
+
+	if length == 0 {
+		// No value was encoded for this key
+		return
+	}
+
+	// writing v
+	copy(vb.Buffer[vb.WriteIndex:], v[:length])
+	vb.WriteIndex += length
+}
+
+// ReadValue is the helper method to decode Values to a map[Key][]byte.
+func (vb *Values) ReadValue() []byte {
+	// read length of v
+	length := int(vb.Buffer[vb.ReadIndex])
+	vb.ReadIndex++
+	if length == 0 {
+		// No value was encoded for this key
+		return nil
+	}
+
+	// read value of v
+	v := make([]byte, length)
+	endIdx := vb.ReadIndex + length
+	copy(v, vb.Buffer[vb.ReadIndex:endIdx])
+	vb.ReadIndex = endIdx
+	return v
+}
+
+// Bytes returns a reference to already written bytes in the Buffer.
+func (vb *Values) Bytes() []byte {
+	return vb.Buffer[:vb.WriteIndex]
+}
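
The encoding is a simple length-prefixed byte layout, which makes the round-trip easy to see (illustration only; the package is internal and not importable from outside the module):

```go
var vb tagencoding.Values
vb.WriteValue([]byte("frontend")) // writes the length byte 0x08, then 8 bytes
vb.WriteValue(nil)                // writes a lone 0x00 meaning "no value"

rd := tagencoding.Values{Buffer: vb.Bytes()}
fmt.Printf("%s\n", rd.ReadValue()) // frontend
fmt.Println(rd.ReadValue())        // [] (nil: no value was encoded)
```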
diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go
new file mode 100644
index 00000000000..073af7b473a
--- /dev/null
+++ b/vendor/go.opencensus.io/internal/traceinternals.go
@@ -0,0 +1,53 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"time"
+)
+
+// Trace allows internal access to some trace functionality.
+// TODO(#412): remove this
+var Trace interface{}
+
+// LocalSpanStoreEnabled is true if the local span store is enabled.
+var LocalSpanStoreEnabled bool
+
+// BucketConfiguration stores the number of samples to store for span buckets
+// for successful and failed spans for a particular span name.
+type BucketConfiguration struct {
+	Name                 string
+	MaxRequestsSucceeded int
+	MaxRequestsErrors    int
+}
+
+// PerMethodSummary is a summary of the spans stored for a single span name.
+type PerMethodSummary struct {
+	Active         int
+	LatencyBuckets []LatencyBucketSummary
+	ErrorBuckets   []ErrorBucketSummary
+}
+
+// LatencyBucketSummary is a summary of a latency bucket.
+type LatencyBucketSummary struct {
+	MinLatency, MaxLatency time.Duration
+	Size                   int
+}
+
+// ErrorBucketSummary is a summary of an error bucket.
+type ErrorBucketSummary struct {
+	ErrorCode int32
+	Size      int
+}
diff --git a/vendor/go.opencensus.io/metric/metricdata/doc.go b/vendor/go.opencensus.io/metric/metricdata/doc.go
new file mode 100644
index 00000000000..52a7b3bf850
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metricdata contains the metrics data model.
+//
+// This is an EXPERIMENTAL package, and may change in arbitrary ways without
+// notice.
package metricdata // import "go.opencensus.io/metric/metricdata"
diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go
new file mode 100644
index 00000000000..12695ce2dc7
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go
@@ -0,0 +1,38 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+	"time"
+)
+
+// Exemplar keys.
+const (
+	AttachmentKeySpanContext = "SpanContext"
+)
+
+// Exemplar is an example data point associated with each bucket of a
+// distribution type aggregation.
+//
+// Their purpose is to provide an example of the kind of thing
+// (request, RPC, trace span, etc.) that resulted in that measurement.
+type Exemplar struct {
+	Value       float64     // the value that was recorded
+	Timestamp   time.Time   // the time the value was recorded
+	Attachments Attachments // attachments (if any)
+}
+
+// Attachments is a map of extra values associated with a recorded data point.
+type Attachments map[string]interface{}
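
An exemplar ties a recorded value back to the concrete event that produced it; a hedged sketch of constructing one by hand (`sc` is assumed to be a `trace.SpanContext` captured at recording time):

```go
ex := metricdata.Exemplar{
	Value:     102478,     // the recorded data point
	Timestamp: time.Now(), // when it was recorded
	Attachments: metricdata.Attachments{
		// Link the sample to the span that was active when it was taken.
		metricdata.AttachmentKeySpanContext: sc,
	},
}
```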
diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go
new file mode 100644
index 00000000000..aadae41e6a2
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/label.go
@@ -0,0 +1,35 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+// LabelKey represents the key of a label. It has an optional
+// description attribute.
+type LabelKey struct {
+	Key         string
+	Description string
+}
+
+// LabelValue represents the value of a label.
+// The zero value represents a missing label value, which may be treated
+// differently to an empty string value by some back ends.
+type LabelValue struct {
+	Value   string // string value of the label
+	Present bool   // flag that indicates whether a value is present or not
+}
+
+// NewLabelValue creates a new non-nil LabelValue that represents the given string.
+func NewLabelValue(val string) LabelValue {
+	return LabelValue{Value: val, Present: true}
+}
diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go
new file mode 100644
index 00000000000..8293712c77f
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/metric.go
@@ -0,0 +1,46 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+	"time"
+
+	"go.opencensus.io/resource"
+)
+
+// Descriptor holds metadata about a metric.
+type Descriptor struct {
+	Name        string     // full name of the metric
+	Description string     // human-readable description
+	Unit        Unit       // units for the measure
+	Type        Type       // type of measure
+	LabelKeys   []LabelKey // label keys
+}
+
+// Metric represents a quantity measured against a resource with different
+// label value combinations.
+type Metric struct {
+	Descriptor Descriptor         // metric descriptor
+	Resource   *resource.Resource // resource against which this was measured
+	TimeSeries []*TimeSeries      // one time series for each combination of label values
+}
+
+// TimeSeries is a sequence of points associated with a combination of label
+// values.
+type TimeSeries struct {
+	LabelValues []LabelValue // label values, same order as keys in the metric descriptor
+	Points      []Point      // points sequence
+	StartTime   time.Time    // time we started recording this time series
+}
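
Putting the pieces together: a hedged sketch of assembling a gauge by hand (names are illustrative; exporters normally receive such values from a reader rather than constructing them):

```go
m := &metricdata.Metric{
	Descriptor: metricdata.Descriptor{
		Name:        "example.com/open_conns",
		Description: "currently open connections",
		Unit:        metricdata.UnitDimensionless,
		Type:        metricdata.TypeGaugeInt64,
		LabelKeys:   []metricdata.LabelKey{{Key: "proto"}},
	},
	TimeSeries: []*metricdata.TimeSeries{{
		LabelValues: []metricdata.LabelValue{metricdata.NewLabelValue("tcp")},
		Points:      []metricdata.Point{metricdata.NewInt64Point(time.Now(), 42)},
	}},
}
```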
diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go
new file mode 100644
index 00000000000..7fe057b19cf
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/point.go
@@ -0,0 +1,193 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+import (
+	"time"
+)
+
+// Point is a single data point of a time series.
+type Point struct {
+	// Time is the point in time that this point represents in a time series.
+	Time time.Time
+	// Value is the value of this point. Prefer using ReadValue to switching on
+	// the value type, since new value types might be added.
+	Value interface{}
+}
+
+//go:generate stringer -type ValueType
+
+// NewFloat64Point creates a new Point holding a float64 value.
+func NewFloat64Point(t time.Time, val float64) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// NewInt64Point creates a new Point holding an int64 value.
+func NewInt64Point(t time.Time, val int64) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// NewDistributionPoint creates a new Point holding a Distribution value.
+func NewDistributionPoint(t time.Time, val *Distribution) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// NewSummaryPoint creates a new Point holding a Summary value.
+func NewSummaryPoint(t time.Time, val *Summary) Point {
+	return Point{
+		Value: val,
+		Time:  t,
+	}
+}
+
+// ValueVisitor allows reading the value of a point.
+type ValueVisitor interface {
+	VisitFloat64Value(float64)
+	VisitInt64Value(int64)
+	VisitDistributionValue(*Distribution)
+	VisitSummaryValue(*Summary)
+}
+
+// ReadValue accepts a ValueVisitor and calls the appropriate method with the
+// value of this point.
+// Consumers of Point should use this in preference to switching on the type
+// of the value directly, since new value types may be added.
+func (p Point) ReadValue(vv ValueVisitor) {
+	switch v := p.Value.(type) {
+	case int64:
+		vv.VisitInt64Value(v)
+	case float64:
+		vv.VisitFloat64Value(v)
+	case *Distribution:
+		vv.VisitDistributionValue(v)
+	case *Summary:
+		vv.VisitSummaryValue(v)
+	default:
+		panic("unexpected value type")
+	}
+}
+
+// Distribution contains summary statistics for a population of values. It
+// optionally contains a histogram representing the distribution of those
+// values across a set of buckets.
+type Distribution struct {
+	// Count is the number of values in the population. Must be non-negative. This value
+	// must equal the sum of the values in bucket_counts if a histogram is
+	// provided.
+	Count int64
+	// Sum is the sum of the values in the population. If count is zero then this field
+	// must be zero.
+	Sum float64
+	// SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the
+	// population. For values x_i this is:
+	//
+	//     Sum[i=1..n]((x_i - mean)^2)
+	//
+	// Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+	// describes Welford's method for accumulating this sum in one pass.
+	//
+	// If count is zero then this field must be zero.
+	SumOfSquaredDeviation float64
+	// BucketOptions describes the bounds of the histogram buckets in this
+	// distribution.
+	//
+	// A Distribution may optionally contain a histogram of the values in the
+	// population.
+	//
+	// If nil, there is no associated histogram.
+	BucketOptions *BucketOptions
+	// Buckets is omitted if the distribution does not have a histogram.
+	// If there is a histogram, then the sum of the values in the Bucket counts
+	// must equal the value in the count field of the distribution.
+	Buckets []Bucket
+}
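
Since new value types may appear, consumers go through `ReadValue` with a visitor rather than type-asserting `Value` themselves. A hedged sketch of a minimal visitor (the composite cases are deliberately no-ops):

```go
// logVisitor logs scalar points and ignores composite ones.
type logVisitor struct{}

func (logVisitor) VisitInt64Value(v int64)                         { log.Println("int64:", v) }
func (logVisitor) VisitFloat64Value(v float64)                     { log.Println("float64:", v) }
func (logVisitor) VisitDistributionValue(*metricdata.Distribution) {}
func (logVisitor) VisitSummaryValue(*metricdata.Summary)           {}
```

Usage: `p.ReadValue(logVisitor{})` dispatches on the dynamic type of `p.Value`.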
+
+// BucketOptions describes the bounds of the histogram buckets in this
+// distribution.
+type BucketOptions struct {
+	// Bounds specifies a set of bucket upper bounds.
+	// This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket
+	// index i are:
+	//
+	//     [0, Bounds[i]) for i == 0
+	//     [Bounds[i-1], Bounds[i]) for 0 < i < N-1
+	//     [Bounds[i-1], +infinity) for i == N-1
+	Bounds []float64
+}
+
+// Bucket represents a single bucket (value range) in a distribution.
+type Bucket struct {
+	// Count is the number of values in each bucket of the histogram, as described in
+	// bucket_bounds.
+	Count int64
+	// Exemplar associated with this bucket (if any).
+	Exemplar *Exemplar
+}
+
+// Summary is a representation of percentiles.
+type Summary struct {
+	// Count is the cumulative count (if available).
+	Count int64
+	// Sum is the cumulative sum of values (if available).
+	Sum float64
+	// HasCountAndSum is true if Count and Sum are available.
+	HasCountAndSum bool
+	// Snapshot represents percentiles calculated over an arbitrary time window.
+	// The values in this struct can be reset at arbitrary unknown times, with
+	// the requirement that all of them are reset at the same time.
+	Snapshot Snapshot
+}
+
+// Snapshot represents percentiles over an arbitrary time.
+// The values in this struct can be reset at arbitrary unknown times, with
+// the requirement that all of them are reset at the same time.
+type Snapshot struct {
+	// Count is the number of values in the snapshot. Optional since some systems don't
+	// expose this. Set to 0 if not available.
+	Count int64
+	// Sum is the sum of values in the snapshot. Optional since some systems don't
+	// expose this. If count is 0 then this field must be zero.
+	Sum float64
+	// Percentiles is a map from percentile (range (0-100.0]) to the value of
+	// the percentile.
+	Percentiles map[float64]float64
+}
+
+//go:generate stringer -type Type
+
+// Type is the overall type of metric, including its value type and whether it
+// represents a cumulative total (since the start time) or if it represents a
+// gauge value.
+type Type int
+
+// Metric types.
+const (
+	TypeGaugeInt64 Type = iota
+	TypeGaugeFloat64
+	TypeGaugeDistribution
+	TypeCumulativeInt64
+	TypeCumulativeFloat64
+	TypeCumulativeDistribution
+	TypeSummary
+)
diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go
new file mode 100644
index 00000000000..c3f8ec27b53
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type Type"; DO NOT EDIT.
+
+package metricdata
+
+import "strconv"
+
+const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary"
+
+var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128}
+
+func (i Type) String() string {
+	if i < 0 || i >= Type(len(_Type_index)-1) {
+		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go
new file mode 100644
index 00000000000..b483a1371b0
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricdata/unit.go
@@ -0,0 +1,27 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+// Unit is a string encoded according to the case-sensitive abbreviations from the
+// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html
+type Unit string
+
+// Predefined units. To record against a unit not represented here, create your
+// own Unit type constant from a string.
+const (
+	UnitDimensionless Unit = "1"
+	UnitBytes         Unit = "By"
+	UnitMilliseconds  Unit = "ms"
+)
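
Recording against a unit that is not predefined is just a matter of declaring another constant of the same type (a hedged sketch, using the UCUM abbreviation for seconds):

```go
// UnitSeconds is a custom unit built from a UCUM string.
const UnitSeconds metricdata.Unit = "s"
```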
diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go
new file mode 100644
index 00000000000..ca1f3904938
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go
@@ -0,0 +1,78 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricproducer
+
+import (
+	"sync"
+)
+
+// Manager maintains a list of active producers. Producers can register
+// with the manager to allow readers to read all metrics provided by them.
+// Readers can retrieve all producers registered with the manager,
+// read metrics from the producers and export them.
+type Manager struct {
+	mu        sync.RWMutex
+	producers map[Producer]struct{}
+}
+
+var prodMgr *Manager
+var once sync.Once
+
+// GlobalManager is a single instance of producer manager
+// that is used by all producers and all readers.
+func GlobalManager() *Manager {
+	once.Do(func() {
+		prodMgr = &Manager{}
+		prodMgr.producers = make(map[Producer]struct{})
+	})
+	return prodMgr
+}
+
+// AddProducer adds the producer to the Manager if it is not already present.
+func (pm *Manager) AddProducer(producer Producer) {
+	if producer == nil {
+		return
+	}
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	pm.producers[producer] = struct{}{}
+}
+
+// DeleteProducer deletes the producer from the Manager if it is present.
+func (pm *Manager) DeleteProducer(producer Producer) {
+	if producer == nil {
+		return
+	}
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	delete(pm.producers, producer)
+}
+
+// GetAll returns a slice of all producers currently registered with
+// the Manager. For each call it generates a new slice. The slice
+// should not be cached as registration may change at any time. It is
+// typically called periodically by exporters to read metrics from
+// the producers.
+func (pm *Manager) GetAll() []Producer {
+	pm.mu.Lock()
+	defer pm.mu.Unlock()
+	producers := make([]Producer, len(pm.producers))
+	i := 0
+	for producer := range pm.producers {
+		producers[i] = producer
+		i++
+	}
+	return producers
+}
diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go
new file mode 100644
index 00000000000..6cee9ed1783
--- /dev/null
+++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go
@@ -0,0 +1,28 @@
+// Copyright 2019, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricproducer
+
+import (
+	"go.opencensus.io/metric/metricdata"
+)
+
+// Producer is a source of metrics.
+type Producer interface {
+	// Read should return the current values of all metrics supported by this
+	// metric provider.
+	// The returned metrics should be unique for each combination of name and
+	// resource.
+	Read() []*metricdata.Metric
+}
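
Wiring the two together: a hedged sketch of a trivial Producer registered with the global manager (a reader or exporter would then pick it up via GetAll):

```go
// constProducer reports a fixed set of metrics.
type constProducer struct{ metrics []*metricdata.Metric }

func (p constProducer) Read() []*metricdata.Metric { return p.metrics }

func register() {
	metricproducer.GlobalManager().AddProducer(constProducer{})
	// A reader/exporter loop would periodically do:
	for _, p := range metricproducer.GlobalManager().GetAll() {
		_ = p.Read() // export the returned metrics
	}
}
```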
diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go
new file mode 100644
index 00000000000..11e31f421c5
--- /dev/null
+++ b/vendor/go.opencensus.io/opencensus.go
@@ -0,0 +1,21 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package opencensus contains Go support for OpenCensus.
+package opencensus // import "go.opencensus.io"
+
+// Version is the current release version of OpenCensus in use.
+func Version() string {
+	return "0.24.0"
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go
new file mode 100644
index 00000000000..da815b2a734
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/client.go
@@ -0,0 +1,117 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+	"net/http"
+	"net/http/httptrace"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+// Transport is an http.RoundTripper that instruments all outgoing requests with
+// OpenCensus stats and tracing.
+//
+// The zero value is intended to be a useful default, but for
+// now it's recommended that you explicitly set Propagation, since the default
+// for this may change.
+type Transport struct {
+	// Base may be set to wrap another http.RoundTripper that does the actual
+	// requests. By default http.DefaultTransport is used.
+	//
+	// If the base HTTP round tripper implements CancelRequest,
+	// the returned round tripper will be cancelable.
+	Base http.RoundTripper

+	// Propagation defines how traces are propagated. If unspecified, a default
+	// (currently B3 format) will be used.
+	Propagation propagation.HTTPFormat
+
+	// StartOptions are applied to the span started by this Transport around each
+	// request.
+	//
+	// StartOptions.SpanKind will always be set to trace.SpanKindClient
+	// for spans started by this transport.
+	StartOptions trace.StartOptions
+
+	// GetStartOptions allows setting start options per request. If set,
+	// StartOptions is ignored.
+	GetStartOptions func(*http.Request) trace.StartOptions
+
+	// FormatSpanName holds the function to use for generating the span name
+	// from the information found in the outgoing HTTP Request. By default the
+	// name equals the URL Path.
+	FormatSpanName func(*http.Request) string
+
+	// NewClientTrace may be set to a function allowing the current *trace.Span
+	// to be annotated with HTTP request event information emitted by the
+	// httptrace package.
+	NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace
+
+	// TODO: Implement tag propagation for HTTP.
+}
+
+// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	rt := t.base()
+	if isHealthEndpoint(req.URL.Path) {
+		return rt.RoundTrip(req)
+	}
+	// TODO: remove excessive nesting of http.RoundTrippers here.
+	format := t.Propagation
+	if format == nil {
+		format = defaultFormat
+	}
+	spanNameFormatter := t.FormatSpanName
+	if spanNameFormatter == nil {
+		spanNameFormatter = spanNameFromURL
+	}
+
+	startOpts := t.StartOptions
+	if t.GetStartOptions != nil {
+		startOpts = t.GetStartOptions(req)
+	}
+
+	rt = &traceTransport{
+		base:   rt,
+		format: format,
+		startOptions: trace.StartOptions{
+			Sampler:  startOpts.Sampler,
+			SpanKind: trace.SpanKindClient,
+		},
+		formatSpanName: spanNameFormatter,
+		newClientTrace: t.NewClientTrace,
+	}
+	rt = statsTransport{base: rt}
+	return rt.RoundTrip(req)
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
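
The per-request hook is the more flexible of the two start-option knobs; a hedged sketch of sampling only selected paths (`trace.AlwaysSample` and `trace.NeverSample` are the stock samplers; imports of net/http, strings, ochttp and trace are assumed):

```go
t := &ochttp.Transport{
	GetStartOptions: func(req *http.Request) trace.StartOptions {
		// Trace debug traffic aggressively, everything else not at all.
		if strings.HasPrefix(req.URL.Path, "/debug") {
			return trace.StartOptions{Sampler: trace.AlwaysSample()}
		}
		return trace.StartOptions{Sampler: trace.NeverSample()}
	},
}
```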
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		cr.CancelRequest(req)
+	}
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
new file mode 100644
index 00000000000..17142aabe00
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go
@@ -0,0 +1,143 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"sync"
+	"time"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+)
+
+// statsTransport is an http.RoundTripper that collects stats for the outgoing requests.
+type statsTransport struct {
+	base http.RoundTripper
+}
+
+// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request.
+func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	ctx, _ := tag.New(req.Context(),
+		tag.Upsert(KeyClientHost, req.Host),
+		tag.Upsert(Host, req.Host),
+		tag.Upsert(KeyClientPath, req.URL.Path),
+		tag.Upsert(Path, req.URL.Path),
+		tag.Upsert(KeyClientMethod, req.Method),
+		tag.Upsert(Method, req.Method))
+	req = req.WithContext(ctx)
+	track := &tracker{
+		start: time.Now(),
+		ctx:   ctx,
+	}
+	if req.Body == nil {
+		// TODO: Handle cases where ContentLength is not set.
+		track.reqSize = -1
+	} else if req.ContentLength > 0 {
+		track.reqSize = req.ContentLength
+	}
+	stats.Record(ctx, ClientRequestCount.M(1))
+
+	// Perform request.
+	resp, err := t.base.RoundTrip(req)
+
+	if err != nil {
+		track.statusCode = http.StatusInternalServerError
+		track.end()
+	} else {
+		track.statusCode = resp.StatusCode
+		if req.Method != "HEAD" {
+			track.respContentLength = resp.ContentLength
+		}
+		if resp.Body == nil {
+			track.end()
+		} else {
+			track.body = resp.Body
+			resp.Body = wrappedBody(track, resp.Body)
+		}
+	}
+	return resp, err
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t statsTransport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base.(canceler); ok {
+		cr.CancelRequest(req)
+	}
+}
+
+type tracker struct {
+	ctx               context.Context
+	respSize          int64
+	respContentLength int64
+	reqSize           int64
+	start             time.Time
+	body              io.ReadCloser
+	statusCode        int
+	endOnce           sync.Once
+}
+
+var _ io.ReadCloser = (*tracker)(nil)
+
+func (t *tracker) end() {
+	t.endOnce.Do(func() {
+		latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond)
+		respSize := t.respSize
+		if t.respSize == 0 && t.respContentLength > 0 {
+			respSize = t.respContentLength
+		}
+		m := []stats.Measurement{
+			ClientSentBytes.M(t.reqSize),
+			ClientReceivedBytes.M(respSize),
+			ClientRoundtripLatency.M(latencyMs),
+			ClientLatency.M(latencyMs),
+			ClientResponseBytes.M(t.respSize),
+		}
+		if t.reqSize >= 0 {
+			m = append(m, ClientRequestBytes.M(t.reqSize))
+		}
+
+		stats.RecordWithTags(t.ctx, []tag.Mutator{
+			tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)),
+			tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)),
+		}, m...)
+	})
+}
+
+func (t *tracker) Read(b []byte) (int, error) {
+	n, err := t.body.Read(b)
+	t.respSize += int64(n)
+	switch err {
+	case nil:
+		return n, nil
+	case io.EOF:
+		t.end()
+	}
+	return n, err
+}
+
+func (t *tracker) Close() error {
+	// Invoking end on Close helps catch the cases in which a read
+	// returned a non-nil error and we set the span status but didn't
+	// end the span.
+	t.end()
+	return t.body.Close()
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go
new file mode 100644
index 00000000000..10e626b16e6
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go
@@ -0,0 +1,19 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package ochttp provides OpenCensus instrumentation for the net/http package.
+//
+// For server instrumentation, see Handler. For client-side instrumentation,
+// see Transport.
+package ochttp // import "go.opencensus.io/plugin/ochttp"
diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
new file mode 100644
index 00000000000..9ad8852198d
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go
@@ -0,0 +1,123 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package b3 contains a propagation.HTTPFormat implementation
+// for B3 propagation. See https://github.com/openzipkin/b3-propagation
+// for more details.
+package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3"
+
+import (
+	"encoding/hex"
+	"net/http"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+// B3 headers that OpenCensus understands.
+const (
+	TraceIDHeader = "X-B3-TraceId"
+	SpanIDHeader  = "X-B3-SpanId"
+	SampledHeader = "X-B3-Sampled"
+)
+
+// HTTPFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers in B3 propagation format.
+// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers
+// because there are additional fields not represented in the
+// OpenCensus span context. Spans created from the incoming
+// header will be the direct children of the client-side span.
+// Similarly, the receiver of outgoing spans should use the client-side
+// span created by OpenCensus as the parent.
+type HTTPFormat struct{}
+
+var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
+
+// SpanContextFromRequest extracts a B3 span context from incoming requests.
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+	tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader))
+	if !ok {
+		return trace.SpanContext{}, false
+	}
+	sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader))
+	if !ok {
+		return trace.SpanContext{}, false
+	}
+	sampled, _ := ParseSampled(req.Header.Get(SampledHeader))
+	return trace.SpanContext{
+		TraceID:      tid,
+		SpanID:       sid,
+		TraceOptions: sampled,
+	}, true
+}
+
+// ParseTraceID parses the value of the X-B3-TraceId header.
+func ParseTraceID(tid string) (trace.TraceID, bool) {
+	if tid == "" {
+		return trace.TraceID{}, false
+	}
+	b, err := hex.DecodeString(tid)
+	if err != nil || len(b) > 16 {
+		return trace.TraceID{}, false
+	}
+	var traceID trace.TraceID
+	if len(b) <= 8 {
+		// The lower 64-bits.
+		start := 8 + (8 - len(b))
+		copy(traceID[start:], b)
+	} else {
+		start := 16 - len(b)
+		copy(traceID[start:], b)
+	}
+
+	return traceID, true
+}
+
+// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers.
+func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) {
+	if sid == "" {
+		return trace.SpanID{}, false
+	}
+	b, err := hex.DecodeString(sid)
+	if err != nil || len(b) > 8 {
+		return trace.SpanID{}, false
+	}
+	start := 8 - len(b)
+	copy(spanID[start:], b)
+	return spanID, true
+}
+
+// ParseSampled parses the value of the X-B3-Sampled header.
+func ParseSampled(sampled string) (trace.TraceOptions, bool) {
+	switch sampled {
+	case "true", "1":
+		return trace.TraceOptions(1), true
+	default:
+		return trace.TraceOptions(0), false
+	}
+}
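
The padding logic above means short (64-bit) B3 trace ids land in the low 8 bytes of the 16-byte TraceID; an illustration (the hex strings are made up):

```go
tid, ok := b3.ParseTraceID("463ac35c9f6413ad")
// ok == true
// hex(tid) == "0000000000000000463ac35c9f6413ad"

sid, ok2 := b3.ParseSpanID("72485a3953bb6124")
// ok2 == true; hex(sid) == "72485a3953bb6124"
```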
+
+// SpanContextToRequest modifies the given request to include B3 headers.
+func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
+	req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:]))
+	req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:]))
+
+	var sampled string
+	if sc.IsSampled() {
+		sampled = "1"
+	} else {
+		sampled = "0"
+	}
+	req.Header.Set(SampledHeader, sampled)
+}
diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go
new file mode 100644
index 00000000000..5e6a3430760
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/route.go
@@ -0,0 +1,61 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+	"context"
+	"net/http"
+
+	"go.opencensus.io/tag"
+)
+
+// SetRoute sets the http_server_route tag to the given value.
+// It's useful when an HTTP framework does not support the http.Handler interface
+// and using WithRouteTag is not an option, but provides a way to hook into the request flow.
+func SetRoute(ctx context.Context, route string) {
+	if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok {
+		a.t = append(a.t, tag.Upsert(KeyServerRoute, route))
+	}
+}
+
+// WithRouteTag returns an http.Handler that records stats with the
+// http_server_route tag set to the given value.
+func WithRouteTag(handler http.Handler, route string) http.Handler {
+	return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator {
+		addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)}
+		ctx, _ := tag.New(r.Context(), addRoute...)
+		r = r.WithContext(ctx)
+		handler.ServeHTTP(w, r)
+		return addRoute
+	})
+}
+
+// taggedHandlerFunc is a http.Handler that returns tags describing the
+// processing of the request. These tags will be recorded along with the
+// measures in this package at the end of the request.
+type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator
+
+func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	tags := h(w, r)
+	if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok {
+		a.t = append(a.t, tags...)
+	}
+}
+
+type addedTagsKey struct{}
+
+type addedTags struct {
+	t []tag.Mutator
+}
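
For frameworks that bypass http.Handler middleware, SetRoute can be called from inside the request handler itself; a hedged sketch (`userHandler` is a placeholder name):

```go
func userHandler(w http.ResponseWriter, r *http.Request) {
	// Record the route pattern, not the concrete URL, to keep tag cardinality low.
	ochttp.SetRoute(r.Context(), "/users/:id")
	// ... handle the request ...
}
```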
diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go
new file mode 100644
index 00000000000..f7c8434be06
--- /dev/null
+++ b/vendor/go.opencensus.io/plugin/ochttp/server.go
@@ -0,0 +1,455 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ochttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strconv"
+	"sync"
+	"time"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/tag"
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+// Handler is an http.Handler wrapper to instrument your HTTP server with
+// OpenCensus. It supports both stats and tracing.
+//
+// # Tracing
+//
+// This handler is aware of the incoming request's span, reading it from request
+// headers as configured using the Propagation field.
+// The extracted span can be accessed from the incoming request's
+// context.
+//
+//	span := trace.FromContext(r.Context())
+//
+// The server span will be automatically ended at the end of ServeHTTP.
+type Handler struct {
+	// Propagation defines how traces are propagated. If unspecified,
+	// B3 propagation will be used.
+	Propagation propagation.HTTPFormat
+
+	// Handler is the handler used to handle the incoming request.
+	Handler http.Handler
+
+	// StartOptions are applied to the span started by this Handler around each
+	// request.
+	//
+	// StartOptions.SpanKind will always be set to trace.SpanKindServer
+	// for spans started by this transport.
+	StartOptions trace.StartOptions
+
+	// GetStartOptions allows setting start options per request. If set,
+	// StartOptions is ignored.
+	GetStartOptions func(*http.Request) trace.StartOptions
+
+	// IsPublicEndpoint should be set to true for publicly accessible HTTP(S)
+	// servers. If true, any trace metadata set on the incoming request will
+	// be added as a linked trace instead of being added as a parent of the
+	// current trace.
+	IsPublicEndpoint bool
+
+	// FormatSpanName holds the function to use for generating the span name
+	// from the information found in the incoming HTTP Request. By default the
+	// name equals the URL Path.
+	FormatSpanName func(*http.Request) string
+
+	// IsHealthEndpoint holds the function to use for determining if the
+	// incoming HTTP request should be considered a health check. This is in
+	// addition to the private isHealthEndpoint func which may also indicate
+	// tracing should be skipped.
+	IsHealthEndpoint func(*http.Request) bool
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	var tags addedTags
+	r, traceEnd := h.startTrace(w, r)
+	defer traceEnd()
+	w, statsEnd := h.startStats(w, r)
+	defer statsEnd(&tags)
+	handler := h.Handler
+	if handler == nil {
+		handler = http.DefaultServeMux
+	}
+	r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags))
+	handler.ServeHTTP(w, r)
+}
+
+func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) {
+	if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) {
+		return r, func() {}
+	}
+	var name string
+	if h.FormatSpanName == nil {
+		name = spanNameFromURL(r)
+	} else {
+		name = h.FormatSpanName(r)
+	}
+	ctx := r.Context()
+
+	startOpts := h.StartOptions
+	if h.GetStartOptions != nil {
+		startOpts = h.GetStartOptions(r)
+	}
+
+	var span *trace.Span
+	sc, ok := h.extractSpanContext(r)
+	if ok && !h.IsPublicEndpoint {
+		ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc,
+			trace.WithSampler(startOpts.Sampler),
+			trace.WithSpanKind(trace.SpanKindServer))
+	} else {
+		ctx, span = trace.StartSpan(ctx, name,
+			trace.WithSampler(startOpts.Sampler),
+			trace.WithSpanKind(trace.SpanKindServer),
+		)
+		if ok {
+			span.AddLink(trace.Link{
+				TraceID:    sc.TraceID,
+				SpanID:     sc.SpanID,
+				Type:       trace.LinkTypeParent,
+				Attributes: nil,
+			})
+		}
+	}
+	span.AddAttributes(requestAttrs(r)...)
+	if r.Body == nil {
+		// TODO: Handle cases where ContentLength is not set.
+	} else if r.ContentLength > 0 {
+		span.AddMessageReceiveEvent(0, /* TODO: messageID */
+			r.ContentLength, -1)
+	}
+	return r.WithContext(ctx), span.End
+}
+
+func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) {
+	if h.Propagation == nil {
+		return defaultFormat.SpanContextFromRequest(r)
+	}
+	return h.Propagation.SpanContextFromRequest(r)
+}
+
+func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) {
+	ctx, _ := tag.New(r.Context(),
+		tag.Upsert(Host, r.Host),
+		tag.Upsert(Path, r.URL.Path),
+		tag.Upsert(Method, r.Method))
+	track := &trackingResponseWriter{
+		start:  time.Now(),
+		ctx:    ctx,
+		writer: w,
+	}
+	if r.Body == nil {
+		// TODO: Handle cases where ContentLength is not set.
+		track.reqSize = -1
+	} else if r.ContentLength > 0 {
+		track.reqSize = r.ContentLength
+	}
+	stats.Record(ctx, ServerRequestCount.M(1))
+	return track.wrappedResponseWriter(), track.end
+}
+
+type trackingResponseWriter struct {
+	ctx        context.Context
+	reqSize    int64
+	respSize   int64
+	start      time.Time
+	statusCode int
+	statusLine string
+	endOnce    sync.Once
+	writer     http.ResponseWriter
+}
+
+// Compile time assertion for ResponseWriter interface
+var _ http.ResponseWriter = (*trackingResponseWriter)(nil)
+
+func (t *trackingResponseWriter) end(tags *addedTags) {
+	t.endOnce.Do(func() {
+		if t.statusCode == 0 {
+			t.statusCode = 200
+		}
+
+		span := trace.FromContext(t.ctx)
+		span.SetStatus(TraceStatus(t.statusCode, t.statusLine))
+		span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode)))
+
+		m := []stats.Measurement{
+			ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)),
+			ServerResponseBytes.M(t.respSize),
+		}
+		if t.reqSize >= 0 {
+			m = append(m, ServerRequestBytes.M(t.reqSize))
+		}
+		allTags := make([]tag.Mutator, len(tags.t)+1)
+		allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))
+		copy(allTags[1:], tags.t)
+		stats.RecordWithTags(t.ctx, allTags, m...)
+	})
+}
+
+func (t *trackingResponseWriter) Header() http.Header {
+	return t.writer.Header()
+}
+
+func (t *trackingResponseWriter) Write(data []byte) (int, error) {
+	n, err := t.writer.Write(data)
+	t.respSize += int64(n)
+	// Add a message send event for the bytes written to the response.
+	span := trace.FromContext(t.ctx)
+	span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1)
+	return n, err
+}
+
+func (t *trackingResponseWriter) WriteHeader(statusCode int) {
+	t.writer.WriteHeader(statusCode)
+	t.statusCode = statusCode
+	t.statusLine = http.StatusText(t.statusCode)
+}
+
+// wrappedResponseWriter returns a wrapped version of the original
+// ResponseWriter and only implements the same combination of additional
+// interfaces as the original.
+// This implementation is based on https://github.com/felixge/httpsnoop.
+func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter {
+	var (
+		hj, i0 = t.writer.(http.Hijacker)
+		cn, i1 = t.writer.(http.CloseNotifier)
+		pu, i2 = t.writer.(http.Pusher)
+		fl, i3 = t.writer.(http.Flusher)
+		rf, i4 = t.writer.(io.ReaderFrom)
+	)
+
+	switch {
+	case !i0 && !i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+		}{t}
+	case !i0 && !i1 && !i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			io.ReaderFrom
+		}{t, rf}
+	case !i0 && !i1 && !i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Flusher
+		}{t, fl}
+	case !i0 && !i1 && !i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Flusher
+			io.ReaderFrom
+		}{t, fl, rf}
+	case !i0 && !i1 && i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+		}{t, pu}
+	case !i0 && !i1 && i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+			io.ReaderFrom
+		}{t, pu, rf}
+	case !i0 && !i1 && i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+			http.Flusher
+		}{t, pu, fl}
+	case !i0 && !i1 && i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Pusher
+			http.Flusher
+			io.ReaderFrom
+		}{t, pu, fl, rf}
+	case !i0 && i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+		}{t, cn}
+	case !i0 && i1 && !i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			io.ReaderFrom
+		}{t, cn, rf}
+	case !i0 && i1 && !i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Flusher
+		}{t, cn, fl}
+	case !i0 && i1 && !i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Flusher
+			io.ReaderFrom
+		}{t, cn, fl, rf}
+	case !i0 && i1 && i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+		}{t, cn, pu}
+	case !i0 && i1 && i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+			io.ReaderFrom
+		}{t, cn, pu, rf}
+	case !i0 && i1 && i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+			http.Flusher
+		}{t, cn, pu, fl}
+	case !i0 && i1 && i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.CloseNotifier
+			http.Pusher
+			http.Flusher
+			io.ReaderFrom
+		}{t, cn, pu, fl, rf}
+	case i0 && !i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+		}{t, hj}
+	case i0 && !i1 && !i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			io.ReaderFrom
+		}{t, hj, rf}
+	case i0 && !i1 && !i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Flusher
+		}{t, hj, fl}
+	case i0 && !i1 && !i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Flusher
+			io.ReaderFrom
+		}{t, hj, fl, rf}
+	case i0 && !i1 && i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+		}{t, hj, pu}
+	case i0 && !i1 && i2 && !i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+			io.ReaderFrom
+		}{t, hj, pu, rf}
+	case i0 && !i1 && i2 && i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+			http.Flusher
+		}{t, hj, pu, fl}
+	case i0 && !i1 && i2 && i3 && i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.Pusher
+			http.Flusher
+			io.ReaderFrom
+		}{t, hj, pu, fl, rf}
+	case i0 && i1 && !i2 && !i3 && !i4:
+		return struct {
+			http.ResponseWriter
+			http.Hijacker
+			http.CloseNotifier
+		}{t, hj, cn}
+	case i0 && i1 && !i2 && !i3 && i4:
+		return struct {
http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 00000000000..05c6c56cc79 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
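// As a rough usage sketch (illustrative only; assumes a span has already been
// started and uses a placeholder URL), the returned ClientTrace is attached to
// an outgoing request via net/http/httptrace:
//
//	ctx, span := trace.StartSpan(context.Background(), "example/fetch")
//	defer span.End()
//	req, _ := http.NewRequest("GET", "https://example.com", nil)
//	ct := ochttp.NewSpanAnnotatingClientTrace(req, span)
//	req = req.WithContext(httptrace.WithClientTrace(ctx, ct))
//	resp, err := http.DefaultClient.Do(req)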
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, 
"Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 00000000000..ee3729040dd --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,292 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following client HTTP measures are supported for use in custom views. 
+var ( + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + "opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host = tag.MustNewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. + StatusCode = tag.MustNewKey("http.status") + + // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path = tag.MustNewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method = tag.MustNewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on a + // ServeMux (or a similar string). + KeyServerRoute = tag.MustNewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod = tag.MustNewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath = tag.MustNewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500), or "error" if no response status line was received. + KeyClientStatus = tag.MustNewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost = tag.MustNewKey("http_client_host") +) + +// Default distributions used by views in this package.
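// These defaults can be reused in custom views; for example, a server latency
// view keyed by the route tag defined above (a sketch, with an illustrative
// view name):
//
//	v := &view.View{
//		Name:        "example.com/http/server/latency_by_route",
//		Measure:     ochttp.ServerLatency,
//		Aggregation: ochttp.DefaultLatencyDistribution,
//		TagKeys:     []tag.Key{ochttp.KeyServerRoute},
//	}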
+var ( + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views for client measures. +// You still need to register these views for data to actually be collected. +var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +// Deprecated: Old client Views. +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientSentBytesDistribution. + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientReceivedBytesDistribution instead. + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientRoundtripLatencyDistribution instead. + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + } + + // Deprecated: Use ClientCompletedCount instead. + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientSentBytes, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientCompletedCount instead. 
+ ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. +var ( + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 00000000000..ed3a5db5611 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,244 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ochttp + +import ( + "io" + "net/http" + "net/http/httptrace" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. + +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + URLAttribute = "http.url" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace +} + +// TODO(jbd): Add message events for request and response size. + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is present in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := t.formatSpanName(req) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + ctx, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// span.End() on encountering io.EOF on reading +// the body of the original response. +type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is trace.StatusCodeUnknown. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking span.End on Close will help catch the cases + // in which a read returned a non-nil error: we set the + // span status but didn't end the span.
+ bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), + trace.StringAttribute(MethodAttribute, r.Method), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) + } + + return attrs +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusUnprocessableEntity: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists + } + + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. 
+ if path == "/healthz" || path == "/_ah/health" { + return true + } + return false +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 00000000000..7d75cae2b18 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 00000000000..b1764e1d3b9 --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. +type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. 
+func EncodeLabels(labels map[string]string) string { + sortedKeys := make([]string, 0, len(labels)) + for k := range labels { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + s := "" + for i, k := range sortedKeys { + if i > 0 { + s += "," + } + s += k + "=" + strconv.Quote(labels[k]) + } + return s +} + +var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) + +// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. +// A list of labels of the form `key1="value1",key2="value2",...` is accepted. +// Domain names and paths are accepted as label keys. +// Most users will want to use FromEnv instead. +func DecodeLabels(s string) (map[string]string, error) { + m := map[string]string{} + // Ensure a trailing comma, which allows us to keep the regex simpler + s = strings.TrimRight(strings.TrimSpace(s), ",") + "," + + for len(s) > 0 { + match := labelRegex.FindStringSubmatch(s) + if len(match) == 0 { + return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) + } + v := match[2] + if v == "" { + v = match[3] + } else { + var err error + if v, err = strconv.Unquote(v); err != nil { + return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) + } + } + m[match[1]] = v + + s = s[len(match[0]):] + } + return m, nil +} + +// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE +// and OC_RESOURCE_LABELS environment variables. +func FromEnv(context.Context) (*Resource, error) { + res := &Resource{ + Type: strings.TrimSpace(os.Getenv(EnvVarType)), + } + labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) + if labels == "" { + return res, nil + } + var err error + if res.Labels, err = DecodeLabels(labels); err != nil { + return nil, err + } + return res, nil +} + +var _ Detector = FromEnv + +// merge resource information from b into a. In case of a collision, a takes precedence. +func merge(a, b *Resource) *Resource { + if a == nil { + return b + } + if b == nil { + return a + } + res := &Resource{ + Type: a.Type, + Labels: map[string]string{}, + } + if res.Type == "" { + res.Type = b.Type + } + for k, v := range b.Labels { + res.Labels[k] = v + } + // Labels from resource a overwrite labels from resource b. + for k, v := range a.Labels { + res.Labels[k] = v + } + return res +} + +// Detector attempts to detect resource information. +// If the detector cannot find resource information, the returned resource is nil but no +// error is returned. +// An error is only returned on unexpected failures. +type Detector func(context.Context) (*Resource, error) + +// MultiDetector returns a Detector that calls all input detectors in order and +// merges each result with the previous one. In case a type or label key is already set, +// the first value set takes precedence. +// It returns on the first error that a sub-detector encounters. +func MultiDetector(detectors ...Detector) Detector { + return func(ctx context.Context) (*Resource, error) { + return detectAll(ctx, detectors...) + } +} + +// detectAll calls all input detectors sequentially and merges each result with the previous one. +// It returns on the first error that a sub-detector encounters.
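// For example (a sketch; customDetector stands in for any Detector
// implementation you supply):
//
//	detector := resource.MultiDetector(resource.FromEnv, customDetector)
//	res, err := detector(context.Background())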
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go new file mode 100644 index 00000000000..31477a464fd --- /dev/null +++ b/vendor/go.opencensus.io/stats/doc.go @@ -0,0 +1,68 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package stats contains support for OpenCensus stats recording. + +OpenCensus allows users to create typed measures, record measurements, +aggregate the collected data, and export the aggregated data. + +# Measures + +A measure represents a type of data point to be tracked and recorded. +For example, latency, request Mb/s, and response Mb/s are measures +to collect from a server. + +Measure constructors such as Int64 and Float64 automatically +register the measure by the given name. Each registered measure needs +to be unique by name. Measures also have a description and a unit. + +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. + +# Recording measurements + +Measurement is a data point to be collected for a measure. For example, +for a latency (ms) measure, 100 is a measurement that represents a 100ms +latency event. Measurements are created from measures with +the current context. Tags from the current context are recorded with the +measurements if there are any. + +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable +recording to reduce cost. Recording of measurements is cheap. + +Libraries can always record measurements, and applications can later decide +on which measurements they want to collect by registering views. This allows +libraries to turn on the instrumentation by default. + +# Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution.
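Putting these pieces together, a compact sketch (all names below are
illustrative, not part of this package):

	var latencyMs = stats.Float64("example.com/measure/latency", "RPC latency", stats.UnitMilliseconds)

	func init() {
		if err := view.Register(&view.View{
			Name:        "example.com/views/latency",
			Measure:     latencyMs,
			Aggregation: view.Distribution(10, 100, 1000),
		}); err != nil {
			log.Fatal(err)
		}
	}

	func handle(ctx context.Context) {
		start := time.Now()
		// ... do work ...
		stats.Record(ctx, latencyMs.M(float64(time.Since(start).Milliseconds())))
	}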
+*/ +package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go new file mode 100644 index 00000000000..436dc791f83 --- /dev/null +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "go.opencensus.io/tag" +) + +// DefaultRecorder will be called for each Record call. +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) + +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + +// SubscriptionReporter reports when a view subscribed with a measure. +var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go new file mode 100644 index 00000000000..1ffd3cefc73 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure.go @@ -0,0 +1,109 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "sync" + "sync/atomic" +) + +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures +// to collect from a server. +// +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. +type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. + Name() string + + // Description returns the human-readable description of this measure. + Description() string + + // Unit returns the units for the values this measure takes on. 
+ // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html + Unit() string +} + +// measureDescriptor is the untyped descriptor associated with each measure. +// Int64Measure and Float64Measure wrap measureDescriptor to provide typed +// recording APIs. +// Two Measures with the same name will have the same measureDescriptor. +type measureDescriptor struct { + subs int32 // access atomically + + name string + description string + unit string +} + +func (m *measureDescriptor) subscribe() { + atomic.StoreInt32(&m.subs, 1) +} + +func (m *measureDescriptor) subscribed() bool { + return atomic.LoadInt32(&m.subs) == 1 +} + +var ( + mu sync.RWMutex + measures = make(map[string]*measureDescriptor) +) + +func registerMeasureHandle(name, desc, unit string) *measureDescriptor { + mu.Lock() + defer mu.Unlock() + + if stored, ok := measures[name]; ok { + return stored + } + m := &measureDescriptor{ + name: name, + description: desc, + unit: unit, + } + measures[name] = m + return m +} + +// Measurement is the numeric value measured when recording stats. Each measure +// provides methods to create measurements of their kind. For example, Int64Measure +// provides M to convert an int64 into a measurement. +type Measurement struct { + v float64 + m Measure + desc *measureDescriptor +} + +// Value returns the value of the Measurement as a float64. +func (m Measurement) Value() float64 { + return m.v +} + +// Measure returns the Measure from which this Measurement was created. +func (m Measurement) Measure() Measure { + return m.m +} diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go new file mode 100644 index 00000000000..f02c1eda845 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Float64Measure is a measure for float64 values. +type Float64Measure struct { + desc *measureDescriptor +} + +// M creates a new float64 measurement. +// Use Record to record measurements. +func (m *Float64Measure) M(v float64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: v, + } +} + +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Float64(name, description, unit string) *Float64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Float64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. 
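// Note that measures are registered by name: constructing a second measure
// with an existing name returns a handle backed by the same descriptor, and
// the first description and unit win. A sketch (names illustrative):
//
//	a := stats.Float64("example.com/m", "first description", stats.UnitMilliseconds)
//	b := stats.Float64("example.com/m", "ignored", stats.UnitBytes)
//	// a.Description() == b.Description() == "first description"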
+func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go new file mode 100644 index 00000000000..d101d797358 --- /dev/null +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -0,0 +1,55 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Int64Measure is a measure for int64 values. +type Int64Measure struct { + desc *measureDescriptor +} + +// M creates a new int64 measurement. +// Use Record to record measurements. +func (m *Int64Measure) M(v int64) Measurement { + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), + } +} + +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. +func Int64(name, description, unit string) *Int64Measure { + mi := registerMeasureHandle(name, description, unit) + return &Int64Measure{mi} +} + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go new file mode 100644 index 00000000000..8b5b99803ce --- /dev/null +++ b/vendor/go.opencensus.io/stats/record.go @@ -0,0 +1,156 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +import ( + "context" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats/internal" + "go.opencensus.io/tag" +) + +func init() { + internal.SubscriptionReporter = func(measure string) { + mu.Lock() + measures[measure].subscribe() + mu.Unlock() + } +} + +// Recorder provides an interface for exporting measurement information from +// the static Record method by using the WithRecorder option. +type Recorder interface { + // Record records a set of measurements associated with the given tags and attachments. + // The second argument is a `[]Measurement`. 
+ Record(*tag.Map, interface{}, map[string]interface{}) +} + +type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement + recorder Recorder +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// WithRecorder records the measurements to the specified `Recorder`, rather +// than to the global metrics recorder. +func WithRecorder(meter Recorder) Options { + return func(ro *recordOptions) { + ro.recorder = meter + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + +// Record records one or multiple measurements with the same context at once. +// If there are any tags in the context, measurements will be tagged with them. +func Record(ctx context.Context, ms ...Measurement) { + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) 
+ if len(o.measurements) == 0 { + return nil + } + recorder := internal.DefaultRecorder + if o.recorder != nil { + recorder = o.recorder.Record + } + if recorder == nil { + return nil + } + record := false + for _, m := range o.measurements { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return nil + } + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } + } + recorder(tag.FromContext(ctx), o.measurements, o.attachments) + return nil +} diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go new file mode 100644 index 00000000000..736399652cc --- /dev/null +++ b/vendor/go.opencensus.io/stats/units.go @@ -0,0 +1,26 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package stats + +// Units are encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +const ( + UnitNone = "1" // Deprecated: Use UnitDimensionless. + UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" + UnitSeconds = "s" +) diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go new file mode 100644 index 00000000000..61f72d20da3 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -0,0 +1,123 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import "time" + +// AggType represents the type of aggregation function used on a View. +type AggType int + +// All available aggregation types. +const ( + AggTypeNone AggType = iota // no aggregation; reserved for future use. + AggTypeCount // the count aggregation, see Count. + AggTypeSum // the sum aggregation, see Sum. + AggTypeDistribution // the distribution aggregation, see Distribution. + AggTypeLastValue // the last value aggregation, see LastValue. +) + +func (t AggType) String() string { + return aggTypeName[t] +} + +var aggTypeName = map[AggType]string{ + AggTypeNone: "None", + AggTypeCount: "Count", + AggTypeSum: "Sum", + AggTypeDistribution: "Distribution", + AggTypeLastValue: "LastValue", +} + +// Aggregation represents a data aggregation method. Use one of the functions: +// Count, Sum, or Distribution to construct an Aggregation. +type Aggregation struct { + Type AggType // Type is the AggType of this Aggregation. 
+ Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. + + newData func(time.Time) AggregationData +} + +var ( + aggCount = &Aggregation{ + Type: AggTypeCount, + newData: func(t time.Time) AggregationData { + return &CountData{Start: t} + }, + } + aggSum = &Aggregation{ + Type: AggTypeSum, + newData: func(t time.Time) AggregationData { + return &SumData{Start: t} + }, + } +) + +// Count indicates that data collected and aggregated +// with this method will be turned into a count value. +// For example, total number of accepted requests can be +// aggregated by using Count. +func Count() *Aggregation { + return aggCount +} + +// Sum indicates that data collected and aggregated +// with this method will be summed up. +// For example, accumulated request bytes can be aggregated by using +// Sum. +func Sum() *Aggregation { + return aggSum +} + +// Distribution indicates that the desired aggregation is +// a histogram distribution. +// +// A distribution aggregation may contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described +// by the bounds. This defines len(bounds)+1 buckets. +// +// If len(bounds) >= 2 then the boundaries for bucket index i are: +// +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length +// +// If len(bounds) is 0 then there is no histogram associated with the +// distribution. There will be a single bucket with boundaries +// (-infinity, +infinity). +// +// If len(bounds) is 1 then there are no finite buckets, and that single +// element is the common boundary of the overflow and underflow buckets. +func Distribution(bounds ...float64) *Aggregation { + agg := &Aggregation{ + Type: AggTypeDistribution, + Buckets: bounds, + } + agg.newData = func(t time.Time) AggregationData { + return newDistributionData(agg, t) + } + return agg +} + +// LastValue only reports the last value recorded using this +// aggregation. All other measurements will be dropped. +func LastValue() *Aggregation { + return &Aggregation{ + Type: AggTypeLastValue, + newData: func(_ time.Time) AggregationData { + return &LastValueData{} + }, + } +} diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go new file mode 100644 index 00000000000..d93b520662d --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -0,0 +1,336 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "math" + "time" + + "go.opencensus.io/metric/metricdata" +) + +// AggregationData represents an aggregated value from a collection. +// They are reported on the view data during exporting. +// Most users won't directly access aggregation data.
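// When needed, aggregated data can still be inspected programmatically, for
// example via view.RetrieveData (a sketch; the view name is a placeholder):
//
//	rows, err := view.RetrieveData("example.com/views/latency")
//	for _, row := range rows {
//		if d, ok := row.Data.(*view.DistributionData); ok {
//			fmt.Println(row.Tags, d.Count, d.Mean)
//		}
//	}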
+type AggregationData interface { + isAggregationData() bool + addSample(v float64, attachments map[string]interface{}, t time.Time) + clone() AggregationData + equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point + StartTime() time.Time +} + +const epsilon = 1e-9 + +// CountData is the aggregated data for the Count aggregation. +// A count aggregation processes data and counts the recordings. +// +// Most users won't directly access count data. +type CountData struct { + Start time.Time + Value int64 +} + +func (a *CountData) isAggregationData() bool { return true } + +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { + a.Value = a.Value + 1 +} + +func (a *CountData) clone() AggregationData { + return &CountData{Value: a.Value, Start: a.Start} +} + +func (a *CountData) equal(other AggregationData) bool { + a2, ok := other.(*CountData) + if !ok { + return false + } + + return a.Start.Equal(a2.Start) && a.Value == a2.Value +} + +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// StartTime returns the start time of the data being aggregated by CountData. +func (a *CountData) StartTime() time.Time { + return a.Start +} + +// SumData is the aggregated data for the Sum aggregation. +// A sum aggregation processes data and sums up the recordings. +// +// Most users won't directly access sum data. +type SumData struct { + Start time.Time + Value float64 +} + +func (a *SumData) isAggregationData() bool { return true } + +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v +} + +func (a *SumData) clone() AggregationData { + return &SumData{Value: a.Value, Start: a.Start} +} + +func (a *SumData) equal(other AggregationData) bool { + a2, ok := other.(*SumData) + if !ok { + return false + } + return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon +} + +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + +// StartTime returns the start time of the data being aggregated by SumData. +func (a *SumData) StartTime() time.Time { + return a.Start +} + +// DistributionData is the aggregated data for the +// Distribution aggregation. +// +// Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. +type DistributionData struct { + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is a slice of the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil.
+ ExemplarsPerBucket []*metricdata.Exemplar + bounds []float64 // histogram distribution of the values + Start time.Time +} + +func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { + bucketCount := len(agg.Buckets) + 1 + return &DistributionData{ + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + bounds: agg.Buckets, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, + Start: t, + } +} + +// Sum returns the sum of all samples collected. +func (a *DistributionData) Sum() float64 { return a.Mean * float64(a.Count) } + +func (a *DistributionData) variance() float64 { + if a.Count <= 1 { + return 0 + } + return a.SumOfSquaredDev / float64(a.Count-1) +} + +func (a *DistributionData) isAggregationData() bool { return true } + +// TODO(songy23): support exemplar attachments. +func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v + } + if v > a.Max { + a.Max = v + } + a.Count++ + a.addToBucket(v, attachments, t) + + if a.Count == 1 { + a.Mean = v + return + } + + oldMean := a.Mean + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) +} + +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { + var count *int64 + var i int + var b float64 + for i, b = range a.bounds { + if v < b { + count = &a.CountPerBucket[i] + break + } + } + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] + } + *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } +} + +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, + } +} + +func (a *DistributionData) clone() AggregationData { + c := *a + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) + return &c +} + +func (a *DistributionData) equal(other AggregationData) bool { + a2, ok := other.(*DistributionData) + if !ok { + return false + } + if a2 == nil { + return false + } + if len(a.CountPerBucket) != len(a2.CountPerBucket) { + return false + } + for i := range a.CountPerBucket { + if a.CountPerBucket[i] != a2.CountPerBucket[i] { + return false + } + } + return a.Start.Equal(a2.Start) && + a.Count == a2.Count && + a.Min == a2.Min && + a.Max == a2.Max && + math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon +} + +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. 
+		panic("unsupported metricdata.Type")
+	}
+}
+
+// StartTime returns the start time of the data being aggregated by DistributionData.
+func (a *DistributionData) StartTime() time.Time {
+	return a.Start
+}
+
+// LastValueData contains the last value recorded for the LastValue
+// aggregation.
+type LastValueData struct {
+	Value float64
+}
+
+func (l *LastValueData) isAggregationData() bool {
+	return true
+}
+
+func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) {
+	l.Value = v
+}
+
+func (l *LastValueData) clone() AggregationData {
+	return &LastValueData{l.Value}
+}
+
+func (l *LastValueData) equal(other AggregationData) bool {
+	a2, ok := other.(*LastValueData)
+	if !ok {
+		return false
+	}
+	return l.Value == a2.Value
+}
+
+func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point {
+	switch metricType {
+	case metricdata.TypeGaugeInt64:
+		return metricdata.NewInt64Point(t, int64(l.Value))
+	case metricdata.TypeGaugeFloat64:
+		return metricdata.NewFloat64Point(t, l.Value)
+	default:
+		panic("unsupported metricdata.Type")
+	}
+}
+
+// StartTime returns an empty time value as start time is not recorded when using last value
+// aggregation.
+func (l *LastValueData) StartTime() time.Time {
+	return time.Time{}
+}
+
+// ClearStart clears the Start field from data if present. Useful for testing in cases where the
+// start time will be nondeterministic.
+func ClearStart(data AggregationData) {
+	switch data := data.(type) {
+	case *CountData:
+		data.Start = time.Time{}
+	case *SumData:
+		data.Start = time.Time{}
+	case *DistributionData:
+		data.Start = time.Time{}
+	}
+}
diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go
new file mode 100644
index 00000000000..bcd6e08c748
--- /dev/null
+++ b/vendor/go.opencensus.io/stats/view/collector.go
@@ -0,0 +1,93 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"sort"
+	"time"
+
+	"go.opencensus.io/internal/tagencoding"
+	"go.opencensus.io/tag"
+)
+
+type collector struct {
+	// signatures maps each unique tag signature (values for all keys) to its
+	// aggregator.
+	signatures map[string]AggregationData
+	// a is the description of the aggregation to perform for this view.
+	a *Aggregation
+}
+
+func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) {
+	aggregator, ok := c.signatures[s]
+	if !ok {
+		aggregator = c.a.newData(t)
+		c.signatures[s] = aggregator
+	}
+	aggregator.addSample(v, attachments, t)
+}
+
+// collectedRows returns a snapshot of the collected Row values.
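+// Each returned Row carries a clone of its AggregationData, so callers can
+// read the snapshot without racing the collector's ongoing aggregation.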
+func (c *collector) collectedRows(keys []tag.Key) []*Row { + rows := make([]*Row, 0, len(c.signatures)) + for sig, aggregator := range c.signatures { + tags := decodeTags([]byte(sig), keys) + row := &Row{Tags: tags, Data: aggregator.clone()} + rows = append(rows, row) + } + return rows +} + +func (c *collector) clearRows() { + c.signatures = make(map[string]AggregationData) +} + +// encodeWithKeys encodes the map by using values +// only associated with the keys provided. +func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } + vb := &tagencoding.Values{ + Buffer: make([]byte, reqLen), + } + for _, k := range keys { + v, _ := m.Value(k) + vb.WriteValue([]byte(v)) + } + return vb.Bytes() +} + +// decodeTags decodes tags from the buffer and +// orders them by the keys. +func decodeTags(buf []byte, keys []tag.Key) []tag.Tag { + vb := &tagencoding.Values{Buffer: buf} + var tags []tag.Tag + for _, k := range keys { + v := vb.ReadValue() + if v != nil { + tags = append(tags, tag.Tag{Key: k, Value: string(v)}) + } + } + vb.ReadIndex = 0 + sort.Slice(tags, func(i, j int) bool { return tags[i].Key.Name() < tags[j].Key.Name() }) + return tags +} diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go new file mode 100644 index 00000000000..60bf0e39254 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -0,0 +1,47 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. +// +// Views can be registered and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// # Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. 
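+//
+// As a minimal sketch (the measure and view names below are illustrative,
+// and ctx is assumed to be an existing context.Context):
+//
+//	latency := stats.Int64("example.com/measure/latency", "request latency", "ms")
+//	v := &view.View{
+//		Name:        "example.com/views/latency_distribution",
+//		Description: "distribution of request latencies",
+//		Measure:     latency,
+//		Aggregation: view.Distribution(25, 100, 500),
+//	}
+//	if err := view.Register(v); err != nil {
+//		log.Fatal(err)
+//	}
+//	stats.Record(ctx, latency.M(37))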
+package view // import "go.opencensus.io/stats/view" + +// TODO(acetechnologist): Add a link to the language independent OpenCensus +// spec when it is available. diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go new file mode 100644 index 00000000000..73ba11f5b6e --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -0,0 +1,45 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package view + +// Exporter exports the collected records as view data. +// +// The ExportView method should return quickly; if an +// Exporter takes a significant amount of time to +// process a Data, that work should be done on another goroutine. +// +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// +// The Data should not be modified. +type Exporter interface { + ExportView(viewData *Data) +} + +// RegisterExporter registers an exporter. +// Collected data will be reported via all the +// registered exporters. Once you no longer +// want data to be exported, invoke UnregisterExporter +// with the previously registered exporter. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + defaultWorker.RegisterExporter(e) +} + +// UnregisterExporter unregisters an exporter. +func UnregisterExporter(e Exporter) { + defaultWorker.UnregisterExporter(e) +} diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go new file mode 100644 index 00000000000..293b54ecbed --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -0,0 +1,221 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// View allows users to aggregate the recorded stats.Measurements. +// Views need to be passed to the Register function before data will be +// collected and sent to Exporters. +type View struct { + Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. + Description string // Description is a human-readable description for this view. + + // TagKeys are the tag keys describing the grouping of this view. 
+ // A single Row will be produced for each combination of associated tag values. + TagKeys []tag.Key + + // Measure is a stats.Measure to aggregate in this view. + Measure stats.Measure + + // Aggregation is the aggregation function to apply to the set of Measurements. + Aggregation *Aggregation +} + +// WithName returns a copy of the View with a new name. This is useful for +// renaming views to cope with limitations placed on metric names by various +// backends. +func (v *View) WithName(name string) *View { + vNew := *v + vNew.Name = name + return &vNew +} + +// same compares two views and returns true if they represent the same aggregation. +func (v *View) same(other *View) bool { + if v == other { + return true + } + if v == nil { + return false + } + return reflect.DeepEqual(v.Aggregation, other.Aggregation) && + v.Measure.Name() == other.Measure.Name() +} + +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. +var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + +// canonicalize canonicalizes v by setting explicit +// defaults for Name and Description and sorting the TagKeys +func (v *View) canonicalize() error { + if v.Measure == nil { + return fmt.Errorf("cannot register view %q: measure not set", v.Name) + } + if v.Aggregation == nil { + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) + } + if v.Name == "" { + v.Name = v.Measure.Name() + } + if v.Description == "" { + v.Description = v.Measure.Description() + } + if err := checkViewName(v.Name); err != nil { + return err + } + sort.Slice(v.TagKeys, func(i, j int) bool { + return v.TagKeys[i].Name() < v.TagKeys[j].Name() + }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + + return nil +} + +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + +// viewInternal is the internal representation of a View. +type viewInternal struct { + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor +} + +func newViewInternal(v *View) (*viewInternal, error) { + return &viewInternal{ + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), + }, nil +} + +func (v *viewInternal) subscribe() { + atomic.StoreUint32(&v.subscribed, 1) +} + +func (v *viewInternal) unsubscribe() { + atomic.StoreUint32(&v.subscribed, 0) +} + +// isSubscribed returns true if the view is exporting +// data by subscription. 
+func (v *viewInternal) isSubscribed() bool { + return atomic.LoadUint32(&v.subscribed) == 1 +} + +func (v *viewInternal) clearRows() { + v.collector.clearRows() +} + +func (v *viewInternal) collectedRows() []*Row { + return v.collector.collectedRows(v.view.TagKeys) +} + +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { + if !v.isSubscribed() { + return + } + sig := string(encodeWithKeys(m, v.view.TagKeys)) + v.collector.addSample(sig, val, attachments, t) +} + +// A Data is a set of rows about usage of the single measure associated +// with the given view. Each row is specific to a unique set of tags. +type Data struct { + View *View + Start, End time.Time + Rows []*Row +} + +// Row is the collected value for a specific set of key value pairs a.k.a tags. +type Row struct { + Tags []tag.Tag + Data AggregationData +} + +func (r *Row) String() string { + var buffer bytes.Buffer + buffer.WriteString("{ ") + buffer.WriteString("{ ") + for _, t := range r.Tags { + buffer.WriteString(fmt.Sprintf("{%v %v}", t.Key.Name(), t.Value)) + } + buffer.WriteString(" }") + buffer.WriteString(fmt.Sprintf("%v", r.Data)) + buffer.WriteString(" }") + return buffer.String() +} + +// Equal returns true if both rows are equal. Tags are expected to be ordered +// by the key name. Even if both rows have the same tags but the tags appear in +// different orders it will return false. +func (r *Row) Equal(other *Row) bool { + if r == other { + return true + } + return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) +} + +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. +func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} + +func checkViewName(name string) error { + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) + } + if !isPrintable(name) { + return fmt.Errorf("view name needs to be an ASCII string") + } + return nil +} diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 100644 index 00000000000..57d615ec7e1 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,147 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "time" + + "go.opencensus.io/resource" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLabelKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: convertUnit(v), + Type: getType(v), + LabelKeys: getLabelKeys(v), + } +} + +func convertUnit(v *View) metricdata.Unit { + switch v.Aggregation.Type { + case AggTypeCount: + return metricdata.UnitDimensionless + default: + return getUnit(v.Measure.Unit()) + } +} + +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) + for _, tag := range row.Tags { + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key := range expectedKeys { + if val, ok := tagMap[key.Key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), + StartTime: row.Data.StartTime(), + } +} + +func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + Resource: r, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go new file mode 100644 index 00000000000..6a79cd8a34c --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -0,0 +1,424 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with 
the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"go.opencensus.io/resource"
+
+	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/metric/metricproducer"
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/internal"
+	"go.opencensus.io/tag"
+)
+
+func init() {
+	defaultWorker = NewMeter().(*worker)
+	go defaultWorker.start()
+	internal.DefaultRecorder = record
+	internal.MeasurementRecorder = recordMeasurement
+}
+
+type measureRef struct {
+	measure string
+	views   map[*viewInternal]struct{}
+}
+
+type worker struct {
+	measures       map[string]*measureRef
+	views          map[string]*viewInternal
+	viewStartTimes map[*viewInternal]time.Time
+
+	timer      *time.Ticker
+	c          chan command
+	quit, done chan bool
+	mu         sync.RWMutex
+	r          *resource.Resource
+
+	exportersMu sync.RWMutex
+	exporters   map[Exporter]struct{}
+}
+
+// Meter defines an interface which allows a single process to maintain
+// multiple sets of metrics exports (intended for the advanced case where a
+// single process wants to report metrics about multiple objects, such as
+// multiple databases or HTTP services).
+//
+// Note that this is an advanced use case, and the static functions in this
+// module should cover the common use cases.
+type Meter interface {
+	stats.Recorder
+	// Find returns a registered view associated with this name.
+	// If no registered view is found, nil is returned.
+	Find(name string) *View
+	// Register begins collecting data for the given views.
+	// Once a view is registered, it reports data to the registered exporters.
+	Register(views ...*View) error
+	// Unregister the given views. Data will no longer be exported for these views
+	// after Unregister returns.
+	// It is not necessary to unregister from views you expect to collect for the
+	// duration of your program execution.
+	Unregister(views ...*View)
+	// SetReportingPeriod sets the interval between reporting aggregated views in
+	// the program. If duration is less than or equal to zero, it enables the
+	// default behavior.
+	//
+	// Note: each exporter makes different promises about what the lowest supported
+	// duration is. For example, the Stackdriver exporter recommends a value no
+	// lower than 1 minute. Consult each exporter per your needs.
+	SetReportingPeriod(time.Duration)
+
+	// RegisterExporter registers an exporter.
+	// Collected data will be reported via all the
+	// registered exporters. Once you no longer
+	// want data to be exported, invoke UnregisterExporter
+	// with the previously registered exporter.
+	//
+	// Binaries can register exporters, libraries shouldn't register exporters.
+	RegisterExporter(Exporter)
+	// UnregisterExporter unregisters an exporter.
+	UnregisterExporter(Exporter)
+	// SetResource may be used to set the Resource associated with this registry.
+	// This is intended to be used in cases where a single process exports metrics
+	// for multiple Resources, typically in a multi-tenant situation.
+	SetResource(*resource.Resource)
+
+	// Start causes the Meter to start processing Record calls and aggregating
+	// statistics as well as exporting data.
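	// As a sketch, a dedicated Meter is typically driven as follows
	// (NewMeter is defined later in this file):
	//
	//	m := NewMeter()
	//	m.Start()
	//	defer m.Stop()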
+	Start()
+	// Stop causes the Meter to stop processing calls and terminate data export.
+	Stop()
+
+	// RetrieveData gets a snapshot of the data collected for the view registered
+	// with the given name. It is intended for testing only.
+	RetrieveData(viewName string) ([]*Row, error)
+}
+
+var _ Meter = (*worker)(nil)
+
+var defaultWorker *worker
+
+var defaultReportingDuration = 10 * time.Second
+
+// Find returns a registered view associated with this name.
+// If no registered view is found, nil is returned.
+func Find(name string) (v *View) {
+	return defaultWorker.Find(name)
+}
+
+// Find returns a registered view associated with this name.
+// If no registered view is found, nil is returned.
+func (w *worker) Find(name string) (v *View) {
+	req := &getViewByNameReq{
+		name: name,
+		c:    make(chan *getViewByNameResp),
+	}
+	w.c <- req
+	resp := <-req.c
+	return resp.v
+}
+
+// Register begins collecting data for the given views.
+// Once a view is registered, it reports data to the registered exporters.
+func Register(views ...*View) error {
+	return defaultWorker.Register(views...)
+}
+
+// Register begins collecting data for the given views.
+// Once a view is registered, it reports data to the registered exporters.
+func (w *worker) Register(views ...*View) error {
+	req := &registerViewReq{
+		views: views,
+		err:   make(chan error),
+	}
+	w.c <- req
+	return <-req.err
+}
+
+// Unregister the given views. Data will no longer be exported for these views
+// after Unregister returns.
+// It is not necessary to unregister from views you expect to collect for the
+// duration of your program execution.
+func Unregister(views ...*View) {
+	defaultWorker.Unregister(views...)
+}
+
+// Unregister the given views. Data will no longer be exported for these views
+// after Unregister returns.
+// It is not necessary to unregister from views you expect to collect for the
+// duration of your program execution.
+func (w *worker) Unregister(views ...*View) {
+	names := make([]string, len(views))
+	for i := range views {
+		names[i] = views[i].Name
+	}
+	req := &unregisterFromViewReq{
+		views: names,
+		done:  make(chan struct{}),
+	}
+	w.c <- req
+	<-req.done
+}
+
+// RetrieveData gets a snapshot of the data collected for the view registered
+// with the given name. It is intended for testing only.
+func RetrieveData(viewName string) ([]*Row, error) {
+	return defaultWorker.RetrieveData(viewName)
+}
+
+// RetrieveData gets a snapshot of the data collected for the view registered
+// with the given name. It is intended for testing only.
+func (w *worker) RetrieveData(viewName string) ([]*Row, error) {
+	req := &retrieveDataReq{
+		now: time.Now(),
+		v:   viewName,
+		c:   make(chan *retrieveDataResp),
+	}
+	w.c <- req
+	resp := <-req.c
+	return resp.rows, resp.err
+}
+
+func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
+	defaultWorker.Record(tags, ms, attachments)
+}
+
+func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) {
+	defaultWorker.recordMeasurement(tags, ms, attachments)
+}
+
+// Record records a set of measurements ms associated with the given tags and attachments.
+func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) {
+	w.recordMeasurement(tags, ms.([]stats.Measurement), attachments)
+}
+
+// recordMeasurement records a set of measurements ms associated with the given tags and attachments.
+// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + req := &recordReq{ + tm: tags, + ms: ms, + attachments: attachments, + t: time.Now(), + } + w.c <- req +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func SetReportingPeriod(d time.Duration) { + defaultWorker.SetReportingPeriod(d) +} + +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + +// SetReportingPeriod sets the interval between reporting aggregated views in +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. +func (w *worker) SetReportingPeriod(d time.Duration) { + // TODO(acetechnologist): ensure that the duration d is more than a certain + // value. e.g. 1s + req := &setReportingPeriodReq{ + d: d, + c: make(chan bool), + } + w.c <- req + <-req.c // don't return until the timer is set to the new duration. +} + +// NewMeter constructs a Meter instance. You should only need to use this if +// you need to separate out Measurement recordings and View aggregations within +// a single process. +func NewMeter() Meter { + return &worker{ + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + viewStartTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), + + exporters: make(map[Exporter]struct{}), + } +} + +// SetResource associates all data collected by this Meter with the specified +// resource. This resource is reported when using metricexport.ReadAndExport; +// it is not provided when used with ExportView/RegisterExporter, because that +// interface does not provide a means for reporting the Resource. 
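+//
+// A sketch (the resource type and labels are illustrative):
+//
+//	m := NewMeter()
+//	m.SetResource(&resource.Resource{
+//		Type:   "host",
+//		Labels: map[string]string{"host.name": "db-1"},
+//	})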
+func (w *worker) SetResource(r *resource.Resource) { + w.r = r +} + +func (w *worker) Start() { + go w.start() +} + +func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + + for { + select { + case cmd := <-w.c: + cmd.handleCommand(w) + case <-w.timer.C: + w.reportUsage() + case <-w.quit: + w.timer.Stop() + close(w.c) + close(w.done) + return + } + } +} + +func (w *worker) Stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + select { + case <-w.quit: + default: + close(w.quit) + } + <-w.done +} + +func (w *worker) getMeasureRef(name string) *measureRef { + if mr, ok := w.measures[name]; ok { + return mr + } + mr := &measureRef{ + measure: name, + views: make(map[*viewInternal]struct{}), + } + w.measures[name] = mr + return mr +} + +func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() + vi, err := newViewInternal(v) + if err != nil { + return nil, err + } + if x, ok := w.views[vi.view.Name]; ok { + if !x.view.same(vi.view) { + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) + } + + // the view is already registered so there is nothing to do and the + // command is considered successful. + return x, nil + } + w.views[vi.view.Name] = vi + w.viewStartTimes[vi] = time.Now() + ref := w.getMeasureRef(vi.view.Measure.Name()) + ref.views[vi] = struct{}{} + return vi, nil +} + +func (w *worker) unregisterView(v *viewInternal) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, v.view.Name) + delete(w.viewStartTimes, v) + if measure := w.measures[v.view.Measure.Name()]; measure != nil { + delete(measure.views, v) + } +} + +func (w *worker) reportView(v *viewInternal) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + viewData := &Data{ + View: v.view, + Start: w.viewStartTimes[v], + End: time.Now(), + Rows: rows, + } + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + for e := range w.exporters { + e.ExportView(viewData) + } +} + +func (w *worker) reportUsage() { + w.mu.Lock() + defer w.mu.Unlock() + for _, v := range w.views { + w.reportView(v) + } +} + +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + return viewToMetric(v, w.r, now) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. +func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics +} + +func (w *worker) RegisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + w.exporters[e] = struct{}{} +} + +func (w *worker) UnregisterExporter(e Exporter) { + w.exportersMu.Lock() + defer w.exportersMu.Unlock() + + delete(w.exporters, e) +} diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go new file mode 100644 index 00000000000..9ac4cc05992 --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -0,0 +1,186 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package view
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"go.opencensus.io/stats"
+	"go.opencensus.io/stats/internal"
+	"go.opencensus.io/tag"
+)
+
+type command interface {
+	handleCommand(w *worker)
+}
+
+// getViewByNameReq is the command to get a view given its name.
+type getViewByNameReq struct {
+	name string
+	c    chan *getViewByNameResp
+}
+
+type getViewByNameResp struct {
+	v *View
+}
+
+func (cmd *getViewByNameReq) handleCommand(w *worker) {
+	v := w.views[cmd.name]
+	if v == nil {
+		cmd.c <- &getViewByNameResp{nil}
+		return
+	}
+	cmd.c <- &getViewByNameResp{v.view}
+}
+
+// registerViewReq is the command to register a view.
+type registerViewReq struct {
+	views []*View
+	err   chan error
+}
+
+func (cmd *registerViewReq) handleCommand(w *worker) {
+	for _, v := range cmd.views {
+		if err := v.canonicalize(); err != nil {
+			cmd.err <- err
+			return
+		}
+	}
+	var errstr []string
+	for _, view := range cmd.views {
+		vi, err := w.tryRegisterView(view)
+		if err != nil {
+			errstr = append(errstr, fmt.Sprintf("%s: %v", view.Name, err))
+			continue
+		}
+		internal.SubscriptionReporter(view.Measure.Name())
+		vi.subscribe()
+	}
+	if len(errstr) > 0 {
+		cmd.err <- errors.New(strings.Join(errstr, "\n"))
+	} else {
+		cmd.err <- nil
+	}
+}
+
+// unregisterFromViewReq is the command to unregister from a view. It has no
+// impact on data collection for clients that are pulling data from the
+// library.
+type unregisterFromViewReq struct {
+	views []string
+	done  chan struct{}
+}
+
+func (cmd *unregisterFromViewReq) handleCommand(w *worker) {
+	for _, name := range cmd.views {
+		vi, ok := w.views[name]
+		if !ok {
+			continue
+		}
+
+		// Report pending data for this view before removing it.
+		w.reportView(vi)
+
+		vi.unsubscribe()
+		if !vi.isSubscribed() {
+			// This was the last subscription and the view is no longer
+			// collecting. The collected data can be cleared.
+			vi.clearRows()
+		}
+		w.unregisterView(vi)
+	}
+	cmd.done <- struct{}{}
+}
+
+// retrieveDataReq is the command to retrieve data for a view.
+type retrieveDataReq struct {
+	now time.Time
+	v   string
+	c   chan *retrieveDataResp
+}
+
+type retrieveDataResp struct {
+	rows []*Row
+	err  error
+}
+
+func (cmd *retrieveDataReq) handleCommand(w *worker) {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	vi, ok := w.views[cmd.v]
+	if !ok {
+		cmd.c <- &retrieveDataResp{
+			nil,
+			fmt.Errorf("cannot retrieve data; view %q is not registered", cmd.v),
+		}
+		return
+	}
+
+	if !vi.isSubscribed() {
+		cmd.c <- &retrieveDataResp{
+			nil,
+			fmt.Errorf("cannot retrieve data; view %q has no subscriptions or collection is not forcibly started", cmd.v),
+		}
+		return
+	}
+	cmd.c <- &retrieveDataResp{
+		vi.collectedRows(),
+		nil,
+	}
+}
+
+// recordReq is the command to record data related to multiple measures
+// at once.
+type recordReq struct { + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time +} + +func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() + for _, m := range cmd.ms { + if (m == stats.Measurement{}) { // not registered + continue + } + ref := w.getMeasureRef(m.Measure().Name()) + for v := range ref.views { + v.addSample(cmd.tm, m.Value(), cmd.attachments, cmd.t) + } + } +} + +// setReportingPeriodReq is the command to modify the duration between +// reporting the collected data to the registered clients. +type setReportingPeriodReq struct { + d time.Duration + c chan bool +} + +func (cmd *setReportingPeriodReq) handleCommand(w *worker) { + w.timer.Stop() + if cmd.d <= 0 { + w.timer = time.NewTicker(defaultReportingDuration) + } else { + w.timer = time.NewTicker(cmd.d) + } + cmd.c <- true +} diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go new file mode 100644 index 00000000000..b27d1b26b13 --- /dev/null +++ b/vendor/go.opencensus.io/tag/context.go @@ -0,0 +1,43 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" +) + +// FromContext returns the tag map stored in the context. +func FromContext(ctx context.Context) *Map { + // The returned tag map shouldn't be mutated. + ts := ctx.Value(mapCtxKey) + if ts == nil { + return nil + } + return ts.(*Map) +} + +// NewContext creates a new context with the given tag map. +// To propagate a tag map to downstream methods and downstream RPCs, add a tag map +// to the current context. NewContext will return a copy of the current context, +// and put the tag map into the returned one. +// If there is already a tag map in the current context, it will be replaced with m. +func NewContext(ctx context.Context, m *Map) context.Context { + return context.WithValue(ctx, mapCtxKey, m) +} + +type ctxKey struct{} + +var mapCtxKey = ctxKey{} diff --git a/vendor/go.opencensus.io/tag/doc.go b/vendor/go.opencensus.io/tag/doc.go new file mode 100644 index 00000000000..da16b74e4de --- /dev/null +++ b/vendor/go.opencensus.io/tag/doc.go @@ -0,0 +1,26 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +/* +Package tag contains OpenCensus tags. + +Tags are key-value pairs. Tags provide additional cardinality to +the OpenCensus instrumentation data. 
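+
+For example, assuming an existing context.Context ctx (the key name and
+value here are illustrative):
+
+	key := tag.MustNewKey("frontend")
+	ctx, err := tag.New(ctx, tag.Upsert(key, "mobile-ios"))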
+ +Tags can be propagated on the wire and in the same +process via context.Context. Encode and Decode should be +used to represent tags into their binary propagation form. +*/ +package tag // import "go.opencensus.io/tag" diff --git a/vendor/go.opencensus.io/tag/key.go b/vendor/go.opencensus.io/tag/key.go new file mode 100644 index 00000000000..71ec913657b --- /dev/null +++ b/vendor/go.opencensus.io/tag/key.go @@ -0,0 +1,44 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +// Key represents a tag key. +type Key struct { + name string +} + +// NewKey creates or retrieves a string key identified by name. +// Calling NewKey more than once with the same name returns the same key. +func NewKey(name string) (Key, error) { + if !checkKeyName(name) { + return Key{}, errInvalidKeyName + } + return Key{name: name}, nil +} + +// MustNewKey returns a key with the given name, and panics if name is an invalid key name. +func MustNewKey(name string) Key { + k, err := NewKey(name) + if err != nil { + panic(err) + } + return k +} + +// Name returns the name of the key. +func (k Key) Name() string { + return k.name +} diff --git a/vendor/go.opencensus.io/tag/map.go b/vendor/go.opencensus.io/tag/map.go new file mode 100644 index 00000000000..0272ef85a4c --- /dev/null +++ b/vendor/go.opencensus.io/tag/map.go @@ -0,0 +1,229 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "bytes" + "context" + "fmt" + "sort" +) + +// Tag is a key value pair that can be propagated on wire. +type Tag struct { + Key Key + Value string +} + +type tagContent struct { + value string + m metadatas +} + +// Map is a map of tags. Use New to create a context containing +// a new Map. +type Map struct { + m map[Key]tagContent +} + +// Value returns the value for the key if a value for the key exists. 
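+//
+// For example:
+//
+//	if v, ok := m.Value(k); ok {
+//		fmt.Println(v)
+//	}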
+func (m *Map) Value(k Key) (string, bool) {
+	if m == nil {
+		return "", false
+	}
+	v, ok := m.m[k]
+	return v.value, ok
+}
+
+func (m *Map) String() string {
+	if m == nil {
+		return "nil"
+	}
+	keys := make([]Key, 0, len(m.m))
+	for k := range m.m {
+		keys = append(keys, k)
+	}
+	sort.Slice(keys, func(i, j int) bool { return keys[i].Name() < keys[j].Name() })
+
+	var buffer bytes.Buffer
+	buffer.WriteString("{ ")
+	for _, k := range keys {
+		buffer.WriteString(fmt.Sprintf("{%v %v}", k.name, m.m[k]))
+	}
+	buffer.WriteString(" }")
+	return buffer.String()
+}
+
+func (m *Map) insert(k Key, v string, md metadatas) {
+	if _, ok := m.m[k]; ok {
+		return
+	}
+	m.m[k] = tagContent{value: v, m: md}
+}
+
+func (m *Map) update(k Key, v string, md metadatas) {
+	if _, ok := m.m[k]; ok {
+		m.m[k] = tagContent{value: v, m: md}
+	}
+}
+
+func (m *Map) upsert(k Key, v string, md metadatas) {
+	m.m[k] = tagContent{value: v, m: md}
+}
+
+func (m *Map) delete(k Key) {
+	delete(m.m, k)
+}
+
+func newMap() *Map {
+	return &Map{m: make(map[Key]tagContent)}
+}
+
+// Mutator modifies a tag map.
+type Mutator interface {
+	Mutate(t *Map) (*Map, error)
+}
+
+// Insert returns a mutator that inserts a
+// value associated with k. If k already exists in the tag map,
+// the mutator doesn't update the value.
+// Metadata applies metadata to the tag. It is optional.
+// Metadatas are applied in the order in which they are provided.
+// If more than one metadata updates the same attribute, then
+// the update from the last metadata prevails.
+func Insert(k Key, v string, mds ...Metadata) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			if !checkValue(v) {
+				return nil, errInvalidValue
+			}
+			m.insert(k, v, createMetadatas(mds...))
+			return m, nil
+		},
+	}
+}
+
+// Update returns a mutator that updates the
+// value of the tag associated with k with v. If k doesn't
+// exist in the tag map, the mutator doesn't insert the value.
+// Metadata applies metadata to the tag. It is optional.
+// Metadatas are applied in the order in which they are provided.
+// If more than one metadata updates the same attribute, then
+// the update from the last metadata prevails.
+func Update(k Key, v string, mds ...Metadata) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			if !checkValue(v) {
+				return nil, errInvalidValue
+			}
+			m.update(k, v, createMetadatas(mds...))
+			return m, nil
+		},
+	}
+}
+
+// Upsert returns a mutator that upserts the
+// value of the tag associated with k with v. It inserts the
+// value if k doesn't exist already. It mutates the value
+// if k already exists.
+// Metadata applies metadata to the tag. It is optional.
+// Metadatas are applied in the order in which they are provided.
+// If more than one metadata updates the same attribute, then
+// the update from the last metadata prevails.
+func Upsert(k Key, v string, mds ...Metadata) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			if !checkValue(v) {
+				return nil, errInvalidValue
+			}
+			m.upsert(k, v, createMetadatas(mds...))
+			return m, nil
+		},
+	}
+}
+
+func createMetadatas(mds ...Metadata) metadatas {
+	var metas metadatas
+	if len(mds) > 0 {
+		for _, md := range mds {
+			if md != nil {
+				md(&metas)
+			}
+		}
+	} else {
+		WithTTL(TTLUnlimitedPropagation)(&metas)
+	}
+	return metas
+}
+
+// Delete returns a mutator that deletes
+// the value associated with k.
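+//
+// For example, to drop the tag from the current context's map (a sketch):
+//
+//	ctx, err := tag.New(ctx, tag.Delete(k))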
+func Delete(k Key) Mutator {
+	return &mutator{
+		fn: func(m *Map) (*Map, error) {
+			m.delete(k)
+			return m, nil
+		},
+	}
+}
+
+// New returns a new context that contains a tag map
+// originated from the incoming context and modified
+// with the provided mutators.
+func New(ctx context.Context, mutator ...Mutator) (context.Context, error) {
+	m := newMap()
+	orig := FromContext(ctx)
+	if orig != nil {
+		for k, v := range orig.m {
+			if !checkKeyName(k.Name()) {
+				return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName)
+			}
+			if !checkValue(v.value) {
+				return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue)
+			}
+			m.insert(k, v.value, v.m)
+		}
+	}
+	var err error
+	for _, mod := range mutator {
+		m, err = mod.Mutate(m)
+		if err != nil {
+			return ctx, err
+		}
+	}
+	return NewContext(ctx, m), nil
+}
+
+// Do is similar to pprof.Do: a convenience for installing the tags
+// from the context as Go profiler labels. This allows you to
+// correlate runtime profiling with stats.
+//
+// It converts the key/values from the given map to Go profiler labels
+// and calls pprof.Do.
+//
+// Do is going to do nothing if your Go version is below 1.9.
+func Do(ctx context.Context, f func(ctx context.Context)) {
+	do(ctx, f)
+}
+
+type mutator struct {
+	fn func(t *Map) (*Map, error)
+}
+
+func (m *mutator) Mutate(t *Map) (*Map, error) {
+	return m.fn(t)
+}
diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go
new file mode 100644
index 00000000000..c242e695c8c
--- /dev/null
+++ b/vendor/go.opencensus.io/tag/map_codec.go
@@ -0,0 +1,239 @@
+// Copyright 2017, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package tag
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+// keyType defines the types of keys allowed. Currently only keyTypeString is
+// supported.
+type keyType byte
+
+const (
+	keyTypeString keyType = iota
+	keyTypeInt64
+	keyTypeTrue
+	keyTypeFalse
+
+	tagsVersionID = byte(0)
+)
+
+type encoderGRPC struct {
+	buf               []byte
+	writeIdx, readIdx int
+}
+
+// writeTagString writes the keyTypeString byte followed by the
+// length-prefixed key and value strings.
+func (eg *encoderGRPC) writeTagString(k, v string) { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k) + eg.writeStringWithVarintLen(v) +} + +func (eg *encoderGRPC) writeTagUint64(k string, i uint64) { + eg.writeByte(byte(keyTypeInt64)) + eg.writeStringWithVarintLen(k) + eg.writeUint64(i) +} + +func (eg *encoderGRPC) writeTagTrue(k string) { + eg.writeByte(byte(keyTypeTrue)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeTagFalse(k string) { + eg.writeByte(byte(keyTypeFalse)) + eg.writeStringWithVarintLen(k) +} + +func (eg *encoderGRPC) writeBytesWithVarintLen(bytes []byte) { + length := len(bytes) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], bytes) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeStringWithVarintLen(s string) { + length := len(s) + + eg.growIfRequired(binary.MaxVarintLen64 + length) + eg.writeIdx += binary.PutUvarint(eg.buf[eg.writeIdx:], uint64(length)) + copy(eg.buf[eg.writeIdx:], s) + eg.writeIdx += length +} + +func (eg *encoderGRPC) writeByte(v byte) { + eg.growIfRequired(1) + eg.buf[eg.writeIdx] = v + eg.writeIdx++ +} + +func (eg *encoderGRPC) writeUint32(i uint32) { + eg.growIfRequired(4) + binary.LittleEndian.PutUint32(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 4 +} + +func (eg *encoderGRPC) writeUint64(i uint64) { + eg.growIfRequired(8) + binary.LittleEndian.PutUint64(eg.buf[eg.writeIdx:], i) + eg.writeIdx += 8 +} + +func (eg *encoderGRPC) readByte() byte { + b := eg.buf[eg.readIdx] + eg.readIdx++ + return b +} + +func (eg *encoderGRPC) readUint32() uint32 { + i := binary.LittleEndian.Uint32(eg.buf[eg.readIdx:]) + eg.readIdx += 4 + return i +} + +func (eg *encoderGRPC) readUint64() uint64 { + i := binary.LittleEndian.Uint64(eg.buf[eg.readIdx:]) + eg.readIdx += 8 + return i +} + +func (eg *encoderGRPC) readBytesWithVarintLen() ([]byte, error) { + if eg.readEnded() { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + length, valueStart := binary.Uvarint(eg.buf[eg.readIdx:]) + if valueStart <= 0 { + return nil, fmt.Errorf("unexpected end while readBytesWithVarintLen '%x' starting at idx '%v'", eg.buf, eg.readIdx) + } + + valueStart += eg.readIdx + valueEnd := valueStart + int(length) + if valueEnd > len(eg.buf) { + return nil, fmt.Errorf("malformed encoding: length:%v, upper:%v, maxLength:%v", length, valueEnd, len(eg.buf)) + } + + eg.readIdx = valueEnd + return eg.buf[valueStart:valueEnd], nil +} + +func (eg *encoderGRPC) readStringWithVarintLen() (string, error) { + bytes, err := eg.readBytesWithVarintLen() + if err != nil { + return "", err + } + return string(bytes), nil +} + +func (eg *encoderGRPC) growIfRequired(expected int) { + if len(eg.buf)-eg.writeIdx < expected { + tmp := make([]byte, 2*(len(eg.buf)+1)+expected) + copy(tmp, eg.buf) + eg.buf = tmp + } +} + +func (eg *encoderGRPC) readEnded() bool { + return eg.readIdx >= len(eg.buf) +} + +func (eg *encoderGRPC) bytes() []byte { + return eg.buf[:eg.writeIdx] +} + +// Encode encodes the tag map into a []byte. It is useful to propagate +// the tag maps on wire in binary format. 
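+//
+// A round-trip sketch with Decode (defined below):
+//
+//	buf := tag.Encode(m)
+//	m2, err := tag.Decode(buf)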
+func Encode(m *Map) []byte { + if m == nil { + return nil + } + eg := &encoderGRPC{ + buf: make([]byte, len(m.m)), + } + eg.writeByte(tagsVersionID) + for k, v := range m.m { + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } + } + return eg.bytes() +} + +// Decode decodes the given []byte into a tag map. +func Decode(bytes []byte) (*Map, error) { + ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} + +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { + eg := &encoderGRPC{ + buf: bytes, + } + if len(eg.buf) == 0 { + return nil + } + + version := eg.readByte() + if version > tagsVersionID { + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + } + + for !eg.readEnded() { + typ := keyType(eg.readByte()) + + if typ != keyTypeString { + return fmt.Errorf("cannot decode: invalid key type: %q", typ) + } + + k, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + v, err := eg.readBytesWithVarintLen() + if err != nil { + return err + } + + key, err := NewKey(string(k)) + if err != nil { + return err + } + val := string(v) + if !checkValue(val) { + return errInvalidValue + } + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/go.opencensus.io/tag/metadata.go b/vendor/go.opencensus.io/tag/metadata.go new file mode 100644 index 00000000000..6571a583ea6 --- /dev/null +++ b/vendor/go.opencensus.io/tag/metadata.go @@ -0,0 +1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. + TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. + TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. 
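+//
+// For example, to keep a tag from propagating past the local process
+// (a sketch; key is an existing Key):
+//
+//	ctx, err := tag.New(ctx,
+//		tag.Upsert(key, "value", tag.WithTTL(tag.TTLNoPropagation)))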
+func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go new file mode 100644 index 00000000000..8fb17226fe3 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -0,0 +1,32 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.9 +// +build go1.9 + +package tag + +import ( + "context" + "runtime/pprof" +) + +func do(ctx context.Context, f func(ctx context.Context)) { + m := FromContext(ctx) + keyvals := make([]string, 0, 2*len(m.m)) + for k, v := range m.m { + keyvals = append(keyvals, k.Name(), v.value) + } + pprof.Do(ctx, pprof.Labels(keyvals...), f) +} diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go new file mode 100644 index 00000000000..e28cf13cde9 --- /dev/null +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -0,0 +1,24 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.9 +// +build !go1.9 + +package tag + +import "context" + +func do(ctx context.Context, f func(ctx context.Context)) { + f(ctx) +} diff --git a/vendor/go.opencensus.io/tag/validate.go b/vendor/go.opencensus.io/tag/validate.go new file mode 100644 index 00000000000..0939fc67483 --- /dev/null +++ b/vendor/go.opencensus.io/tag/validate.go @@ -0,0 +1,56 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tag + +import "errors" + +const ( + maxKeyLength = 255 + + // valid are restricted to US-ASCII subset (range 0x20 (' ') to 0x7e ('~')). 
+ validKeyValueMin = 32 + validKeyValueMax = 126 +) + +var ( + errInvalidKeyName = errors.New("invalid key name: only ASCII characters accepted; max length must be 255 characters") + errInvalidValue = errors.New("invalid value: only ASCII characters accepted; max length must be 255 characters") +) + +func checkKeyName(name string) bool { + if len(name) == 0 { + return false + } + if len(name) > maxKeyLength { + return false + } + return isASCII(name) +} + +func isASCII(s string) bool { + for _, c := range s { + if (c < validKeyValueMin) || (c > validKeyValueMax) { + return false + } + } + return true +} + +func checkValue(v string) bool { + if len(v) > maxKeyLength { + return false + } + return isASCII(v) +} diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go new file mode 100644 index 00000000000..c8e26ed6355 --- /dev/null +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -0,0 +1,129 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "fmt" + "time" +) + +type ( + // TraceID is a 16-byte identifier for a set of spans. + TraceID [16]byte + + // SpanID is an 8-byte identifier for a single span. + SpanID [8]byte +) + +func (t TraceID) String() string { + return fmt.Sprintf("%02x", t[:]) +} + +func (s SpanID) String() string { + return fmt.Sprintf("%02x", s[:]) +} + +// Annotation represents a text annotation with a set of attributes and a timestamp. +type Annotation struct { + Time time.Time + Message string + Attributes map[string]interface{} +} + +// Attribute represents a key-value pair on a span, link or annotation. +// Construct with one of: BoolAttribute, Int64Attribute, or StringAttribute. +type Attribute struct { + key string + value interface{} +} + +// Key returns the attribute's key +func (a *Attribute) Key() string { + return a.key +} + +// Value returns the attribute's value +func (a *Attribute) Value() interface{} { + return a.value +} + +// BoolAttribute returns a bool-valued attribute. +func BoolAttribute(key string, value bool) Attribute { + return Attribute{key: key, value: value} +} + +// Int64Attribute returns an int64-valued attribute. +func Int64Attribute(key string, value int64) Attribute { + return Attribute{key: key, value: value} +} + +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + +// StringAttribute returns a string-valued attribute. +func StringAttribute(key string, value string) Attribute { + return Attribute{key: key, value: value} +} + +// LinkType specifies the relationship between the span that had the link +// added, and the linked span. +type LinkType int32 + +// LinkType values. +const ( + LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. 
+)
+
+// Link represents a reference from one span to another span.
+type Link struct {
+    TraceID TraceID
+    SpanID  SpanID
+    Type    LinkType
+    // Attributes is a set of attributes on the link.
+    Attributes map[string]interface{}
+}
+
+// MessageEventType specifies the type of message event.
+type MessageEventType int32
+
+// MessageEventType values.
+const (
+    MessageEventTypeUnspecified MessageEventType = iota // Unknown event type.
+    MessageEventTypeSent                                // Indicates a sent RPC message.
+    MessageEventTypeRecv                                // Indicates a received RPC message.
+)
+
+// MessageEvent represents an event describing a message sent or received on the network.
+type MessageEvent struct {
+    Time                 time.Time
+    EventType            MessageEventType
+    MessageID            int64
+    UncompressedByteSize int64
+    CompressedByteSize   int64
+}
+
+// Status is the status of a Span.
+type Status struct {
+    // Code is a status code. Zero indicates success.
+    //
+    // If Code is propagated to Google APIs, it should ideally be a value from
+    // https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto .
+    Code    int32
+    Message string
+}
diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go
new file mode 100644
index 00000000000..775f8274faa
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/config.go
@@ -0,0 +1,86 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+    "sync"
+
+    "go.opencensus.io/trace/internal"
+)
+
+// Config represents the global tracing configuration.
+type Config struct {
+    // DefaultSampler is the default sampler used when creating new spans.
+    DefaultSampler Sampler
+
+    // IDGenerator is for internal use only.
+    IDGenerator internal.IDGenerator
+
+    // MaxAnnotationEventsPerSpan is the max number of annotation events per span.
+    MaxAnnotationEventsPerSpan int
+
+    // MaxMessageEventsPerSpan is the max number of message events per span.
+    MaxMessageEventsPerSpan int
+
+    // MaxAttributesPerSpan is the max number of attributes per span.
+    MaxAttributesPerSpan int
+
+    // MaxLinksPerSpan is the max number of links per span.
+    MaxLinksPerSpan int
+}
+
+var configWriteMu sync.Mutex
+
+const (
+    // DefaultMaxAnnotationEventsPerSpan is the default max number of annotation events per span.
+    DefaultMaxAnnotationEventsPerSpan = 32
+
+    // DefaultMaxMessageEventsPerSpan is the default max number of message events per span.
+    DefaultMaxMessageEventsPerSpan = 128
+
+    // DefaultMaxAttributesPerSpan is the default max number of attributes per span.
+    DefaultMaxAttributesPerSpan = 32
+
+    // DefaultMaxLinksPerSpan is the default max number of links per span.
+    DefaultMaxLinksPerSpan = 32
+)
+
+// ApplyConfig applies changes to the global tracing configuration.
+//
+// Fields not provided in the given config are preserved.
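+//
+// For example (editorial sketch), raising the sampling rate while leaving
+// all other settings untouched:
+//
+//	trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(0.5)})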
+func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() + c := *config.Load().(*Config) + if cfg.DefaultSampler != nil { + c.DefaultSampler = cfg.DefaultSampler + } + if cfg.IDGenerator != nil { + c.IDGenerator = cfg.IDGenerator + } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } + config.Store(&c) +} diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go new file mode 100644 index 00000000000..7a1616a55c5 --- /dev/null +++ b/vendor/go.opencensus.io/trace/doc.go @@ -0,0 +1,52 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenCensus distributed tracing. + +The following assumes a basic familiarity with OpenCensus concepts. +See http://opencensus.io + +# Exporting Traces + +To export collected tracing data, register at least one exporter. You can use +one of the provided exporters or write your own. + + trace.RegisterExporter(exporter) + +By default, traces will be sampled relatively rarely. To change the sampling +frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler +to sample a subset of traces, or use AlwaysSample to collect a trace on every run: + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. + +# Adding Spans to a Trace + +A trace consists of a tree of spans. In Go, the current span is carried in a +context.Context. + +It is common to want to capture all the activity of a function call in a span. For +this to work, the function must take a context.Context as a parameter. Add these two +lines to the top of the function: + + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() + +StartSpan will create a new top-level span if the context +doesn't contain another span, otherwise it will create a child span. +*/ +package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 00000000000..ffc264f23d2 --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go new file mode 100644 index 00000000000..e0d9a4b99e9 --- /dev/null +++ b/vendor/go.opencensus.io/trace/export.go @@ -0,0 +1,97 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "sync/atomic" + "time" +) + +// Exporter is a type for functions that receive sampled trace spans. +// +// The ExportSpan method should be safe for concurrent use and should return +// quickly; if an Exporter takes a significant amount of time to process a +// SpanData, that work should be done on another goroutine. +// +// The SpanData should not be modified, but a pointer to it can be kept. +type Exporter interface { + ExportSpan(s *SpanData) +} + +type exportersMap map[Exporter]struct{} + +var ( + exporterMu sync.Mutex + exporters atomic.Value +) + +// RegisterExporter adds to the list of Exporters that will receive sampled +// trace spans. +// +// Binaries can register exporters, libraries shouldn't register exporters. +func RegisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() +} + +// UnregisterExporter removes from the list of Exporters the Exporter that was +// registered with the given name. +func UnregisterExporter(e Exporter) { + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() +} + +// SpanData contains all the information collected by a Span. +type SpanData struct { + SpanContext + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time + // The wall clock time of EndTime will be adjusted to always be offset + // from StartTime by the duration of the span. + EndTime time.Time + // The values of Attributes each have type string, bool, or int64. 
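+    // For instance (editorial note), a value recorded via
+    // Int64Attribute("retries", 3) is carried here as int64(3).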
+ Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent + Status + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int +} diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go new file mode 100644 index 00000000000..7e808d8f30e --- /dev/null +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -0,0 +1,22 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides trace internals. +package internal + +// IDGenerator allows custom generators for TraceId and SpanId. +type IDGenerator interface { + NewTraceID() [16]byte + NewSpanID() [8]byte +} diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 00000000000..80095a5f6c0 --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,61 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/golang/groupcache/lru" +) + +// A simple lru.Cache wrapper that tracks the keys of the current contents and +// the cumulative number of evicted items. 
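+//
+// A behavioral sketch (editorial illustration, capacity 2):
+//
+//	lm := newLruMap(2)
+//	lm.add("a", 1)
+//	lm.add("b", 2)
+//	lm.add("c", 3) // oldest entry "a" is evicted; lm.droppedCount becomes 1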
+type lruMap struct { + cacheKeys map[lru.Key]bool + cache *lru.Cache + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{ + cacheKeys: make(map[lru.Key]bool), + cache: lru.New(size), + droppedCount: 0, + } + lm.cache.OnEvicted = func(key lru.Key, value interface{}) { + delete(lm.cacheKeys, key) + lm.droppedCount++ + } + return lm +} + +func (lm lruMap) len() int { + return lm.cache.Len() +} + +func (lm lruMap) keys() []interface{} { + keys := make([]interface{}, 0, len(lm.cacheKeys)) + for k := range lm.cacheKeys { + keys = append(keys, k) + } + return keys +} + +func (lm *lruMap) add(key, value interface{}) { + lm.cacheKeys[lru.Key(key)] = true + lm.cache.Add(lru.Key(key), value) +} + +func (lm *lruMap) get(key interface{}) (interface{}, bool) { + return lm.cache.Get(key) +} diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go new file mode 100644 index 00000000000..1eb190a96a3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -0,0 +1,108 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package propagation implements the binary trace context format. +package propagation // import "go.opencensus.io/trace/propagation" + +// TODO: link to external spec document. + +// BinaryFormat format: +// +// Binary value: +// version_id: 1 byte representing the version id. +// +// For version_id = 0: +// +// version_format: +// field_format: +// +// Fields: +// +// TraceId: (field_id = 0, len = 16, default = "0000000000000000") - 16-byte array representing the trace_id. +// SpanId: (field_id = 1, len = 8, default = "00000000") - 8-byte array representing the span_id. +// TraceOptions: (field_id = 2, len = 1, default = "0") - 1-byte array representing the trace_options. +// +// Fields MUST be encoded using the field id order (smaller to higher). +// +// Valid value example: +// +// {0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, +// 98, 99, 100, 101, 102, 103, 104, 2, 1} +// +// version_id = 0; +// trace_id = {64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79} +// span_id = {97, 98, 99, 100, 101, 102, 103, 104}; +// trace_options = {1}; + +import ( + "net/http" + + "go.opencensus.io/trace" +) + +// Binary returns the binary format representation of a SpanContext. +// +// If sc is the zero value, Binary returns nil. +func Binary(sc trace.SpanContext) []byte { + if sc == (trace.SpanContext{}) { + return nil + } + var b [29]byte + copy(b[2:18], sc.TraceID[:]) + b[18] = 1 + copy(b[19:27], sc.SpanID[:]) + b[27] = 2 + b[28] = uint8(sc.TraceOptions) + return b[:] +} + +// FromBinary returns the SpanContext represented by b. +// +// If b has an unsupported version ID or contains no TraceID, FromBinary +// returns with ok==false. 
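+//
+// A round-trip sketch (editorial illustration):
+//
+//	wire := propagation.Binary(span.SpanContext()) // 29-byte payload, or nil
+//	sc, ok := propagation.FromBinary(wire)         // ok reports whether wire was well-formed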
+func FromBinary(b []byte) (sc trace.SpanContext, ok bool) { + if len(b) == 0 || b[0] != 0 { + return trace.SpanContext{}, false + } + b = b[1:] + if len(b) >= 17 && b[0] == 0 { + copy(sc.TraceID[:], b[1:17]) + b = b[17:] + } else { + return trace.SpanContext{}, false + } + if len(b) >= 9 && b[0] == 1 { + copy(sc.SpanID[:], b[1:9]) + b = b[9:] + } + if len(b) >= 2 && b[0] == 2 { + sc.TraceOptions = trace.TraceOptions(b[1]) + } + return sc, true +} + +// HTTPFormat implementations propagate span contexts +// in HTTP requests. +// +// SpanContextFromRequest extracts a span context from incoming +// requests. +// +// SpanContextToRequest modifies the given request to include the given +// span context. +type HTTPFormat interface { + SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) + SpanContextToRequest(sc trace.SpanContext, req *http.Request) +} + +// TODO(jbd): Find a more representative but short name for HTTPFormat. diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go new file mode 100644 index 00000000000..71c10f9e3b4 --- /dev/null +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -0,0 +1,75 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "encoding/binary" +) + +const defaultSamplingProbability = 1e-4 + +// Sampler decides whether a trace should be sampled and exported. +type Sampler func(SamplingParameters) SamplingDecision + +// SamplingParameters contains the values passed to a Sampler. +type SamplingParameters struct { + ParentContext SpanContext + TraceID TraceID + SpanID SpanID + Name string + HasRemoteParent bool +} + +// SamplingDecision is the value returned by a Sampler. +type SamplingDecision struct { + Sample bool +} + +// ProbabilitySampler returns a Sampler that samples a given fraction of traces. +// +// It also samples spans whose parents are sampled. +func ProbabilitySampler(fraction float64) Sampler { + if !(fraction >= 0) { + fraction = 0 + } else if fraction >= 1 { + return AlwaysSample() + } + + traceIDUpperBound := uint64(fraction * (1 << 63)) + return Sampler(func(p SamplingParameters) SamplingDecision { + if p.ParentContext.IsSampled() { + return SamplingDecision{Sample: true} + } + x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + return SamplingDecision{Sample: x < traceIDUpperBound} + }) +} + +// AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. +func AlwaysSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: true} + } +} + +// NeverSample returns a Sampler that samples no traces. 
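+//
+// Samplers can also be applied per span rather than globally; for example
+// (editorial sketch), to silence one noisy operation:
+//
+//	ctx, span := trace.StartSpan(ctx, "cache.refresh", trace.WithSampler(trace.NeverSample()))
+//	defer span.End()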
+func NeverSample() Sampler { + return func(p SamplingParameters) SamplingDecision { + return SamplingDecision{Sample: false} + } +} diff --git a/vendor/go.opencensus.io/trace/spanbucket.go b/vendor/go.opencensus.io/trace/spanbucket.go new file mode 100644 index 00000000000..fbabad34c00 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanbucket.go @@ -0,0 +1,130 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "time" +) + +// samplePeriod is the minimum time between accepting spans in a single bucket. +const samplePeriod = time.Second + +// defaultLatencies contains the default latency bucket bounds. +// TODO: consider defaults, make configurable +var defaultLatencies = [...]time.Duration{ + 10 * time.Microsecond, + 100 * time.Microsecond, + time.Millisecond, + 10 * time.Millisecond, + 100 * time.Millisecond, + time.Second, + 10 * time.Second, + time.Minute, +} + +// bucket is a container for a set of spans for a particular error code or latency range. +type bucket struct { + nextTime time.Time // next time we can accept a span + buffer []*SpanData // circular buffer of spans + nextIndex int // location next SpanData should be placed in buffer + overflow bool // whether the circular buffer has wrapped around +} + +func makeBucket(bufferSize int) bucket { + return bucket{ + buffer: make([]*SpanData, bufferSize), + } +} + +// add adds a span to the bucket, if nextTime has been reached. +func (b *bucket) add(s *SpanData) { + if s.EndTime.Before(b.nextTime) { + return + } + if len(b.buffer) == 0 { + return + } + b.nextTime = s.EndTime.Add(samplePeriod) + b.buffer[b.nextIndex] = s + b.nextIndex++ + if b.nextIndex == len(b.buffer) { + b.nextIndex = 0 + b.overflow = true + } +} + +// size returns the number of spans in the bucket. +func (b *bucket) size() int { + if b.overflow { + return len(b.buffer) + } + return b.nextIndex +} + +// span returns the ith span in the bucket. +func (b *bucket) span(i int) *SpanData { + if !b.overflow { + return b.buffer[i] + } + if i < len(b.buffer)-b.nextIndex { + return b.buffer[b.nextIndex+i] + } + return b.buffer[b.nextIndex+i-len(b.buffer)] +} + +// resize changes the size of the bucket to n, keeping up to n existing spans. +func (b *bucket) resize(n int) { + cur := b.size() + newBuffer := make([]*SpanData, n) + if cur < n { + for i := 0; i < cur; i++ { + newBuffer[i] = b.span(i) + } + b.buffer = newBuffer + b.nextIndex = cur + b.overflow = false + return + } + for i := 0; i < n; i++ { + newBuffer[i] = b.span(i + cur - n) + } + b.buffer = newBuffer + b.nextIndex = 0 + b.overflow = true +} + +// latencyBucket returns the appropriate bucket number for a given latency. +func latencyBucket(latency time.Duration) int { + i := 0 + for i < len(defaultLatencies) && latency >= defaultLatencies[i] { + i++ + } + return i +} + +// latencyBucketBounds returns the lower and upper bounds for a latency bucket +// number. 
+// +// The lower bound is inclusive, the upper bound is exclusive (except for the +// last bucket.) +func latencyBucketBounds(index int) (lower time.Duration, upper time.Duration) { + if index == 0 { + return 0, defaultLatencies[index] + } + if index == len(defaultLatencies) { + return defaultLatencies[index-1], 1<<63 - 1 + } + return defaultLatencies[index-1], defaultLatencies[index] +} diff --git a/vendor/go.opencensus.io/trace/spanstore.go b/vendor/go.opencensus.io/trace/spanstore.go new file mode 100644 index 00000000000..e601f76f2c8 --- /dev/null +++ b/vendor/go.opencensus.io/trace/spanstore.go @@ -0,0 +1,308 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "sync" + "time" + + "go.opencensus.io/internal" +) + +const ( + maxBucketSize = 100000 + defaultBucketSize = 10 +) + +var ( + ssmu sync.RWMutex // protects spanStores + spanStores = make(map[string]*spanStore) +) + +// This exists purely to avoid exposing internal methods used by z-Pages externally. +type internalOnly struct{} + +func init() { + //TODO(#412): remove + internal.Trace = &internalOnly{} +} + +// ReportActiveSpans returns the active spans for the given name. +func (i internalOnly) ReportActiveSpans(name string) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for activeSpan := range s.active { + if s, ok := activeSpan.(*span); ok { + out = append(out, s.makeSpanData()) + } + } + return out +} + +// ReportSpansByError returns a sample of error spans. +// +// If code is nonzero, only spans with that status code are returned. +func (i internalOnly) ReportSpansByError(name string, code int32) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + if code != 0 { + if b, ok := s.errors[code]; ok { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } else { + for _, b := range s.errors { + for _, sd := range b.buffer { + if sd == nil { + break + } + out = append(out, sd) + } + } + } + return out +} + +// ConfigureBucketSizes sets the number of spans to keep per latency and error +// bucket for different span names. +func (i internalOnly) ConfigureBucketSizes(bcs []internal.BucketConfiguration) { + for _, bc := range bcs { + latencyBucketSize := bc.MaxRequestsSucceeded + if latencyBucketSize < 0 { + latencyBucketSize = 0 + } + if latencyBucketSize > maxBucketSize { + latencyBucketSize = maxBucketSize + } + errorBucketSize := bc.MaxRequestsErrors + if errorBucketSize < 0 { + errorBucketSize = 0 + } + if errorBucketSize > maxBucketSize { + errorBucketSize = maxBucketSize + } + spanStoreSetSize(bc.Name, latencyBucketSize, errorBucketSize) + } +} + +// ReportSpansPerMethod returns a summary of what spans are being stored for each span name. 
+func (i internalOnly) ReportSpansPerMethod() map[string]internal.PerMethodSummary { + out := make(map[string]internal.PerMethodSummary) + ssmu.RLock() + defer ssmu.RUnlock() + for name, s := range spanStores { + s.mu.Lock() + p := internal.PerMethodSummary{ + Active: len(s.active), + } + for code, b := range s.errors { + p.ErrorBuckets = append(p.ErrorBuckets, internal.ErrorBucketSummary{ + ErrorCode: code, + Size: b.size(), + }) + } + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + p.LatencyBuckets = append(p.LatencyBuckets, internal.LatencyBucketSummary{ + MinLatency: min, + MaxLatency: max, + Size: b.size(), + }) + } + s.mu.Unlock() + out[name] = p + } + return out +} + +// ReportSpansByLatency returns a sample of successful spans. +// +// minLatency is the minimum latency of spans to be returned. +// maxLatency, if nonzero, is the maximum latency of spans to be returned. +func (i internalOnly) ReportSpansByLatency(name string, minLatency, maxLatency time.Duration) []*SpanData { + s := spanStoreForName(name) + if s == nil { + return nil + } + var out []*SpanData + s.mu.Lock() + defer s.mu.Unlock() + for i, b := range s.latency { + min, max := latencyBucketBounds(i) + if i+1 != len(s.latency) && max <= minLatency { + continue + } + if maxLatency != 0 && maxLatency < min { + continue + } + for _, sd := range b.buffer { + if sd == nil { + break + } + if minLatency != 0 || maxLatency != 0 { + d := sd.EndTime.Sub(sd.StartTime) + if d < minLatency { + continue + } + if maxLatency != 0 && d > maxLatency { + continue + } + } + out = append(out, sd) + } + } + return out +} + +// spanStore keeps track of spans stored for a particular span name. +// +// It contains all active spans; a sample of spans for failed requests, +// categorized by error code; and a sample of spans for successful requests, +// bucketed by latency. +type spanStore struct { + mu sync.Mutex // protects everything below. + active map[SpanInterface]struct{} + errors map[int32]*bucket + latency []bucket + maxSpansPerErrorBucket int +} + +// newSpanStore creates a span store. +func newSpanStore(name string, latencyBucketSize int, errorBucketSize int) *spanStore { + s := &spanStore{ + active: make(map[SpanInterface]struct{}), + latency: make([]bucket, len(defaultLatencies)+1), + maxSpansPerErrorBucket: errorBucketSize, + } + for i := range s.latency { + s.latency[i] = makeBucket(latencyBucketSize) + } + return s +} + +// spanStoreForName returns the spanStore for the given name. +// +// It returns nil if it doesn't exist. +func spanStoreForName(name string) *spanStore { + var s *spanStore + ssmu.RLock() + s, _ = spanStores[name] + ssmu.RUnlock() + return s +} + +// spanStoreForNameCreateIfNew returns the spanStore for the given name. +// +// It creates it if it didn't exist. +func spanStoreForNameCreateIfNew(name string) *spanStore { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + return s + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + return s + } + s = newSpanStore(name, defaultBucketSize, defaultBucketSize) + spanStores[name] = s + return s +} + +// spanStoreSetSize resizes the spanStore for the given name. +// +// It creates it if it didn't exist. 
+func spanStoreSetSize(name string, latencyBucketSize int, errorBucketSize int) { + ssmu.RLock() + s, ok := spanStores[name] + ssmu.RUnlock() + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + ssmu.Lock() + defer ssmu.Unlock() + s, ok = spanStores[name] + if ok { + s.resize(latencyBucketSize, errorBucketSize) + return + } + s = newSpanStore(name, latencyBucketSize, errorBucketSize) + spanStores[name] = s +} + +func (s *spanStore) resize(latencyBucketSize int, errorBucketSize int) { + s.mu.Lock() + for i := range s.latency { + s.latency[i].resize(latencyBucketSize) + } + for _, b := range s.errors { + b.resize(errorBucketSize) + } + s.maxSpansPerErrorBucket = errorBucketSize + s.mu.Unlock() +} + +// add adds a span to the active bucket of the spanStore. +func (s *spanStore) add(span SpanInterface) { + s.mu.Lock() + s.active[span] = struct{}{} + s.mu.Unlock() +} + +// finished removes a span from the active set, and adds a corresponding +// SpanData to a latency or error bucket. +func (s *spanStore) finished(span SpanInterface, sd *SpanData) { + latency := sd.EndTime.Sub(sd.StartTime) + if latency < 0 { + latency = 0 + } + code := sd.Status.Code + + s.mu.Lock() + delete(s.active, span) + if code == 0 { + s.latency[latencyBucket(latency)].add(sd) + } else { + if s.errors == nil { + s.errors = make(map[int32]*bucket) + } + if b := s.errors[code]; b != nil { + b.add(sd) + } else { + b := makeBucket(s.maxSpansPerErrorBucket) + s.errors[code] = &b + b.add(sd) + } + } + s.mu.Unlock() +} diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 00000000000..ec60effd108 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// Status codes for use with Span.SetStatus. These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go new file mode 100644 index 00000000000..861df9d3913 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace.go @@ -0,0 +1,595 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" +) + +type tracer struct{} + +var _ Tracer = &tracer{} + +// Span represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type span struct { + // data contains information recorded about the span. + // + // It will be non-nil if we are exporting the span or recording events for it. + // Otherwise, data is nil, and the Span is simply a carrier for the + // SpanContext, so that the trace ID is propagated. + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. + *spanStore + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.data != nil +} + +// TraceOptions contains options associated with a trace span. +type TraceOptions uint32 + +// IsSampled returns true if the span will be exported. +func (sc SpanContext) IsSampled() bool { + return sc.TraceOptions.IsSampled() +} + +// setIsSampled sets the TraceOptions bit that determines whether the span will be exported. +func (sc *SpanContext) setIsSampled(sampled bool) { + if sampled { + sc.TraceOptions |= 1 + } else { + sc.TraceOptions &= ^TraceOptions(1) + } +} + +// IsSampled returns true if the span will be exported. +func (t TraceOptions) IsSampled() bool { + return t&1 == 1 +} + +// SpanContext contains the state that must propagate across process boundaries. +// +// SpanContext is not an implementation of context.Context. +// TODO: add reference to external Census docs for SpanContext. +type SpanContext struct { + TraceID TraceID + SpanID SpanID + TraceOptions TraceOptions + Tracestate *tracestate.Tracestate +} + +type contextKey struct{} + +// FromContext returns the Span stored in a context, or nil if there isn't one. +func (t *tracer) FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +// NewContext returns a new context with the given Span attached. 
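+//
+// For example (editorial sketch), handing a span to code that only accepts
+// a context:
+//
+//	ctx = trace.NewContext(ctx, span)
+//	// trace.FromContext(ctx) now returns span.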
+func (t *tracer) NewContext(parent context.Context, s *Span) context.Context {
+    return context.WithValue(parent, contextKey{}, s)
+}
+
+// All available span kinds. A span's kind must be one of these values.
+const (
+    SpanKindUnspecified = iota
+    SpanKindServer
+    SpanKindClient
+)
+
+// StartOptions contains options concerning how a span is started.
+type StartOptions struct {
+    // Sampler to consult for this Span. If provided, it is always consulted.
+    //
+    // If not provided, then the behavior differs based on whether
+    // the parent of this Span is remote, local, or there is no parent.
+    // In the case of a remote parent or no parent, the
+    // default sampler (see Config) will be consulted. Otherwise,
+    // when there is a non-remote parent, no new sampling decision will be made:
+    // we will preserve the sampling of the parent.
+    Sampler Sampler
+
+    // SpanKind represents the kind of a span. If none is set,
+    // SpanKindUnspecified is used.
+    SpanKind int
+}
+
+// StartOption applies changes to StartOptions.
+type StartOption func(*StartOptions)
+
+// WithSpanKind causes new spans to be created with the given kind.
+func WithSpanKind(spanKind int) StartOption {
+    return func(o *StartOptions) {
+        o.SpanKind = spanKind
+    }
+}
+
+// WithSampler causes new spans to be created with a custom sampler.
+// Otherwise, the global sampler is used.
+func WithSampler(sampler Sampler) StartOption {
+    return func(o *StartOptions) {
+        o.Sampler = sampler
+    }
+}
+
+// StartSpan starts a new child span of the current span in the context. If
+// there is no span in the context, it creates a new trace and span.
+//
+// The returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
+func (t *tracer) StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) {
+    var opts StartOptions
+    var parent SpanContext
+    if p := t.FromContext(ctx); p != nil {
+        if ps, ok := p.internal.(*span); ok {
+            ps.addChild()
+        }
+        parent = p.SpanContext()
+    }
+    for _, op := range o {
+        op(&opts)
+    }
+    span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts)
+
+    ctx, end := startExecutionTracerTask(ctx, name)
+    span.executionTracerTaskEnd = end
+    extSpan := NewSpan(span)
+    return t.NewContext(ctx, extSpan), extSpan
+}
+
+// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
+//
+// Any parent span in the incoming context is ignored. StartSpanWithRemoteParent is
+// preferred for cases where the parent is propagated via an incoming request.
+//
+// The returned context contains the newly created span. You can use it to
+// propagate the returned span in process.
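+//
+// Typical server-side use (editorial sketch), where sc was decoded from the
+// incoming request, e.g. with the propagation package, and r is the request:
+//
+//	ctx, span := trace.StartSpanWithRemoteParent(r.Context(), "handle.request", sc)
+//	defer span.End()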
+func (t *tracer) StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + extSpan := NewSpan(span) + return t.NewContext(ctx, extSpan), extSpan +} + +func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *span { + s := &span{} + s.spanContext = parent + + cfg := config.Load().(*Config) + if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { + // lazy initialization + gen.init() + } + + if !hasParent { + s.spanContext.TraceID = cfg.IDGenerator.NewTraceID() + } + s.spanContext.SpanID = cfg.IDGenerator.NewSpanID() + sampler := cfg.DefaultSampler + + if !hasParent || remoteParent || o.Sampler != nil { + // If this span is the child of a local span and no Sampler is set in the + // options, keep the parent's TraceOptions. + // + // Otherwise, consult the Sampler in the options if it is non-nil, otherwise + // the default sampler. + if o.Sampler != nil { + sampler = o.Sampler + } + s.spanContext.setIsSampled(sampler(SamplingParameters{ + ParentContext: parent, + TraceID: s.spanContext.TraceID, + SpanID: s.spanContext.SpanID, + Name: name, + HasRemoteParent: remoteParent}).Sample) + } + + if !internal.LocalSpanStoreEnabled && !s.spanContext.IsSampled() { + return s + } + + s.data = &SpanData{ + SpanContext: s.spanContext, + StartTime: time.Now(), + SpanKind: o.SpanKind, + Name: name, + HasRemoteParent: remoteParent, + } + s.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + s.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + s.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + s.links = newEvictedQueue(cfg.MaxLinksPerSpan) + + if hasParent { + s.data.ParentSpanID = parent.SpanID + } + if internal.LocalSpanStoreEnabled { + var ss *spanStore + ss = spanStoreForNameCreateIfNew(name) + if ss != nil { + s.spanStore = ss + ss.add(s) + } + } + + return s +} + +// End ends the span. +func (s *span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } + if !s.IsRecordingEvents() { + return + } + s.endOnce.Do(func() { + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } + } + } + }) +} + +// makeSpanData produces a SpanData representing the current state of the Span. +// It requires that s.data is non-nil. 
+func (s *span) makeSpanData() *SpanData { + var sd SpanData + s.mu.Lock() + sd = *s.data + if s.lruAttributes.len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount + } + s.mu.Unlock() + return &sd +} + +// SpanContext returns the SpanContext of the span. +func (s *span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.spanContext +} + +// SetName sets the name of the span, if it is recording events. +func (s *span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Status = status + s.mu.Unlock() +} + +func (s *span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0, len(s.links.queue)) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0, len(s.messageEvents.queue)) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0, len(s.annotations.queue)) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}, s.lruAttributes.len()) + for _, key := range s.lruAttributes.keys() { + value, ok := s.lruAttributes.get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.copyToCappedAttributes(attributes) + s.mu.Unlock() +} + +func (s *span) printStringInternal(attributes []Attribute, str string) { + now := time.Now() + var am map[string]interface{} + if len(attributes) != 0 { + am = make(map[string]interface{}, len(attributes)) + for _, attr := range attributes { + am[attr.key] = attr.value + } + } + s.mu.Lock() + s.annotations.add(Annotation{ + Time: now, + Message: str, + Attributes: am, + }) + s.mu.Unlock() +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. 
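+//
+// For example (editorial sketch; the key "user" and value id are illustrative):
+//
+//	s.Annotate([]Attribute{StringAttribute("user", id)}, "cache miss")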
+func (s *span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.printStringInternal(attributes, fmt.Sprintf(format, a...)) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeSent, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows to identify a message between the sender and receiver). +// For example, this could be a sequence id. +func (s *span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + now := time.Now() + s.mu.Lock() + s.messageEvents.add(MessageEvent{ + Time: now, + EventType: MessageEventTypeRecv, + MessageID: messageID, + UncompressedByteSize: uncompressedByteSize, + CompressedByteSize: compressedByteSize, + }) + s.mu.Unlock() +} + +// AddLink adds a link to the span. +func (s *span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() +} + +func (s *span) String() string { + if s == nil { + return "" + } + if s.data == nil { + return fmt.Sprintf("span %s", s.spanContext.SpanID) + } + s.mu.Lock() + str := fmt.Sprintf("span %s %q", s.spanContext.SpanID, s.data.Name) + s.mu.Unlock() + return str +} + +var config atomic.Value // access atomically + +func init() { + config.Store(&Config{ + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: &defaultIDGenerator{}, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, + }) +} + +type defaultIDGenerator struct { + sync.Mutex + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + + traceIDAdd [2]uint64 + traceIDRand *rand.Rand + + initOnce sync.Once +} + +// init initializes the generator on the first call to avoid consuming entropy +// unnecessarily. +func (gen *defaultIDGenerator) init() { + gen.initOnce.Do(func() { + // initialize traceID and spanID generators. 
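+        // Editorial note: this one-time read seeds the cheaper math/rand
+        // generator (and the additive trace-ID constants) from crypto/rand,
+        // so per-ID generation below avoids the system entropy source.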
+        var rngSeed int64
+        for _, p := range []interface{}{
+            &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc,
+        } {
+            binary.Read(crand.Reader, binary.LittleEndian, p)
+        }
+        gen.traceIDRand = rand.New(rand.NewSource(rngSeed))
+        gen.spanIDInc |= 1
+    })
+}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *defaultIDGenerator) NewSpanID() [8]byte {
+    var id uint64
+    for id == 0 {
+        id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc)
+    }
+    var sid [8]byte
+    binary.LittleEndian.PutUint64(sid[:], id)
+    return sid
+}
+
+// NewTraceID returns a non-zero trace ID from a randomly-chosen sequence.
+// It acquires the generator's lock itself, so callers need not hold it.
+func (gen *defaultIDGenerator) NewTraceID() [16]byte {
+    var tid [16]byte
+    // Construct the trace ID from two outputs of traceIDRand, with a constant
+    // added to each half for additional entropy.
+    gen.Lock()
+    binary.LittleEndian.PutUint64(tid[0:8], gen.traceIDRand.Uint64()+gen.traceIDAdd[0])
+    binary.LittleEndian.PutUint64(tid[8:16], gen.traceIDRand.Uint64()+gen.traceIDAdd[1])
+    gen.Unlock()
+    return tid
+}
diff --git a/vendor/go.opencensus.io/trace/trace_api.go b/vendor/go.opencensus.io/trace/trace_api.go
new file mode 100644
index 00000000000..9e2c3a99926
--- /dev/null
+++ b/vendor/go.opencensus.io/trace/trace_api.go
@@ -0,0 +1,265 @@
+// Copyright 2020, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace
+
+import (
+    "context"
+)
+
+// DefaultTracer is the tracer used when package-level exported functions are invoked.
+var DefaultTracer Tracer = &tracer{}
+
+// Tracer can start spans and access context functions.
+type Tracer interface {
+
+    // StartSpan starts a new child span of the current span in the context. If
+    // there is no span in the context, it creates a new trace and span.
+    //
+    // The returned context contains the newly created span. You can use it to
+    // propagate the returned span in process.
+    StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span)
+
+    // StartSpanWithRemoteParent starts a new child span of the span from the given parent.
+    //
+    // Any parent span in the incoming context is ignored. StartSpanWithRemoteParent is
+    // preferred for cases where the parent is propagated via an incoming request.
+    //
+    // The returned context contains the newly created span. You can use it to
+    // propagate the returned span in process.
+    StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span)
+
+    // FromContext returns the Span stored in a context, or nil if there isn't one.
+    FromContext(ctx context.Context) *Span
+
+    // NewContext returns a new context with the given Span attached.
+    NewContext(parent context.Context, s *Span) context.Context
+}
+
+// StartSpan starts a new child span of the current span in the context. If
+// there is no span in the context, it creates a new trace and span.
+//
+// The returned context contains the newly created span. You can use it to
You can use it to +// propagate the returned span in process. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpan(ctx, name, o...) +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. +// +// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + return DefaultTracer.StartSpanWithRemoteParent(ctx, name, parent, o...) +} + +// FromContext returns the Span stored in a context, or a Span that is not +// recording events if there isn't one. +func FromContext(ctx context.Context) *Span { + return DefaultTracer.FromContext(ctx) +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { + return DefaultTracer.NewContext(parent, s) +} + +// SpanInterface represents a span of a trace. It has an associated SpanContext, and +// stores data accumulated while the span is active. +// +// Ideally users should interact with Spans by calling the functions in this +// package that take a Context parameter. +type SpanInterface interface { + + // IsRecordingEvents returns true if events are being recorded for this span. + // Use this check to avoid computing expensive annotations when they will never + // be used. + IsRecordingEvents() bool + + // End ends the span. + End() + + // SpanContext returns the SpanContext of the span. + SpanContext() SpanContext + + // SetName sets the name of the span, if it is recording events. + SetName(name string) + + // SetStatus sets the status of the span, if it is recording events. + SetStatus(status Status) + + // AddAttributes sets attributes in the span. + // + // Existing attributes whose keys appear in the attributes parameter are overwritten. + AddAttributes(attributes ...Attribute) + + // Annotate adds an annotation with attributes. + // Attributes can be nil. + Annotate(attributes []Attribute, str string) + + // Annotatef adds an annotation with attributes. + Annotatef(attributes []Attribute, format string, a ...interface{}) + + // AddMessageSendEvent adds a message send event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows the message to be correlated between the sender and receiver). + // For example, this could be a sequence id. + AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddMessageReceiveEvent adds a message receive event to the span. + // + // messageID is an identifier for the message, which is recommended to be + // unique in this span and the same between the send event and the receive + // event (this allows the message to be correlated between the sender and receiver). + // For example, this could be a sequence id. + AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) + + // AddLink adds a link to the span. + AddLink(l Link) + + // String prints a string representation of a span.
+ String() string +} + +// NewSpan is a convenience function for creating a *Span out of a SpanInterface implementation. +func NewSpan(s SpanInterface) *Span { + return &Span{internal: s} +} + +// Span is a struct wrapper around the SpanInterface interface, which allows correctly handling +// nil spans, while also allowing the SpanInterface implementation to be swapped out. +type Span struct { + internal SpanInterface +} + +// Internal returns the underlying implementation of the Span. +func (s *Span) Internal() SpanInterface { + return s.internal +} + +// IsRecordingEvents returns true if events are being recorded for this span. +// Use this check to avoid computing expensive annotations when they will never +// be used. +func (s *Span) IsRecordingEvents() bool { + if s == nil { + return false + } + return s.internal.IsRecordingEvents() +} + +// End ends the span. +func (s *Span) End() { + if s == nil { + return + } + s.internal.End() +} + +// SpanContext returns the SpanContext of the span. +func (s *Span) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + return s.internal.SpanContext() +} + +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetName(name) +} + +// SetStatus sets the status of the span, if it is recording events. +func (s *Span) SetStatus(status Status) { + if !s.IsRecordingEvents() { + return + } + s.internal.SetStatus(status) +} + +// AddAttributes sets attributes in the span. +// +// Existing attributes whose keys appear in the attributes parameter are overwritten. +func (s *Span) AddAttributes(attributes ...Attribute) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddAttributes(attributes...) +} + +// Annotate adds an annotation with attributes. +// Attributes can be nil. +func (s *Span) Annotate(attributes []Attribute, str string) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotate(attributes, str) +} + +// Annotatef adds an annotation with attributes. +func (s *Span) Annotatef(attributes []Attribute, format string, a ...interface{}) { + if !s.IsRecordingEvents() { + return + } + s.internal.Annotatef(attributes, format, a...) +} + +// AddMessageSendEvent adds a message send event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows the message to be correlated between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageSendEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddMessageReceiveEvent adds a message receive event to the span. +// +// messageID is an identifier for the message, which is recommended to be +// unique in this span and the same between the send event and the receive +// event (this allows the message to be correlated between the sender and receiver). +// For example, this could be a sequence id. +func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize int64) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddMessageReceiveEvent(messageID, uncompressedByteSize, compressedByteSize) +} + +// AddLink adds a link to the span.
+func (s *Span) AddLink(l Link) { + if !s.IsRecordingEvents() { + return + } + s.internal.AddLink(l) +} + +// String prints a string representation of a span. +func (s *Span) String() string { + if s == nil { + return "" + } + return s.internal.String() +} diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go new file mode 100644 index 00000000000..b8fc1e495a9 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -0,0 +1,33 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.11 +// +build go1.11 + +package trace + +import ( + "context" + t "runtime/trace" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + if !t.IsEnabled() { + // Avoid additional overhead if + // runtime/trace is not enabled. + return ctx, func() {} + } + nctx, task := t.NewTask(ctx, name) + return nctx, task.End +} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go new file mode 100644 index 00000000000..da488fc8740 --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -0,0 +1,26 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.11 +// +build !go1.11 + +package trace + +import ( + "context" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + return ctx, func() {} +} diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 00000000000..2d6c713eb3a --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. 
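For orientation before the code: the tracestate package that follows stores W3C tracestate entries as an ordered list of key-value pairs, with New validating entries and layering them over a parent. A minimal sketch of how a caller might drive that API; the keys and values here are illustrative (borrowed from the W3C spec examples) and not part of this diff:

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	// An entry as it might arrive on an inbound request.
	parent, err := tracestate.New(nil, tracestate.Entry{Key: "rojo", Value: "00f067aa0ba902b7"})
	if err != nil {
		panic(err)
	}

	// Layer a new vendor entry on top; it is inserted ahead of the
	// entries copied from the parent.
	child, err := tracestate.New(parent, tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"})
	if err != nil {
		panic(err)
	}

	for _, e := range child.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value) // congo=..., then rojo=...
	}
}
```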
+package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors to propagate additional information and inter-operate with their legacy ID formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in the list of key-value pairs of a Tracestate. +type Entry struct { + // Key is an opaque string of up to 256 printable characters. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string of up to 256 printable ASCII (RFC0020) characters (i.e., the + // range 0x20 to 0x7E) except comma (,) and equals (=). + Value string +} + +// Entries returns a slice of Entry. +func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pairs). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entries in entries, then the entry copied +// from the parent is removed. See the add func. +// +// An error is returned with a nil Tracestate if +// 1. one or more of the entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (a duplicate entry is counted only once).
+func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/vendor/go.starlark.net/starlark/hashtable.go b/vendor/go.starlark.net/starlark/hashtable.go index 40f72bb635b..e1bbeaafc61 100644 --- a/vendor/go.starlark.net/starlark/hashtable.go +++ b/vendor/go.starlark.net/starlark/hashtable.go @@ -6,8 +6,8 @@ package starlark import ( "fmt" + "hash/maphash" "math/big" - _ "unsafe" // for go:linkname hack ) // hashtable is used to represent Starlark dict and set values. @@ -416,21 +416,29 @@ func (it *keyIterator) Done() { } } -// TODO(adonovan): use go1.19's maphash.String. +// entries is a go1.23 iterator over the entries of the hash table. +func (ht *hashtable) entries(yield func(k, v Value) bool) { + if !ht.frozen { + ht.itercount++ + defer func() { ht.itercount-- }() + } + for e := ht.head; e != nil && yield(e.key, e.value); e = e.next { + } +} + +var seed = maphash.MakeSeed() // hashString computes the hash of s. func hashString(s string) uint32 { if len(s) >= 12 { // Call the Go runtime's optimized hash implementation, - // which uses the AESENC instruction on amd64 machines. - return uint32(goStringHash(s, 0)) + // which uses the AES instructions on amd64 and arm64 machines. + h := maphash.String(seed, s) + return uint32(h>>32) | uint32(h) } return softHashString(s) } -//go:linkname goStringHash runtime.stringHash -func goStringHash(s string, seed uintptr) uintptr - // softHashString computes the 32-bit FNV-1a hash of s in software. func softHashString(s string) uint32 { var h uint32 = 2166136261 diff --git a/vendor/go.starlark.net/starlark/value.go b/vendor/go.starlark.net/starlark/value.go index 22a37c8a82d..af16239e090 100644 --- a/vendor/go.starlark.net/starlark/value.go +++ b/vendor/go.starlark.net/starlark/value.go @@ -254,12 +254,17 @@ var ( // // Example usage: // -// iter := iterable.Iterator() +// var seq Iterable = ... +// iter := seq.Iterate() // defer iter.Done() -// var x Value -// for iter.Next(&x) { +// var elem Value +// for iter.Next(&elem) { // ... // } +// +// Or, using go1.23 iterators: +// +// for elem := range Elements(seq) { ... } type Iterator interface { // If the iterator is exhausted, Next returns false. // Otherwise it sets *p to the current element of the sequence, @@ -283,6 +288,8 @@ type Mapping interface { } // An IterableMapping is a mapping that supports key enumeration. +// +// See [Entries] for example use.
type IterableMapping interface { Mapping Iterate() Iterator // see Iterable interface @@ -847,6 +854,7 @@ func (d *Dict) Type() string { return "dict" func (d *Dict) Freeze() { d.ht.freeze() } func (d *Dict) Truth() Bool { return d.Len() > 0 } func (d *Dict) Hash() (uint32, error) { return 0, fmt.Errorf("unhashable type: dict") } +func (d *Dict) Entries(yield func(k, v Value) bool) { d.ht.entries(yield) } func (x *Dict) Union(y *Dict) *Dict { z := new(Dict) @@ -954,6 +962,23 @@ func (l *List) Iterate() Iterator { return &listIterator{l: l} } +// Elements is a go1.23 iterator over the elements of the list. +// +// Example: +// +// for elem := range list.Elements { ... } +func (l *List) Elements(yield func(Value) bool) { + if !l.frozen { + l.itercount++ + defer func() { l.itercount-- }() + } + for _, x := range l.elems { + if !yield(x) { + break + } + } +} + func (x *List) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { y := y_.(*List) // It's tempting to check x == y as an optimization here, @@ -1053,6 +1078,20 @@ func (t Tuple) Slice(start, end, step int) Value { } func (t Tuple) Iterate() Iterator { return &tupleIterator{elems: t} } + +// Elements is a go1.23 iterator over the elements of the tuple. +// +// (A Tuple is a slice, so it is of course directly iterable. This +// method exists to provide a fast path for the [Elements] standalone +// function.) +func (t Tuple) Elements(yield func(Value) bool) { + for _, x := range t { + if !yield(x) { + break + } + } +} + func (t Tuple) Freeze() { for _, elem := range t { elem.Freeze() @@ -1124,6 +1163,9 @@ func (s *Set) Truth() Bool { return s.Len() > 0 } func (s *Set) Attr(name string) (Value, error) { return builtinAttr(s, name, setMethods) } func (s *Set) AttrNames() []string { return builtinAttrNames(setMethods) } +func (s *Set) Elements(yield func(k Value) bool) { + s.ht.entries(func(k, _ Value) bool { return yield(k) }) +} func (x *Set) CompareSameType(op syntax.Token, y_ Value, depth int) (bool, error) { y := y_.(*Set) @@ -1561,6 +1603,74 @@ func Iterate(x Value) Iterator { return nil } +// Elements returns an iterator for the elements of the iterable value. +// +// Example of go1.23 iteration: +// +// for elem := range Elements(iterable) { ... } +// +// Push iterators are provided as a convenience for Go client code. The +// core iteration behavior of Starlark for-loops is defined by the +// [Iterable] interface. +// +// TODO(adonovan): change return type to go1.23 iter.Seq[Value]. +func Elements(iterable Iterable) func(yield func(Value) bool) { + // Use specialized push iterator if available (*List, Tuple, *Set). + type hasElements interface { + Elements(yield func(k Value) bool) + } + if iterable, ok := iterable.(hasElements); ok { + return iterable.Elements + } + + iter := iterable.Iterate() + return func(yield func(Value) bool) { + defer iter.Done() + var x Value + for iter.Next(&x) && yield(x) { + } + } +} + +// Entries returns an iterator over the entries (key/value pairs) of +// the iterable mapping. +// +// Example of go1.23 iteration: +// +// for k, v := range Entries(mapping) { ... } +// +// Push iterators are provided as a convenience for Go client code. The +// core iteration behavior of Starlark for-loops is defined by the +// [Iterable] interface. +// +// TODO(adonovan): change return type to go1.23 iter.Seq2[Value, Value]. +func Entries(mapping IterableMapping) func(yield func(k, v Value) bool) { + // If available (e.g. *Dict), use specialized push iterator, + // as it gets k and v in one shot.
+ type hasEntries interface { + Entries(yield func(k, v Value) bool) + } + if mapping, ok := mapping.(hasEntries); ok { + return mapping.Entries + } + + iter := mapping.Iterate() + return func(yield func(k, v Value) bool) { + defer iter.Done() + var k Value + for iter.Next(&k) { + v, found, err := mapping.Get(k) + if err != nil || !found { + panic(fmt.Sprintf("Iterate and Get are inconsistent (mapping=%v, key=%v)", + mapping.Type(), k.Type())) + } + if !yield(k, v) { + break + } + } + } +} + // Bytes is the type of a Starlark binary string. // // A Bytes encapsulates an immutable sequence of bytes. diff --git a/vendor/go.starlark.net/syntax/parse.go b/vendor/go.starlark.net/syntax/parse.go index 1183a03bf58..cc1dc1fa6ae 100644 --- a/vendor/go.starlark.net/syntax/parse.go +++ b/vendor/go.starlark.net/syntax/parse.go @@ -857,7 +857,9 @@ func (p *parser) parsePrimary() Expr { X: x, } } - p.in.errorf(p.in.pos, "got %#v, want primary expression", p.tok) + + // Report start pos of final token as it may be a NEWLINE (#532). + p.in.errorf(p.tokval.pos, "got %#v, want primary expression", p.tok) panic("unreachable") } diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml new file mode 100644 index 00000000000..8e5ca7d3e2b --- /dev/null +++ b/vendor/go.uber.org/zap/.codecov.yml @@ -0,0 +1,17 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 95% # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure +ignore: + - internal/readme/readme.go + diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore new file mode 100644 index 00000000000..da9d9d00b47 --- /dev/null +++ b/vendor/go.uber.org/zap/.gitignore @@ -0,0 +1,32 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +vendor + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.pprof +*.out +*.log + +/bin +cover.out +cover.html diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml new file mode 100644 index 00000000000..2346df13517 --- /dev/null +++ b/vendor/go.uber.org/zap/.golangci.yml @@ -0,0 +1,77 @@ +output: + # Make output more digestible with quickfix in vim/emacs/etc. + sort-results: true + print-issued-lines: false + +linters: + # We'll track the golangci-lint default linters manually + # instead of letting them change without our control. + disable-all: true + enable: + # golangci-lint defaults: + - errcheck + - gosimple + - govet + - ineffassign + - staticcheck + - unused + + # Our own extras: + - gofumpt + - nolintlint # lints nolint directives + - revive + +linters-settings: + govet: + # These govet checks are disabled by default, but they're useful. + enable: + - nilness + - reflectvaluecompare + - sortslice + - unusedwrite + + errcheck: + exclude-functions: + # These methods cannot fail. + # They operate on an in-memory buffer.
+ - (*go.uber.org/zap/buffer.Buffer).Write + - (*go.uber.org/zap/buffer.Buffer).WriteByte + - (*go.uber.org/zap/buffer.Buffer).WriteString + + - (*go.uber.org/zap/zapio.Writer).Close + - (*go.uber.org/zap/zapio.Writer).Sync + - (*go.uber.org/zap/zapio.Writer).Write + # Write to zapio.Writer cannot fail, + # so io.WriteString on it cannot fail. + - io.WriteString(*go.uber.org/zap/zapio.Writer) + + # Writing a plain string to a fmt.State cannot fail. + - io.WriteString(fmt.State) + +issues: + # Print all issues reported by all linters. + max-issues-per-linter: 0 + max-same-issues: 0 + + # Don't ignore some of the issues that golangci-lint considers okay. + # This includes documenting all exported entities. + exclude-use-default: false + + exclude-rules: + # Don't warn on unused parameters. + # Parameter names are useful; replacing them with '_' is undesirable. + - linters: [revive] + text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _' + + # staticcheck already has smarter checks for empty blocks. + # revive's empty-block linter has false positives. + # For example, as of writing this, the following is not allowed. + # for foo() { } + - linters: [revive] + text: 'empty-block: this block is empty, you can remove it' + + # Ignore logger.Sync() errcheck failures in example_test.go + # since those are intended to be uncomplicated examples. + - linters: [errcheck] + path: example_test.go + text: 'Error return value of `logger.Sync` is not checked' diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl new file mode 100644 index 00000000000..4fea3027af3 --- /dev/null +++ b/vendor/go.uber.org/zap/.readme.tmpl @@ -0,0 +1,117 @@ +# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ +Blazing fast, structured, leveled logging in Go. + +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +{{.BenchmarkAddingFields}} + +Log a message with a logger that already has 10 fields of context: + +{{.BenchmarkAccumulatedContext}} + +Log a static string, without any context or `printf`-style templating: + +{{.BenchmarkWithoutFields}} + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md new file mode 100644 index 00000000000..6d6cd5f4d70 --- /dev/null +++ b/vendor/go.uber.org/zap/CHANGELOG.md @@ -0,0 +1,687 @@ +# Changelog +All notable changes to this project will be documented in this file. + +This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 1.27.0 (20 Feb 2024) +Enhancements: +* [#1378][]: Add `WithLazy` method for `SugaredLogger`. +* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`. +* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`. +* [#1416][]: Add `WithPanicHook` option for testing panic logs. + +Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release. + +[#1378]: https://github.com/uber-go/zap/pull/1378 +[#1399]: https://github.com/uber-go/zap/pull/1399 +[#1406]: https://github.com/uber-go/zap/pull/1406 +[#1416]: https://github.com/uber-go/zap/pull/1416 + +## 1.26.0 (14 Sep 2023) +Enhancements: +* [#1297][]: Add Dict as a Field. +* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured +context. +* [#1350][]: String encoding is much (~50%) faster now. + +Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release. + +[#1297]: https://github.com/uber-go/zap/pull/1297 +[#1319]: https://github.com/uber-go/zap/pull/1319 +[#1350]: https://github.com/uber-go/zap/pull/1350 + +## 1.25.0 (1 Aug 2023) + +This release contains several improvements including performance, API additions, +and two new experimental packages whose APIs are unstable and may change in the +future. + +Enhancements: +* [#1246][]: Add `zap/exp/zapslog` package for integration with slog. +* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set. +* [#1281][]: Add `zap/exp/expfield` package which contains helper methods +`Str` and `Strs` for constructing String-like zap.Fields. +* [#1310][]: Reduce stack size on `Any`. + +Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions +to this release. + +[#1246]: https://github.com/uber-go/zap/pull/1246 +[#1273]: https://github.com/uber-go/zap/pull/1273 +[#1281]: https://github.com/uber-go/zap/pull/1281 +[#1310]: https://github.com/uber-go/zap/pull/1310 + +## 1.24.0 (30 Nov 2022) + +Enhancements: +* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the + current minimum enabled log level. +* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically. + +Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their +contributions to this release. 
+ +[#1148]: https://github.com/uber-go/zap/pull/1148 +[#1185]: https://github.com/uber-go/zap/pull/1185 + +## 1.23.0 (24 Aug 2022) + +Enhancements: +* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a + `LevelEnabler` or `Core`. +* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects + that implement `String() string`. + +[#1147]: https://github.com/uber-go/zap/pull/1147 +[#1155]: https://github.com/uber-go/zap/pull/1155 + +## 1.22.0 (8 Aug 2022) + +Enhancements: +* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log + arrays of objects. With these two constructors, you don't need to implement + `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement + `zapcore.ObjectMarshaler`. +* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing + `SugaredLogger` with the provided options applied. +* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level. + These functions provide a string joining behavior similar to `fmt.Println`. +* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the + logger for `Fatal`-level log entries. This defaults to exiting the program. +* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or + `NewDevelopment` to panic if the system was unable to build the logger. +* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for + a statement dynamically. + +Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun +for their contributions to this release. + +[#1071]: https://github.com/uber-go/zap/pull/1071 +[#1079]: https://github.com/uber-go/zap/pull/1079 +[#1080]: https://github.com/uber-go/zap/pull/1080 +[#1088]: https://github.com/uber-go/zap/pull/1088 +[#1108]: https://github.com/uber-go/zap/pull/1108 +[#1118]: https://github.com/uber-go/zap/pull/1118 + +## 1.21.0 (7 Feb 2022) + +Enhancements: +* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string. +* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a + string. + +Bugfixes: +* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset. + +Other changes: +* [#1052][]: Improve encoding performance when the `AddCaller` and + `AddStacktrace` options are used together. + +[#1047]: https://github.com/uber-go/zap/pull/1047 +[#1048]: https://github.com/uber-go/zap/pull/1048 +[#1052]: https://github.com/uber-go/zap/pull/1052 +[#1058]: https://github.com/uber-go/zap/pull/1058 + +Thanks to @aerosol and @Techassi for their contributions to this release. + +## 1.20.0 (4 Jan 2022) + +Enhancements: +* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline + characters between log statements. +* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON + encoding of reflected log fields. + +Bugfixes: +* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON. +* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject` + methods when the methods return. +* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero. + +Other changes: +* [#1028][]: Drop support for Go < 1.15.
+ +[#554]: https://github.com/uber-go/zap/pull/554 +[#989]: https://github.com/uber-go/zap/pull/989 +[#1011]: https://github.com/uber-go/zap/pull/1011 +[#1017]: https://github.com/uber-go/zap/pull/1017 +[#1028]: https://github.com/uber-go/zap/pull/1028 +[#1033]: https://github.com/uber-go/zap/pull/1033 +[#1039]: https://github.com/uber-go/zap/pull/1039 + +Thanks to @psrajat, @lruggieri, @sammyrnycreal for their contributions to this release. + +## 1.19.1 (8 Sep 2021) + +Bugfixes: +* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon. +* [#1003][]: JSON: Fix inaccurate precision when encoding float32. + +[#1001]: https://github.com/uber-go/zap/pull/1001 +[#1003]: https://github.com/uber-go/zap/pull/1003 + +## 1.19.0 (9 Aug 2021) + +Enhancements: +* [#975][]: Avoid panicking in Sampler core if the level is out of bounds. +* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields + better. + +[#975]: https://github.com/uber-go/zap/pull/975 +[#984]: https://github.com/uber-go/zap/pull/984 + +Thanks to @lancoLiu and @thockin for their contributions to this release. + +## 1.18.1 (28 Jun 2021) + +Bugfixes: +* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`. + +[#974]: https://github.com/uber-go/zap/pull/974 + +## 1.18.0 (28 Jun 2021) + +Enhancements: +* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers + messages in-memory and flushes them periodically. +* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`. +* [#897][]: Add `zap.WithClock` option to control the source of time via the + new `zapcore.Clock` interface. +* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w` + methods don't match expectations. +* [#943][]: Add support for filtering by level or arbitrary matcher function to + `zaptest/observer`. +* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's + `buffer.Buffer`. + +Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee +for their contributions to this release. + +[#691]: https://github.com/uber-go/zap/pull/691 +[#897]: https://github.com/uber-go/zap/pull/897 +[#943]: https://github.com/uber-go/zap/pull/943 +[#949]: https://github.com/uber-go/zap/pull/949 +[#961]: https://github.com/uber-go/zap/pull/961 +[#971]: https://github.com/uber-go/zap/pull/971 + +## 1.17.0 (25 May 2021) + +Bugfixes: +* [#867][]: Encode `<nil>` for nil `error` instead of a panic. +* [#931][], [#936][]: Update minimum version constraints to address + vulnerabilities in dependencies. + +Enhancements: +* [#865][]: Improve alignment of fields of the Logger struct, reducing its + size from 96 to 80 bytes. +* [#881][]: Support `grpclog.LoggerV2` in zapgrpc. +* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler + with the `application/x-www-form-urlencoded` content type. +* [#912][]: Support multi-field encoding with `zap.Inline`. +* [#913][]: Speed up SugaredLogger for calls with a single string. +* [#928][]: Add support for filtering by field name to `zaptest/observer`. + +Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+ +[#865]: https://github.com/uber-go/zap/pull/865 +[#867]: https://github.com/uber-go/zap/pull/867 +[#881]: https://github.com/uber-go/zap/pull/881 +[#903]: https://github.com/uber-go/zap/pull/903 +[#912]: https://github.com/uber-go/zap/pull/912 +[#913]: https://github.com/uber-go/zap/pull/913 +[#928]: https://github.com/uber-go/zap/pull/928 +[#931]: https://github.com/uber-go/zap/pull/931 +[#936]: https://github.com/uber-go/zap/pull/936 + +## 1.16.0 (1 Sep 2020) + +Bugfixes: +* [#828][]: Fix missing newline in IncreaseLevel error messages. +* [#835][]: Fix panic in JSON encoder when encoding times or durations + without specifying a time or duration encoder. +* [#843][]: Honor CallerSkip when taking stack traces. +* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead. +* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log. + +Enhancements: +* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders + for custom layouts. +* [#697][]: Added support for a configurable delimiter in the console encoder. +* [#852][]: Optimize console encoder by pooling the underlying JSON encoder. +* [#844][]: Add ability to include the calling function as part of logs. +* [#843][]: Add `StackSkip` for including truncated stacks as a field. +* [#861][]: Add options to customize Fatal behaviour for better testability. + +Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release. + +[#629]: https://github.com/uber-go/zap/pull/629 +[#697]: https://github.com/uber-go/zap/pull/697 +[#828]: https://github.com/uber-go/zap/pull/828 +[#835]: https://github.com/uber-go/zap/pull/835 +[#843]: https://github.com/uber-go/zap/pull/843 +[#844]: https://github.com/uber-go/zap/pull/844 +[#852]: https://github.com/uber-go/zap/pull/852 +[#854]: https://github.com/uber-go/zap/pull/854 +[#861]: https://github.com/uber-go/zap/pull/861 +[#862]: https://github.com/uber-go/zap/pull/862 + +## 1.15.0 (23 Apr 2020) + +Bugfixes: +* [#804][]: Fix handling of `Time` values out of `UnixNano` range. +* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`. + +Enhancements: +* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This + allows disabling annotation of log entries with caller information if + previously enabled with `AddCaller`. +* [#813][]: Deprecate `NewSampler` constructor in favor of + `NewSamplerWithOptions` which supports a `SamplerHook` option. This option + adds support for monitoring sampling decisions through a hook. + +Thanks to @danielbprice for their contributions to this release. + +[#804]: https://github.com/uber-go/zap/pull/804 +[#812]: https://github.com/uber-go/zap/pull/812 +[#806]: https://github.com/uber-go/zap/pull/806 +[#813]: https://github.com/uber-go/zap/pull/813 + +## 1.14.1 (14 Mar 2020) + +Bugfixes: +* [#791][]: Fix panic on attempting to build a logger with an invalid Config. +* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's + development-time dependencies. +* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to + be generated for arrays of `time.Time` objects when using string-based time + formats. + +Thanks to @YashishDua for their contributions to this release.
+ +[#791]: https://github.com/uber-go/zap/pull/791 +[#795]: https://github.com/uber-go/zap/pull/795 +[#799]: https://github.com/uber-go/zap/pull/799 + +## 1.14.0 (20 Feb 2020) + +Enhancements: +* [#771][]: Optimize calls for disabled log levels. +* [#773][]: Add millisecond duration encoder. +* [#775][]: Add option to increase the level of a logger. +* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible. + +Thanks to @caibirdme for their contributions to this release. + +[#771]: https://github.com/uber-go/zap/pull/771 +[#773]: https://github.com/uber-go/zap/pull/773 +[#775]: https://github.com/uber-go/zap/pull/775 +[#786]: https://github.com/uber-go/zap/pull/786 + +## 1.13.0 (13 Nov 2019) + +Enhancements: +* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors + to log pointers to primitives with support for `nil` values. + +Thanks to @jbizzle for their contributions to this release. + +[#758]: https://github.com/uber-go/zap/pull/758 + +## 1.12.0 (29 Oct 2019) + +Enhancements: +* [#751][]: Migrate to Go modules. + +[#751]: https://github.com/uber-go/zap/pull/751 + +## 1.11.0 (21 Oct 2019) + +Enhancements: +* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`. +* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders. + +Thanks to @juicemia, @uhthomas for their contributions to this release. + +[#725]: https://github.com/uber-go/zap/pull/725 +[#736]: https://github.com/uber-go/zap/pull/736 + +## 1.10.0 (29 Apr 2019) + +Bugfixes: +* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a + string. +* [#706][]: Fix incorrect call depth to determine caller in Go 1.12. + +Enhancements: +* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test + loggers. +* [#675][]: Don't panic when encoding a String field. +* [#704][]: Disable HTML escaping for JSON objects encoded using the + reflect-based encoder. + +Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions +to this release. + +[#657]: https://github.com/uber-go/zap/pull/657 +[#706]: https://github.com/uber-go/zap/pull/706 +[#610]: https://github.com/uber-go/zap/pull/610 +[#675]: https://github.com/uber-go/zap/pull/675 +[#704]: https://github.com/uber-go/zap/pull/704 + +## 1.9.1 (06 Aug 2018) + +Bugfixes: + +* [#614][]: MapObjectEncoder should not ignore empty slices. + +[#614]: https://github.com/uber-go/zap/pull/614 + +## 1.9.0 (19 Jul 2018) + +Enhancements: +* [#602][]: Reduce number of allocations when logging with reflection. +* [#572][], [#606][]: Expose a registry for third-party logging sinks. + +Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and +@dimroc for their contributions to this release. + +[#602]: https://github.com/uber-go/zap/pull/602 +[#572]: https://github.com/uber-go/zap/pull/572 +[#606]: https://github.com/uber-go/zap/pull/606 + +## 1.8.0 (13 Apr 2018) + +Enhancements: +* [#508][]: Make log level configurable when redirecting the standard + library's logger. +* [#518][]: Add a logger that writes to a `*testing.TB`. +* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc. + +Bugfixes: +* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`. + +Thanks to @DiSiqueira and @djui for their contributions to this release. 
+ +[#508]: https://github.com/uber-go/zap/pull/508 +[#518]: https://github.com/uber-go/zap/pull/518 +[#577]: https://github.com/uber-go/zap/pull/577 +[#574]: https://github.com/uber-go/zap/pull/574 + +## 1.7.1 (25 Sep 2017) + +Bugfixes: +* [#504][]: Store strings when using AddByteString with the map encoder. + +[#504]: https://github.com/uber-go/zap/pull/504 + +## 1.7.0 (21 Sep 2017) + +Enhancements: + +* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user + to specify the level of the logged messages. + +[#487]: https://github.com/uber-go/zap/pull/487 + +## 1.6.0 (30 Aug 2017) + +Enhancements: + +* [#491][]: Omit zap stack frames from stacktraces. +* [#490][]: Add a `ContextMap` method to observer logs for simpler + field validation in tests. + +[#490]: https://github.com/uber-go/zap/pull/490 +[#491]: https://github.com/uber-go/zap/pull/491 + +## 1.5.0 (22 Jul 2017) + +Enhancements: + +* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`. +* [#465][]: Support user-supplied encoders for logger names. + +Bugfixes: + +* [#477][]: Fix a bug that incorrectly truncated deep stacktraces. + +Thanks to @richard-tunein and @pavius for their contributions to this release. + +[#477]: https://github.com/uber-go/zap/pull/477 +[#465]: https://github.com/uber-go/zap/pull/465 +[#460]: https://github.com/uber-go/zap/pull/460 +[#470]: https://github.com/uber-go/zap/pull/470 + +## 1.4.1 (08 Jun 2017) + +This release fixes two bugs. + +Bugfixes: + +* [#435][]: Support a variety of case conventions when unmarshaling levels. +* [#444][]: Fix a panic in the observer. + +[#435]: https://github.com/uber-go/zap/pull/435 +[#444]: https://github.com/uber-go/zap/pull/444 + +## 1.4.0 (12 May 2017) + +This release adds a few small features and is fully backward-compatible. + +Enhancements: + +* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to + override the Unix-style default. +* [#425][]: Preserve time zones when logging times. +* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a + variety of operations a bit simpler. + +[#424]: https://github.com/uber-go/zap/pull/424 +[#425]: https://github.com/uber-go/zap/pull/425 +[#431]: https://github.com/uber-go/zap/pull/431 + +## 1.3.0 (25 Apr 2017) + +This release adds an enhancement to zap's testing helpers as well as the +ability to marshal an AtomicLevel. It is fully backward-compatible. + +Enhancements: + +* [#415][]: Add a substring-filtering helper to zap's observer. This is + particularly useful when testing the `SugaredLogger`. +* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`. + +[#415]: https://github.com/uber-go/zap/pull/415 +[#416]: https://github.com/uber-go/zap/pull/416 + +## 1.2.0 (13 Apr 2017) + +This release adds a gRPC compatibility wrapper. It is fully backward-compatible. + +Enhancements: + +* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements + `grpclog.Logger`. + +[#402]: https://github.com/uber-go/zap/pull/402 + +## 1.1.0 (31 Mar 2017) + +This release fixes two bugs and adds some enhancements to zap's testing helpers. +It is fully backward-compatible. + +Bugfixes: + +* [#385][]: Fix caller path trimming on Windows. +* [#396][]: Fix a panic when attempting to use non-existent directories with + zap's configuration struct. + +Enhancements: + +* [#386][]: Add filtering helpers to zaptest's observing logger. + +Thanks to @moitias for contributing to this release. 
+ +[#385]: https://github.com/uber-go/zap/pull/385 +[#396]: https://github.com/uber-go/zap/pull/396 +[#386]: https://github.com/uber-go/zap/pull/386 + +## 1.0.0 (14 Mar 2017) + +This is zap's first stable release. All exported APIs are now final, and no +further breaking changes will be made in the 1.x release series. Anyone using a +semver-aware dependency manager should now pin to `^1`. + +Breaking changes: + +* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without + casting from `[]byte` to `string`. +* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`, + `zap.Logger`, and `zap.SugaredLogger`. +* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to + clash with other testing helpers. + +Bugfixes: + +* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier + for tab-separated console output. +* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to + work with concurrency-safe `WriteSyncer` implementations. +* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux + systems. +* [#373][]: Report the correct caller from zap's standard library + interoperability wrappers. + +Enhancements: + +* [#348][]: Add a registry allowing third-party encodings to work with zap's + built-in `Config`. +* [#327][]: Make the representation of logger callers configurable (like times, + levels, and durations). +* [#376][]: Allow third-party encoders to use their own buffer pools, which + removes the last performance advantage that zap's encoders have over plugins. +* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple + `WriteSyncer`s and lock the result. +* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in + Go 1.9). +* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it + easier for particularly punctilious users to unit test their application's + logging. + +Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their +contributions to this release. + +[#366]: https://github.com/uber-go/zap/pull/366 +[#364]: https://github.com/uber-go/zap/pull/364 +[#371]: https://github.com/uber-go/zap/pull/371 +[#362]: https://github.com/uber-go/zap/pull/362 +[#369]: https://github.com/uber-go/zap/pull/369 +[#347]: https://github.com/uber-go/zap/pull/347 +[#373]: https://github.com/uber-go/zap/pull/373 +[#348]: https://github.com/uber-go/zap/pull/348 +[#327]: https://github.com/uber-go/zap/pull/327 +[#376]: https://github.com/uber-go/zap/pull/376 +[#346]: https://github.com/uber-go/zap/pull/346 +[#365]: https://github.com/uber-go/zap/pull/365 +[#372]: https://github.com/uber-go/zap/pull/372 + +## 1.0.0-rc.3 (7 Mar 2017) + +This is the third release candidate for zap's stable release. There are no +breaking changes. + +Bugfixes: + +* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs + rather than `[]uint8`. + +Enhancements: + +* [#307][]: Users can opt into colored output for log levels. +* [#353][]: In addition to hijacking the output of the standard library's + package-global logging functions, users can now construct a zap-backed + `log.Logger` instance. +* [#311][]: Frames from common runtime functions and some of zap's internal + machinery are now omitted from stacktraces. + +Thanks to @ansel1 and @suyash for their contributions to this release. 
+ +[#339]: https://github.com/uber-go/zap/pull/339 +[#307]: https://github.com/uber-go/zap/pull/307 +[#353]: https://github.com/uber-go/zap/pull/353 +[#311]: https://github.com/uber-go/zap/pull/311 + +## 1.0.0-rc.2 (21 Feb 2017) + +This is the second release candidate for zap's stable release. It includes two +breaking changes. + +Breaking changes: + +* [#316][]: Zap's global loggers are now fully concurrency-safe + (previously, users had to ensure that `ReplaceGlobals` was called before the + loggers were in use). However, they must now be accessed via the `L()` and + `S()` functions. Users can update their projects with + + ``` + gofmt -r "zap.L -> zap.L()" -w . + gofmt -r "zap.S -> zap.S()" -w . + ``` +* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid + JSON and YAML struct tags on all config structs. This release fixes the tags + and adds static analysis to prevent similar bugs in the future. + +Bugfixes: + +* [#321][]: Redirecting the standard library's `log` output now + correctly reports the logger's caller. + +Enhancements: + +* [#325][] and [#333][]: Zap now transparently supports non-standard, rich + errors like those produced by `github.com/pkg/errors`. +* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is + now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) -> + zap.NewNop()' -w .`. +* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a + more informative error. + +Thanks to @skipor and @chapsuk for their contributions to this release. + +[#316]: https://github.com/uber-go/zap/pull/316 +[#309]: https://github.com/uber-go/zap/pull/309 +[#317]: https://github.com/uber-go/zap/pull/317 +[#321]: https://github.com/uber-go/zap/pull/321 +[#325]: https://github.com/uber-go/zap/pull/325 +[#333]: https://github.com/uber-go/zap/pull/333 +[#326]: https://github.com/uber-go/zap/pull/326 +[#300]: https://github.com/uber-go/zap/pull/300 + +## 1.0.0-rc.1 (14 Feb 2017) + +This is the first release candidate for zap's stable release. There are multiple +breaking changes and improvements from the pre-release version. Most notably: + +* **Zap's import path is now "go.uber.org/zap"** — all users will + need to update their code. +* User-facing types and functions remain in the `zap` package. Code relevant + largely to extension authors is now in the `zapcore` package. +* The `zapcore.Core` type makes it easy for third-party packages to use zap's + internals but provide a different user-facing API. +* `Logger` is now a concrete type instead of an interface. +* A less verbose (though slower) logging API is included by default. +* Package-global loggers `L` and `S` are included. +* A human-friendly console encoder is included. +* A declarative config struct allows common logger configurations to be managed + as configuration instead of code. +* Sampling is more accurate, and doesn't depend on the standard library's shared + timer heap. + +## 0.1.0-beta.1 (6 Feb 2017) + +This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and +upgrade at their leisure. Since this is the first tagged release, there are no +backward compatibility concerns and all functionality is new. + +Early zap adopters should pin to the 0.1.x minor version until they're ready to +upgrade to the upcoming stable release. 
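The rc.2 notes above describe switching to the concurrency-safe globals via `zap.L()` and `zap.S()`. As a quick illustration of that pattern, here is a minimal sketch; the logger construction is arbitrary (`NewExample` is just convenient for demos):

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample() // any *zap.Logger would do here
	undo := zap.ReplaceGlobals(logger)
	defer undo() // restore the previous globals when done

	zap.L().Info("structured", zap.String("key", "value")) // global Logger
	zap.S().Infow("sugared", "key", "value")               // global SugaredLogger
}
```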
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md new file mode 100644 index 00000000000..e327d9aa5cd --- /dev/null +++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md @@ -0,0 +1,75 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, +body size, disability, ethnicity, gender identity and expression, level of +experience, nationality, personal appearance, race, religion, or sexual +identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an +appointed representative at an online or offline event. Representation of a +project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at oss-conduct@uber.com. The project +team will review and investigate all complaints, and will respond in a way +that it deems appropriate to the circumstances. The project team is obligated +to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 1.4, available at +[http://contributor-covenant.org/version/1/4][version]. 
+ +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md new file mode 100644 index 00000000000..ea02f3cae2d --- /dev/null +++ b/vendor/go.uber.org/zap/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing + +We'd love your help making zap the very best structured logging library in Go! + +If you'd like to add new exported APIs, please [open an issue][open-issue] +describing your proposal — discussing API changes ahead of time makes +pull request review much smoother. In your issue, pull request, and any other +communications, please remember to treat your fellow contributors with +respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously. + +Note that you'll need to sign [Uber's Contributor License Agreement][cla] +before we can accept any of your contributions. If necessary, a bot will remind +you to accept the CLA when you open your pull request. + +## Setup + +[Fork][fork], then clone the repository: + +```bash +mkdir -p $GOPATH/src/go.uber.org +cd $GOPATH/src/go.uber.org +git clone git@github.com:your_github_username/zap.git +cd zap +git remote add upstream https://github.com/uber-go/zap.git +git fetch upstream +``` + +Make sure that the tests and the linters pass: + +```bash +make test +make lint +``` + +## Making Changes + +Start by creating a new branch for your changes: + +```bash +cd $GOPATH/src/go.uber.org/zap +git checkout master +git fetch upstream +git rebase upstream/master +git checkout -b cool_new_feature +``` + +Make your changes, then ensure that `make lint` and `make test` still pass. If +you're satisfied with your changes, push them to your fork. + +```bash +git push origin cool_new_feature +``` + +Then use the GitHub UI to open a pull request. + +At this point, you're waiting on us to review your changes. We _try_ to respond +to issues and pull requests within a few business days, and we may suggest some +improvements or alternatives. Once your changes are approved, one of the +project maintainers will merge them. + +We're much more likely to approve your changes if you: + +- Add tests for new functionality. +- Write a [good commit message][commit-message]. +- Maintain backward compatibility. + +[fork]: https://github.com/uber-go/zap/fork +[open-issue]: https://github.com/uber-go/zap/issues/new +[cla]: https://cla-assistant.io/uber-go/zap +[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md new file mode 100644 index 00000000000..b183b20bc13 --- /dev/null +++ b/vendor/go.uber.org/zap/FAQ.md @@ -0,0 +1,164 @@ +# Frequently Asked Questions + +## Design + +### Why spend so much effort on logger performance? + +Of course, most applications won't notice the impact of a slow logger: they +already take tens or hundreds of milliseconds for each operation, so an extra +millisecond doesn't matter. + +On the other hand, why *not* make structured logging fast? The `SugaredLogger` +isn't any harder to use than other logging packages, and the `Logger` makes +structured logging possible in performance-sensitive contexts. Across a fleet +of Go microservices, making each application even slightly more efficient adds +up quickly. + +### Why aren't `Logger` and `SugaredLogger` interfaces? + +Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and +`SugaredLogger` interfaces would include *many* methods. 
As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
+
+### Why are some of my logs missing?
+
+Logs are dropped intentionally by zap when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which
+causes repeated logs within a second to be sampled. For details on why sampling
+is enabled, see [Why sample application logs](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+
+Since so many other logging packages include a global logger, many
+applications aren't designed to accept loggers as explicit parameters.
+Changing function signatures is often a breaking change, so zap includes
+global loggers to simplify migration.
+
+Avoid them where possible.
+
+### Why include dedicated Panic and Fatal log levels?
+
+In general, application code should handle errors gracefully instead of using
+`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
+crash when an error is truly unrecoverable. To avoid losing any information
+— especially the reason for the crash — the logger must flush any
+buffered entries before the process exits.
+
+Zap makes this easy by offering `Panic` and `Fatal` logging methods that
+automatically flush before exiting. Of course, this doesn't guarantee that
+logs will never be lost, but it eliminates a common error.
+
+See the discussion in uber-go/zap#207 for more details.
+
+### What's `DPanic`?
+
+`DPanic` stands for "panic in development." In development, it logs at
+`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to
+catch errors that are theoretically possible, but shouldn't actually happen,
+*without* crashing in production.
+ +If you've ever written code like this, you need `DPanic`: + +```go +if err != nil { + panic(fmt.Sprintf("shouldn't ever get here: %v", err)) +} +``` + +## Installation + +### What does the error `expects import "go.uber.org/zap"` mean? + +Either zap was installed incorrectly or you're referencing the wrong package +name in your code. + +Zap's source code happens to be hosted on GitHub, but the [import +path][import-path] is `go.uber.org/zap`. This gives us, the project +maintainers, the freedom to move the source code if necessary. However, it +means that you need to take a little care when installing and using the +package. + +If you follow two simple rules, everything should work: install zap with `go +get -u go.uber.org/zap`, and always import it in your code with `import +"go.uber.org/zap"`. Your code shouldn't contain *any* references to +`github.com/uber-go/zap`. + +## Usage + +### Does zap support log rotation? + +Zap doesn't natively support rotating log files, since we prefer to leave this +to an external program like `logrotate`. + +However, it's easy to integrate a log rotation package like +[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`. + +```go +// lumberjack.Logger is already safe for concurrent use, so we don't need to +// lock it. +w := zapcore.AddSync(&lumberjack.Logger{ + Filename: "/var/log/myapp/foo.log", + MaxSize: 500, // megabytes + MaxBackups: 3, + MaxAge: 28, // days +}) +core := zapcore.NewCore( + zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()), + w, + zap.InfoLevel, +) +logger := zap.New(core) +``` + +## Extensions + +We'd love to support every logging need within zap itself, but we're only +familiar with a handful of log ingestion systems, flag-parsing packages, and +the like. Rather than merging code that we can't effectively debug and +support, we'd rather grow an ecosystem of zap extensions. + +We're aware of the following extensions, but haven't used them ourselves: + +| Package | Integration | +| --- | --- | +| `github.com/tchap/zapext` | Sentry, syslog | +| `github.com/fgrosse/zaptest` | Ginkgo | +| `github.com/blendle/zapdriver` | Stackdriver | +| `github.com/moul/zapgorm` | Gorm | +| `github.com/moul/zapfilter` | Advanced filtering rules | + +[go-proverbs]: https://go-proverbs.github.io/ +[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths +[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2 diff --git a/vendor/go.uber.org/zap/LICENSE b/vendor/go.uber.org/zap/LICENSE new file mode 100644 index 00000000000..6652bed45f4 --- /dev/null +++ b/vendor/go.uber.org/zap/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2016-2017 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile new file mode 100644 index 00000000000..eb1cee53bd5 --- /dev/null +++ b/vendor/go.uber.org/zap/Makefile @@ -0,0 +1,76 @@ +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +export GOBIN ?= $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) + +GOVULNCHECK = $(GOBIN)/govulncheck +BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem + +# Directories containing independent Go modules. +MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test + +# Directories that we want to track coverage for. +COVER_DIRS = . ./exp + +.PHONY: all +all: lint test + +.PHONY: lint +lint: golangci-lint tidy-lint license-lint + +.PHONY: golangci-lint +golangci-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] golangci-lint: $(mod)" && \ + golangci-lint run --path-prefix $(mod)) &&) true + +.PHONY: tidy +tidy: + @$(foreach dir,$(MODULE_DIRS), \ + (cd $(dir) && go mod tidy) &&) true + +.PHONY: tidy-lint +tidy-lint: + @$(foreach mod,$(MODULE_DIRS), \ + (cd $(mod) && \ + echo "[lint] tidy: $(mod)" && \ + go mod tidy && \ + git diff --exit-code -- go.mod go.sum) &&) true + + +.PHONY: license-lint +license-lint: + ./checklicense.sh + +$(GOVULNCHECK): + cd tools && go install golang.org/x/vuln/cmd/govulncheck + +.PHONY: test +test: + @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true + +.PHONY: cover +cover: + @$(foreach dir,$(COVER_DIRS), ( \ + cd $(dir) && \ + go test -race -coverprofile=cover.out -coverpkg=./... ./... \ + && go tool cover -html=cover.out -o cover.html) &&) true + +.PHONY: bench +BENCH ?= . +bench: + @$(foreach dir,$(MODULE_DIRS), ( \ + cd $(dir) && \ + go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \ + ) &&) true + +.PHONY: updatereadme +updatereadme: + rm -f README.md + cat .readme.tmpl | go run internal/readme/readme.go > README.md + +.PHONY: vulncheck +vulncheck: $(GOVULNCHECK) + $(GOVULNCHECK) ./... diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md new file mode 100644 index 00000000000..a17035cb6f7 --- /dev/null +++ b/vendor/go.uber.org/zap/README.md @@ -0,0 +1,149 @@ +# :zap: zap + + +
+ +Blazing fast, structured, leveled logging in Go. + +![Zap logo](assets/logo.png) + +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +
+ +## Installation + +`go get -u go.uber.org/zap` + +Note that zap only supports the two most recent minor versions of Go. + +## Quick Start + +In contexts where performance is nice, but not critical, use the +`SugaredLogger`. It's 4-10x faster than other structured logging +packages and includes both structured and `printf`-style APIs. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() // flushes buffer, if any +sugar := logger.Sugar() +sugar.Infow("failed to fetch URL", + // Structured context as loosely typed key-value pairs. + "url", url, + "attempt", 3, + "backoff", time.Second, +) +sugar.Infof("Failed to fetch URL: %s", url) +``` + +When performance and type safety are critical, use the `Logger`. It's even +faster than the `SugaredLogger` and allocates far less, but it only supports +structured logging. + +```go +logger, _ := zap.NewProduction() +defer logger.Sync() +logger.Info("failed to fetch URL", + // Structured context as strongly typed Field values. + zap.String("url", url), + zap.Int("attempt", 3), + zap.Duration("backoff", time.Second), +) +``` + +See the [documentation][doc] and [FAQ](FAQ.md) for more details. + +## Performance + +For applications that log in the hot path, reflection-based serialization and +string formatting are prohibitively expensive — they're CPU-intensive +and make many small allocations. Put differently, using `encoding/json` and +`fmt.Fprintf` to log tons of `interface{}`s makes your application slow. + +Zap takes a different approach. It includes a reflection-free, zero-allocation +JSON encoder, and the base `Logger` strives to avoid serialization overhead +and allocations wherever possible. By building the high-level `SugaredLogger` +on that foundation, zap lets users *choose* when they need to count every +allocation and when they'd prefer a more familiar, loosely typed API. + +As measured by its own [benchmarking suite][], not only is zap more performant +than comparable structured logging packages — it's also faster than the +standard library. 
Like all benchmarks, take these with a grain of salt.[1](#footnote-versions) + +Log a message and 10 fields: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 656 ns/op | +0% | 5 allocs/op +| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op +| zerolog | 380 ns/op | -42% | 1 allocs/op +| go-kit | 2249 ns/op | +243% | 57 allocs/op +| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op +| slog | 2481 ns/op | +278% | 42 allocs/op +| apex/log | 9591 ns/op | +1362% | 63 allocs/op +| log15 | 11393 ns/op | +1637% | 75 allocs/op +| logrus | 11654 ns/op | +1677% | 79 allocs/op + +Log a message with a logger that already has 10 fields of context: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 67 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op +| zerolog | 35 ns/op | -48% | 0 allocs/op +| slog | 193 ns/op | +188% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op +| go-kit | 2460 ns/op | +3572% | 56 allocs/op +| log15 | 9038 ns/op | +13390% | 70 allocs/op +| apex/log | 9068 ns/op | +13434% | 53 allocs/op +| logrus | 10521 ns/op | +15603% | 68 allocs/op + +Log a static string, without any context or `printf`-style templating: + +| Package | Time | Time % to zap | Objects Allocated | +| :------ | :--: | :-----------: | :---------------: | +| :zap: zap | 63 ns/op | +0% | 0 allocs/op +| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op +| zerolog | 32 ns/op | -49% | 0 allocs/op +| standard library | 124 ns/op | +97% | 1 allocs/op +| slog | 196 ns/op | +211% | 0 allocs/op +| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op +| go-kit | 213 ns/op | +238% | 9 allocs/op +| apex/log | 771 ns/op | +1124% | 5 allocs/op +| logrus | 1439 ns/op | +2184% | 23 allocs/op +| log15 | 2069 ns/op | +3184% | 20 allocs/op + +## Development Status: Stable + +All APIs are finalized, and no breaking changes will be made in the 1.x series +of releases. Users of semver-aware dependency management systems should pin +zap to `^1`. + +## Contributing + +We encourage and support an active, healthy community of contributors — +including you! Details are in the [contribution guide](CONTRIBUTING.md) and +the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on +issues and pull requests, but you can also report any negative conduct to +oss-conduct@uber.com. That email list is a private, safe space; even the zap +maintainers don't have access, so don't hesitate to hold us to a high +standard. + +
+ +Released under the [MIT License](LICENSE). + +1 In particular, keep in mind that we may be +benchmarking against slightly older versions of other packages. Versions are +pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions) + +[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap +[doc]: https://pkg.go.dev/go.uber.org/zap +[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg +[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml +[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/uber-go/zap +[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks +[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod + diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go new file mode 100644 index 00000000000..abfccb566d5 --- /dev/null +++ b/vendor/go.uber.org/zap/array.go @@ -0,0 +1,447 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "time" + + "go.uber.org/zap/zapcore" +) + +// Array constructs a field with the given key and ArrayMarshaler. It provides +// a flexible, but still type-safe and efficient, way to add array-like types +// to the logging context. The struct's MarshalLogArray method is called lazily. +func Array(key string, val zapcore.ArrayMarshaler) Field { + return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val} +} + +// Bools constructs a field that carries a slice of bools. +func Bools(key string, bs []bool) Field { + return Array(key, bools(bs)) +} + +// ByteStrings constructs a field that carries a slice of []byte, each of which +// must be UTF-8 encoded text. +func ByteStrings(key string, bss [][]byte) Field { + return Array(key, byteStringsArray(bss)) +} + +// Complex128s constructs a field that carries a slice of complex numbers. +func Complex128s(key string, nums []complex128) Field { + return Array(key, complex128s(nums)) +} + +// Complex64s constructs a field that carries a slice of complex numbers. +func Complex64s(key string, nums []complex64) Field { + return Array(key, complex64s(nums)) +} + +// Durations constructs a field that carries a slice of time.Durations. +func Durations(key string, ds []time.Duration) Field { + return Array(key, durations(ds)) +} + +// Float64s constructs a field that carries a slice of floats. 
+func Float64s(key string, nums []float64) Field { + return Array(key, float64s(nums)) +} + +// Float32s constructs a field that carries a slice of floats. +func Float32s(key string, nums []float32) Field { + return Array(key, float32s(nums)) +} + +// Ints constructs a field that carries a slice of integers. +func Ints(key string, nums []int) Field { + return Array(key, ints(nums)) +} + +// Int64s constructs a field that carries a slice of integers. +func Int64s(key string, nums []int64) Field { + return Array(key, int64s(nums)) +} + +// Int32s constructs a field that carries a slice of integers. +func Int32s(key string, nums []int32) Field { + return Array(key, int32s(nums)) +} + +// Int16s constructs a field that carries a slice of integers. +func Int16s(key string, nums []int16) Field { + return Array(key, int16s(nums)) +} + +// Int8s constructs a field that carries a slice of integers. +func Int8s(key string, nums []int8) Field { + return Array(key, int8s(nums)) +} + +// Objects constructs a field with the given key, holding a list of the +// provided objects that can be marshaled by Zap. +// +// Note that these objects must implement zapcore.ObjectMarshaler directly. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the Request type, not its pointer (*Request). +// If it's on the pointer, use ObjectValues. +// +// Given an object that implements MarshalLogObject on the value receiver, you +// can log a slice of those objects with Objects like so: +// +// type Author struct{ ... } +// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var authors []Author = ... +// logger.Info("loading article", zap.Objects("authors", authors)) +// +// Similarly, given a type that implements MarshalLogObject on its pointer +// receiver, you can log a slice of pointers to that object with Objects like +// so: +// +// type Request struct{ ... } +// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error +// +// var requests []*Request = ... +// logger.Info("sending requests", zap.Objects("requests", requests)) +// +// If instead, you have a slice of values of such an object, use the +// ObjectValues constructor. +// +// var requests []Request = ... +// logger.Info("sending requests", zap.ObjectValues("requests", requests)) +func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field { + return Array(key, objects[T](values)) +} + +type objects[T zapcore.ObjectMarshaler] []T + +func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for _, o := range os { + if err := arr.AppendObject(o); err != nil { + return err + } + } + return nil +} + +// ObjectMarshalerPtr is a constraint that specifies that the given type +// implements zapcore.ObjectMarshaler on a pointer receiver. +type ObjectMarshalerPtr[T any] interface { + *T + zapcore.ObjectMarshaler +} + +// ObjectValues constructs a field with the given key, holding a list of the +// provided objects, where pointers to these objects can be marshaled by Zap. +// +// Note that pointers to these objects must implement zapcore.ObjectMarshaler. +// That is, if you're trying to marshal a []Request, the MarshalLogObject +// method must be declared on the *Request type, not the value (Request). +// If it's on the value, use Objects. +// +// Given an object that implements MarshalLogObject on the pointer receiver, +// you can log a slice of those objects with ObjectValues like so: +// +// type Request struct{ ... 
}
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+	return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range os {
+		// It is necessary for us to explicitly reference the "P" type.
+		// We cannot simply pass "&os[i]" to AppendObject because its type
+		// is "*T", which the type system does not consider as
+		// implementing ObjectMarshaler.
+		// Only the type "P" satisfies ObjectMarshaler, which we have
+		// to convert "*T" to explicitly.
+		var p P = &os[i]
+		if err := arr.AppendObject(p); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) Field {
+	return Array(key, stringArray(ss))
+}
+
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+	return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for _, o := range os {
+		arr.AppendString(o.String())
+	}
+	return nil
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) Field {
+	return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) Field {
+	return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) Field {
+	return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) Field {
+	return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) Field {
+	return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) Field {
+	return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) Field {
+	return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) Field { + return Array(key, errArray(errs)) +} + +type bools []bool + +func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bs { + arr.AppendBool(bs[i]) + } + return nil +} + +type byteStringsArray [][]byte + +func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range bss { + arr.AppendByteString(bss[i]) + } + return nil +} + +type complex128s []complex128 + +func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex128(nums[i]) + } + return nil +} + +type complex64s []complex64 + +func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendComplex64(nums[i]) + } + return nil +} + +type durations []time.Duration + +func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ds { + arr.AppendDuration(ds[i]) + } + return nil +} + +type float64s []float64 + +func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat64(nums[i]) + } + return nil +} + +type float32s []float32 + +func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendFloat32(nums[i]) + } + return nil +} + +type ints []int + +func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt(nums[i]) + } + return nil +} + +type int64s []int64 + +func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt64(nums[i]) + } + return nil +} + +type int32s []int32 + +func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt32(nums[i]) + } + return nil +} + +type int16s []int16 + +func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt16(nums[i]) + } + return nil +} + +type int8s []int8 + +func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendInt8(nums[i]) + } + return nil +} + +type stringArray []string + +func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ss { + arr.AppendString(ss[i]) + } + return nil +} + +type times []time.Time + +func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range ts { + arr.AppendTime(ts[i]) + } + return nil +} + +type uints []uint + +func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint(nums[i]) + } + return nil +} + +type uint64s []uint64 + +func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint64(nums[i]) + } + return nil +} + +type uint32s []uint32 + +func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint32(nums[i]) + } + return nil +} + +type uint16s []uint16 + +func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint16(nums[i]) + } + return nil +} + +type uint8s []uint8 + +func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUint8(nums[i]) + } + return nil +} + +type uintptrs []uintptr + +func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range nums { + arr.AppendUintptr(nums[i]) + } + return nil +} diff --git a/vendor/go.uber.org/zap/buffer/buffer.go 
b/vendor/go.uber.org/zap/buffer/buffer.go new file mode 100644 index 00000000000..0b8540c2138 --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/buffer.go @@ -0,0 +1,146 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package buffer provides a thin wrapper around a byte slice. Unlike the +// standard library's bytes.Buffer, it supports a portion of the strconv +// package's zero-allocation formatters. +package buffer // import "go.uber.org/zap/buffer" + +import ( + "strconv" + "time" +) + +const _size = 1024 // by default, create 1 KiB buffers + +// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so +// the only way to construct one is via a Pool. +type Buffer struct { + bs []byte + pool Pool +} + +// AppendByte writes a single byte to the Buffer. +func (b *Buffer) AppendByte(v byte) { + b.bs = append(b.bs, v) +} + +// AppendBytes writes the given slice of bytes to the Buffer. +func (b *Buffer) AppendBytes(v []byte) { + b.bs = append(b.bs, v...) +} + +// AppendString writes a string to the Buffer. +func (b *Buffer) AppendString(s string) { + b.bs = append(b.bs, s...) +} + +// AppendInt appends an integer to the underlying buffer (assuming base 10). +func (b *Buffer) AppendInt(i int64) { + b.bs = strconv.AppendInt(b.bs, i, 10) +} + +// AppendTime appends the time formatted using the specified layout. +func (b *Buffer) AppendTime(t time.Time, layout string) { + b.bs = t.AppendFormat(b.bs, layout) +} + +// AppendUint appends an unsigned integer to the underlying buffer (assuming +// base 10). +func (b *Buffer) AppendUint(i uint64) { + b.bs = strconv.AppendUint(b.bs, i, 10) +} + +// AppendBool appends a bool to the underlying buffer. +func (b *Buffer) AppendBool(v bool) { + b.bs = strconv.AppendBool(b.bs, v) +} + +// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN +// or +/- Inf. +func (b *Buffer) AppendFloat(f float64, bitSize int) { + b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize) +} + +// Len returns the length of the underlying byte slice. +func (b *Buffer) Len() int { + return len(b.bs) +} + +// Cap returns the capacity of the underlying byte slice. +func (b *Buffer) Cap() int { + return cap(b.bs) +} + +// Bytes returns a mutable reference to the underlying byte slice. +func (b *Buffer) Bytes() []byte { + return b.bs +} + +// String returns a string copy of the underlying byte slice. 
+func (b *Buffer) String() string { + return string(b.bs) +} + +// Reset resets the underlying byte slice. Subsequent writes re-use the slice's +// backing array. +func (b *Buffer) Reset() { + b.bs = b.bs[:0] +} + +// Write implements io.Writer. +func (b *Buffer) Write(bs []byte) (int, error) { + b.bs = append(b.bs, bs...) + return len(bs), nil +} + +// WriteByte writes a single byte to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteByte(v byte) error { + b.AppendByte(v) + return nil +} + +// WriteString writes a string to the Buffer. +// +// Error returned is always nil, function signature is compatible +// with bytes.Buffer and bufio.Writer +func (b *Buffer) WriteString(s string) (int, error) { + b.AppendString(s) + return len(s), nil +} + +// TrimNewline trims any final "\n" byte from the end of the buffer. +func (b *Buffer) TrimNewline() { + if i := len(b.bs) - 1; i >= 0 { + if b.bs[i] == '\n' { + b.bs = b.bs[:i] + } + } +} + +// Free returns the Buffer to its Pool. +// +// Callers must not retain references to the Buffer after calling Free. +func (b *Buffer) Free() { + b.pool.put(b) +} diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go new file mode 100644 index 00000000000..846323360ee --- /dev/null +++ b/vendor/go.uber.org/zap/buffer/pool.go @@ -0,0 +1,53 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package buffer + +import ( + "go.uber.org/zap/internal/pool" +) + +// A Pool is a type-safe wrapper around a sync.Pool. +type Pool struct { + p *pool.Pool[*Buffer] +} + +// NewPool constructs a new Pool. +func NewPool() Pool { + return Pool{ + p: pool.New(func() *Buffer { + return &Buffer{ + bs: make([]byte, 0, _size), + } + }), + } +} + +// Get retrieves a Buffer from the pool, creating one if necessary. 
+func (p Pool) Get() *Buffer { + buf := p.p.Get() + buf.Reset() + buf.pool = p + return buf +} + +func (p Pool) put(buf *Buffer) { + p.p.Put(buf) +} diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh new file mode 100644 index 00000000000..345ac8b89ab --- /dev/null +++ b/vendor/go.uber.org/zap/checklicense.sh @@ -0,0 +1,17 @@ +#!/bin/bash -e + +ERROR_COUNT=0 +while read -r file +do + case "$(head -1 "${file}")" in + *"Copyright (c) "*" Uber Technologies, Inc.") + # everything's cool + ;; + *) + echo "$file is missing license header." + (( ERROR_COUNT++ )) + ;; + esac +done < <(git ls-files "*\.go") + +exit $ERROR_COUNT diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go new file mode 100644 index 00000000000..e76e4e64fbe --- /dev/null +++ b/vendor/go.uber.org/zap/config.go @@ -0,0 +1,330 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "errors" + "sort" + "time" + + "go.uber.org/zap/zapcore" +) + +// SamplingConfig sets a sampling strategy for the logger. Sampling caps the +// global CPU and I/O load that logging puts on your process while attempting +// to preserve a representative subset of your logs. +// +// If specified, the Sampler will invoke the Hook after each decision. +// +// Values configured here are per-second. See zapcore.NewSamplerWithOptions for +// details. +type SamplingConfig struct { + Initial int `json:"initial" yaml:"initial"` + Thereafter int `json:"thereafter" yaml:"thereafter"` + Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"` +} + +// Config offers a declarative way to construct a logger. It doesn't do +// anything that can't be done with New, Options, and the various +// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to +// toggle common options. +// +// Note that Config intentionally supports only the most common options. More +// unusual logging setups (logging to network connections or message queues, +// splitting output between multiple files, etc.) are possible, but require +// direct use of the zapcore package. For sample code, see the package-level +// BasicConfiguration and AdvancedConfiguration examples. +// +// For an example showing runtime log level changes, see the documentation for +// AtomicLevel. +type Config struct { + // Level is the minimum enabled logging level. 
Note that this is a dynamic + // level, so calling Config.Level.SetLevel will atomically change the log + // level of all loggers descended from this config. + Level AtomicLevel `json:"level" yaml:"level"` + // Development puts the logger in development mode, which changes the + // behavior of DPanicLevel and takes stacktraces more liberally. + Development bool `json:"development" yaml:"development"` + // DisableCaller stops annotating logs with the calling function's file + // name and line number. By default, all logs are annotated. + DisableCaller bool `json:"disableCaller" yaml:"disableCaller"` + // DisableStacktrace completely disables automatic stacktrace capturing. By + // default, stacktraces are captured for WarnLevel and above logs in + // development and ErrorLevel and above in production. + DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"` + // Sampling sets a sampling policy. A nil SamplingConfig disables sampling. + Sampling *SamplingConfig `json:"sampling" yaml:"sampling"` + // Encoding sets the logger's encoding. Valid values are "json" and + // "console", as well as any third-party encodings registered via + // RegisterEncoder. + Encoding string `json:"encoding" yaml:"encoding"` + // EncoderConfig sets options for the chosen encoder. See + // zapcore.EncoderConfig for details. + EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"` + // OutputPaths is a list of URLs or file paths to write logging output to. + // See Open for details. + OutputPaths []string `json:"outputPaths" yaml:"outputPaths"` + // ErrorOutputPaths is a list of URLs to write internal logger errors to. + // The default is standard error. + // + // Note that this setting only affects internal errors; for sample code that + // sends error-level logs to a different location from info- and debug-level + // logs, see the package-level AdvancedConfiguration example. + ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"` + // InitialFields is a collection of fields to add to the root logger. + InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"` +} + +// NewProductionEncoderConfig returns an opinionated EncoderConfig for +// production environments. +// +// Messages encoded with this configuration will be JSON-formatted +// and will have the following keys by default: +// +// - "level": The logging level (e.g. "info", "error"). +// - "ts": The current time in number of seconds since the Unix epoch. +// - "msg": The message passed to the log statement. +// - "caller": If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - "stacktrace": If available, a stack trace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted as floating-point number of seconds since the Unix +// epoch. +// - Duration is formatted as floating-point number of seconds. +// +// You may change these by setting the appropriate fields in the returned +// object. 
+// For example, use the following to change the time encoding format: +// +// cfg := zap.NewProductionEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder +func NewProductionEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + TimeKey: "ts", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + FunctionKey: zapcore.OmitKey, + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.EpochTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewProductionConfig builds a reasonable default production logging +// configuration. +// Logging is enabled at InfoLevel and above, and uses a JSON encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of ErrorLevel and above. +// DPanicLevel logs will not panic, but will write a stacktrace. +// +// Sampling is enabled at 100:100 by default, +// meaning that after the first 100 log entries +// with the same level and message in the same second, +// it will log every 100th entry +// with the same level and message in the same second. +// You may disable this behavior by setting Sampling to nil. +// +// See [NewProductionEncoderConfig] for information +// on the default encoder configuration. +func NewProductionConfig() Config { + return Config{ + Level: NewAtomicLevelAt(InfoLevel), + Development: false, + Sampling: &SamplingConfig{ + Initial: 100, + Thereafter: 100, + }, + Encoding: "json", + EncoderConfig: NewProductionEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for +// development environments. +// +// Messages encoded with this configuration will use Zap's console encoder +// intended to print human-readable output. +// It will print log messages with the following information: +// +// - The log level (e.g. "INFO", "ERROR"). +// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - The message passed to the log statement. +// - If available, a short path to the file and line number +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// - If available, a stacktrace from the line +// where the log statement was issued. +// The logger configuration determines whether this field is captured. +// +// By default, the following formats are used for different types: +// +// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z"). +// - Duration is formatted as a string (e.g. "1.234s"). +// +// You may change these by setting the appropriate fields in the returned +// object. +// For example, use the following to change the time encoding format: +// +// cfg := zap.NewDevelopmentEncoderConfig() +// cfg.EncodeTime = zapcore.ISO8601TimeEncoder +func NewDevelopmentEncoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + // Keys can be anything except the empty string. 
+ TimeKey: "T", + LevelKey: "L", + NameKey: "N", + CallerKey: "C", + FunctionKey: zapcore.OmitKey, + MessageKey: "M", + StacktraceKey: "S", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.CapitalLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +// NewDevelopmentConfig builds a reasonable default development logging +// configuration. +// Logging is enabled at DebugLevel and above, and uses a console encoder. +// Logs are written to standard error. +// Stacktraces are included on logs of WarnLevel and above. +// DPanicLevel logs will panic. +// +// See [NewDevelopmentEncoderConfig] for information +// on the default encoder configuration. +func NewDevelopmentConfig() Config { + return Config{ + Level: NewAtomicLevelAt(DebugLevel), + Development: true, + Encoding: "console", + EncoderConfig: NewDevelopmentEncoderConfig(), + OutputPaths: []string{"stderr"}, + ErrorOutputPaths: []string{"stderr"}, + } +} + +// Build constructs a logger from the Config and Options. +func (cfg Config) Build(opts ...Option) (*Logger, error) { + enc, err := cfg.buildEncoder() + if err != nil { + return nil, err + } + + sink, errSink, err := cfg.openSinks() + if err != nil { + return nil, err + } + + if cfg.Level == (AtomicLevel{}) { + return nil, errors.New("missing Level") + } + + log := New( + zapcore.NewCore(enc, sink, cfg.Level), + cfg.buildOptions(errSink)..., + ) + if len(opts) > 0 { + log = log.WithOptions(opts...) + } + return log, nil +} + +func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option { + opts := []Option{ErrorOutput(errSink)} + + if cfg.Development { + opts = append(opts, Development()) + } + + if !cfg.DisableCaller { + opts = append(opts, AddCaller()) + } + + stackLevel := ErrorLevel + if cfg.Development { + stackLevel = WarnLevel + } + if !cfg.DisableStacktrace { + opts = append(opts, AddStacktrace(stackLevel)) + } + + if scfg := cfg.Sampling; scfg != nil { + opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core { + var samplerOpts []zapcore.SamplerOption + if scfg.Hook != nil { + samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook)) + } + return zapcore.NewSamplerWithOptions( + core, + time.Second, + cfg.Sampling.Initial, + cfg.Sampling.Thereafter, + samplerOpts..., + ) + })) + } + + if len(cfg.InitialFields) > 0 { + fs := make([]Field, 0, len(cfg.InitialFields)) + keys := make([]string, 0, len(cfg.InitialFields)) + for k := range cfg.InitialFields { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + fs = append(fs, Any(k, cfg.InitialFields[k])) + } + opts = append(opts, Fields(fs...)) + } + + return opts +} + +func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) { + sink, closeOut, err := Open(cfg.OutputPaths...) + if err != nil { + return nil, nil, err + } + errSink, _, err := Open(cfg.ErrorOutputPaths...) + if err != nil { + closeOut() + return nil, nil, err + } + return sink, errSink, nil +} + +func (cfg Config) buildEncoder() (zapcore.Encoder, error) { + return newEncoder(cfg.Encoding, cfg.EncoderConfig) +} diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go new file mode 100644 index 00000000000..3c50d7b4d3f --- /dev/null +++ b/vendor/go.uber.org/zap/doc.go @@ -0,0 +1,117 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zap provides fast, structured, leveled logging. +// +// For applications that log in the hot path, reflection-based serialization +// and string formatting are prohibitively expensive - they're CPU-intensive +// and make many small allocations. Put differently, using json.Marshal and +// fmt.Fprintf to log tons of interface{} makes your application slow. +// +// Zap takes a different approach. It includes a reflection-free, +// zero-allocation JSON encoder, and the base Logger strives to avoid +// serialization overhead and allocations wherever possible. By building the +// high-level SugaredLogger on that foundation, zap lets users choose when +// they need to count every allocation and when they'd prefer a more familiar, +// loosely typed API. +// +// # Choosing a Logger +// +// In contexts where performance is nice, but not critical, use the +// SugaredLogger. It's 4-10x faster than other structured logging packages and +// supports both structured and printf-style logging. Like log15 and go-kit, +// the SugaredLogger's structured logging APIs are loosely typed and accept a +// variadic number of key-value pairs. (For more advanced use cases, they also +// accept strongly typed fields - see the SugaredLogger.With documentation for +// details.) +// +// sugar := zap.NewExample().Sugar() +// defer sugar.Sync() +// sugar.Infow("failed to fetch URL", +// "url", "http://example.com", +// "attempt", 3, +// "backoff", time.Second, +// ) +// sugar.Infof("failed to fetch URL: %s", "http://example.com") +// +// By default, loggers are unbuffered. However, since zap's low-level APIs +// allow buffering, calling Sync before letting your process exit is a good +// habit. +// +// In the rare contexts where every microsecond and every allocation matter, +// use the Logger. It's even faster than the SugaredLogger and allocates far +// less, but it only supports strongly-typed, structured logging. +// +// logger := zap.NewExample() +// defer logger.Sync() +// logger.Info("failed to fetch URL", +// zap.String("url", "http://example.com"), +// zap.Int("attempt", 3), +// zap.Duration("backoff", time.Second), +// ) +// +// Choosing between the Logger and SugaredLogger doesn't need to be an +// application-wide decision: converting between the two is simple and +// inexpensive. 
+// +// logger := zap.NewExample() +// defer logger.Sync() +// sugar := logger.Sugar() +// plain := sugar.Desugar() +// +// # Configuring Zap +// +// The simplest way to build a Logger is to use zap's opinionated presets: +// NewExample, NewProduction, and NewDevelopment. These presets build a logger +// with a single function call: +// +// logger, err := zap.NewProduction() +// if err != nil { +// log.Fatalf("can't initialize zap logger: %v", err) +// } +// defer logger.Sync() +// +// Presets are fine for small projects, but larger projects and organizations +// naturally require a bit more customization. For most users, zap's Config +// struct strikes the right balance between flexibility and convenience. See +// the package-level BasicConfiguration example for sample code. +// +// More unusual configurations (splitting output between files, sending logs +// to a message queue, etc.) are possible, but require direct use of +// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration +// example for sample code. +// +// # Extending Zap +// +// The zap package itself is a relatively thin wrapper around the interfaces +// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g., +// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an +// exception aggregation service, like Sentry or Rollbar) typically requires +// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core +// interfaces. See the zapcore documentation for details. +// +// Similarly, package authors can use the high-performance Encoder and Core +// implementations in the zapcore package to build their own loggers. +// +// # Frequently Asked Questions +// +// An FAQ covering everything from installation errors to design decisions is +// available at https://github.com/uber-go/zap/blob/master/FAQ.md. +package zap // import "go.uber.org/zap" diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go new file mode 100644 index 00000000000..caa04ceefd8 --- /dev/null +++ b/vendor/go.uber.org/zap/encoder.go @@ -0,0 +1,79 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
+ +package zap + +import ( + "errors" + "fmt" + "sync" + + "go.uber.org/zap/zapcore" +) + +var ( + errNoEncoderNameSpecified = errors.New("no encoder name specified") + + _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){ + "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewConsoleEncoder(encoderConfig), nil + }, + "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + return zapcore.NewJSONEncoder(encoderConfig), nil + }, + } + _encoderMutex sync.RWMutex +) + +// RegisterEncoder registers an encoder constructor, which the Config struct +// can then reference. By default, the "json" and "console" encoders are +// registered. +// +// Attempting to register an encoder whose name is already taken returns an +// error. +func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error { + _encoderMutex.Lock() + defer _encoderMutex.Unlock() + if name == "" { + return errNoEncoderNameSpecified + } + if _, ok := _encoderNameToConstructor[name]; ok { + return fmt.Errorf("encoder already registered for name %q", name) + } + _encoderNameToConstructor[name] = constructor + return nil +} + +func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) { + if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil { + return nil, errors.New("missing EncodeTime in EncoderConfig") + } + + _encoderMutex.RLock() + defer _encoderMutex.RUnlock() + if name == "" { + return nil, errNoEncoderNameSpecified + } + constructor, ok := _encoderNameToConstructor[name] + if !ok { + return nil, fmt.Errorf("no encoder registered for name %q", name) + } + return constructor(encoderConfig) +} diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go new file mode 100644 index 00000000000..45f7b838dc1 --- /dev/null +++ b/vendor/go.uber.org/zap/error.go @@ -0,0 +1,82 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "go.uber.org/zap/internal/pool" + "go.uber.org/zap/zapcore" +) + +var _errArrayElemPool = pool.New(func() *errArrayElem { + return &errArrayElem{} +}) + +// Error is shorthand for the common idiom NamedError("error", err). +func Error(err error) Field { + return NamedError("error", err) +} + +// NamedError constructs a field that lazily stores err.Error() under the +// provided key. 
Errors which also implement fmt.Formatter (like those produced +// by github.com/pkg/errors) will also have their verbose representation stored +// under key+"Verbose". If passed a nil error, the field is a no-op. +// +// For the common case in which the key is simply "error", the Error function +// is shorter and less repetitive. +func NamedError(key string, err error) Field { + if err == nil { + return Skip() + } + return Field{Key: key, Type: zapcore.ErrorType, Interface: err} +} + +type errArray []error + +func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error { + for i := range errs { + if errs[i] == nil { + continue + } + // To represent each error as an object with an "error" attribute and + // potentially an "errorVerbose" attribute, we need to wrap it in a + // type that implements LogObjectMarshaler. To prevent this from + // allocating, pool the wrapper type. + elem := _errArrayElemPool.Get() + elem.error = errs[i] + err := arr.AppendObject(elem) + elem.error = nil + _errArrayElemPool.Put(elem) + if err != nil { + return err + } + } + return nil +} + +type errArrayElem struct { + error +} + +func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error { + // Re-use the error field's logic, which supports non-standard error types. + Error(e.error).AddTo(enc) + return nil +} diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go new file mode 100644 index 00000000000..6743930b823 --- /dev/null +++ b/vendor/go.uber.org/zap/field.go @@ -0,0 +1,615 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "math" + "time" + + "go.uber.org/zap/internal/stacktrace" + "go.uber.org/zap/zapcore" +) + +// Field is an alias for zapcore.Field. Aliasing this type dramatically +// improves the navigability of this package's API documentation. +type Field = zapcore.Field + +var ( + _minTimeInt64 = time.Unix(0, math.MinInt64) + _maxTimeInt64 = time.Unix(0, math.MaxInt64) +) + +// Skip constructs a no-op field, which is often useful when handling invalid +// inputs in other Field constructors. +func Skip() Field { + return Field{Type: zapcore.SkipType} +} + +// nilField returns a field which will marshal explicitly as nil. See motivation +// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking +// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the +// implementation here should be changed to reflect that.
+func nilField(key string) Field { return Reflect(key, nil) } + +// Binary constructs a field that carries an opaque binary blob. +// +// Binary data is serialized in an encoding-appropriate format. For example, +// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text, +// use ByteString. +func Binary(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.BinaryType, Interface: val} +} + +// Bool constructs a field that carries a bool. +func Bool(key string, val bool) Field { + var ival int64 + if val { + ival = 1 + } + return Field{Key: key, Type: zapcore.BoolType, Integer: ival} +} + +// Boolp constructs a field that carries a *bool. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Boolp(key string, val *bool) Field { + if val == nil { + return nilField(key) + } + return Bool(key, *val) +} + +// ByteString constructs a field that carries UTF-8 encoded text as a []byte. +// To log opaque binary blobs (which aren't necessarily valid UTF-8), use +// Binary. +func ByteString(key string, val []byte) Field { + return Field{Key: key, Type: zapcore.ByteStringType, Interface: val} +} + +// Complex128 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex128 to +// interface{}). +func Complex128(key string, val complex128) Field { + return Field{Key: key, Type: zapcore.Complex128Type, Interface: val} +} + +// Complex128p constructs a field that carries a *complex128. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex128p(key string, val *complex128) Field { + if val == nil { + return nilField(key) + } + return Complex128(key, *val) +} + +// Complex64 constructs a field that carries a complex number. Unlike most +// numeric fields, this costs an allocation (to convert the complex64 to +// interface{}). +func Complex64(key string, val complex64) Field { + return Field{Key: key, Type: zapcore.Complex64Type, Interface: val} +} + +// Complex64p constructs a field that carries a *complex64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Complex64p(key string, val *complex64) Field { + if val == nil { + return nilField(key) + } + return Complex64(key, *val) +} + +// Float64 constructs a field that carries a float64. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float64(key string, val float64) Field { + return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))} +} + +// Float64p constructs a field that carries a *float64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float64p(key string, val *float64) Field { + if val == nil { + return nilField(key) + } + return Float64(key, *val) +} + +// Float32 constructs a field that carries a float32. The way the +// floating-point value is represented is encoder-dependent, so marshaling is +// necessarily lazy. +func Float32(key string, val float32) Field { + return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))} +} + +// Float32p constructs a field that carries a *float32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Float32p(key string, val *float32) Field { + if val == nil { + return nilField(key) + } + return Float32(key, *val) +} + +// Int constructs a field with the given key and value. 
+func Int(key string, val int) Field { + return Int64(key, int64(val)) +} + +// Intp constructs a field that carries a *int. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Intp(key string, val *int) Field { + if val == nil { + return nilField(key) + } + return Int(key, *val) +} + +// Int64 constructs a field with the given key and value. +func Int64(key string, val int64) Field { + return Field{Key: key, Type: zapcore.Int64Type, Integer: val} +} + +// Int64p constructs a field that carries a *int64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int64p(key string, val *int64) Field { + if val == nil { + return nilField(key) + } + return Int64(key, *val) +} + +// Int32 constructs a field with the given key and value. +func Int32(key string, val int32) Field { + return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)} +} + +// Int32p constructs a field that carries a *int32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int32p(key string, val *int32) Field { + if val == nil { + return nilField(key) + } + return Int32(key, *val) +} + +// Int16 constructs a field with the given key and value. +func Int16(key string, val int16) Field { + return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)} +} + +// Int16p constructs a field that carries a *int16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int16p(key string, val *int16) Field { + if val == nil { + return nilField(key) + } + return Int16(key, *val) +} + +// Int8 constructs a field with the given key and value. +func Int8(key string, val int8) Field { + return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)} +} + +// Int8p constructs a field that carries a *int8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Int8p(key string, val *int8) Field { + if val == nil { + return nilField(key) + } + return Int8(key, *val) +} + +// String constructs a field with the given key and value. +func String(key string, val string) Field { + return Field{Key: key, Type: zapcore.StringType, String: val} +} + +// Stringp constructs a field that carries a *string. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Stringp(key string, val *string) Field { + if val == nil { + return nilField(key) + } + return String(key, *val) +} + +// Uint constructs a field with the given key and value. +func Uint(key string, val uint) Field { + return Uint64(key, uint64(val)) +} + +// Uintp constructs a field that carries a *uint. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintp(key string, val *uint) Field { + if val == nil { + return nilField(key) + } + return Uint(key, *val) +} + +// Uint64 constructs a field with the given key and value. +func Uint64(key string, val uint64) Field { + return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)} +} + +// Uint64p constructs a field that carries a *uint64. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint64p(key string, val *uint64) Field { + if val == nil { + return nilField(key) + } + return Uint64(key, *val) +} + +// Uint32 constructs a field with the given key and value. 
+func Uint32(key string, val uint32) Field { + return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)} +} + +// Uint32p constructs a field that carries a *uint32. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint32p(key string, val *uint32) Field { + if val == nil { + return nilField(key) + } + return Uint32(key, *val) +} + +// Uint16 constructs a field with the given key and value. +func Uint16(key string, val uint16) Field { + return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)} +} + +// Uint16p constructs a field that carries a *uint16. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint16p(key string, val *uint16) Field { + if val == nil { + return nilField(key) + } + return Uint16(key, *val) +} + +// Uint8 constructs a field with the given key and value. +func Uint8(key string, val uint8) Field { + return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)} +} + +// Uint8p constructs a field that carries a *uint8. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uint8p(key string, val *uint8) Field { + if val == nil { + return nilField(key) + } + return Uint8(key, *val) +} + +// Uintptr constructs a field with the given key and value. +func Uintptr(key string, val uintptr) Field { + return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)} +} + +// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Uintptrp(key string, val *uintptr) Field { + if val == nil { + return nilField(key) + } + return Uintptr(key, *val) +} + +// Reflect constructs a field with the given key and an arbitrary object. It uses +// an encoding-appropriate, reflection-based function to lazily serialize nearly +// any object into the logging context, but it's relatively slow and +// allocation-heavy. Outside tests, Any is always a better choice. +// +// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect +// includes the error message in the final log output. +func Reflect(key string, val interface{}) Field { + return Field{Key: key, Type: zapcore.ReflectType, Interface: val} +} + +// Namespace creates a named, isolated scope within the logger's context. All +// subsequent fields will be added to the new namespace. +// +// This helps prevent key collisions when injecting loggers into sub-components +// or third-party libraries. +func Namespace(key string) Field { + return Field{Key: key, Type: zapcore.NamespaceType} +} + +// Stringer constructs a field with the given key and the output of the value's +// String method. The Stringer's String method is called lazily. +func Stringer(key string, val fmt.Stringer) Field { + return Field{Key: key, Type: zapcore.StringerType, Interface: val} +} + +// Time constructs a Field with the given key and value. The encoder +// controls how the time is serialized. +func Time(key string, val time.Time) Field { + if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) { + return Field{Key: key, Type: zapcore.TimeFullType, Interface: val} + } + return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()} +} + +// Timep constructs a field that carries a *time.Time. The returned Field will safely +// and explicitly represent `nil` when appropriate. 
+func Timep(key string, val *time.Time) Field { + if val == nil { + return nilField(key) + } + return Time(key, *val) +} + +// Stack constructs a field that stores a stacktrace of the current goroutine +// under the provided key. Keep in mind that taking a stacktrace is eager and +// expensive (relatively speaking); this function both makes an allocation and +// takes about two microseconds. +func Stack(key string) Field { + return StackSkip(key, 1) // skip Stack +} + +// StackSkip constructs a field similarly to Stack, but also skips the given +// number of frames from the top of the stacktrace. +func StackSkip(key string, skip int) Field { + // Returning the stacktrace as a string costs an allocation, but saves us + // from expanding the zapcore.Field union struct to include a byte slice. Since + // taking a stacktrace is already so expensive (~10us), the extra allocation + // is okay. + return String(key, stacktrace.Take(skip+1)) // skip StackSkip +} + +// Duration constructs a field with the given key and value. The encoder +// controls how the duration is serialized. +func Duration(key string, val time.Duration) Field { + return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)} +} + +// Durationp constructs a field that carries a *time.Duration. The returned Field will safely +// and explicitly represent `nil` when appropriate. +func Durationp(key string, val *time.Duration) Field { + if val == nil { + return nilField(key) + } + return Duration(key, *val) +} + +// Object constructs a field with the given key and ObjectMarshaler. It +// provides a flexible, but still type-safe and efficient, way to add map- or +// struct-like user-defined types to the logging context. The struct's +// MarshalLogObject method is called lazily. +func Object(key string, val zapcore.ObjectMarshaler) Field { + return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val} +} + +// Inline constructs a Field that is similar to Object, but it +// will add the elements of the provided ObjectMarshaler to the +// current namespace. +func Inline(val zapcore.ObjectMarshaler) Field { + return zapcore.Field{ + Type: zapcore.InlineMarshalerType, + Interface: val, + } +} + +// Dict constructs a field containing the provided key-value pairs. +// It acts similarly to [Object], but with the fields specified as arguments. +func Dict(key string, val ...Field) Field { + return dictField(key, val) +} + +// We need a function with the signature (string, T) for zap.Any. +func dictField(key string, val []Field) Field { + return Object(key, dictObject(val)) +} + +type dictObject []Field + +func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error { + for _, f := range d { + f.AddTo(enc) + } + return nil +} + +// We discovered an issue where zap.Any can cause a performance degradation +// when used in new goroutines. +// +// This happens because the compiler assigns 4.8kb (one zap.Field per arm of +// the switch statement) of stack space for zap.Any when it takes the form: +// +// switch v := v.(type) { +// case string: +// return String(key, v) +// case int: +// return Int(key, v) +// // ... +// default: +// return Reflect(key, v) +// } +// +// To avoid this, we use the type switch to assign a value to a single local variable +// and then call a function on it. +// The local variable is just a function reference so it doesn't allocate +// when converted to an interface{}. +// +// A fair bit of experimentation went into this.
+// See also: +// +// - https://github.com/uber-go/zap/pull/1301 +// - https://github.com/uber-go/zap/pull/1303 +// - https://github.com/uber-go/zap/pull/1304 +// - https://github.com/uber-go/zap/pull/1305 +// - https://github.com/uber-go/zap/pull/1308 +// +// See https://github.com/golang/go/issues/62077 for upstream issue. +type anyFieldC[T any] func(string, T) Field + +func (f anyFieldC[T]) Any(key string, val any) Field { + v, _ := val.(T) + // val is guaranteed to be a T, except when it's nil. + return f(key, v) +} + +// Any takes a key and an arbitrary value and chooses the best way to represent +// them as a field, falling back to a reflection-based approach only if +// necessary. +// +// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between +// them. To minimize surprises, []byte values are treated as binary blobs, byte +// values are treated as uint8, and runes are always treated as integers. +func Any(key string, value interface{}) Field { + var c interface{ Any(string, any) Field } + + switch value.(type) { + case zapcore.ObjectMarshaler: + c = anyFieldC[zapcore.ObjectMarshaler](Object) + case zapcore.ArrayMarshaler: + c = anyFieldC[zapcore.ArrayMarshaler](Array) + case []Field: + c = anyFieldC[[]Field](dictField) + case bool: + c = anyFieldC[bool](Bool) + case *bool: + c = anyFieldC[*bool](Boolp) + case []bool: + c = anyFieldC[[]bool](Bools) + case complex128: + c = anyFieldC[complex128](Complex128) + case *complex128: + c = anyFieldC[*complex128](Complex128p) + case []complex128: + c = anyFieldC[[]complex128](Complex128s) + case complex64: + c = anyFieldC[complex64](Complex64) + case *complex64: + c = anyFieldC[*complex64](Complex64p) + case []complex64: + c = anyFieldC[[]complex64](Complex64s) + case float64: + c = anyFieldC[float64](Float64) + case *float64: + c = anyFieldC[*float64](Float64p) + case []float64: + c = anyFieldC[[]float64](Float64s) + case float32: + c = anyFieldC[float32](Float32) + case *float32: + c = anyFieldC[*float32](Float32p) + case []float32: + c = anyFieldC[[]float32](Float32s) + case int: + c = anyFieldC[int](Int) + case *int: + c = anyFieldC[*int](Intp) + case []int: + c = anyFieldC[[]int](Ints) + case int64: + c = anyFieldC[int64](Int64) + case *int64: + c = anyFieldC[*int64](Int64p) + case []int64: + c = anyFieldC[[]int64](Int64s) + case int32: + c = anyFieldC[int32](Int32) + case *int32: + c = anyFieldC[*int32](Int32p) + case []int32: + c = anyFieldC[[]int32](Int32s) + case int16: + c = anyFieldC[int16](Int16) + case *int16: + c = anyFieldC[*int16](Int16p) + case []int16: + c = anyFieldC[[]int16](Int16s) + case int8: + c = anyFieldC[int8](Int8) + case *int8: + c = anyFieldC[*int8](Int8p) + case []int8: + c = anyFieldC[[]int8](Int8s) + case string: + c = anyFieldC[string](String) + case *string: + c = anyFieldC[*string](Stringp) + case []string: + c = anyFieldC[[]string](Strings) + case uint: + c = anyFieldC[uint](Uint) + case *uint: + c = anyFieldC[*uint](Uintp) + case []uint: + c = anyFieldC[[]uint](Uints) + case uint64: + c = anyFieldC[uint64](Uint64) + case *uint64: + c = anyFieldC[*uint64](Uint64p) + case []uint64: + c = anyFieldC[[]uint64](Uint64s) + case uint32: + c = anyFieldC[uint32](Uint32) + case *uint32: + c = anyFieldC[*uint32](Uint32p) + case []uint32: + c = anyFieldC[[]uint32](Uint32s) + case uint16: + c = anyFieldC[uint16](Uint16) + case *uint16: + c = anyFieldC[*uint16](Uint16p) + case []uint16: + c = anyFieldC[[]uint16](Uint16s) + case uint8: + c = anyFieldC[uint8](Uint8) + case *uint8: + c = 
anyFieldC[*uint8](Uint8p) + case []byte: + c = anyFieldC[[]byte](Binary) + case uintptr: + c = anyFieldC[uintptr](Uintptr) + case *uintptr: + c = anyFieldC[*uintptr](Uintptrp) + case []uintptr: + c = anyFieldC[[]uintptr](Uintptrs) + case time.Time: + c = anyFieldC[time.Time](Time) + case *time.Time: + c = anyFieldC[*time.Time](Timep) + case []time.Time: + c = anyFieldC[[]time.Time](Times) + case time.Duration: + c = anyFieldC[time.Duration](Duration) + case *time.Duration: + c = anyFieldC[*time.Duration](Durationp) + case []time.Duration: + c = anyFieldC[[]time.Duration](Durations) + case error: + c = anyFieldC[error](NamedError) + case []error: + c = anyFieldC[[]error](Errors) + case fmt.Stringer: + c = anyFieldC[fmt.Stringer](Stringer) + default: + c = anyFieldC[any](Reflect) + } + + return c.Any(key, value) +} diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go new file mode 100644 index 00000000000..1312875072f --- /dev/null +++ b/vendor/go.uber.org/zap/flag.go @@ -0,0 +1,39 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "flag" + + "go.uber.org/zap/zapcore" +) + +// LevelFlag uses the standard library's flag.Var to declare a global flag +// with the specified name, default, and usage guidance. The returned value is +// a pointer to the value of the flag. +// +// If you don't want to use the flag package's global state, you can use any +// non-nil *Level as a flag.Value with your own *flag.FlagSet. 
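+// +// For example, a minimal sketch (the flag name and the Config wiring are +// illustrative, not prescriptive): +// +// lvl := zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level") +// flag.Parse() +// cfg := zap.NewProductionConfig() +// cfg.Level = zap.NewAtomicLevelAt(*lvl) +// logger, _ := cfg.Build()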
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level { + lvl := defaultLevel + flag.Var(&lvl, name, usage) + return &lvl +} diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml new file mode 100644 index 00000000000..8e1d05e9abd --- /dev/null +++ b/vendor/go.uber.org/zap/glide.yaml @@ -0,0 +1,34 @@ +package: go.uber.org/zap +license: MIT +import: +- package: go.uber.org/atomic + version: ^1 +- package: go.uber.org/multierr + version: ^1 +testImport: +- package: github.com/satori/go.uuid +- package: github.com/sirupsen/logrus +- package: github.com/apex/log + subpackages: + - handlers/json +- package: github.com/go-kit/kit + subpackages: + - log +- package: github.com/stretchr/testify + subpackages: + - assert + - require +- package: gopkg.in/inconshreveable/log15.v2 +- package: github.com/mattn/goveralls +- package: github.com/pborman/uuid +- package: github.com/pkg/errors +- package: github.com/rs/zerolog +- package: golang.org/x/tools + subpackages: + - cover +- package: golang.org/x/lint + subpackages: + - golint +- package: github.com/axw/gocov + subpackages: + - gocov diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go new file mode 100644 index 00000000000..3cb46c9e0ac --- /dev/null +++ b/vendor/go.uber.org/zap/global.go @@ -0,0 +1,169 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "bytes" + "fmt" + "log" + "os" + "sync" + + "go.uber.org/zap/zapcore" +) + +const ( + _stdLogDefaultDepth = 1 + _loggerWriterDepth = 2 + _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " + + "https://github.com/uber-go/zap/issues/new and reference this error: %v" +) + +var ( + _globalMu sync.RWMutex + _globalL = NewNop() + _globalS = _globalL.Sugar() +) + +// L returns the global Logger, which can be reconfigured with ReplaceGlobals. +// It's safe for concurrent use. +func L() *Logger { + _globalMu.RLock() + l := _globalL + _globalMu.RUnlock() + return l +} + +// S returns the global SugaredLogger, which can be reconfigured with +// ReplaceGlobals. It's safe for concurrent use. +func S() *SugaredLogger { + _globalMu.RLock() + s := _globalS + _globalMu.RUnlock() + return s +} + +// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a +// function to restore the original values. It's safe for concurrent use. 
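+// +// A typical sketch, e.g. in a test or in main (the message is illustrative): +// +// undo := zap.ReplaceGlobals(zap.NewExample()) +// defer undo() +// zap.L().Info("replaced global logger")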
+func ReplaceGlobals(logger *Logger) func() { + _globalMu.Lock() + prev := _globalL + _globalL = logger + _globalS = logger.Sugar() + _globalMu.Unlock() + return func() { ReplaceGlobals(prev) } +} + +// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at +// InfoLevel. To redirect the standard library's package-global logging +// functions, use RedirectStdLog instead. +func NewStdLog(l *Logger) *log.Logger { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + f := logger.Info + return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */) +} + +// NewStdLogAt returns a *log.Logger which writes to the supplied zap logger +// at the required level. +func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) { + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil +} + +// RedirectStdLog redirects output from the standard library's package-global +// logger to the supplied logger at InfoLevel. Since zap already handles caller +// annotations, timestamps, etc., it automatically disables the standard +// library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr. +func RedirectStdLog(l *Logger) func() { + f, err := redirectStdLogAt(l, InfoLevel) + if err != nil { + // Can't get here, since passing InfoLevel to redirectStdLogAt always + // works. + panic(fmt.Sprintf(_programmerErrorTemplate, err)) + } + return f +} + +// RedirectStdLogAt redirects output from the standard library's package-global +// logger to the supplied logger at the specified level. Since zap already +// handles caller annotations, timestamps, etc., it automatically disables the +// standard library's annotations and prefixing. +// +// It returns a function to restore the original prefix and flags and reset the +// standard library's output to os.Stderr.
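+// +// A minimal usage sketch, assuming a *Logger named logger is already in +// scope (the WarnLevel choice is illustrative): +// +// undo, err := zap.RedirectStdLogAt(logger, zapcore.WarnLevel) +// if err != nil { +// panic(err) +// } +// defer undo() +// log.Print("forwarded to the zap logger at WarnLevel")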
+func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + return redirectStdLogAt(l, level) +} + +func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) { + flags := log.Flags() + prefix := log.Prefix() + log.SetFlags(0) + log.SetPrefix("") + logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth)) + logFunc, err := levelToFunc(logger, level) + if err != nil { + return nil, err + } + log.SetOutput(&loggerWriter{logFunc}) + return func() { + log.SetFlags(flags) + log.SetPrefix(prefix) + log.SetOutput(os.Stderr) + }, nil +} + +func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) { + switch lvl { + case DebugLevel: + return logger.Debug, nil + case InfoLevel: + return logger.Info, nil + case WarnLevel: + return logger.Warn, nil + case ErrorLevel: + return logger.Error, nil + case DPanicLevel: + return logger.DPanic, nil + case PanicLevel: + return logger.Panic, nil + case FatalLevel: + return logger.Fatal, nil + } + return nil, fmt.Errorf("unrecognized level: %q", lvl) +} + +type loggerWriter struct { + logFunc func(msg string, fields ...Field) +} + +func (l *loggerWriter) Write(p []byte) (int, error) { + p = bytes.TrimSpace(p) + l.logFunc(string(p)) + return len(p), nil +} diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go new file mode 100644 index 00000000000..2be8f651500 --- /dev/null +++ b/vendor/go.uber.org/zap/http_handler.go @@ -0,0 +1,140 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + + "go.uber.org/zap/zapcore" +) + +// ServeHTTP is a simple JSON endpoint that can report on or change the current +// logging level. +// +// # GET +// +// The GET request returns a JSON description of the current logging level like: +// +// {"level":"info"} +// +// # PUT +// +// The PUT request changes the logging level. It is perfectly safe to change the +// logging level while a program is running. Two content types are supported: +// +// Content-Type: application/x-www-form-urlencoded +// +// With this content type, the level can be provided through the request body or +// a query parameter. The log level is URL encoded like: +// +// level=debug +// +// The request body takes precedence over the query parameter, if both are +// specified. +// +// This content type is the default for a curl PUT request. 
Following are two +// example curl requests that both set the logging level to debug. +// +// curl -X PUT localhost:8080/log/level?level=debug +// curl -X PUT localhost:8080/log/level -d level=debug +// +// For any other content type, the payload is expected to be JSON encoded and +// look like: +// +// {"level":"info"} +// +// An example curl request could look like this: +// +// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}' +func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if err := lvl.serveHTTP(w, r); err != nil { + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, "internal error: %v", err) + } +} + +func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error { + type errorResponse struct { + Error string `json:"error"` + } + type payload struct { + Level zapcore.Level `json:"level"` + } + + enc := json.NewEncoder(w) + + switch r.Method { + case http.MethodGet: + return enc.Encode(payload{Level: lvl.Level()}) + + case http.MethodPut: + requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return enc.Encode(errorResponse{Error: err.Error()}) + } + lvl.SetLevel(requestedLvl) + return enc.Encode(payload{Level: lvl.Level()}) + + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return enc.Encode(errorResponse{ + Error: "Only GET and PUT are supported.", + }) + } +} + +// Decodes incoming PUT requests and returns the requested logging level. +func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) { + if contentType == "application/x-www-form-urlencoded" { + return decodePutURL(r) + } + return decodePutJSON(r.Body) +} + +func decodePutURL(r *http.Request) (zapcore.Level, error) { + lvl := r.FormValue("level") + if lvl == "" { + return 0, errors.New("must specify logging level") + } + var l zapcore.Level + if err := l.UnmarshalText([]byte(lvl)); err != nil { + return 0, err + } + return l, nil +} + +func decodePutJSON(body io.Reader) (zapcore.Level, error) { + var pld struct { + Level *zapcore.Level `json:"level"` + } + if err := json.NewDecoder(body).Decode(&pld); err != nil { + return 0, fmt.Errorf("malformed request body: %v", err) + } + if pld.Level == nil { + return 0, errors.New("must specify logging level") + } + return *pld.Level, nil +} diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go new file mode 100644 index 00000000000..dad583aaa5f --- /dev/null +++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go @@ -0,0 +1,31 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package bufferpool houses zap's shared internal buffer pool. Third-party +// packages can recreate the same functionality with buffer.NewPool. +package bufferpool + +import "go.uber.org/zap/buffer" + +var ( + _pool = buffer.NewPool() + // Get retrieves a buffer from the pool, creating one if necessary. + Get = _pool.Get +) diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go new file mode 100644 index 00000000000..c4d5d02abcc --- /dev/null +++ b/vendor/go.uber.org/zap/internal/color/color.go @@ -0,0 +1,44 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package color adds coloring functionality for TTY output. +package color + +import "fmt" + +// Foreground colors. +const ( + Black Color = iota + 30 + Red + Green + Yellow + Blue + Magenta + Cyan + White +) + +// Color represents a text color. +type Color uint8 + +// Add adds the coloring to the given string. +func (c Color) Add(s string) string { + return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s) +} diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go new file mode 100644 index 00000000000..f673f9947b8 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/exit/exit.go @@ -0,0 +1,66 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package exit provides stubs so that unit tests can exercise code that calls +// os.Exit(1). +package exit + +import "os" + +var _exit = os.Exit + +// With terminates the process by calling os.Exit(code). If the package is +// stubbed, it instead records a call in the testing spy. +func With(code int) { + _exit(code) +} + +// A StubbedExit is a testing fake for os.Exit. +type StubbedExit struct { + Exited bool + Code int + prev func(code int) +} + +// Stub substitutes a fake for the call to os.Exit(1). +func Stub() *StubbedExit { + s := &StubbedExit{prev: _exit} + _exit = s.exit + return s +} + +// WithStub runs the supplied function with Exit stubbed. It returns the stub +// used, so that users can test whether the process would have crashed. +func WithStub(f func()) *StubbedExit { + s := Stub() + defer s.Unstub() + f() + return s +} + +// Unstub restores the previous exit function. +func (se *StubbedExit) Unstub() { + _exit = se.prev +} + +func (se *StubbedExit) exit(code int) { + se.Exited = true + se.Code = code +} diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go new file mode 100644 index 00000000000..40bfed81e6e --- /dev/null +++ b/vendor/go.uber.org/zap/internal/level_enabler.go @@ -0,0 +1,37 @@ +// Copyright (c) 2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package internal and its subpackages hold types and functionality +// that are not part of Zap's public API. +package internal + +import "go.uber.org/zap/zapcore" + +// LeveledEnabler is an interface satisfied by LevelEnablers that are able to +// report their own level. +// +// This interface is defined to use more conveniently in tests and non-zapcore +// packages. +// This cannot be imported from zapcore because of the cyclic dependency. +type LeveledEnabler interface { + zapcore.LevelEnabler + + Level() zapcore.Level +} diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go new file mode 100644 index 00000000000..60e9d2c432d --- /dev/null +++ b/vendor/go.uber.org/zap/internal/pool/pool.go @@ -0,0 +1,58 @@ +// Copyright (c) 2023 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package pool provides internal pool utilities. +package pool + +import ( + "sync" +) + +// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed +// object pooling. +// +// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will +// not be detected, so all internal pool use must take care to only store +// pointer types. +type Pool[T any] struct { + pool sync.Pool +} + +// New returns a new [Pool] for T, and will use fn to construct new Ts when +// the pool is empty. +func New[T any](fn func() T) *Pool[T] { + return &Pool[T]{ + pool: sync.Pool{ + New: func() any { + return fn() + }, + }, + } +} + +// Get gets a T from the pool, or creates a new one if the pool is empty. +func (p *Pool[T]) Get() T { + return p.pool.Get().(T) +} + +// Put returns x into the pool. +func (p *Pool[T]) Put(x T) { + p.pool.Put(x) +} diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go new file mode 100644 index 00000000000..82af7551f93 --- /dev/null +++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go @@ -0,0 +1,181 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package stacktrace provides support for gathering stack traces +// efficiently. 
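+// +// A one-shot capture looks roughly like this (a sketch; the right skip count +// depends on the call site): +// +// trace := stacktrace.Take(0) // formatted stack of the caller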
+package stacktrace + +import ( + "runtime" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +var _stackPool = pool.New(func() *Stack { + return &Stack{ + storage: make([]uintptr, 64), + } +}) + +// Stack is a captured stack trace. +type Stack struct { + pcs []uintptr // program counters; always a subslice of storage + frames *runtime.Frames + + // The size of pcs varies depending on requirements: + // it will be one if only the first frame was requested, + // and otherwise it will reflect the depth of the call stack. + // + // storage decouples the slice we need (pcs) from the slice we pool. + // We will always allocate a reasonably large storage, but we'll use + // only as much of it as we need. + storage []uintptr +} + +// Depth specifies how deep a stack trace should be captured. +type Depth int + +const ( + // First captures only the first frame. + First Depth = iota + + // Full captures the entire call stack, allocating more + // storage for it if needed. + Full +) + +// Capture captures a stack trace of the specified depth, skipping +// the provided number of frames. skip=0 identifies the caller of +// Capture. +// +// The caller must call Free on the returned stacktrace after using it. +func Capture(skip int, depth Depth) *Stack { + stack := _stackPool.Get() + + switch depth { + case First: + stack.pcs = stack.storage[:1] + case Full: + stack.pcs = stack.storage + } + + // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers + // itself. +2 to skip Capture and runtime.Callers. + numFrames := runtime.Callers( + skip+2, + stack.pcs, + ) + + // runtime.Callers truncates the recorded stacktrace if there is no + // room in the provided slice. For the full stack trace, keep expanding + // storage until there are fewer frames than there is room. + if depth == Full { + pcs := stack.pcs + for numFrames == len(pcs) { + pcs = make([]uintptr, len(pcs)*2) + numFrames = runtime.Callers(skip+2, pcs) + } + + // Discard old storage instead of returning it to the pool. + // This will adjust the pool size over time if stack traces are + // consistently very deep. + stack.storage = pcs + stack.pcs = pcs[:numFrames] + } else { + stack.pcs = stack.pcs[:numFrames] + } + + stack.frames = runtime.CallersFrames(stack.pcs) + return stack +} + +// Free releases resources associated with this stacktrace +// and returns it to the pool. +func (st *Stack) Free() { + st.frames = nil + st.pcs = nil + _stackPool.Put(st) +} + +// Count reports the total number of frames in this stacktrace. +// Count DOES NOT change as Next is called. +func (st *Stack) Count() int { + return len(st.pcs) +} + +// Next returns the next frame in the stack trace, +// and a boolean indicating whether there are more after it. +func (st *Stack) Next() (_ runtime.Frame, more bool) { + return st.frames.Next() +} + +// Take returns a string representation of the current stacktrace. +// +// skip is the number of frames to skip before recording the stack trace. +// skip=0 identifies the caller of Take. +func Take(skip int) string { + stack := Capture(skip+1, Full) + defer stack.Free() + + buffer := bufferpool.Get() + defer buffer.Free() + + stackfmt := NewFormatter(buffer) + stackfmt.FormatStack(stack) + return buffer.String() +} + +// Formatter formats a stack trace into a readable string representation.
+type Formatter struct { + b *buffer.Buffer + nonEmpty bool // whether we've written at least one frame already +} + +// NewFormatter builds a new Formatter. +func NewFormatter(b *buffer.Buffer) Formatter { + return Formatter{b: b} +} + +// FormatStack formats all remaining frames in the provided stacktrace -- minus +// the final runtime.main/runtime.goexit frame. +func (sf *Formatter) FormatStack(stack *Stack) { + // Note: On the last iteration, frames.Next() returns false, with a valid + // frame, but we ignore this frame. The last frame is a runtime frame which + // adds noise, since it's only either runtime.main or runtime.goexit. + for frame, more := stack.Next(); more; frame, more = stack.Next() { + sf.FormatFrame(frame) + } +} + +// FormatFrame formats the given frame. +func (sf *Formatter) FormatFrame(frame runtime.Frame) { + if sf.nonEmpty { + sf.b.AppendByte('\n') + } + sf.nonEmpty = true + sf.b.AppendString(frame.Function) + sf.b.AppendByte('\n') + sf.b.AppendByte('\t') + sf.b.AppendString(frame.File) + sf.b.AppendByte(':') + sf.b.AppendInt(int64(frame.Line)) +} diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go new file mode 100644 index 00000000000..155b208bd3c --- /dev/null +++ b/vendor/go.uber.org/zap/level.go @@ -0,0 +1,153 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "sync/atomic" + + "go.uber.org/zap/internal" + "go.uber.org/zap/zapcore" +) + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DebugLevel = zapcore.DebugLevel + // InfoLevel is the default logging priority. + InfoLevel = zapcore.InfoLevel + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WarnLevel = zapcore.WarnLevel + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ErrorLevel = zapcore.ErrorLevel + // DPanicLevel logs are particularly important errors. In development the + // logger panics after writing the message. + DPanicLevel = zapcore.DPanicLevel + // PanicLevel logs a message, then panics. + PanicLevel = zapcore.PanicLevel + // FatalLevel logs a message, then calls os.Exit(1). + FatalLevel = zapcore.FatalLevel +) + +// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with +// an anonymous function.
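+// +// For instance, a sketch that enables only error-and-above logs (the +// variable name is illustrative): +// +// errPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool { +// return lvl >= zapcore.ErrorLevel +// })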
+//
+// It's particularly useful when splitting log output between different
+// outputs (e.g., standard error and standard out). For sample code, see the
+// package-level AdvancedConfiguration example.
+type LevelEnablerFunc func(zapcore.Level) bool
+
+// Enabled calls the wrapped function.
+func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
+
+// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
+// you safely change the log level of a tree of loggers (the root logger and
+// any children created by adding context) at runtime.
+//
+// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
+// alter its level.
+//
+// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
+// their internal atomic pointer.
+type AtomicLevel struct {
+	l *atomic.Int32
+}
+
+var _ internal.LeveledEnabler = AtomicLevel{}
+
+// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
+// enabled.
+func NewAtomicLevel() AtomicLevel {
+	lvl := AtomicLevel{l: new(atomic.Int32)}
+	lvl.l.Store(int32(InfoLevel))
+	return lvl
+}
+
+// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
+// and then calls SetLevel with the given level.
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
+	a := NewAtomicLevel()
+	a.SetLevel(l)
+	return a
+}
+
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+	a := NewAtomicLevel()
+	l, err := zapcore.ParseLevel(text)
+	if err != nil {
+		return a, err
+	}
+
+	a.SetLevel(l)
+	return a, nil
+}
+
+// Enabled implements the zapcore.LevelEnabler interface, which allows the
+// AtomicLevel to be used in place of traditional static levels.
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
+	return lvl.Level().Enabled(l)
+}
+
+// Level returns the minimum enabled log level.
+func (lvl AtomicLevel) Level() zapcore.Level {
+	return zapcore.Level(int8(lvl.l.Load()))
+}
+
+// SetLevel alters the logging level.
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
+	lvl.l.Store(int32(l))
+}
+
+// String returns the string representation of the underlying Level.
+func (lvl AtomicLevel) String() string {
+	return lvl.Level().String()
+}
+
+// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
+// representations as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
+	if lvl.l == nil {
+		lvl.l = &atomic.Int32{}
+	}
+
+	var l zapcore.Level
+	if err := l.UnmarshalText(text); err != nil {
+		return err
+	}
+
+	lvl.SetLevel(l)
+	return nil
+}
+
+// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
+// text representation as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
+	return lvl.Level().MarshalText()
+}
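To make the two pieces above concrete, here is a hedged sketch of LevelEnablerFunc and AtomicLevel composing; it is not from this diff, and the variable names and encoder choice are illustrative:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	lvl := zap.NewAtomicLevelAt(zap.InfoLevel)

	// Gate a core on both a static floor (Error) and the dynamic level.
	highPriority := zap.LevelEnablerFunc(func(l zapcore.Level) bool {
		return l >= zapcore.ErrorLevel && lvl.Enabled(l)
	})

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stderr),
		highPriority,
	)
	logger := zap.New(core)
	defer logger.Sync()

	lvl.SetLevel(zap.DebugLevel) // safe to flip at runtime; no rebuild needed
	logger.Error("boom")         // written: Error passes both gates
	logger.Info("quiet")         // dropped: below the static Error floor
}

diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
new file mode 100644
index 00000000000..c4d30032394
--- /dev/null
+++ b/vendor/go.uber.org/zap/logger.go
@@ -0,0 +1,435 @@
+// Copyright (c) 2016 Uber Technologies, Inc.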
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + "os" + "strings" + + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/stacktrace" + "go.uber.org/zap/zapcore" +) + +// A Logger provides fast, leveled, structured logging. All methods are safe +// for concurrent use. +// +// The Logger is designed for contexts in which every microsecond and every +// allocation matters, so its API intentionally favors performance and type +// safety over brevity. For most applications, the SugaredLogger strikes a +// better balance between performance and ergonomics. +type Logger struct { + core zapcore.Core + + development bool + addCaller bool + onPanic zapcore.CheckWriteHook // default is WriteThenPanic + onFatal zapcore.CheckWriteHook // default is WriteThenFatal + + name string + errorOutput zapcore.WriteSyncer + + addStack zapcore.LevelEnabler + + callerSkip int + + clock zapcore.Clock +} + +// New constructs a new Logger from the provided zapcore.Core and Options. If +// the passed zapcore.Core is nil, it falls back to using a no-op +// implementation. +// +// This is the most flexible way to construct a Logger, but also the most +// verbose. For typical use cases, the highly-opinionated presets +// (NewProduction, NewDevelopment, and NewExample) or the Config struct are +// more convenient. +// +// For sample code, see the package-level AdvancedConfiguration example. +func New(core zapcore.Core, options ...Option) *Logger { + if core == nil { + return NewNop() + } + log := &Logger{ + core: core, + errorOutput: zapcore.Lock(os.Stderr), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } + return log.WithOptions(options...) +} + +// NewNop returns a no-op Logger. It never writes out logs or internal errors, +// and it never runs user-defined hooks. +// +// Using WithOptions to replace the Core or error output of a no-op Logger can +// re-enable logging. +func NewNop() *Logger { + return &Logger{ + core: zapcore.NewNopCore(), + errorOutput: zapcore.AddSync(io.Discard), + addStack: zapcore.FatalLevel + 1, + clock: zapcore.DefaultClock, + } +} + +// NewProduction builds a sensible production Logger that writes InfoLevel and +// above logs to standard error as JSON. +// +// It's a shortcut for NewProductionConfig().Build(...Option). +func NewProduction(options ...Option) (*Logger, error) { + return NewProductionConfig().Build(options...) 
+} + +// NewDevelopment builds a development Logger that writes DebugLevel and above +// logs to standard error in a human-friendly format. +// +// It's a shortcut for NewDevelopmentConfig().Build(...Option). +func NewDevelopment(options ...Option) (*Logger, error) { + return NewDevelopmentConfig().Build(options...) +} + +// Must is a helper that wraps a call to a function returning (*Logger, error) +// and panics if the error is non-nil. It is intended for use in variable +// initialization such as: +// +// var logger = zap.Must(zap.NewProduction()) +func Must(logger *Logger, err error) *Logger { + if err != nil { + panic(err) + } + + return logger +} + +// NewExample builds a Logger that's designed for use in zap's testable +// examples. It writes DebugLevel and above logs to standard out as JSON, but +// omits the timestamp and calling function to keep example output +// short and deterministic. +func NewExample(options ...Option) *Logger { + encoderCfg := zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "logger", + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: zapcore.ISO8601TimeEncoder, + EncodeDuration: zapcore.StringDurationEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel) + return New(core).WithOptions(options...) +} + +// Sugar wraps the Logger to provide a more ergonomic, but slightly slower, +// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a +// single application to use both Loggers and SugaredLoggers, converting +// between them on the boundaries of performance-sensitive code. +func (log *Logger) Sugar() *SugaredLogger { + core := log.clone() + core.callerSkip += 2 + return &SugaredLogger{core} +} + +// Named adds a new path segment to the logger's name. Segments are joined by +// periods. By default, Loggers are unnamed. +func (log *Logger) Named(s string) *Logger { + if s == "" { + return log + } + l := log.clone() + if log.name == "" { + l.name = s + } else { + l.name = strings.Join([]string{l.name, s}, ".") + } + return l +} + +// WithOptions clones the current Logger, applies the supplied Options, and +// returns the resulting Logger. It's safe to use concurrently. +func (log *Logger) WithOptions(opts ...Option) *Logger { + c := log.clone() + for _, opt := range opts { + opt.apply(c) + } + return c +} + +// With creates a child logger and adds structured context to it. Fields added +// to the child don't affect the parent, and vice versa. Any fields that +// require evaluation (such as Objects) are evaluated upon invocation of With. +func (log *Logger) With(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + l := log.clone() + l.core = l.core.With(fields) + return l +} + +// WithLazy creates a child logger and adds structured context to it lazily. +// +// The fields are evaluated only if the logger is further chained with [With] +// or is written to with any of the log level methods. +// Until that occurs, the logger may retain references to objects inside the fields, +// and logging will reflect the state of an object at the time of logging, +// not the time of WithLazy(). +// +// WithLazy provides a worthwhile performance optimization for contextual loggers +// when the likelihood of using the child logger is low, +// such as error paths and rarely taken branches. +// +// Similar to [With], fields added to the child don't affect the parent, and vice versa. 
+func (log *Logger) WithLazy(fields ...Field) *Logger { + if len(fields) == 0 { + return log + } + return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewLazyWith(core, fields) + })) +} + +// Level reports the minimum enabled level for this logger. +// +// For NopLoggers, this is [zapcore.InvalidLevel]. +func (log *Logger) Level() zapcore.Level { + return zapcore.LevelOf(log.core) +} + +// Check returns a CheckedEntry if logging a message at the specified level +// is enabled. It's a completely optional optimization; in high-performance +// applications, Check can help avoid allocating a slice to hold fields. +func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry { + return log.check(lvl, msg) +} + +// Log logs a message at the specified level. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// Any Fields that require evaluation (such as Objects) are evaluated upon +// invocation of Log. +func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) { + if ce := log.check(lvl, msg); ce != nil { + ce.Write(fields...) + } +} + +// Debug logs a message at DebugLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Debug(msg string, fields ...Field) { + if ce := log.check(DebugLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Info logs a message at InfoLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Info(msg string, fields ...Field) { + if ce := log.check(InfoLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Warn logs a message at WarnLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Warn(msg string, fields ...Field) { + if ce := log.check(WarnLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Error logs a message at ErrorLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +func (log *Logger) Error(msg string, fields ...Field) { + if ce := log.check(ErrorLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// DPanic logs a message at DPanicLevel. The message includes any fields +// passed at the log site, as well as any fields accumulated on the logger. +// +// If the logger is in development mode, it then panics (DPanic means +// "development panic"). This is useful for catching errors that are +// recoverable, but shouldn't ever happen. +func (log *Logger) DPanic(msg string, fields ...Field) { + if ce := log.check(DPanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Panic logs a message at PanicLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then panics, even if logging at PanicLevel is disabled. +func (log *Logger) Panic(msg string, fields ...Field) { + if ce := log.check(PanicLevel, msg); ce != nil { + ce.Write(fields...) + } +} + +// Fatal logs a message at FatalLevel. The message includes any fields passed +// at the log site, as well as any fields accumulated on the logger. +// +// The logger then calls os.Exit(1), even if logging at FatalLevel is +// disabled. +func (log *Logger) Fatal(msg string, fields ...Field) { + if ce := log.check(FatalLevel, msg); ce != nil { + ce.Write(fields...) 
+	}
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+	return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+	return log.core
+}
+
+// Name returns the Logger's underlying name,
+// or an empty string if the logger is unnamed.
+func (log *Logger) Name() string {
+	return log.name
+}
+
+func (log *Logger) clone() *Logger {
+	clone := *log
+	return &clone
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+	// Logger.check must always be called directly by a method in the
+	// Logger interface (e.g., Check, Info, Fatal).
+	// This skips Logger.check and the Info/Fatal/Check/etc. method that
+	// called it.
+	const callerSkipOffset = 2
+
+	// Check the level first to reduce the cost of disabled log calls.
+	// Since Panic and higher may exit, we skip the optimization for those levels.
+	if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
+		return nil
+	}
+
+	// Create a basic checked entry through the core; this will be non-nil if
+	// the log message will actually be written somewhere.
+	ent := zapcore.Entry{
+		LoggerName: log.name,
+		Time:       log.clock.Now(),
+		Level:      lvl,
+		Message:    msg,
+	}
+	ce := log.core.Check(ent, nil)
+	willWrite := ce != nil
+
+	// Set up any required terminal behavior.
+	switch ent.Level {
+	case zapcore.PanicLevel:
+		ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
+	case zapcore.FatalLevel:
+		ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
+	case zapcore.DPanicLevel:
+		if log.development {
+			ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
+		}
+	}
+
+	// Only do further annotation if we're going to write this message; checked
+	// entries that exist only for terminal behavior don't benefit from
+	// annotation.
+	if !willWrite {
+		return ce
+	}
+
+	// Thread the error output through to the CheckedEntry.
+	ce.ErrorOutput = log.errorOutput
+
+	addStack := log.addStack.Enabled(ce.Level)
+	if !log.addCaller && !addStack {
+		return ce
+	}
+
+	// Adding the caller or stack trace requires capturing the callers of
+	// this function. We'll share information between these two.
+	stackDepth := stacktrace.First
+	if addStack {
+		stackDepth = stacktrace.Full
+	}
+	stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
+	defer stack.Free()
+
+	if stack.Count() == 0 {
+		if log.addCaller {
+			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+			_ = log.errorOutput.Sync()
+		}
+		return ce
+	}
+
+	frame, more := stack.Next()
+
+	if log.addCaller {
+		ce.Caller = zapcore.EntryCaller{
+			Defined:  frame.PC != 0,
+			PC:       frame.PC,
+			File:     frame.File,
+			Line:     frame.Line,
+			Function: frame.Function,
+		}
+	}
+
+	if addStack {
+		buffer := bufferpool.Get()
+		defer buffer.Free()
+
+		stackfmt := stacktrace.NewFormatter(buffer)
+
+		// We've already extracted the first frame, so format that
+		// separately and defer to stackfmt for the rest.
+		stackfmt.FormatFrame(frame)
+		if more {
+			stackfmt.FormatStack(stack)
+		}
+		ce.Stack = buffer.String()
+	}
+
+	return ce
+}
+
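Where the Check fast path above pays off, call sites look roughly like this hedged sketch (the logger, key, and timing values are illustrative; with a production config, Debug is disabled and the fields are never constructed):

package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.Must(zap.NewProduction())
	defer logger.Sync()

	// Fields are only constructed when the entry will actually be written.
	if ce := logger.Check(zap.DebugLevel, "cache miss"); ce != nil {
		ce.Write(
			zap.String("key", "user:42"),
			zap.Duration("lookup", 3*time.Millisecond),
		)
	}
}

+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+	// A nil or WriteThenNoop hook will lead to continued execution after
+	// a Panic or Fatal log entry, which is unexpected. For example,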
+	//
+	//	f, err := os.Open(..)
+	//	if err != nil {
+	//		log.Fatal("cannot open", zap.Error(err))
+	//	}
+	//	fmt.Println(f.Name())
+	//
+	// The f.Name() will panic if we continue execution after the log.Fatal.
+	if override == nil || override == zapcore.WriteThenNoop {
+		return defaultHook
+	}
+	return override
+}
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 00000000000..43d357ac902
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,182 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"fmt"
+
+	"go.uber.org/zap/zapcore"
+)
+
+// An Option configures a Logger.
+type Option interface {
+	apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+	f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = f(log.core)
+	})
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = zapcore.RegisterHooks(log.core, hooks...)
+	})
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+	return optionFunc(func(log *Logger) {
+		log.core = log.core.With(fs)
+	})
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+	return optionFunc(func(log *Logger) {
+		log.errorOutput = w
+	})
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+	return optionFunc(func(log *Logger) {
+		log.development = true
+	})
+}
+
+// AddCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller. See also WithCaller.
+func AddCaller() Option {
+	return WithCaller(true)
+}
+
+// WithCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller, or not, depending on the
+// value of enabled. This is a generalized form of AddCaller.
+func WithCaller(enabled bool) Option {
+	return optionFunc(func(log *Logger) {
+		log.addCaller = enabled
+	})
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+	return optionFunc(func(log *Logger) {
+		log.callerSkip += skip
+	})
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		log.addStack = lvl
+	})
+}
+
+// IncreaseLevel increases the level of the logger. It has no effect if
+// the passed-in level tries to decrease the level of the logger.
+func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
+	return optionFunc(func(log *Logger) {
+		core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
+		if err != nil {
+			fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
+		} else {
+			log.core = core
+		}
+	})
+}
+
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+//	zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
+	return optionFunc(func(log *Logger) {
+		log.onPanic = hook
+	})
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+	return WithFatalHook(action)
+}
+
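As a concrete illustration of how these options compose, a hedged sketch of the wrapper pattern AddCallerSkip exists for; the helper name logAt is hypothetical, not part of this diff:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// helper is shared by a thin wrapper; AddCallerSkip(1) makes zap report
// logAt's caller, rather than logAt itself, as the call site.
var helper = zap.Must(zap.NewProduction(
	zap.AddStacktrace(zap.ErrorLevel), // stack traces for Error and above
)).WithOptions(zap.AddCallerSkip(1))

// logAt is a hypothetical wrapper around the shared logger.
func logAt(lvl zapcore.Level, msg string, fields ...zap.Field) {
	helper.Log(lvl, msg, fields...)
}

func main() {
	logAt(zap.InfoLevel, "service started", zap.Int("port", 8080))
}

+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+//	zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.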
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+	return optionFunc(func(log *Logger) {
+		log.onFatal = hook
+	})
+}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+	return optionFunc(func(log *Logger) {
+		log.clock = clock
+	})
+}
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 00000000000..499772a00dc
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,180 @@
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var _sinkRegistry = newSinkRegistry()
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+	zapcore.WriteSyncer
+	io.Closer
+}
+
+type errSinkNotFound struct {
+	scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+	return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+	mu        sync.Mutex
+	factories map[string]func(*url.URL) (Sink, error)          // keyed by scheme
+	openFile  func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+	sr := &sinkRegistry{
+		factories: make(map[string]func(*url.URL) (Sink, error)),
+		openFile:  os.OpenFile,
+	}
+	// Infallible operation: the registry is empty, so we can't have a conflict.
+	_ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+	return sr
+}
+
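To make the registry concrete, here is a hedged sketch of registering a custom sink for use with zap.Open; the "mem" scheme and memSink type are illustrative, not part of this diff:

package main

import (
	"bytes"
	"net/url"

	"go.uber.org/zap"
)

// memSink buffers log output in memory; handy in tests.
type memSink struct{ bytes.Buffer }

func (*memSink) Close() error { return nil }
func (*memSink) Sync() error  { return nil }

func main() {
	// After registration, "mem://" URLs resolve to memSink instances.
	if err := zap.RegisterSink("mem", func(*url.URL) (zap.Sink, error) {
		return &memSink{}, nil
	}); err != nil {
		panic(err)
	}

	ws, closeSinks, err := zap.Open("mem://")
	if err != nil {
		panic(err)
	}
	defer closeSinks()
	_, _ = ws.Write([]byte("hello\n"))
}

+// RegisterSink registers the given factory for the specific scheme.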
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
+
+	if scheme == "" {
+		return errors.New("can't register a sink factory for empty string")
+	}
+	normalized, err := normalizeScheme(scheme)
+	if err != nil {
+		return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+	}
+	if _, ok := sr.factories[normalized]; ok {
+		return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+	}
+	sr.factories[normalized] = factory
+	return nil
+}
+
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+	// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+	// the drive, and path is unset unless `c:/log.txt` is used.
+	// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+	// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+	if filepath.IsAbs(rawURL) {
+		return sr.newFileSinkFromPath(rawURL)
+	}
+
+	u, err := url.Parse(rawURL)
+	if err != nil {
+		return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+	}
+	if u.Scheme == "" {
+		u.Scheme = schemeFile
+	}
+
+	sr.mu.Lock()
+	factory, ok := sr.factories[u.Scheme]
+	sr.mu.Unlock()
+	if !ok {
+		return nil, &errSinkNotFound{u.Scheme}
+	}
+	return factory(u)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	return _sinkRegistry.RegisterSink(scheme, factory)
+}
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
+	if u.User != nil {
+		return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+	}
+	if u.Fragment != "" {
+		return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+	}
+	if u.RawQuery != "" {
+		return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+	}
+	// Error messages are better if we check hostname and port separately.
+	if u.Port() != "" {
+		return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+	}
+	if hn := u.Hostname(); hn != "" && hn != "localhost" {
+		return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+	}
+
+	return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+	switch path {
+	case "stdout":
+		return nopCloserSink{os.Stdout}, nil
+	case "stderr":
+		return nopCloserSink{os.Stderr}, nil
+	}
+	return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
+}
+
+func normalizeScheme(s string) (string, error) {
+	// https://tools.ietf.org/html/rfc3986#section-3.1
+	s = strings.ToLower(s)
+	if first := s[0]; 'a' > first || 'z' < first {
+		return "", errors.New("must start with a letter")
+	}
+	for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+		c := s[i]
+		switch {
+		case 'a' <= c && c <= 'z':
+			continue
+		case '0' <= c && c <= '9':
+			continue
+		case c == '.' || c == '+' || c == '-':
+			continue
+		}
+		return "", fmt.Errorf("may not contain %q", c)
+	}
+	return s, nil
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 00000000000..8904cd0871e
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,476 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+	"fmt"
+
+	"go.uber.org/zap/zapcore"
+
+	"go.uber.org/multierr"
+)
+
+const (
+	_oddNumberErrMsg    = "Ignored key without a value."
+	_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+	_multipleErrMsg     = "Multiple errors without a key."
+)
+
+// A SugaredLogger wraps the base Logger functionality in a slower, but less
+// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
+// method.
+//
+// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
+// For each log level, it exposes four methods:
+//
+//   - methods named after the log level for log.Print-style logging
+//   - methods ending in "w" for loosely-typed structured logging
+//   - methods ending in "f" for log.Printf-style logging
+//   - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+//	Info(...any)           Print-style logging
+//	Infow(...any)          Structured logging (read as "info with")
+//	Infof(string, ...any)  Printf-style logging
+//	Infoln(...any)         Println-style logging
+type SugaredLogger struct {
+	base *Logger
+}
+
+// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
+// is quite inexpensive, so it's reasonable for a single application to use
+// both Loggers and SugaredLoggers, converting between them on the boundaries
+// of performance-sensitive code.
+func (s *SugaredLogger) Desugar() *Logger {
+	base := s.base.clone()
+	base.callerSkip -= 2
+	return base
+}
+
+// Named adds a sub-scope to the logger's name. See Logger.Named for details.
+func (s *SugaredLogger) Named(name string) *SugaredLogger {
+	return &SugaredLogger{base: s.base.Named(name)}
+}
+
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+	base := s.base.clone()
+	for _, opt := range opts {
+		opt.apply(base)
+	}
+	return &SugaredLogger{base: base}
+}
+
+// With adds a variadic number of fields to the logging context. It accepts a
+// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
+// processing pairs, the first element of the pair is used as the field key
+// and the second as the field value.
+//
+// For example,
+//
+//	sugaredLogger.With(
+//	  "hello", "world",
+//	  "failure", errors.New("oh no"),
+//	  Stack(),
+//	  "count", 42,
+//	  "user", User{Name: "alice"},
+//	)
+//
+// is the equivalent of
+//
+//	unsugared.With(
+//	  String("hello", "world"),
+//	  String("failure", "oh no"),
+//	  Stack(),
+//	  Int("count", 42),
+//	  Object("user", User{Name: "alice"}),
+//	)
+//
+// Note that the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics. In production, the logger is more
+// forgiving: a separate error is logged, but the key-value pair is skipped
+// and execution continues. Passing an orphaned key triggers similar behavior:
+// panics in development and errors in production.
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
+	return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
+}
+
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+	return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+	return zapcore.LevelOf(s.base.core)
+}
+
+// Log logs the provided arguments at the provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+	s.log(lvl, "", args, nil)
+}
+
+// Debug logs the provided arguments at [DebugLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Debug(args ...interface{}) {
+	s.log(DebugLevel, "", args, nil)
+}
+
+// Info logs the provided arguments at [InfoLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Info(args ...interface{}) {
+	s.log(InfoLevel, "", args, nil)
+}
+
+// Warn logs the provided arguments at [WarnLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Warn(args ...interface{}) {
+	s.log(WarnLevel, "", args, nil)
+}
+
+// Error logs the provided arguments at [ErrorLevel].
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Error(args ...interface{}) {
+	s.log(ErrorLevel, "", args, nil)
+}
+
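For orientation, a short hedged sketch of the four sugared call styles at one level; the sugar variable and field values are illustrative, not from this diff:

package main

import "go.uber.org/zap"

func main() {
	sugar := zap.Must(zap.NewDevelopment()).Sugar()
	defer sugar.Sync()

	sugar.Info("connected")                            // Print-style
	sugar.Infof("connected to %s", "db-1")             // Printf-style
	sugar.Infoln("connected in", 42, "ms")             // Println-style: spaces always added
	sugar.Infow("connected", "host", "db-1", "try", 1) // "with": loosely-typed key-value pairs
}

+// DPanic logs the provided arguments at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are added between arguments when neither is a string.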
+func (s *SugaredLogger) DPanic(args ...interface{}) {
+	s.log(DPanicLevel, "", args, nil)
+}
+
+// Panic constructs a message with the provided arguments and panics.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Panic(args ...interface{}) {
+	s.log(PanicLevel, "", args, nil)
+}
+
+// Fatal constructs a message with the provided arguments and calls os.Exit.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Fatal(args ...interface{}) {
+	s.log(FatalLevel, "", args, nil)
+}
+
+// Logf formats the message according to the format specifier
+// and logs it at the provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+	s.log(lvl, template, args, nil)
+}
+
+// Debugf formats the message according to the format specifier
+// and logs it at [DebugLevel].
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
+	s.log(DebugLevel, template, args, nil)
+}
+
+// Infof formats the message according to the format specifier
+// and logs it at [InfoLevel].
+func (s *SugaredLogger) Infof(template string, args ...interface{}) {
+	s.log(InfoLevel, template, args, nil)
+}
+
+// Warnf formats the message according to the format specifier
+// and logs it at [WarnLevel].
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
+	s.log(WarnLevel, template, args, nil)
+}
+
+// Errorf formats the message according to the format specifier
+// and logs it at [ErrorLevel].
+func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
+	s.log(ErrorLevel, template, args, nil)
+}
+
+// DPanicf formats the message according to the format specifier
+// and logs it at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
+	s.log(DPanicLevel, template, args, nil)
+}
+
+// Panicf formats the message according to the format specifier
+// and panics.
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
+	s.log(PanicLevel, template, args, nil)
+}
+
+// Fatalf formats the message according to the format specifier
+// and calls os.Exit.
+func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
+	s.log(FatalLevel, template, args, nil)
+}
+
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+	s.log(lvl, msg, nil, keysAndValues)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+//
+// When debug-level logging is disabled, this is much faster than
+//
+//	s.With(keysAndValues).Debug(msg)
+func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
+	s.log(DebugLevel, msg, nil, keysAndValues)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
+	s.log(InfoLevel, msg, nil, keysAndValues)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
+	s.log(WarnLevel, msg, nil, keysAndValues)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
+	s.log(ErrorLevel, msg, nil, keysAndValues)
+}
+
+// DPanicw logs a message with some additional context. In development, the
+// logger then panics. (See DPanicLevel for details.) The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
+	s.log(DPanicLevel, msg, nil, keysAndValues)
+}
+
+// Panicw logs a message with some additional context, then panics. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
+	s.log(PanicLevel, msg, nil, keysAndValues)
+}
+
+// Fatalw logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
+	s.log(FatalLevel, msg, nil, keysAndValues)
+}
+
+// Logln logs a message at the provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+	s.logln(lvl, args, nil)
+}
+
+// Debugln logs a message at [DebugLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+	s.logln(DebugLevel, args, nil)
+}
+
+// Infoln logs a message at [InfoLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+	s.logln(InfoLevel, args, nil)
+}
+
+// Warnln logs a message at [WarnLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+	s.logln(WarnLevel, args, nil)
+}
+
+// Errorln logs a message at [ErrorLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+	s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln logs a message at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are always added between arguments.
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+	s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln logs a message at [PanicLevel] and panics.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+	s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln logs a message at [FatalLevel] and calls os.Exit.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+	s.logln(FatalLevel, args, nil)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+	return s.base.Sync()
+}
+
+// log message with Sprint, Sprintf, or neither.
+func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+	// If logging at this level is completely disabled, skip the overhead of
+	// string formatting.
+	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+		return
+	}
+
+	msg := getMessage(template, fmtArgs)
+	if ce := s.base.Check(lvl, msg); ce != nil {
+		ce.Write(s.sweetenFields(context)...)
+	}
+}
+
+// logln message with Sprintln
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+		return
+	}
+
+	msg := getMessageln(fmtArgs)
+	if ce := s.base.Check(lvl, msg); ce != nil {
+		ce.Write(s.sweetenFields(context)...)
+ } +} + +// getMessage format with Sprint, Sprintf, or neither. +func getMessage(template string, fmtArgs []interface{}) string { + if len(fmtArgs) == 0 { + return template + } + + if template != "" { + return fmt.Sprintf(template, fmtArgs...) + } + + if len(fmtArgs) == 1 { + if str, ok := fmtArgs[0].(string); ok { + return str + } + } + return fmt.Sprint(fmtArgs...) +} + +// getMessageln format with Sprintln. +func getMessageln(fmtArgs []interface{}) string { + msg := fmt.Sprintln(fmtArgs...) + return msg[:len(msg)-1] +} + +func (s *SugaredLogger) sweetenFields(args []interface{}) []Field { + if len(args) == 0 { + return nil + } + + var ( + // Allocate enough space for the worst case; if users pass only structured + // fields, we shouldn't penalize them with extra allocations. + fields = make([]Field, 0, len(args)) + invalid invalidPairs + seenError bool + ) + + for i := 0; i < len(args); { + // This is a strongly-typed field. Consume it and move on. + if f, ok := args[i].(Field); ok { + fields = append(fields, f) + i++ + continue + } + + // If it is an error, consume it and move on. + if err, ok := args[i].(error); ok { + if !seenError { + seenError = true + fields = append(fields, Error(err)) + } else { + s.base.Error(_multipleErrMsg, Error(err)) + } + i++ + continue + } + + // Make sure this element isn't a dangling key. + if i == len(args)-1 { + s.base.Error(_oddNumberErrMsg, Any("ignored", args[i])) + break + } + + // Consume this value and the next, treating them as a key-value pair. If the + // key isn't a string, add this pair to the slice of invalid pairs. + key, val := args[i], args[i+1] + if keyStr, ok := key.(string); !ok { + // Subsequent errors are likely, so allocate once up front. + if cap(invalid) == 0 { + invalid = make(invalidPairs, 0, len(args)/2) + } + invalid = append(invalid, invalidPair{i, key, val}) + } else { + fields = append(fields, Any(keyStr, val)) + } + i += 2 + } + + // If we encountered any invalid key-value pairs, log an error. + if len(invalid) > 0 { + s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid)) + } + return fields +} + +type invalidPair struct { + position int + key, value interface{} +} + +func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error { + enc.AddInt64("position", int64(p.position)) + Any("key", p.key).AddTo(enc) + Any("value", p.value).AddTo(enc) + return nil +} + +type invalidPairs []invalidPair + +func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error { + var err error + for i := range ps { + err = multierr.Append(err, enc.AppendObject(ps[i])) + } + return err +} diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go new file mode 100644 index 00000000000..c5a1f162259 --- /dev/null +++ b/vendor/go.uber.org/zap/time.go @@ -0,0 +1,27 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import "time" + +func timeToMillis(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond) +} diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go new file mode 100644 index 00000000000..06768c67919 --- /dev/null +++ b/vendor/go.uber.org/zap/writer.go @@ -0,0 +1,98 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zap + +import ( + "fmt" + "io" + + "go.uber.org/zap/zapcore" + + "go.uber.org/multierr" +) + +// Open is a high-level wrapper that takes a variadic number of URLs, opens or +// creates each of the specified resources, and combines them into a locked +// WriteSyncer. It also returns any error encountered and a function to close +// any opened files. +// +// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a +// scheme and URLs with the "file" scheme. Third-party code may register +// factories for other schemes using RegisterSink. +// +// URLs with the "file" scheme must use absolute paths on the local +// filesystem. No user, password, port, fragments, or query parameters are +// allowed, and the hostname must be empty or "localhost". +// +// Since it's common to write logs to the local filesystem, URLs without a +// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without +// a scheme, the special paths "stdout" and "stderr" are interpreted as +// os.Stdout and os.Stderr. When specified without a scheme, relative file +// paths also work. +func Open(paths ...string) (zapcore.WriteSyncer, func(), error) { + writers, closeAll, err := open(paths) + if err != nil { + return nil, nil, err + } + + writer := CombineWriteSyncers(writers...) 
+ return writer, closeAll, nil +} + +func open(paths []string) ([]zapcore.WriteSyncer, func(), error) { + writers := make([]zapcore.WriteSyncer, 0, len(paths)) + closers := make([]io.Closer, 0, len(paths)) + closeAll := func() { + for _, c := range closers { + _ = c.Close() + } + } + + var openErr error + for _, path := range paths { + sink, err := _sinkRegistry.newSink(path) + if err != nil { + openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err)) + continue + } + writers = append(writers, sink) + closers = append(closers, sink) + } + if openErr != nil { + closeAll() + return nil, nil, openErr + } + + return writers, closeAll, nil +} + +// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a +// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op +// WriteSyncer. +// +// It's provided purely as a convenience; the result is no different from +// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually. +func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer { + if len(writers) == 0 { + return zapcore.AddSync(io.Discard) + } + return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...)) +} diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go new file mode 100644 index 00000000000..a40e93b3ec8 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go @@ -0,0 +1,219 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bufio" + "sync" + "time" + + "go.uber.org/multierr" +) + +const ( + // _defaultBufferSize specifies the default size used by Buffer. + _defaultBufferSize = 256 * 1024 // 256 kB + + // _defaultFlushInterval specifies the default flush interval for + // Buffer. + _defaultFlushInterval = 30 * time.Second +) + +// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before +// flushing them to a wrapped WriteSyncer after reaching some limit, or at some +// fixed interval--whichever comes first. +// +// BufferedWriteSyncer is safe for concurrent use. You don't need to use +// zapcore.Lock for WriteSyncers with BufferedWriteSyncer. +// +// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log +// destination (*os.File is a valid WriteSyncer), wrap it with +// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the +// object. 
+//
+//	func main() {
+//		ws := ... // your log destination
+//		bws := &zapcore.BufferedWriteSyncer{WS: ws}
+//		defer bws.Stop()
+//
+//		// ...
+//		core := zapcore.NewCore(enc, bws, lvl)
+//		logger := zap.New(core)
+//
+//		// ...
+//	}
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+//	ws := &BufferedWriteSyncer{
+//		WS:            os.Stderr,
+//		Size:          512 * 1024, // 512 kB
+//		FlushInterval: time.Minute,
+//	}
+//	defer ws.Stop()
+type BufferedWriteSyncer struct {
+	// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+	// writes.
+	//
+	// This field is required.
+	WS WriteSyncer
+
+	// Size specifies the maximum amount of data the writer will buffer
+	// before flushing.
+	//
+	// Defaults to 256 kB if unspecified.
+	Size int
+
+	// FlushInterval specifies how often the writer should flush data if
+	// there have been no writes.
+	//
+	// Defaults to 30 seconds if unspecified.
+	FlushInterval time.Duration
+
+	// Clock, if specified, provides control of the source of time for the
+	// writer.
+	//
+	// Defaults to the system clock.
+	Clock Clock
+
+	// unexported fields for state
+	mu          sync.Mutex
+	initialized bool // whether initialize() has run
+	stopped     bool // whether Stop() has run
+	writer      *bufio.Writer
+	ticker      *time.Ticker
+	stop        chan struct{} // closed when flushLoop should stop
+	done        chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+	size := s.Size
+	if size == 0 {
+		size = _defaultBufferSize
+	}
+
+	flushInterval := s.FlushInterval
+	if flushInterval == 0 {
+		flushInterval = _defaultFlushInterval
+	}
+
+	if s.Clock == nil {
+		s.Clock = DefaultClock
+	}
+
+	s.ticker = s.Clock.NewTicker(flushInterval)
+	s.writer = bufio.NewWriterSize(s.WS, size)
+	s.stop = make(chan struct{})
+	s.done = make(chan struct{})
+	s.initialized = true
+	go s.flushLoop()
+}
+
+// Write writes log data into the buffer directly. Multiple Write calls will be
+// batched, and log data will be flushed to disk when the buffer is full or
+// periodically.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if !s.initialized {
+		s.initialize()
+	}
+
+	// To avoid partial writes from being flushed, we manually flush the existing buffer if:
+	// * The current write doesn't fit into the buffer fully, and
+	// * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+	if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+		if err := s.writer.Flush(); err != nil {
+			return 0, err
+		}
+	}
+
+	return s.writer.Write(bs)
+}
+
+// Sync flushes buffered log data into disk directly.
+func (s *BufferedWriteSyncer) Sync() error {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	var err error
+	if s.initialized {
+		err = s.writer.Flush()
+	}
+
+	return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() { + defer close(s.done) + + for { + select { + case <-s.ticker.C: + // we just simply ignore error here + // because the underlying bufio writer stores any errors + // and we return any error from Sync() as part of the close + _ = s.Sync() + case <-s.stop: + return + } + } +} + +// Stop closes the buffer, cleans up background goroutines, and flushes +// remaining unwritten data. +func (s *BufferedWriteSyncer) Stop() (err error) { + var stopped bool + + // Critical section. + func() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.initialized { + return + } + + stopped = s.stopped + if stopped { + return + } + s.stopped = true + + s.ticker.Stop() + close(s.stop) // tell flushLoop to stop + <-s.done // and wait until it has + }() + + // Don't call Sync on consecutive Stops. + if !stopped { + err = s.Sync() + } + + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go new file mode 100644 index 00000000000..422fd82a6b0 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/clock.go @@ -0,0 +1,48 @@ +// Copyright (c) 2021 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// DefaultClock is the default clock used by Zap in operations that require +// time. This clock uses the system clock for all operations. +var DefaultClock = systemClock{} + +// Clock is a source of time for logged entries. +type Clock interface { + // Now returns the current local time. + Now() time.Time + + // NewTicker returns *time.Ticker that holds a channel + // that delivers "ticks" of a clock. + NewTicker(time.Duration) *time.Ticker +} + +// systemClock implements default Clock that uses system time. +type systemClock struct{} + +func (systemClock) Now() time.Time { + return time.Now() +} + +func (systemClock) NewTicker(duration time.Duration) *time.Ticker { + return time.NewTicker(duration) +} diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go new file mode 100644 index 00000000000..cc2b4e07b93 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go @@ -0,0 +1,157 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder { + return &sliceArrayEncoder{ + elems: make([]interface{}, 0, 2), + } +}) + +func getSliceEncoder() *sliceArrayEncoder { + return _sliceEncoderPool.Get() +} + +func putSliceEncoder(e *sliceArrayEncoder) { + e.elems = e.elems[:0] + _sliceEncoderPool.Put(e) +} + +type consoleEncoder struct { + *jsonEncoder +} + +// NewConsoleEncoder creates an encoder whose output is designed for human - +// rather than machine - consumption. It serializes the core log entry data +// (message, level, timestamp, etc.) in a plain-text format and leaves the +// structured context as JSON. +// +// Note that although the console encoder doesn't use the keys specified in the +// encoder configuration, it will omit any element whose key is set to the empty +// string. +func NewConsoleEncoder(cfg EncoderConfig) Encoder { + if cfg.ConsoleSeparator == "" { + // Use a default delimiter of '\t' for backwards compatibility + cfg.ConsoleSeparator = "\t" + } + return consoleEncoder{newJSONEncoder(cfg, true)} +} + +func (c consoleEncoder) Clone() Encoder { + return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)} +} + +func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + line := bufferpool.Get() + + // We don't want the entry's metadata to be quoted and escaped (if it's + // encoded as strings), which means that we can't use the JSON encoder. The + // simplest option is to use the memory encoder and fmt.Fprint. + // + // If this ever becomes a performance bottleneck, we can implement + // ArrayEncoder for our plain-text format. + arr := getSliceEncoder() + if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() { + c.EncodeTime(ent.Time, arr) + } + if c.LevelKey != "" && c.EncodeLevel != nil { + c.EncodeLevel(ent.Level, arr) + } + if ent.LoggerName != "" && c.NameKey != "" { + nameEncoder := c.EncodeName + + if nameEncoder == nil { + // Fall back to FullNameEncoder for backward compatibility. 
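+			// (EncoderConfig documents EncodeName as the only optional
+			// encoder, so a nil value here is expected, not a bug.)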
+ nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, arr) + } + if ent.Caller.Defined { + if c.CallerKey != "" && c.EncodeCaller != nil { + c.EncodeCaller(ent.Caller, arr) + } + if c.FunctionKey != "" { + arr.AppendString(ent.Caller.Function) + } + } + for i := range arr.elems { + if i > 0 { + line.AppendString(c.ConsoleSeparator) + } + fmt.Fprint(line, arr.elems[i]) + } + putSliceEncoder(arr) + + // Add the message itself. + if c.MessageKey != "" { + c.addSeparatorIfNecessary(line) + line.AppendString(ent.Message) + } + + // Add any structured context. + c.writeContext(line, fields) + + // If there's no stacktrace key, honor that; this allows users to force + // single-line output. + if ent.Stack != "" && c.StacktraceKey != "" { + line.AppendByte('\n') + line.AppendString(ent.Stack) + } + + line.AppendString(c.LineEnding) + return line, nil +} + +func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) { + context := c.jsonEncoder.Clone().(*jsonEncoder) + defer func() { + // putJSONEncoder assumes the buffer is still used, but we write out the buffer so + // we can free it. + context.buf.Free() + putJSONEncoder(context) + }() + + addFields(context, extra) + context.closeOpenNamespaces() + if context.buf.Len() == 0 { + return + } + + c.addSeparatorIfNecessary(line) + line.AppendByte('{') + line.Write(context.buf.Bytes()) + line.AppendByte('}') +} + +func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) { + if line.Len() > 0 { + line.AppendString(c.ConsoleSeparator) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go new file mode 100644 index 00000000000..776e93f6f35 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/core.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// Core is a minimal, fast logger interface. It's designed for library authors +// to wrap in a more user-friendly API. +type Core interface { + LevelEnabler + + // With adds structured context to the Core. + With([]Field) Core + // Check determines whether the supplied Entry should be logged (using the + // embedded LevelEnabler and possibly some extra logic). If the entry + // should be logged, the Core adds itself to the CheckedEntry and returns + // the result. + // + // Callers must use Check before calling Write. 
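+	//
+	// A typical call site checks first and then writes through the
+	// returned CheckedEntry (a sketch; ent and fields are assumed to be
+	// in scope):
+	//
+	//	if ce := core.Check(ent, nil); ce != nil {
+	//		ce.Write(fields...)
+	//	}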
+ Check(Entry, *CheckedEntry) *CheckedEntry + // Write serializes the Entry and any Fields supplied at the log site and + // writes them to their destination. + // + // If called, Write should always log the Entry and Fields; it should not + // replicate the logic of Check. + Write(Entry, []Field) error + // Sync flushes buffered logs (if any). + Sync() error +} + +type nopCore struct{} + +// NewNopCore returns a no-op Core. +func NewNopCore() Core { return nopCore{} } +func (nopCore) Enabled(Level) bool { return false } +func (n nopCore) With([]Field) Core { return n } +func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce } +func (nopCore) Write(Entry, []Field) error { return nil } +func (nopCore) Sync() error { return nil } + +// NewCore creates a Core that writes logs to a WriteSyncer. +func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core { + return &ioCore{ + LevelEnabler: enab, + enc: enc, + out: ws, + } +} + +type ioCore struct { + LevelEnabler + enc Encoder + out WriteSyncer +} + +var ( + _ Core = (*ioCore)(nil) + _ leveledEnabler = (*ioCore)(nil) +) + +func (c *ioCore) Level() Level { + return LevelOf(c.LevelEnabler) +} + +func (c *ioCore) With(fields []Field) Core { + clone := c.clone() + addFields(clone.enc, fields) + return clone +} + +func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if c.Enabled(ent.Level) { + return ce.AddCore(ent, c) + } + return ce +} + +func (c *ioCore) Write(ent Entry, fields []Field) error { + buf, err := c.enc.EncodeEntry(ent, fields) + if err != nil { + return err + } + _, err = c.out.Write(buf.Bytes()) + buf.Free() + if err != nil { + return err + } + if ent.Level > ErrorLevel { + // Since we may be crashing the program, sync the output. + // Ignore Sync errors, pending a clean solution to issue #370. + _ = c.Sync() + } + return nil +} + +func (c *ioCore) Sync() error { + return c.out.Sync() +} + +func (c *ioCore) clone() *ioCore { + return &ioCore{ + LevelEnabler: c.LevelEnabler, + enc: c.enc.Clone(), + out: c.out, + } +} diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go new file mode 100644 index 00000000000..31000e91f70 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/doc.go @@ -0,0 +1,24 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package zapcore defines and implements the low-level interfaces upon which +// zap is built. 
By providing alternate implementations of these interfaces, +// external packages can extend zap's capabilities. +package zapcore // import "go.uber.org/zap/zapcore" diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go new file mode 100644 index 00000000000..04462541565 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/encoder.go @@ -0,0 +1,466 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" + "time" + + "go.uber.org/zap/buffer" +) + +// DefaultLineEnding defines the default line ending when writing logs. +// Alternate line endings specified in EncoderConfig can override this +// behavior. +const DefaultLineEnding = "\n" + +// OmitKey defines the key to use when callers want to remove a key from log output. +const OmitKey = "" + +// A LevelEncoder serializes a Level to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. +type LevelEncoder func(Level, PrimitiveArrayEncoder) + +// LowercaseLevelEncoder serializes a Level to a lowercase string. For example, +// InfoLevel is serialized to "info". +func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.String()) +} + +// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring. +// For example, InfoLevel is serialized to "info" and colored blue. +func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToLowercaseColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.String()) + } + enc.AppendString(s) +} + +// CapitalLevelEncoder serializes a Level to an all-caps string. For example, +// InfoLevel is serialized to "INFO". +func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + enc.AppendString(l.CapitalString()) +} + +// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color. +// For example, InfoLevel is serialized to "INFO" and colored blue. +func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) { + s, ok := _levelToCapitalColorString[l] + if !ok { + s = _unknownLevelColor.Add(l.CapitalString()) + } + enc.AppendString(s) +} + +// UnmarshalText unmarshals text to a LevelEncoder. 
"capital" is unmarshaled to +// CapitalLevelEncoder, "coloredCapital" is unmarshaled to CapitalColorLevelEncoder, +// "colored" is unmarshaled to LowercaseColorLevelEncoder, and anything else +// is unmarshaled to LowercaseLevelEncoder. +func (e *LevelEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "capital": + *e = CapitalLevelEncoder + case "capitalColor": + *e = CapitalColorLevelEncoder + case "color": + *e = LowercaseColorLevelEncoder + default: + *e = LowercaseLevelEncoder + } + return nil +} + +// A TimeEncoder serializes a time.Time to a primitive type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. +type TimeEncoder func(time.Time, PrimitiveArrayEncoder) + +// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds +// since the Unix epoch. +func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + sec := float64(nanos) / float64(time.Second) + enc.AppendFloat64(sec) +} + +// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of +// milliseconds since the Unix epoch. +func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + nanos := t.UnixNano() + millis := float64(nanos) / float64(time.Millisecond) + enc.AppendFloat64(millis) +} + +// EpochNanosTimeEncoder serializes a time.Time to an integer number of +// nanoseconds since the Unix epoch. +func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + enc.AppendInt64(t.UnixNano()) +} + +func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) { + type appendTimeEncoder interface { + AppendTimeLayout(time.Time, string) + } + + if enc, ok := enc.(appendTimeEncoder); ok { + enc.AppendTimeLayout(t, layout) + return + } + + enc.AppendString(t.Format(layout)) +} + +// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string +// with millisecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc) +} + +// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339, enc) +} + +// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string +// with nanosecond precision. +// +// If enc supports AppendTimeLayout(t time.Time,layout string), it's used +// instead of appending a pre-formatted string value. +func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, time.RFC3339Nano, enc) +} + +// TimeEncoderOfLayout returns TimeEncoder which serializes a time.Time using +// given layout. +func TimeEncoderOfLayout(layout string) TimeEncoder { + return func(t time.Time, enc PrimitiveArrayEncoder) { + encodeTimeLayout(t, layout, enc) + } +} + +// UnmarshalText unmarshals text to a TimeEncoder. +// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder. +// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder. +// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder. +// "millis" is unmarshaled to EpochMillisTimeEncoder. +// "nanos" is unmarshaled to EpochNanosEncoder. 
+// Anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "rfc3339nano", "RFC3339Nano":
+		*e = RFC3339NanoTimeEncoder
+	case "rfc3339", "RFC3339":
+		*e = RFC3339TimeEncoder
+	case "iso8601", "ISO8601":
+		*e = ISO8601TimeEncoder
+	case "millis":
+		*e = EpochMillisTimeEncoder
+	case "nanos":
+		*e = EpochNanosTimeEncoder
+	default:
+		*e = EpochTimeEncoder
+	}
+	return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If the value is an object with a "layout" field, it is unmarshaled to a
+// TimeEncoder with the given layout.
+//
+//	timeEncoder:
+//		layout: 06/01/02 03:04pm
+//
+// If the value is a string, it uses UnmarshalText.
+//
+//	timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var o struct {
+		Layout string `json:"layout" yaml:"layout"`
+	}
+	if err := unmarshal(&o); err == nil {
+		*e = TimeEncoderOfLayout(o.Layout)
+		return nil
+	}
+
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way that
+// UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+	return e.UnmarshalYAML(func(v interface{}) error {
+		return json.Unmarshal(data, v)
+	})
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+	enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, "nanos" to NanosDurationEncoder, "ms" to
+// MillisDurationEncoder, and anything else to SecondsDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+	switch string(text) {
+	case "string":
+		*e = StringDurationEncoder
+	case "nanos":
+		*e = NanosDurationEncoder
+	case "ms":
+		*e = MillisDurationEncoder
+	default:
+		*e = SecondsDurationEncoder
+	}
+	return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+	// TODO: consider using a byte-oriented API to save an allocation.
+	enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
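+// For example, "/full/path/to/package/file.go:42" is rendered as
+// "package/file.go:42".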
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) { + // TODO: consider using a byte-oriented API to save an allocation. + enc.AppendString(caller.TrimmedPath()) +} + +// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to +// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder. +func (e *CallerEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullCallerEncoder + default: + *e = ShortCallerEncoder + } + return nil +} + +// A NameEncoder serializes a period-separated logger name to a primitive +// type. +// +// This function must make exactly one call +// to a PrimitiveArrayEncoder's Append* method. +type NameEncoder func(string, PrimitiveArrayEncoder) + +// FullNameEncoder serializes the logger name as-is. +func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) { + enc.AppendString(loggerName) +} + +// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is +// unmarshaled to FullNameEncoder. +func (e *NameEncoder) UnmarshalText(text []byte) error { + switch string(text) { + case "full": + *e = FullNameEncoder + default: + *e = FullNameEncoder + } + return nil +} + +// An EncoderConfig allows users to configure the concrete encoders supplied by +// zapcore. +type EncoderConfig struct { + // Set the keys used for each log entry. If any key is empty, that portion + // of the entry is omitted. + MessageKey string `json:"messageKey" yaml:"messageKey"` + LevelKey string `json:"levelKey" yaml:"levelKey"` + TimeKey string `json:"timeKey" yaml:"timeKey"` + NameKey string `json:"nameKey" yaml:"nameKey"` + CallerKey string `json:"callerKey" yaml:"callerKey"` + FunctionKey string `json:"functionKey" yaml:"functionKey"` + StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"` + SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"` + LineEnding string `json:"lineEnding" yaml:"lineEnding"` + // Configure the primitive representations of common complex types. For + // example, some users may want all time.Times serialized as floating-point + // seconds since epoch, while others may prefer ISO8601 strings. + EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"` + EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"` + EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"` + EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"` + // Unlike the other primitive type encoders, EncodeName is optional. The + // zero value falls back to FullNameEncoder. + EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"` + // Configure the encoder for interface{} type objects. + // If not provided, objects are encoded using json.Encoder + NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"` + // Configures the field separator used by the console encoder. Defaults + // to tab. + ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"` +} + +// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a +// map- or struct-like object to the logging context. Like maps, ObjectEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ObjectEncoder interface { + // Logging-specific marshalers. + AddArray(key string, marshaler ArrayMarshaler) error + AddObject(key string, marshaler ObjectMarshaler) error + + // Built-in types. 
+ AddBinary(key string, value []byte) // for arbitrary bytes + AddByteString(key string, value []byte) // for UTF-8 encoded bytes + AddBool(key string, value bool) + AddComplex128(key string, value complex128) + AddComplex64(key string, value complex64) + AddDuration(key string, value time.Duration) + AddFloat64(key string, value float64) + AddFloat32(key string, value float32) + AddInt(key string, value int) + AddInt64(key string, value int64) + AddInt32(key string, value int32) + AddInt16(key string, value int16) + AddInt8(key string, value int8) + AddString(key, value string) + AddTime(key string, value time.Time) + AddUint(key string, value uint) + AddUint64(key string, value uint64) + AddUint32(key string, value uint32) + AddUint16(key string, value uint16) + AddUint8(key string, value uint8) + AddUintptr(key string, value uintptr) + + // AddReflected uses reflection to serialize arbitrary objects, so it can be + // slow and allocation-heavy. + AddReflected(key string, value interface{}) error + // OpenNamespace opens an isolated namespace where all subsequent fields will + // be added. Applications can use namespaces to prevent key collisions when + // injecting loggers into sub-components or third-party libraries. + OpenNamespace(key string) +} + +// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding +// array-like objects to the logging context. Of note, it supports mixed-type +// arrays even though they aren't typical in Go. Like slices, ArrayEncoders +// aren't safe for concurrent use (though typical use shouldn't require locks). +type ArrayEncoder interface { + // Built-in types. + PrimitiveArrayEncoder + + // Time-related types. + AppendDuration(time.Duration) + AppendTime(time.Time) + + // Logging-specific marshalers. + AppendArray(ArrayMarshaler) error + AppendObject(ObjectMarshaler) error + + // AppendReflected uses reflection to serialize arbitrary objects, so it's + // slow and allocation-heavy. + AppendReflected(value interface{}) error +} + +// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals +// only in Go's built-in types. It's included only so that Duration- and +// TimeEncoders cannot trigger infinite recursion. +type PrimitiveArrayEncoder interface { + // Built-in types. + AppendBool(bool) + AppendByteString([]byte) // for UTF-8 encoded bytes + AppendComplex128(complex128) + AppendComplex64(complex64) + AppendFloat64(float64) + AppendFloat32(float32) + AppendInt(int) + AppendInt64(int64) + AppendInt32(int32) + AppendInt16(int16) + AppendInt8(int8) + AppendString(string) + AppendUint(uint) + AppendUint64(uint64) + AppendUint32(uint32) + AppendUint16(uint16) + AppendUint8(uint8) + AppendUintptr(uintptr) +} + +// Encoder is a format-agnostic interface for all log entry marshalers. Since +// log encoders don't need to support the same wide range of use cases as +// general-purpose marshalers, it's possible to make them faster and +// lower-allocation. +// +// Implementations of the ObjectEncoder interface's methods can, of course, +// freely modify the receiver. However, the Clone and EncodeEntry methods will +// be called concurrently and shouldn't modify the receiver. +type Encoder interface { + ObjectEncoder + + // Clone copies the encoder, ensuring that adding fields to the copy doesn't + // affect the original. + Clone() Encoder + + // EncodeEntry encodes an entry and fields, along with any accumulated + // context, into a byte buffer and returns it. 
Any fields that are empty, + // including fields on the `Entry` type, should be omitted. + EncodeEntry(Entry, []Field) (*buffer.Buffer, error) +} diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go new file mode 100644 index 00000000000..459a5d7ce3c --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/entry.go @@ -0,0 +1,298 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "fmt" + "runtime" + "strings" + "time" + + "go.uber.org/multierr" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/exit" + "go.uber.org/zap/internal/pool" +) + +var _cePool = pool.New(func() *CheckedEntry { + // Pre-allocate some space for cores. + return &CheckedEntry{ + cores: make([]Core, 4), + } +}) + +func getCheckedEntry() *CheckedEntry { + ce := _cePool.Get() + ce.reset() + return ce +} + +func putCheckedEntry(ce *CheckedEntry) { + if ce == nil { + return + } + _cePool.Put(ce) +} + +// NewEntryCaller makes an EntryCaller from the return signature of +// runtime.Caller. +func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller { + if !ok { + return EntryCaller{} + } + return EntryCaller{ + PC: pc, + File: file, + Line: line, + Defined: true, + } +} + +// EntryCaller represents the caller of a logging function. +type EntryCaller struct { + Defined bool + PC uintptr + File string + Line int + Function string +} + +// String returns the full path and line number of the caller. +func (ec EntryCaller) String() string { + return ec.FullPath() +} + +// FullPath returns a /full/path/to/package/file:line description of the +// caller. +func (ec EntryCaller) FullPath() string { + if !ec.Defined { + return "undefined" + } + buf := bufferpool.Get() + buf.AppendString(ec.File) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// TrimmedPath returns a package/file:line description of the caller, +// preserving only the leaf directory name and file name. +func (ec EntryCaller) TrimmedPath() string { + if !ec.Defined { + return "undefined" + } + // nb. To make sure we trim the path correctly on Windows too, we + // counter-intuitively need to use '/' and *not* os.PathSeparator here, + // because the path given originates from Go stdlib, specifically + // runtime.Caller() which (as of Mar/17) returns forward slashes even on + // Windows. 
+ // + // See https://github.com/golang/go/issues/3335 + // and https://github.com/golang/go/issues/18151 + // + // for discussion on the issue on Go side. + // + // Find the last separator. + // + idx := strings.LastIndexByte(ec.File, '/') + if idx == -1 { + return ec.FullPath() + } + // Find the penultimate separator. + idx = strings.LastIndexByte(ec.File[:idx], '/') + if idx == -1 { + return ec.FullPath() + } + buf := bufferpool.Get() + // Keep everything after the penultimate separator. + buf.AppendString(ec.File[idx+1:]) + buf.AppendByte(':') + buf.AppendInt(int64(ec.Line)) + caller := buf.String() + buf.Free() + return caller +} + +// An Entry represents a complete log message. The entry's structured context +// is already serialized, but the log level, time, message, and call site +// information are available for inspection and modification. Any fields left +// empty will be omitted when encoding. +// +// Entries are pooled, so any functions that accept them MUST be careful not to +// retain references to them. +type Entry struct { + Level Level + Time time.Time + LoggerName string + Message string + Caller EntryCaller + Stack string +} + +// CheckWriteHook is a custom action that may be executed after an entry is +// written. +// +// Register one on a CheckedEntry with the After method. +// +// if ce := logger.Check(...); ce != nil { +// ce = ce.After(hook) +// ce.Write(...) +// } +// +// You can configure the hook for Fatal log statements at the logger level with +// the zap.WithFatalHook option. +type CheckWriteHook interface { + // OnWrite is invoked with the CheckedEntry that was written and a list + // of fields added with that entry. + // + // The list of fields DOES NOT include fields that were already added + // to the logger with the With method. + OnWrite(*CheckedEntry, []Field) +} + +// CheckWriteAction indicates what action to take after a log entry is +// processed. Actions are ordered in increasing severity. +type CheckWriteAction uint8 + +const ( + // WriteThenNoop indicates that nothing special needs to be done. It's the + // default behavior. + WriteThenNoop CheckWriteAction = iota + // WriteThenGoexit runs runtime.Goexit after Write. + WriteThenGoexit + // WriteThenPanic causes a panic after Write. + WriteThenPanic + // WriteThenFatal causes an os.Exit(1) after Write. + WriteThenFatal +) + +// OnWrite implements the OnWrite method to keep CheckWriteAction compatible +// with the new CheckWriteHook interface which deprecates CheckWriteAction. +func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) { + switch a { + case WriteThenGoexit: + runtime.Goexit() + case WriteThenPanic: + panic(ce.Message) + case WriteThenFatal: + exit.With(1) + } +} + +var _ CheckWriteHook = CheckWriteAction(0) + +// CheckedEntry is an Entry together with a collection of Cores that have +// already agreed to log it. +// +// CheckedEntry references should be created by calling AddCore or After on a +// nil *CheckedEntry. References are returned to a pool after Write, and MUST +// NOT be retained after calling their Write method. 
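+//
+// In sketch form, a custom Core's Check implementation uses AddCore like so
+// (myCore is a hypothetical type; ioCore in this package follows the same
+// pattern):
+//
+//	func (c *myCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+//		if c.Enabled(ent.Level) {
+//			return ce.AddCore(ent, c)
+//		}
+//		return ce
+//	}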
+type CheckedEntry struct { + Entry + ErrorOutput WriteSyncer + dirty bool // best-effort detection of pool misuse + after CheckWriteHook + cores []Core +} + +func (ce *CheckedEntry) reset() { + ce.Entry = Entry{} + ce.ErrorOutput = nil + ce.dirty = false + ce.after = nil + for i := range ce.cores { + // don't keep references to cores + ce.cores[i] = nil + } + ce.cores = ce.cores[:0] +} + +// Write writes the entry to the stored Cores, returns any errors, and returns +// the CheckedEntry reference to a pool for immediate re-use. Finally, it +// executes any required CheckWriteAction. +func (ce *CheckedEntry) Write(fields ...Field) { + if ce == nil { + return + } + + if ce.dirty { + if ce.ErrorOutput != nil { + // Make a best effort to detect unsafe re-use of this CheckedEntry. + // If the entry is dirty, log an internal error; because the + // CheckedEntry is being used after it was returned to the pool, + // the message may be an amalgamation from multiple call sites. + fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry) + _ = ce.ErrorOutput.Sync() // ignore error + } + return + } + ce.dirty = true + + var err error + for i := range ce.cores { + err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields)) + } + if err != nil && ce.ErrorOutput != nil { + fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err) + _ = ce.ErrorOutput.Sync() // ignore error + } + + hook := ce.after + if hook != nil { + hook.OnWrite(ce, fields) + } + putCheckedEntry(ce) +} + +// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be +// used by Core.Check implementations, and is safe to call on nil CheckedEntry +// references. +func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.cores = append(ce.cores, core) + return ce +} + +// Should sets this CheckedEntry's CheckWriteAction, which controls whether a +// Core will panic or fatal after writing this log entry. Like AddCore, it's +// safe to call on nil CheckedEntry references. +// +// Deprecated: Use [CheckedEntry.After] instead. +func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry { + return ce.After(ent, should) +} + +// After sets this CheckEntry's CheckWriteHook, which will be called after this +// log entry has been written. It's safe to call this on nil CheckedEntry +// references. +func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry { + if ce == nil { + ce = getCheckedEntry() + ce.Entry = ent + } + ce.after = hook + return ce +} diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go new file mode 100644 index 00000000000..c40df13269a --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/error.go @@ -0,0 +1,136 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"fmt"
+	"reflect"
+
+	"go.uber.org/zap/internal/pool"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors this error was comprised of.
+//
+//	{
+//		"error": err.Error(),
+//		"errorVerbose": fmt.Sprintf("%+v", err),
+//		"errorCauses": [
+//			...
+//		],
+//	}
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the Error() method.
+	defer func() {
+		if rerr := recover(); rerr != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are
+			// an error that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", rerr)
+		}
+	}()
+
+	basic := err.Error()
+	enc.AddString(key, basic)
+
+	switch e := err.(type) {
+	case errorGroup:
+		return enc.AddArray(key+"Causes", errArray(e.Errors()))
+	case fmt.Formatter:
+		verbose := fmt.Sprintf("%+v", e)
+		if verbose != basic {
+			// This is a rich error type, like those produced by
+			// github.com/pkg/errors.
+			enc.AddString(key+"Verbose", verbose)
+		}
+	}
+	return nil
+}
+
+type errorGroup interface {
+	// Provides read-only access to the underlying list of errors, preferably
+	// without causing any allocs.
+	Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the version
+// implemented in the top-level error.go file. We can't re-use this because
+// that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+	for i := range errs {
+		if errs[i] == nil {
+			continue
+		}
+
+		el := newErrArrayElem(errs[i])
+		err := arr.AppendObject(el)
+		el.Free()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+var _errArrayElemPool = pool.New(func() *errArrayElem {
+	return &errArrayElem{}
+})
+
+// Encodes any error into a {"error": ...} object, re-using the same error
+// encoding logic.
+//
+// May be passed in place of an array to build a single-element array.
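+//
+// encodeError relies on this via errArray: each member of an errorGroup is
+// appended as an errArrayElem, so every cause under ${key}Causes is itself
+// rendered as an {"error": ...} object.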
+type errArrayElem struct{ err error } + +func newErrArrayElem(err error) *errArrayElem { + e := _errArrayElemPool.Get() + e.err = err + return e +} + +func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error { + return arr.AppendObject(e) +} + +func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error { + return encodeError("error", e.err, enc) +} + +func (e *errArrayElem) Free() { + e.err = nil + _errArrayElemPool.Put(e) +} diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go new file mode 100644 index 00000000000..308c9781ed1 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/field.go @@ -0,0 +1,233 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "bytes" + "fmt" + "math" + "reflect" + "time" +) + +// A FieldType indicates which member of the Field union struct should be used +// and how it should be serialized. +type FieldType uint8 + +const ( + // UnknownType is the default field type. Attempting to add it to an encoder will panic. + UnknownType FieldType = iota + // ArrayMarshalerType indicates that the field carries an ArrayMarshaler. + ArrayMarshalerType + // ObjectMarshalerType indicates that the field carries an ObjectMarshaler. + ObjectMarshalerType + // BinaryType indicates that the field carries an opaque binary blob. + BinaryType + // BoolType indicates that the field carries a bool. + BoolType + // ByteStringType indicates that the field carries UTF-8 encoded bytes. + ByteStringType + // Complex128Type indicates that the field carries a complex128. + Complex128Type + // Complex64Type indicates that the field carries a complex64. + Complex64Type + // DurationType indicates that the field carries a time.Duration. + DurationType + // Float64Type indicates that the field carries a float64. + Float64Type + // Float32Type indicates that the field carries a float32. + Float32Type + // Int64Type indicates that the field carries an int64. + Int64Type + // Int32Type indicates that the field carries an int32. + Int32Type + // Int16Type indicates that the field carries an int16. + Int16Type + // Int8Type indicates that the field carries an int8. + Int8Type + // StringType indicates that the field carries a string. + StringType + // TimeType indicates that the field carries a time.Time that is + // representable by a UnixNano() stored as an int64. 
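+	// (zap's Time field constructor is expected to fall back to
+	// TimeFullType, below, for times outside the int64 UnixNano range.)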
+ TimeType + // TimeFullType indicates that the field carries a time.Time stored as-is. + TimeFullType + // Uint64Type indicates that the field carries a uint64. + Uint64Type + // Uint32Type indicates that the field carries a uint32. + Uint32Type + // Uint16Type indicates that the field carries a uint16. + Uint16Type + // Uint8Type indicates that the field carries a uint8. + Uint8Type + // UintptrType indicates that the field carries a uintptr. + UintptrType + // ReflectType indicates that the field carries an interface{}, which should + // be serialized using reflection. + ReflectType + // NamespaceType signals the beginning of an isolated namespace. All + // subsequent fields should be added to the new namespace. + NamespaceType + // StringerType indicates that the field carries a fmt.Stringer. + StringerType + // ErrorType indicates that the field carries an error. + ErrorType + // SkipType indicates that the field is a no-op. + SkipType + + // InlineMarshalerType indicates that the field carries an ObjectMarshaler + // that should be inlined. + InlineMarshalerType +) + +// A Field is a marshaling operation used to add a key-value pair to a logger's +// context. Most fields are lazily marshaled, so it's inexpensive to add fields +// to disabled debug-level log statements. +type Field struct { + Key string + Type FieldType + Integer int64 + String string + Interface interface{} +} + +// AddTo exports a field through the ObjectEncoder interface. It's primarily +// useful to library authors, and shouldn't be necessary in most applications. +func (f Field) AddTo(enc ObjectEncoder) { + var err error + + switch f.Type { + case ArrayMarshalerType: + err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler)) + case ObjectMarshalerType: + err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler)) + case InlineMarshalerType: + err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc) + case BinaryType: + enc.AddBinary(f.Key, f.Interface.([]byte)) + case BoolType: + enc.AddBool(f.Key, f.Integer == 1) + case ByteStringType: + enc.AddByteString(f.Key, f.Interface.([]byte)) + case Complex128Type: + enc.AddComplex128(f.Key, f.Interface.(complex128)) + case Complex64Type: + enc.AddComplex64(f.Key, f.Interface.(complex64)) + case DurationType: + enc.AddDuration(f.Key, time.Duration(f.Integer)) + case Float64Type: + enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer))) + case Float32Type: + enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer))) + case Int64Type: + enc.AddInt64(f.Key, f.Integer) + case Int32Type: + enc.AddInt32(f.Key, int32(f.Integer)) + case Int16Type: + enc.AddInt16(f.Key, int16(f.Integer)) + case Int8Type: + enc.AddInt8(f.Key, int8(f.Integer)) + case StringType: + enc.AddString(f.Key, f.String) + case TimeType: + if f.Interface != nil { + enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location))) + } else { + // Fall back to UTC if location is nil. 
+			enc.AddTime(f.Key, time.Unix(0, f.Integer))
+		}
+	case TimeFullType:
+		enc.AddTime(f.Key, f.Interface.(time.Time))
+	case Uint64Type:
+		enc.AddUint64(f.Key, uint64(f.Integer))
+	case Uint32Type:
+		enc.AddUint32(f.Key, uint32(f.Integer))
+	case Uint16Type:
+		enc.AddUint16(f.Key, uint16(f.Integer))
+	case Uint8Type:
+		enc.AddUint8(f.Key, uint8(f.Integer))
+	case UintptrType:
+		enc.AddUintptr(f.Key, uintptr(f.Integer))
+	case ReflectType:
+		err = enc.AddReflected(f.Key, f.Interface)
+	case NamespaceType:
+		enc.OpenNamespace(f.Key)
+	case StringerType:
+		err = encodeStringer(f.Key, f.Interface, enc)
+	case ErrorType:
+		err = encodeError(f.Key, f.Interface.(error), enc)
+	case SkipType:
+		break
+	default:
+		panic(fmt.Sprintf("unknown field type: %v", f))
+	}
+
+	if err != nil {
+		enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+	}
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+	if f.Type != other.Type {
+		return false
+	}
+	if f.Key != other.Key {
+		return false
+	}
+
+	switch f.Type {
+	case BinaryType, ByteStringType:
+		return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+	case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+		return reflect.DeepEqual(f.Interface, other.Interface)
+	default:
+		return f == other
+	}
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+	for i := range fields {
+		fields[i].AddTo(enc)
+	}
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+	// Try to capture panics (from nil references or otherwise) when calling
+	// the String() method, similar to https://golang.org/src/fmt/print.go#L540
+	defer func() {
+		if err := recover(); err != nil {
+			// If it's a nil pointer, just say "<nil>". The likeliest causes are a
+			// Stringer that fails to guard against nil or a nil pointer for a
+			// value receiver, and in either case, "<nil>" is a nice result.
+			if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+				enc.AddString(key, "<nil>")
+				return
+			}
+
+			retErr = fmt.Errorf("PANIC=%v", err)
+		}
+	}()
+
+	enc.AddString(key, stringer.(fmt.Stringer).String())
+	return nil
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 00000000000..198def9917c
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type hooked struct { + Core + funcs []func(Entry) error +} + +var ( + _ Core = (*hooked)(nil) + _ leveledEnabler = (*hooked)(nil) +) + +// RegisterHooks wraps a Core and runs a collection of user-defined callback +// hooks each time a message is logged. Execution of the callbacks is blocking. +// +// This offers users an easy way to register simple callbacks (e.g., metrics +// collection) without implementing the full Core interface. +func RegisterHooks(core Core, hooks ...func(Entry) error) Core { + funcs := append([]func(Entry) error{}, hooks...) + return &hooked{ + Core: core, + funcs: funcs, + } +} + +func (h *hooked) Level() Level { + return LevelOf(h.Core) +} + +func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + // Let the wrapped Core decide whether to log this message or not. This + // also gives the downstream a chance to register itself directly with the + // CheckedEntry. + if downstream := h.Core.Check(ent, ce); downstream != nil { + return downstream.AddCore(ent, h) + } + return ce +} + +func (h *hooked) With(fields []Field) Core { + return &hooked{ + Core: h.Core.With(fields), + funcs: h.funcs, + } +} + +func (h *hooked) Write(ent Entry, _ []Field) error { + // Since our downstream had a chance to register itself directly with the + // CheckedMessage, we don't need to call it here. + var err error + for i := range h.funcs { + err = multierr.Append(err, h.funcs[i](ent)) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go new file mode 100644 index 00000000000..7a11237ae97 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/increase_level.go @@ -0,0 +1,75 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "fmt" + +type levelFilterCore struct { + core Core + level LevelEnabler +} + +var ( + _ Core = (*levelFilterCore)(nil) + _ leveledEnabler = (*levelFilterCore)(nil) +) + +// NewIncreaseLevelCore creates a core that can be used to increase the level of +// an existing Core. 
It cannot be used to decrease the logging level, as it acts +// as a filter before calling the underlying core. If level decreases the log level, +// an error is returned. +func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) { + for l := _maxLevel; l >= _minLevel; l-- { + if !core.Enabled(l) && level.Enabled(l) { + return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l) + } + } + + return &levelFilterCore{core, level}, nil +} + +func (c *levelFilterCore) Enabled(lvl Level) bool { + return c.level.Enabled(lvl) +} + +func (c *levelFilterCore) Level() Level { + return LevelOf(c.level) +} + +func (c *levelFilterCore) With(fields []Field) Core { + return &levelFilterCore{c.core.With(fields), c.level} +} + +func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !c.Enabled(ent.Level) { + return ce + } + + return c.core.Check(ent, ce) +} + +func (c *levelFilterCore) Write(ent Entry, fields []Field) error { + return c.core.Write(ent, fields) +} + +func (c *levelFilterCore) Sync() error { + return c.core.Sync() +} diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go new file mode 100644 index 00000000000..9685169b2ea --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go @@ -0,0 +1,583 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/base64" + "math" + "time" + "unicode/utf8" + + "go.uber.org/zap/buffer" + "go.uber.org/zap/internal/bufferpool" + "go.uber.org/zap/internal/pool" +) + +// For JSON-escaping; see jsonEncoder.safeAddString below. +const _hex = "0123456789abcdef" + +var _jsonPool = pool.New(func() *jsonEncoder { + return &jsonEncoder{} +}) + +func putJSONEncoder(enc *jsonEncoder) { + if enc.reflectBuf != nil { + enc.reflectBuf.Free() + } + enc.EncoderConfig = nil + enc.buf = nil + enc.spaced = false + enc.openNamespaces = 0 + enc.reflectBuf = nil + enc.reflectEnc = nil + _jsonPool.Put(enc) +} + +type jsonEncoder struct { + *EncoderConfig + buf *buffer.Buffer + spaced bool // include spaces after colons and commas + openNamespaces int + + // for encoding generic values by reflection + reflectBuf *buffer.Buffer + reflectEnc ReflectedEncoder +} + +// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder +// appropriately escapes all field keys and values. 
+// +// Note that the encoder doesn't deduplicate keys, so it's possible to produce +// a message like +// +// {"foo":"bar","foo":"baz"} +// +// This is permitted by the JSON specification, but not encouraged. Many +// libraries will ignore duplicate key-value pairs (typically keeping the last +// pair) when unmarshaling, but users should attempt to avoid adding duplicate +// keys. +func NewJSONEncoder(cfg EncoderConfig) Encoder { + return newJSONEncoder(cfg, false) +} + +func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder { + if cfg.SkipLineEnding { + cfg.LineEnding = "" + } else if cfg.LineEnding == "" { + cfg.LineEnding = DefaultLineEnding + } + + // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default + if cfg.NewReflectedEncoder == nil { + cfg.NewReflectedEncoder = defaultReflectedEncoder + } + + return &jsonEncoder{ + EncoderConfig: &cfg, + buf: bufferpool.Get(), + spaced: spaced, + } +} + +func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error { + enc.addKey(key) + return enc.AppendArray(arr) +} + +func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error { + enc.addKey(key) + return enc.AppendObject(obj) +} + +func (enc *jsonEncoder) AddBinary(key string, val []byte) { + enc.AddString(key, base64.StdEncoding.EncodeToString(val)) +} + +func (enc *jsonEncoder) AddByteString(key string, val []byte) { + enc.addKey(key) + enc.AppendByteString(val) +} + +func (enc *jsonEncoder) AddBool(key string, val bool) { + enc.addKey(key) + enc.AppendBool(val) +} + +func (enc *jsonEncoder) AddComplex128(key string, val complex128) { + enc.addKey(key) + enc.AppendComplex128(val) +} + +func (enc *jsonEncoder) AddComplex64(key string, val complex64) { + enc.addKey(key) + enc.AppendComplex64(val) +} + +func (enc *jsonEncoder) AddDuration(key string, val time.Duration) { + enc.addKey(key) + enc.AppendDuration(val) +} + +func (enc *jsonEncoder) AddFloat64(key string, val float64) { + enc.addKey(key) + enc.AppendFloat64(val) +} + +func (enc *jsonEncoder) AddFloat32(key string, val float32) { + enc.addKey(key) + enc.AppendFloat32(val) +} + +func (enc *jsonEncoder) AddInt64(key string, val int64) { + enc.addKey(key) + enc.AppendInt64(val) +} + +func (enc *jsonEncoder) resetReflectBuf() { + if enc.reflectBuf == nil { + enc.reflectBuf = bufferpool.Get() + enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf) + } else { + enc.reflectBuf.Reset() + } +} + +var nullLiteralBytes = []byte("null") + +// Only invoke the standard JSON encoder if there is actually something to +// encode; otherwise write JSON null literal directly. 
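To make the duplicate-key caveat above concrete, here is a minimal sketch using only the public zap/zapcore API; the entry contents and field names are illustrative assumptions, not part of this diff:

package main

import (
	"fmt"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	// Two fields share the key "foo"; the encoder emits both, exactly as the
	// doc comment above warns.
	buf, err := enc.EncodeEntry(
		zapcore.Entry{Level: zapcore.InfoLevel, Time: time.Now(), Message: "dup"},
		[]zapcore.Field{zap.String("foo", "bar"), zap.String("foo", "baz")},
	)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // ..."foo":"bar","foo":"baz"}
}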
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) { + if obj == nil { + return nullLiteralBytes, nil + } + enc.resetReflectBuf() + if err := enc.reflectEnc.Encode(obj); err != nil { + return nil, err + } + enc.reflectBuf.TrimNewline() + return enc.reflectBuf.Bytes(), nil +} + +func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error { + valueBytes, err := enc.encodeReflected(obj) + if err != nil { + return err + } + enc.addKey(key) + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) OpenNamespace(key string) { + enc.addKey(key) + enc.buf.AppendByte('{') + enc.openNamespaces++ +} + +func (enc *jsonEncoder) AddString(key, val string) { + enc.addKey(key) + enc.AppendString(val) +} + +func (enc *jsonEncoder) AddTime(key string, val time.Time) { + enc.addKey(key) + enc.AppendTime(val) +} + +func (enc *jsonEncoder) AddUint64(key string, val uint64) { + enc.addKey(key) + enc.AppendUint64(val) +} + +func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error { + enc.addElementSeparator() + enc.buf.AppendByte('[') + err := arr.MarshalLogArray(enc) + enc.buf.AppendByte(']') + return err +} + +func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error { + // Close ONLY new openNamespaces that are created during + // AppendObject(). + old := enc.openNamespaces + enc.openNamespaces = 0 + enc.addElementSeparator() + enc.buf.AppendByte('{') + err := obj.MarshalLogObject(enc) + enc.buf.AppendByte('}') + enc.closeOpenNamespaces() + enc.openNamespaces = old + return err +} + +func (enc *jsonEncoder) AppendBool(val bool) { + enc.addElementSeparator() + enc.buf.AppendBool(val) +} + +func (enc *jsonEncoder) AppendByteString(val []byte) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddByteString(val) + enc.buf.AppendByte('"') +} + +// appendComplex appends the encoded form of the provided complex128 value. +// precision specifies the encoding precision for the real and imaginary +// components of the complex number. +func (enc *jsonEncoder) appendComplex(val complex128, precision int) { + enc.addElementSeparator() + // Cast to a platform-independent, fixed-size type. + r, i := float64(real(val)), float64(imag(val)) + enc.buf.AppendByte('"') + // Because we're always in a quoted string, we can use strconv without + // special-casing NaN and +/-Inf. + enc.buf.AppendFloat(r, precision) + // If imaginary part is less than 0, minus (-) sign is added by default + // by AppendFloat. + if i >= 0 { + enc.buf.AppendByte('+') + } + enc.buf.AppendFloat(i, precision) + enc.buf.AppendByte('i') + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendDuration(val time.Duration) { + cur := enc.buf.Len() + if e := enc.EncodeDuration; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep + // JSON valid. 
+ enc.AppendInt64(int64(val)) + } +} + +func (enc *jsonEncoder) AppendInt64(val int64) { + enc.addElementSeparator() + enc.buf.AppendInt(val) +} + +func (enc *jsonEncoder) AppendReflected(val interface{}) error { + valueBytes, err := enc.encodeReflected(val) + if err != nil { + return err + } + enc.addElementSeparator() + _, err = enc.buf.Write(valueBytes) + return err +} + +func (enc *jsonEncoder) AppendString(val string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(val) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.buf.AppendTime(time, layout) + enc.buf.AppendByte('"') +} + +func (enc *jsonEncoder) AppendTime(val time.Time) { + cur := enc.buf.Len() + if e := enc.EncodeTime; e != nil { + e(val, enc) + } + if cur == enc.buf.Len() { + // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep + // output JSON valid. + enc.AppendInt64(val.UnixNano()) + } +} + +func (enc *jsonEncoder) AppendUint64(val uint64) { + enc.addElementSeparator() + enc.buf.AppendUint(val) +} + +func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) } +func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) } +func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) } +func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) } +func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) } +func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) } +func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) } +func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) } +func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) } + +func (enc *jsonEncoder) Clone() Encoder { + clone := enc.clone() + clone.buf.Write(enc.buf.Bytes()) + return clone +} + +func (enc *jsonEncoder) clone() *jsonEncoder { + clone := _jsonPool.Get() + clone.EncoderConfig = enc.EncoderConfig + clone.spaced = enc.spaced + clone.openNamespaces = enc.openNamespaces + clone.buf = bufferpool.Get() + return clone +} + +func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) { + final := enc.clone() + final.buf.AppendByte('{') + + if final.LevelKey != "" && final.EncodeLevel != nil { + final.addKey(final.LevelKey) + 
cur := final.buf.Len() + final.EncodeLevel(ent.Level, final) + if cur == final.buf.Len() { + // User-supplied EncodeLevel was a no-op. Fall back to strings to keep + // output JSON valid. + final.AppendString(ent.Level.String()) + } + } + if final.TimeKey != "" && !ent.Time.IsZero() { + final.AddTime(final.TimeKey, ent.Time) + } + if ent.LoggerName != "" && final.NameKey != "" { + final.addKey(final.NameKey) + cur := final.buf.Len() + nameEncoder := final.EncodeName + + // if no name encoder provided, fall back to FullNameEncoder for backwards + // compatibility + if nameEncoder == nil { + nameEncoder = FullNameEncoder + } + + nameEncoder(ent.LoggerName, final) + if cur == final.buf.Len() { + // User-supplied EncodeName was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.LoggerName) + } + } + if ent.Caller.Defined { + if final.CallerKey != "" { + final.addKey(final.CallerKey) + cur := final.buf.Len() + final.EncodeCaller(ent.Caller, final) + if cur == final.buf.Len() { + // User-supplied EncodeCaller was a no-op. Fall back to strings to + // keep output JSON valid. + final.AppendString(ent.Caller.String()) + } + } + if final.FunctionKey != "" { + final.addKey(final.FunctionKey) + final.AppendString(ent.Caller.Function) + } + } + if final.MessageKey != "" { + final.addKey(enc.MessageKey) + final.AppendString(ent.Message) + } + if enc.buf.Len() > 0 { + final.addElementSeparator() + final.buf.Write(enc.buf.Bytes()) + } + addFields(final, fields) + final.closeOpenNamespaces() + if ent.Stack != "" && final.StacktraceKey != "" { + final.AddString(final.StacktraceKey, ent.Stack) + } + final.buf.AppendByte('}') + final.buf.AppendString(final.LineEnding) + + ret := final.buf + putJSONEncoder(final) + return ret, nil +} + +func (enc *jsonEncoder) truncate() { + enc.buf.Reset() +} + +func (enc *jsonEncoder) closeOpenNamespaces() { + for i := 0; i < enc.openNamespaces; i++ { + enc.buf.AppendByte('}') + } + enc.openNamespaces = 0 +} + +func (enc *jsonEncoder) addKey(key string) { + enc.addElementSeparator() + enc.buf.AppendByte('"') + enc.safeAddString(key) + enc.buf.AppendByte('"') + enc.buf.AppendByte(':') + if enc.spaced { + enc.buf.AppendByte(' ') + } +} + +func (enc *jsonEncoder) addElementSeparator() { + last := enc.buf.Len() - 1 + if last < 0 { + return + } + switch enc.buf.Bytes()[last] { + case '{', '[', ':', ',', ' ': + return + default: + enc.buf.AppendByte(',') + if enc.spaced { + enc.buf.AppendByte(' ') + } + } +} + +func (enc *jsonEncoder) appendFloat(val float64, bitSize int) { + enc.addElementSeparator() + switch { + case math.IsNaN(val): + enc.buf.AppendString(`"NaN"`) + case math.IsInf(val, 1): + enc.buf.AppendString(`"+Inf"`) + case math.IsInf(val, -1): + enc.buf.AppendString(`"-Inf"`) + default: + enc.buf.AppendFloat(val, bitSize) + } +} + +// safeAddString JSON-escapes a string and appends it to the internal buffer. +// Unlike the standard library's encoder, it doesn't attempt to protect the +// user from browser vulnerabilities or JSONP-related problems. +func (enc *jsonEncoder) safeAddString(s string) { + safeAppendStringLike( + (*buffer.Buffer).AppendString, + utf8.DecodeRuneInString, + enc.buf, + s, + ) +} + +// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte. +func (enc *jsonEncoder) safeAddByteString(s []byte) { + safeAppendStringLike( + (*buffer.Buffer).AppendBytes, + utf8.DecodeRune, + enc.buf, + s, + ) +} + +// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString. 
+// It appends a string or byte slice to the buffer, escaping all special characters. +func safeAppendStringLike[S []byte | string]( + // appendTo appends this string-like object to the buffer. + appendTo func(*buffer.Buffer, S), + // decodeRune decodes the next rune from the string-like object + // and returns its value and width in bytes. + decodeRune func(S) (rune, int), + buf *buffer.Buffer, + s S, +) { + // The encoding logic below works by skipping over characters + // that can be safely copied as-is, + // until a character is found that needs special handling. + // At that point, we copy everything we've seen so far, + // and then handle that special character. + // + // last is the index of the last byte that was copied to the buffer. + last := 0 + for i := 0; i < len(s); { + if s[i] >= utf8.RuneSelf { + // Character >= RuneSelf may be part of a multi-byte rune. + // They need to be decoded before we can decide how to handle them. + r, size := decodeRune(s[i:]) + if r != utf8.RuneError || size != 1 { + // No special handling required. + // Skip over this rune and continue. + i += size + continue + } + + // Invalid UTF-8 sequence. + // Replace it with the Unicode replacement character. + appendTo(buf, s[last:i]) + buf.AppendString(`\ufffd`) + + i++ + last = i + } else { + // Character < RuneSelf is a single-byte UTF-8 rune. + if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' { + // No escaping necessary. + // Skip over this character and continue. + i++ + continue + } + + // This character needs to be escaped. + appendTo(buf, s[last:i]) + switch s[i] { + case '\\', '"': + buf.AppendByte('\\') + buf.AppendByte(s[i]) + case '\n': + buf.AppendByte('\\') + buf.AppendByte('n') + case '\r': + buf.AppendByte('\\') + buf.AppendByte('r') + case '\t': + buf.AppendByte('\\') + buf.AppendByte('t') + default: + // Encode bytes < 0x20, except for the escape sequences above. + buf.AppendString(`\u00`) + buf.AppendByte(_hex[s[i]>>4]) + buf.AppendByte(_hex[s[i]&0xF]) + } + + i++ + last = i + } + } + + // add remaining + appendTo(buf, s[last:]) +} diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go new file mode 100644 index 00000000000..05288d6a88e --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go @@ -0,0 +1,54 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
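The escaping rules implemented by safeAppendStringLike above are easy to observe end to end. A sketch, assuming a deliberately minimal EncoderConfig with only MessageKey set:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zapcore.EncoderConfig{MessageKey: "msg"})
	// \t and " get short escapes, the control byte 0x07 becomes \u0007, and
	// the invalid UTF-8 byte 0xff is replaced with \ufffd.
	buf, err := enc.EncodeEntry(
		zapcore.Entry{Message: "tab:\t quote:\" bel:\a bad:\xff"},
		nil,
	)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // {"msg":"tab:\t quote:\" bel:\u0007 bad:\ufffd"}
}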
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+ Core
+ sync.Once
+ fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+ return &lazyWithCore{
+ Core: core,
+ fields: fields,
+ }
+}
+
+func (d *lazyWithCore) initOnce() {
+ d.Once.Do(func() {
+ d.Core = d.Core.With(d.fields)
+ })
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+ d.initOnce()
+ return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+ d.initOnce()
+ return d.Core.Check(e, ce)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 00000000000..e01a2413166
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,229 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel Level = iota - 1
+ // InfoLevel is the default logging priority.
+ InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+
+ _minLevel = DebugLevel
+ _maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
+)
+
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
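For example (an illustrative sketch, not part of the vendored source), a level supplied as text by a flag or config file parses like this:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	lvl, err := zapcore.ParseLevel("WARN") // lower-case "warn" parses too
	if err != nil {
		panic(err) // e.g. unrecognized level: "verbose"
	}
	fmt.Println(lvl)                             // warn
	fmt.Println(lvl.Enabled(zapcore.ErrorLevel)) // true: error is at or above warn
}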
+func ParseLevel(text string) (Level, error) { + var level Level + err := level.UnmarshalText([]byte(text)) + return level, err +} + +type leveledEnabler interface { + LevelEnabler + + Level() Level +} + +// LevelOf reports the minimum enabled log level for the given LevelEnabler +// from Zap's supported log levels, or [InvalidLevel] if none of them are +// enabled. +// +// A LevelEnabler may implement a 'Level() Level' method to override the +// behavior of this function. +// +// func (c *core) Level() Level { +// return c.currentLevel +// } +// +// It is recommended that [Core] implementations that wrap other cores use +// LevelOf to retrieve the level of the wrapped core. For example, +// +// func (c *coreWrapper) Level() Level { +// return zapcore.LevelOf(c.wrappedCore) +// } +func LevelOf(enab LevelEnabler) Level { + if lvler, ok := enab.(leveledEnabler); ok { + return lvler.Level() + } + + for lvl := _minLevel; lvl <= _maxLevel; lvl++ { + if enab.Enabled(lvl) { + return lvl + } + } + + return InvalidLevel +} + +// String returns a lower-case ASCII representation of the log level. +func (l Level) String() string { + switch l { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case DPanicLevel: + return "dpanic" + case PanicLevel: + return "panic" + case FatalLevel: + return "fatal" + default: + return fmt.Sprintf("Level(%d)", l) + } +} + +// CapitalString returns an all-caps ASCII representation of the log level. +func (l Level) CapitalString() string { + // Printing levels in all-caps is common enough that we should export this + // functionality. + switch l { + case DebugLevel: + return "DEBUG" + case InfoLevel: + return "INFO" + case WarnLevel: + return "WARN" + case ErrorLevel: + return "ERROR" + case DPanicLevel: + return "DPANIC" + case PanicLevel: + return "PANIC" + case FatalLevel: + return "FATAL" + default: + return fmt.Sprintf("LEVEL(%d)", l) + } +} + +// MarshalText marshals the Level to text. Note that the text representation +// drops the -Level suffix (see example). +func (l Level) MarshalText() ([]byte, error) { + return []byte(l.String()), nil +} + +// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText +// expects the text representation of a Level to drop the -Level suffix (see +// example). +// +// In particular, this makes it easy to configure logging levels using YAML, +// TOML, or JSON files. +func (l *Level) UnmarshalText(text []byte) error { + if l == nil { + return errUnmarshalNilLevel + } + if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) { + return fmt.Errorf("unrecognized level: %q", text) + } + return nil +} + +func (l *Level) unmarshalText(text []byte) bool { + switch string(text) { + case "debug", "DEBUG": + *l = DebugLevel + case "info", "INFO", "": // make the zero value useful + *l = InfoLevel + case "warn", "WARN": + *l = WarnLevel + case "error", "ERROR": + *l = ErrorLevel + case "dpanic", "DPANIC": + *l = DPanicLevel + case "panic", "PANIC": + *l = PanicLevel + case "fatal", "FATAL": + *l = FatalLevel + default: + return false + } + return true +} + +// Set sets the level for the flag.Value interface. +func (l *Level) Set(s string) error { + return l.UnmarshalText([]byte(s)) +} + +// Get gets the level for the flag.Getter interface. +func (l *Level) Get() interface{} { + return *l +} + +// Enabled returns true if the given level is at or above this level. 
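A brief illustration of LevelOf and the Enabled contract described above; a sketch that uses only exported zapcore identifiers:

package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	// A bare Level is itself a LevelEnabler, so LevelOf probes Enabled from
	// DebugLevel upward and reports the first enabled level.
	fmt.Println(zapcore.LevelOf(zapcore.WarnLevel)) // warn

	// A no-op core enables nothing, so LevelOf reports InvalidLevel.
	fmt.Println(zapcore.LevelOf(zapcore.NewNopCore()) == zapcore.InvalidLevel) // true
}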
+func (l Level) Enabled(lvl Level) bool { + return lvl >= l +} + +// LevelEnabler decides whether a given logging level is enabled when logging a +// message. +// +// Enablers are intended to be used to implement deterministic filters; +// concerns like sampling are better implemented as a Core. +// +// Each concrete Level value implements a static LevelEnabler which returns +// true for itself and all higher logging levels. For example WarnLevel.Enabled() +// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and +// FatalLevel, but return false for InfoLevel and DebugLevel. +type LevelEnabler interface { + Enabled(Level) bool +} diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go new file mode 100644 index 00000000000..7af8dadcb37 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/level_strings.go @@ -0,0 +1,46 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/zap/internal/color" + +var ( + _levelToColor = map[Level]color.Color{ + DebugLevel: color.Magenta, + InfoLevel: color.Blue, + WarnLevel: color.Yellow, + ErrorLevel: color.Red, + DPanicLevel: color.Red, + PanicLevel: color.Red, + FatalLevel: color.Red, + } + _unknownLevelColor = color.Red + + _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor)) + _levelToCapitalColorString = make(map[Level]string, len(_levelToColor)) +) + +func init() { + for level, color := range _levelToColor { + _levelToLowercaseColorString[level] = color.Add(level.String()) + _levelToCapitalColorString[level] = color.Add(level.CapitalString()) + } +} diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go new file mode 100644 index 00000000000..c3c55ba0d9c --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/marshaler.go @@ -0,0 +1,61 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +// ObjectMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ObjectMarshaler is only used when zap.Object is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ObjectMarshaler interface { + MarshalLogObject(ObjectEncoder) error +} + +// ObjectMarshalerFunc is a type adapter that turns a function into an +// ObjectMarshaler. +type ObjectMarshalerFunc func(ObjectEncoder) error + +// MarshalLogObject calls the underlying function. +func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error { + return f(enc) +} + +// ArrayMarshaler allows user-defined types to efficiently add themselves to the +// logging context, and to selectively omit information which shouldn't be +// included in logs (e.g., passwords). +// +// Note: ArrayMarshaler is only used when zap.Array is used or when +// passed directly to zap.Any. It is not used when reflection-based +// encoding is used. +type ArrayMarshaler interface { + MarshalLogArray(ArrayEncoder) error +} + +// ArrayMarshalerFunc is a type adapter that turns a function into an +// ArrayMarshaler. +type ArrayMarshalerFunc func(ArrayEncoder) error + +// MarshalLogArray calls the underlying function. +func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error { + return f(enc) +} diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go new file mode 100644 index 00000000000..dfead0829d6 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go @@ -0,0 +1,179 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "time" + +// MapObjectEncoder is an ObjectEncoder backed by a simple +// map[string]interface{}. It's not fast enough for production use, but it's +// helpful in tests. +type MapObjectEncoder struct { + // Fields contains the entire encoded log context. + Fields map[string]interface{} + // cur is a pointer to the namespace we're currently writing to. + cur map[string]interface{} +} + +// NewMapObjectEncoder creates a new map-backed ObjectEncoder. +func NewMapObjectEncoder() *MapObjectEncoder { + m := make(map[string]interface{}) + return &MapObjectEncoder{ + Fields: m, + cur: m, + } +} + +// AddArray implements ObjectEncoder. +func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error { + arr := &sliceArrayEncoder{elems: make([]interface{}, 0)} + err := v.MarshalLogArray(arr) + m.cur[key] = arr.elems + return err +} + +// AddObject implements ObjectEncoder. +func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error { + newMap := NewMapObjectEncoder() + m.cur[k] = newMap.Fields + return v.MarshalLogObject(newMap) +} + +// AddBinary implements ObjectEncoder. +func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v } + +// AddByteString implements ObjectEncoder. +func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) } + +// AddBool implements ObjectEncoder. +func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v } + +// AddDuration implements ObjectEncoder. +func (m MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v } + +// AddComplex128 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v } + +// AddComplex64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v } + +// AddFloat64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v } + +// AddFloat32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v } + +// AddInt implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v } + +// AddInt64 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v } + +// AddInt32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v } + +// AddInt16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v } + +// AddInt8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v } + +// AddString implements ObjectEncoder. +func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v } + +// AddTime implements ObjectEncoder. +func (m MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v } + +// AddUint implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v } + +// AddUint64 implements ObjectEncoder. 
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v } + +// AddUint32 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v } + +// AddUint16 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v } + +// AddUint8 implements ObjectEncoder. +func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v } + +// AddUintptr implements ObjectEncoder. +func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v } + +// AddReflected implements ObjectEncoder. +func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error { + m.cur[k] = v + return nil +} + +// OpenNamespace implements ObjectEncoder. +func (m *MapObjectEncoder) OpenNamespace(k string) { + ns := make(map[string]interface{}) + m.cur[k] = ns + m.cur = ns +} + +// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like +// the MapObjectEncoder, it's not designed for production use. +type sliceArrayEncoder struct { + elems []interface{} +} + +func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error { + enc := &sliceArrayEncoder{} + err := v.MarshalLogArray(enc) + s.elems = append(s.elems, enc.elems) + return err +} + +func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error { + m := NewMapObjectEncoder() + err := v.MarshalLogObject(m) + s.elems = append(s.elems, m.Fields) + return err +} + +func (s *sliceArrayEncoder) AppendReflected(v interface{}) error { + s.elems = append(s.elems, v) + return nil +} + +func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) } +func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) } +func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) } diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go new file mode 100644 index 00000000000..8746360eca6 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go @@ -0,0 +1,41 @@ +// Copyright (c) 2016 
Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "encoding/json" + "io" +) + +// ReflectedEncoder serializes log fields that can't be serialized with Zap's +// JSON encoder. These have the ReflectType field type. +// Use EncoderConfig.NewReflectedEncoder to set this. +type ReflectedEncoder interface { + // Encode encodes and writes to the underlying data stream. + Encode(interface{}) error +} + +func defaultReflectedEncoder(w io.Writer) ReflectedEncoder { + enc := json.NewEncoder(w) + // For consistency with our custom JSON encoder. + enc.SetEscapeHTML(false) + return enc +} diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go new file mode 100644 index 00000000000..b7c093a4f2d --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/sampler.go @@ -0,0 +1,229 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
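Because defaultReflectedEncoder above turns HTML escaping off, a caller who wants the encoding/json default back can plug in their own constructor through EncoderConfig.NewReflectedEncoder. A sketch, with the field value chosen only to show the difference:

package main

import (
	"encoding/json"
	"fmt"
	"io"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()
	// Unlike the package default shown above, keep HTML escaping enabled.
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w)
		enc.SetEscapeHTML(true)
		return enc
	}
	enc := zapcore.NewJSONEncoder(cfg)
	buf, err := enc.EncodeEntry(
		zapcore.Entry{Message: "reflect"},
		[]zapcore.Field{zap.Reflect("html", "<b>bold</b>")},
	)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // ..."html":"\u003cb\u003ebold\u003c/b\u003e"}
}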
+ +package zapcore + +import ( + "sync/atomic" + "time" +) + +const ( + _numLevels = _maxLevel - _minLevel + 1 + _countersPerLevel = 4096 +) + +type counter struct { + resetAt atomic.Int64 + counter atomic.Uint64 +} + +type counters [_numLevels][_countersPerLevel]counter + +func newCounters() *counters { + return &counters{} +} + +func (cs *counters) get(lvl Level, key string) *counter { + i := lvl - _minLevel + j := fnv32a(key) % _countersPerLevel + return &cs[i][j] +} + +// fnv32a, adapted from "hash/fnv", but without a []byte(string) alloc +func fnv32a(s string) uint32 { + const ( + offset32 = 2166136261 + prime32 = 16777619 + ) + hash := uint32(offset32) + for i := 0; i < len(s); i++ { + hash ^= uint32(s[i]) + hash *= prime32 + } + return hash +} + +func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 { + tn := t.UnixNano() + resetAfter := c.resetAt.Load() + if resetAfter > tn { + return c.counter.Add(1) + } + + c.counter.Store(1) + + newResetAfter := tn + tick.Nanoseconds() + if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) { + // We raced with another goroutine trying to reset, and it also reset + // the counter to 1, so we need to reincrement the counter. + return c.counter.Add(1) + } + + return 1 +} + +// SamplingDecision is a decision represented as a bit field made by sampler. +// More decisions may be added in the future. +type SamplingDecision uint32 + +const ( + // LogDropped indicates that the Sampler dropped a log entry. + LogDropped SamplingDecision = 1 << iota + // LogSampled indicates that the Sampler sampled a log entry. + LogSampled +) + +// optionFunc wraps a func so it satisfies the SamplerOption interface. +type optionFunc func(*sampler) + +func (f optionFunc) apply(s *sampler) { + f(s) +} + +// SamplerOption configures a Sampler. +type SamplerOption interface { + apply(*sampler) +} + +// nopSamplingHook is the default hook used by sampler. +func nopSamplingHook(Entry, SamplingDecision) {} + +// SamplerHook registers a function which will be called when Sampler makes a +// decision. +// +// This hook may be used to get visibility into the performance of the sampler. +// For example, use it to track metrics of dropped versus sampled logs. +// +// var dropped atomic.Int64 +// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) { +// if dec&zapcore.LogDropped > 0 { +// dropped.Inc() +// } +// }) +func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption { + return optionFunc(func(s *sampler) { + s.hook = hook + }) +} + +// NewSamplerWithOptions creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// For example, +// +// core = NewSamplerWithOptions(core, time.Second, 10, 5) +// +// This will log the first 10 log entries with the same level and message +// in a one second interval as-is. Following that, it will allow through +// every 5th log entry with the same level and message in that interval. +// +// If thereafter is zero, the Core will drop all log entries after the first N +// in that interval. +// +// Sampler can be configured to report sampling decisions with the SamplerHook +// option. 
+// +// Keep in mind that Zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core { + s := &sampler{ + Core: core, + tick: tick, + counts: newCounters(), + first: uint64(first), + thereafter: uint64(thereafter), + hook: nopSamplingHook, + } + for _, opt := range opts { + opt.apply(s) + } + + return s +} + +type sampler struct { + Core + + counts *counters + tick time.Duration + first, thereafter uint64 + hook func(Entry, SamplingDecision) +} + +var ( + _ Core = (*sampler)(nil) + _ leveledEnabler = (*sampler)(nil) +) + +// NewSampler creates a Core that samples incoming entries, which +// caps the CPU and I/O load of logging while attempting to preserve a +// representative subset of your logs. +// +// Zap samples by logging the first N entries with a given level and message +// each tick. If more Entries with the same level and message are seen during +// the same interval, every Mth message is logged and the rest are dropped. +// +// Keep in mind that zap's sampling implementation is optimized for speed over +// absolute precision; under load, each tick may be slightly over- or +// under-sampled. +// +// Deprecated: use NewSamplerWithOptions. +func NewSampler(core Core, tick time.Duration, first, thereafter int) Core { + return NewSamplerWithOptions(core, tick, first, thereafter) +} + +func (s *sampler) Level() Level { + return LevelOf(s.Core) +} + +func (s *sampler) With(fields []Field) Core { + return &sampler{ + Core: s.Core.With(fields), + tick: s.tick, + counts: s.counts, + first: s.first, + thereafter: s.thereafter, + hook: s.hook, + } +} + +func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + if !s.Enabled(ent.Level) { + return ce + } + + if ent.Level >= _minLevel && ent.Level <= _maxLevel { + counter := s.counts.get(ent.Level, ent.Message) + n := counter.IncCheckReset(ent.Time, s.tick) + if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) { + s.hook(ent, LogDropped) + return ce + } + s.hook(ent, LogSampled) + } + return s.Core.Check(ent, ce) +} diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go new file mode 100644 index 00000000000..9bb32f05576 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/tee.go @@ -0,0 +1,96 @@ +// Copyright (c) 2016-2022 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import "go.uber.org/multierr" + +type multiCore []Core + +var ( + _ leveledEnabler = multiCore(nil) + _ Core = multiCore(nil) +) + +// NewTee creates a Core that duplicates log entries into two or more +// underlying Cores. +// +// Calling it with a single Core returns the input unchanged, and calling +// it with no input returns a no-op Core. +func NewTee(cores ...Core) Core { + switch len(cores) { + case 0: + return NewNopCore() + case 1: + return cores[0] + default: + return multiCore(cores) + } +} + +func (mc multiCore) With(fields []Field) Core { + clone := make(multiCore, len(mc)) + for i := range mc { + clone[i] = mc[i].With(fields) + } + return clone +} + +func (mc multiCore) Level() Level { + minLvl := _maxLevel // mc is never empty + for i := range mc { + if lvl := LevelOf(mc[i]); lvl < minLvl { + minLvl = lvl + } + } + return minLvl +} + +func (mc multiCore) Enabled(lvl Level) bool { + for i := range mc { + if mc[i].Enabled(lvl) { + return true + } + } + return false +} + +func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry { + for i := range mc { + ce = mc[i].Check(ent, ce) + } + return ce +} + +func (mc multiCore) Write(ent Entry, fields []Field) error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Write(ent, fields)) + } + return err +} + +func (mc multiCore) Sync() error { + var err error + for i := range mc { + err = multierr.Append(err, mc[i].Sync()) + } + return err +} diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go new file mode 100644 index 00000000000..d4a1af3d078 --- /dev/null +++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go @@ -0,0 +1,122 @@ +// Copyright (c) 2016 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zapcore + +import ( + "io" + "sync" + + "go.uber.org/multierr" +) + +// A WriteSyncer is an io.Writer that can also flush any buffered data. Note +// that *os.File (and thus, os.Stderr and os.Stdout) implement WriteSyncer. +type WriteSyncer interface { + io.Writer + Sync() error +} + +// AddSync converts an io.Writer to a WriteSyncer. 
It attempts to be +// intelligent: if the concrete type of the io.Writer implements WriteSyncer, +// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync. +func AddSync(w io.Writer) WriteSyncer { + switch w := w.(type) { + case WriteSyncer: + return w + default: + return writerWrapper{w} + } +} + +type lockedWriteSyncer struct { + sync.Mutex + ws WriteSyncer +} + +// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In +// particular, *os.Files must be locked before use. +func Lock(ws WriteSyncer) WriteSyncer { + if _, ok := ws.(*lockedWriteSyncer); ok { + // no need to layer on another lock + return ws + } + return &lockedWriteSyncer{ws: ws} +} + +func (s *lockedWriteSyncer) Write(bs []byte) (int, error) { + s.Lock() + n, err := s.ws.Write(bs) + s.Unlock() + return n, err +} + +func (s *lockedWriteSyncer) Sync() error { + s.Lock() + err := s.ws.Sync() + s.Unlock() + return err +} + +type writerWrapper struct { + io.Writer +} + +func (w writerWrapper) Sync() error { + return nil +} + +type multiWriteSyncer []WriteSyncer + +// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes +// and sync calls, much like io.MultiWriter. +func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer { + if len(ws) == 1 { + return ws[0] + } + return multiWriteSyncer(ws) +} + +// See https://golang.org/src/io/multi.go +// When not all underlying syncers write the same number of bytes, +// the smallest number is returned even though Write() is called on +// all of them. +func (ws multiWriteSyncer) Write(p []byte) (int, error) { + var writeErr error + nWritten := 0 + for _, w := range ws { + n, err := w.Write(p) + writeErr = multierr.Append(writeErr, err) + if nWritten == 0 && n != 0 { + nWritten = n + } else if n < nWritten { + nWritten = n + } + } + return nWritten, writeErr +} + +func (ws multiWriteSyncer) Sync() error { + var err error + for _, w := range ws { + err = multierr.Append(err, w.Sync()) + } + return err +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go new file mode 100644 index 00000000000..fc311609081 --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/base64.go @@ -0,0 +1,35 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bcrypt + +import "encoding/base64" + +const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + +var bcEncoding = base64.NewEncoding(alphabet) + +func base64Encode(src []byte) []byte { + n := bcEncoding.EncodedLen(len(src)) + dst := make([]byte, n) + bcEncoding.Encode(dst, src) + for dst[n-1] == '=' { + n-- + } + return dst[:n] +} + +func base64Decode(src []byte) ([]byte, error) { + numOfEquals := 4 - (len(src) % 4) + for i := 0; i < numOfEquals; i++ { + src = append(src, '=') + } + + dst := make([]byte, bcEncoding.DecodedLen(len(src))) + n, err := bcEncoding.Decode(dst, src) + if err != nil { + return nil, err + } + return dst[:n], nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go new file mode 100644 index 00000000000..5577c0f939a --- /dev/null +++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go @@ -0,0 +1,304 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
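Before the bcrypt sources: the write-syncer helpers above compose much like io.MultiWriter. A usage sketch, with the two destinations chosen arbitrarily for illustration:

package main

import (
	"bytes"
	"os"

	"go.uber.org/zap/zapcore"
)

func main() {
	var buf bytes.Buffer
	// Fan writes out to stderr and an in-memory buffer; Lock makes the
	// composite safe for concurrent use.
	ws := zapcore.Lock(zapcore.NewMultiWriteSyncer(
		zapcore.AddSync(os.Stderr), // *os.File already implements Sync
		zapcore.AddSync(&buf),      // bytes.Buffer gets a no-op Sync
	))
	_, _ = ws.Write([]byte("hello\n"))
	_ = ws.Sync()
}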
+ +// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing +// algorithm. See http://www.usenix.org/event/usenix99/provos/provos.pdf +package bcrypt // import "golang.org/x/crypto/bcrypt" + +// The code is a port of Provos and Mazières's C implementation. +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "io" + "strconv" + + "golang.org/x/crypto/blowfish" +) + +const ( + MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword + MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword + DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword +) + +// The error returned from CompareHashAndPassword when a password and hash do +// not match. +var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") + +// The error returned from CompareHashAndPassword when a hash is too short to +// be a bcrypt hash. +var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") + +// The error returned from CompareHashAndPassword when a hash was created with +// a bcrypt algorithm newer than this implementation. +type HashVersionTooNewError byte + +func (hv HashVersionTooNewError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) +} + +// The error returned from CompareHashAndPassword when a hash starts with something other than '$' +type InvalidHashPrefixError byte + +func (ih InvalidHashPrefixError) Error() string { + return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) +} + +type InvalidCostError int + +func (ic InvalidCostError) Error() string { + return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost) +} + +const ( + majorVersion = '2' + minorVersion = 'a' + maxSaltSize = 16 + maxCryptedHashSize = 23 + encodedSaltSize = 22 + encodedHashSize = 31 + minHashSize = 59 +) + +// magicCipherData is an IV for the 64 Blowfish encryption calls in +// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. +var magicCipherData = []byte{ + 0x4f, 0x72, 0x70, 0x68, + 0x65, 0x61, 0x6e, 0x42, + 0x65, 0x68, 0x6f, 0x6c, + 0x64, 0x65, 0x72, 0x53, + 0x63, 0x72, 0x79, 0x44, + 0x6f, 0x75, 0x62, 0x74, +} + +type hashed struct { + hash []byte + salt []byte + cost int // allowed range is MinCost to MaxCost + major byte + minor byte +} + +// ErrPasswordTooLong is returned when the password passed to +// GenerateFromPassword is too long (i.e. > 72 bytes). +var ErrPasswordTooLong = errors.New("bcrypt: password length exceeds 72 bytes") + +// GenerateFromPassword returns the bcrypt hash of the password at the given +// cost. If the cost given is less than MinCost, the cost will be set to +// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, +// to compare the returned hashed password with its cleartext version. +// GenerateFromPassword does not accept passwords longer than 72 bytes, which +// is the longest password bcrypt will operate on. 
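+//
+// A minimal usage sketch (illustrative only; error handling abbreviated):
+//
+//	hash, err := GenerateFromPassword([]byte("s3cret"), DefaultCost)
+//	if err != nil {
+//		// handle error
+//	}
+//	// ...later, at login time:
+//	if err := CompareHashAndPassword(hash, []byte("s3cret")); err != nil {
+//		// err is ErrMismatchedHashAndPassword on a wrong password,
+//		// or a parse error for a malformed hash.
+//	}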
+func GenerateFromPassword(password []byte, cost int) ([]byte, error) { + if len(password) > 72 { + return nil, ErrPasswordTooLong + } + p, err := newFromPassword(password, cost) + if err != nil { + return nil, err + } + return p.Hash(), nil +} + +// CompareHashAndPassword compares a bcrypt hashed password with its possible +// plaintext equivalent. Returns nil on success, or an error on failure. +func CompareHashAndPassword(hashedPassword, password []byte) error { + p, err := newFromHash(hashedPassword) + if err != nil { + return err + } + + otherHash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return err + } + + otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} + if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { + return nil + } + + return ErrMismatchedHashAndPassword +} + +// Cost returns the hashing cost used to create the given hashed +// password. When, in the future, the hashing cost of a password system needs +// to be increased in order to adjust for greater computational power, this +// function allows one to establish which passwords need to be updated. +func Cost(hashedPassword []byte) (int, error) { + p, err := newFromHash(hashedPassword) + if err != nil { + return 0, err + } + return p.cost, nil +} + +func newFromPassword(password []byte, cost int) (*hashed, error) { + if cost < MinCost { + cost = DefaultCost + } + p := new(hashed) + p.major = majorVersion + p.minor = minorVersion + + err := checkCost(cost) + if err != nil { + return nil, err + } + p.cost = cost + + unencodedSalt := make([]byte, maxSaltSize) + _, err = io.ReadFull(rand.Reader, unencodedSalt) + if err != nil { + return nil, err + } + + p.salt = base64Encode(unencodedSalt) + hash, err := bcrypt(password, p.cost, p.salt) + if err != nil { + return nil, err + } + p.hash = hash + return p, err +} + +func newFromHash(hashedSecret []byte) (*hashed, error) { + if len(hashedSecret) < minHashSize { + return nil, ErrHashTooShort + } + p := new(hashed) + n, err := p.decodeVersion(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + n, err = p.decodeCost(hashedSecret) + if err != nil { + return nil, err + } + hashedSecret = hashedSecret[n:] + + // The "+2" is here because we'll have to append at most 2 '=' to the salt + // when base64 decoding it in expensiveBlowfishSetup(). + p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) + copy(p.salt, hashedSecret[:encodedSaltSize]) + + hashedSecret = hashedSecret[encodedSaltSize:] + p.hash = make([]byte, len(hashedSecret)) + copy(p.hash, hashedSecret) + + return p, nil +} + +func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { + cipherData := make([]byte, len(magicCipherData)) + copy(cipherData, magicCipherData) + + c, err := expensiveBlowfishSetup(password, uint32(cost), salt) + if err != nil { + return nil, err + } + + for i := 0; i < 24; i += 8 { + for j := 0; j < 64; j++ { + c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) + } + } + + // Bug compatibility with C bcrypt implementations. We only encode 23 of + // the 24 bytes encrypted. + hsh := base64Encode(cipherData[:maxCryptedHashSize]) + return hsh, nil +} + +func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { + csalt, err := base64Decode(salt) + if err != nil { + return nil, err + } + + // Bug compatibility with C bcrypt implementations. They use the trailing + // NULL in the key string during expansion. + // We copy the key to prevent changing the underlying array. 
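+	// The three-index slice expression key[:len(key):len(key)] below caps
+	// the slice's capacity at its length, so the append always allocates a
+	// fresh backing array instead of writing the extra NUL into the
+	// caller's storage.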
+ ckey := append(key[:len(key):len(key)], 0) + + c, err := blowfish.NewSaltedCipher(ckey, csalt) + if err != nil { + return nil, err + } + + var i, rounds uint64 + rounds = 1 << cost + for i = 0; i < rounds; i++ { + blowfish.ExpandKey(ckey, c) + blowfish.ExpandKey(csalt, c) + } + + return c, nil +} + +func (p *hashed) Hash() []byte { + arr := make([]byte, 60) + arr[0] = '$' + arr[1] = p.major + n := 2 + if p.minor != 0 { + arr[2] = p.minor + n = 3 + } + arr[n] = '$' + n++ + copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) + n += 2 + arr[n] = '$' + n++ + copy(arr[n:], p.salt) + n += encodedSaltSize + copy(arr[n:], p.hash) + n += encodedHashSize + return arr[:n] +} + +func (p *hashed) decodeVersion(sbytes []byte) (int, error) { + if sbytes[0] != '$' { + return -1, InvalidHashPrefixError(sbytes[0]) + } + if sbytes[1] > majorVersion { + return -1, HashVersionTooNewError(sbytes[1]) + } + p.major = sbytes[1] + n := 3 + if sbytes[2] != '$' { + p.minor = sbytes[2] + n++ + } + return n, nil +} + +// sbytes should begin where decodeVersion left off. +func (p *hashed) decodeCost(sbytes []byte) (int, error) { + cost, err := strconv.Atoi(string(sbytes[0:2])) + if err != nil { + return -1, err + } + err = checkCost(cost) + if err != nil { + return -1, err + } + p.cost = cost + return 3, nil +} + +func (p *hashed) String() string { + return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) +} + +func checkCost(cost int) error { + if cost < MinCost || cost > MaxCost { + return InvalidCostError(cost) + } + return nil +} diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go new file mode 100644 index 00000000000..f3c3242a047 --- /dev/null +++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go @@ -0,0 +1,173 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package secretbox encrypts and authenticates small messages. + +Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with +secret-key cryptography. The length of messages is not hidden. + +It is the caller's responsibility to ensure the uniqueness of nonces—for +example, by using nonce 1 for the first message, nonce 2 for the second +message, etc. Nonces are long enough that randomly generated nonces have +negligible risk of collision. + +Messages should be small because: + +1. The whole message needs to be held in memory to be processed. + +2. Using large messages pressures implementations on small machines to decrypt +and process plaintext before authenticating it. This is very dangerous, and +this API does not allow it, but a protocol that uses excessive message sizes +might present some implementations with no other choice. + +3. Fixed overheads will be sufficiently amortised by messages as small as 8KB. + +4. Performance may be improved by working with messages that fit into data caches. + +Thus large amounts of data should be chunked so that each message is small. +(Each message still needs a unique nonce.) If in doubt, 16KB is a reasonable +chunk size. + +This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html. 
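+
+A minimal round trip might look like the following sketch, where key is an
+agreed *[32]byte secret and errors are elided for brevity (uses crypto/rand
+and io):
+
+	var nonce [24]byte
+	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
+		panic(err)
+	}
+	box := secretbox.Seal(nonce[:], []byte("hello"), &nonce, key) // nonce || ciphertext
+	plain, ok := secretbox.Open(nil, box[24:], &nonce, key)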
+*/ +package secretbox // import "golang.org/x/crypto/nacl/secretbox" + +import ( + "golang.org/x/crypto/internal/alias" + "golang.org/x/crypto/internal/poly1305" + "golang.org/x/crypto/salsa20/salsa" +) + +// Overhead is the number of bytes of overhead when boxing a message. +const Overhead = poly1305.TagSize + +// setup produces a sub-key and Salsa20 counter given a nonce and key. +func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) { + // We use XSalsa20 for encryption so first we need to generate a + // key and nonce with HSalsa20. + var hNonce [16]byte + copy(hNonce[:], nonce[:]) + salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma) + + // The final 8 bytes of the original nonce form the new nonce. + copy(counter[:], nonce[16:]) +} + +// sliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func sliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} + +// Seal appends an encrypted and authenticated copy of message to out, which +// must not overlap message. The key and nonce pair must be unique for each +// distinct message and the output will be Overhead bytes longer than message. +func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte { + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. + var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + + ret, out := sliceForAppend(out, len(message)+poly1305.TagSize) + if alias.AnyOverlap(out, message) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of message with the keystream generated from + // the first block. + firstMessageBlock := message + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + + tagOut := out + out = out[poly1305.TagSize:] + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + message = message[len(firstMessageBlock):] + ciphertext := out + out = out[len(firstMessageBlock):] + + // Now encrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, message, &counter, &subKey) + + var tag [poly1305.TagSize]byte + poly1305.Sum(&tag, ciphertext, &poly1305Key) + copy(tagOut, tag[:]) + + return ret +} + +// Open authenticates and decrypts a box produced by Seal and appends the +// message to out, which must not overlap box. The output will be Overhead +// bytes smaller than box. +func Open(out, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) { + if len(box) < Overhead { + return nil, false + } + + var subKey [32]byte + var counter [16]byte + setup(&subKey, &counter, nonce, key) + + // The Poly1305 key is generated by encrypting 32 bytes of zeros. Since + // Salsa20 works with 64-byte blocks, we also generate 32 bytes of + // keystream as a side effect. 
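+	// As in Seal above. Note that the tag is verified below before any
+	// plaintext is written to out.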
+ var firstBlock [64]byte + salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey) + + var poly1305Key [32]byte + copy(poly1305Key[:], firstBlock[:]) + var tag [poly1305.TagSize]byte + copy(tag[:], box) + + if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) { + return nil, false + } + + ret, out := sliceForAppend(out, len(box)-Overhead) + if alias.AnyOverlap(out, box) { + panic("nacl: invalid buffer overlap") + } + + // We XOR up to 32 bytes of box with the keystream generated from + // the first block. + box = box[Overhead:] + firstMessageBlock := box + if len(firstMessageBlock) > 32 { + firstMessageBlock = firstMessageBlock[:32] + } + for i, x := range firstMessageBlock { + out[i] = firstBlock[32+i] ^ x + } + + box = box[len(firstMessageBlock):] + out = out[len(firstMessageBlock):] + + // Now decrypt the rest. + counter[8] = 1 + salsa.XORKeyStream(out, box, &counter, &subKey) + + return ret, true +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000000..904b57e01d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter
+		// U_1 = PRF(password, salt || uint(i))
+		prf.Reset()
+		prf.Write(salt)
+		buf[0] = byte(block >> 24)
+		buf[1] = byte(block >> 16)
+		buf[2] = byte(block >> 8)
+		buf[3] = byte(block)
+		prf.Write(buf[:4])
+		dk = prf.Sum(dk)
+		T := dk[len(dk)-hashLen:]
+		copy(U, T)
+
+		// U_n = PRF(password, U_(n-1))
+		for n := 2; n <= iter; n++ {
+			prf.Reset()
+			prf.Write(U)
+			U = U[:0]
+			U = prf.Sum(U)
+			for x := range U {
+				T[x] ^= U[x]
+			}
+		}
+	}
+	return dk[:keyLen]
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
new file mode 100644
index 00000000000..233b8b62cc2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+	"errors"
+	"unicode/utf16"
+)
+
+// bmpString returns s encoded in UCS-2 with a zero terminator.
+func bmpString(s string) ([]byte, error) {
+	// References:
+	// https://tools.ietf.org/html/rfc7292#appendix-B.1
+	// https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+	//  - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes
+	//    EncodeRune returns 0xfffd if the rune does not need special encoding
+	//  - the above RFC provides the info that BMPStrings are NULL terminated.
+
+	ret := make([]byte, 0, 2*len(s)+2)
+
+	for _, r := range s {
+		if t, _ := utf16.EncodeRune(r); t != 0xfffd {
+			return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
+		}
+		ret = append(ret, byte(r/256), byte(r%256))
+	}
+
+	return append(ret, 0, 0), nil
+}
+
+func decodeBMPString(bmpString []byte) (string, error) {
+	if len(bmpString)%2 != 0 {
+		return "", errors.New("pkcs12: odd-length BMP string")
+	}
+
+	// strip terminator if present
+	if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+		bmpString = bmpString[:l-2]
+	}
+
+	s := make([]uint16, 0, len(bmpString)/2)
+	for len(bmpString) > 0 {
+		s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+		bmpString = bmpString[2:]
+	}
+
+	return string(utf16.Decode(s)), nil
+}
diff --git a/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/golang.org/x/crypto/pkcs12/crypto.go
new file mode 100644
index 00000000000..96f4a1a56ec
--- /dev/null
+++ b/vendor/golang.org/x/crypto/pkcs12/crypto.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+
+	"golang.org/x/crypto/pkcs12/internal/rc2"
+)
+
+var (
+	oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+	oidPBEWithSHAAnd40BitRC2CBC      = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
+)
+
+// pbeCipher is an abstraction of a PKCS#12 cipher.
+type pbeCipher interface {
+	// create returns a cipher.Block given a key.
+	create(key []byte) (cipher.Block, error)
+	// deriveKey returns a key derived from the given password and salt.
+	deriveKey(salt, password []byte, iterations int) []byte
+	// deriveIV returns an IV derived from the given password and salt.
+ deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if !bytes.Equal(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 00000000000..7377ce6fb2b --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. 
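+	// It is returned, for example, when the trailing pad bytes recovered
+	// by pbDecrypt (crypto.go) are inconsistent with the padding length.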
+ ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 00000000000..05de9cc2cdc --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,268 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" + "math/bits" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { 
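+		// This backward pass rewrites each l[i] from bytes that already
+		// depend on the tm-masked byte set above, propagating the reduced
+		// effective key length through the whole table (RFC 2268 key
+		// expansion).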
+ l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = bits.RotateLeft16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = bits.RotateLeft16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = bits.RotateLeft16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = bits.RotateLeft16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = bits.RotateLeft16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = bits.RotateLeft16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = 
bits.RotateLeft16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = bits.RotateLeft16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 00000000000..5f38aa7de83 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 00000000000..5c419d41e32 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). The + // values for u and v are as follows: + + // HASH FUNCTION VALUE u VALUE v + // MD2, MD5 128 512 + // SHA-1 160 512 + // SHA-224 224 512 + // SHA-256 256 512 + // SHA-384 384 1024 + // SHA-512 512 1024 + // SHA-512/224 224 1024 + // SHA-512/256 256 1024 + + // Furthermore, let r be the iteration count. 
+
+	// We assume here that u and v are both multiples of 8, as are the
+	// lengths of the password and salt strings (which we denote by p and s,
+	// respectively) and the number n of pseudorandom bits required. In
+	// addition, u and v are of course non-zero.
+
+	// For information on security considerations for MD5 [19], see [25] and
+	// [1], and on those for MD2, see [18].
+
+	// The following procedure can be used to produce pseudorandom bits for
+	// a particular "purpose" that is identified by a byte called "ID".
+	// This standard specifies 3 different values for the ID byte:
+
+	// 1. If ID=1, then the pseudorandom bits being produced are to be used
+	// as key material for performing encryption or decryption.
+
+	// 2. If ID=2, then the pseudorandom bits being produced are to be used
+	// as an IV (Initial Value) for encryption or decryption.
+
+	// 3. If ID=3, then the pseudorandom bits being produced are to be used
+	// as an integrity key for MACing.
+
+	// 1. Construct a string, D (the "diversifier"), by concatenating v/8
+	// copies of ID.
+	var D []byte
+	for i := 0; i < v; i++ {
+		D = append(D, ID)
+	}
+
+	// 2. Concatenate copies of the salt together to create a string S of
+	// length v(ceiling(s/v)) bits (the final copy of the salt may be
+	// truncated to create S). Note that if the salt is the empty
+	// string, then so is S.
+
+	S := fillWithRepeats(salt, v)
+
+	// 3. Concatenate copies of the password together to create a string P
+	// of length v(ceiling(p/v)) bits (the final copy of the password
+	// may be truncated to create P). Note that if the password is the
+	// empty string, then so is P.
+
+	P := fillWithRepeats(password, v)
+
+	// 4. Set I=S||P to be the concatenation of S and P.
+	I := append(S, P...)
+
+	// 5. Set c=ceiling(n/u).
+	c := (size + u - 1) / u
+
+	// 6. For i=1, 2, ..., c, do the following:
+	A := make([]byte, c*20)
+	var IjBuf []byte
+	for i := 0; i < c; i++ {
+		// A. Set A_i=H^r(D||I) (i.e., the r-th hash of D||I:
+		// H(H(H(... H(D||I))))).
+		Ai := hash(append(D, I...))
+		for j := 1; j < r; j++ {
+			Ai = hash(Ai)
+		}
+		copy(A[i*20:], Ai[:])
+
+		if i < c-1 { // skip on last iteration
+			// B. Concatenate copies of Ai to create a string B of length v
+			// bits (the final copy of Ai may be truncated to create B).
+			var B []byte
+			for len(B) < v {
+				B = append(B, Ai[:]...)
+			}
+			B = B[:v]
+
+			// C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
+			// blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
+			// setting I_j=(I_j+B+1) mod 2^v for each j.
+			{
+				Bbi := new(big.Int).SetBytes(B)
+				Ij := new(big.Int)
+
+				for j := 0; j < len(I)/v; j++ {
+					Ij.SetBytes(I[j*v : (j+1)*v])
+					Ij.Add(Ij, Bbi)
+					Ij.Add(Ij, one)
+					Ijb := Ij.Bytes()
+					// We expect Ijb to be exactly v bytes,
+					// if it is longer or shorter we must
+					// adjust it accordingly.
+					if len(Ijb) > v {
+						Ijb = Ijb[len(Ijb)-v:]
+					}
+					if len(Ijb) < v {
+						if IjBuf == nil {
+							IjBuf = make([]byte, v)
+						}
+						bytesShort := v - len(Ijb)
+						for i := 0; i < bytesShort; i++ {
+							IjBuf[i] = 0
+						}
+						copy(IjBuf[bytesShort:], Ijb)
+						Ijb = IjBuf
+					}
+					copy(I[j*v:(j+1)*v], Ijb)
+				}
+			}
+		}
+	}
+	// 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
+	// bit string, A.
+
+	// 8. Use the first n bits of A as the output of this entire process.
+	return A[:size]
+
+	// If the above process is being used to generate a DES key, the process
+	// should be used to create 64 random bits, and the key's parity bits
+	// should be set after the 64 bits have been produced.
Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 00000000000..3a89bdb3e39 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,360 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) + + errUnknownAttributeOID = errors.New("pkcs12: unknown attribute OID") +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any +// trailing data after unmarshaling. +func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. +// Unknown attributes are discarded. 
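+//
+// A typical pipeline (sketch; pfxData and password are assumed inputs and
+// error handling is elided) re-encodes the blocks for use with crypto/tls:
+//
+//	blocks, _ := pkcs12.ToPEM(pfxData, password)
+//	var pemData []byte
+//	for _, b := range blocks {
+//		pemData = append(pemData, pem.EncodeToMemory(b)...)
+//	}
+//	cert, _ := tls.X509KeyPair(pemData, pemData)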
+// +// Note that although the returned PEM blocks for private keys have type +// "PRIVATE KEY", the bytes are not encoded according to PKCS #8, but according +// to PKCS #1 for RSA keys and SEC 1 for ECDSA keys. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err == errUnknownAttributeOID { + continue + } + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errUnknownAttributeOID + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData; if there are more use ToPEM instead. 
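+//
+// For example (sketch; error handling elided):
+//
+//	key, cert, err := pkcs12.Decode(pfxData, "password")
+//	if err != nil {
+//		// archives with more than one key or certificate fail here;
+//		// fall back to ToPEM.
+//	}
+//	priv, _ := key.(*rsa.PrivateKey) // key is interface{}; type-assert as needed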
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + return nil, nil, err + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = 
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} diff --git a/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 00000000000..def1f7b98d7 --- /dev/null +++ b/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 00000000000..3fd05b27516 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,146 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa // import "golang.org/x/crypto/salsa20/salsa" + +import "math/bits" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. 
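+//
+// For example, an XSalsa20 construction (as in nacl/secretbox's setup above)
+// derives a per-message subkey from a 24-byte nonce roughly as:
+//
+//	var hNonce [16]byte
+//	var subKey [32]byte
+//	copy(hNonce[:], nonce[:16])
+//	salsa.HSalsa20(&subKey, &hNonce, key, &salsa.Sigma)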
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + 
out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 00000000000..7ec7bb39bc0 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,201 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. +func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= 
bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 00000000000..e76b44fe59e --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +package salsa + +//go:noescape + +// salsa2020XORKeyStream is implemented in salsa20_amd64.s. +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). 
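+// A minimal usage sketch (illustrative only, not part of the upstream file;
+// per genericXORKeyStream, the first 8 counter bytes carry the nonce and the
+// last 8 the little-endian block counter):
+//
+//	var key [32]byte
+//	var counter [16]byte // counter[0:8] = nonce, counter[8:16] = block number
+//	msg := []byte("attack at dawn")
+//	XORKeyStream(msg, msg, &counter, &key) // in-place; applying it again decrypts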
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s new file mode 100644 index 00000000000..fcce0234b69 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.s @@ -0,0 +1,880 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && !purego && gc + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) +// This needs up to 64 bytes at 360(R12); hence the non-obvious frame size. +TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment + MOVQ out+0(FP),DI + MOVQ in+8(FP),SI + MOVQ n+16(FP),DX + MOVQ nonce+24(FP),CX + MOVQ key+32(FP),R8 + + MOVQ SP,R12 + ADDQ $31, R12 + ANDQ $~31, R12 + + MOVQ DX,R9 + MOVQ CX,DX + MOVQ R8,R10 + CMPQ R9,$0 + JBE DONE + START: + MOVL 20(R10),CX + MOVL 0(R10),R8 + MOVL 0(DX),AX + MOVL 16(R10),R11 + MOVL CX,0(R12) + MOVL R8, 4 (R12) + MOVL AX, 8 (R12) + MOVL R11, 12 (R12) + MOVL 8(DX),CX + MOVL 24(R10),R8 + MOVL 4(R10),AX + MOVL 4(DX),R11 + MOVL CX,16(R12) + MOVL R8, 20 (R12) + MOVL AX, 24 (R12) + MOVL R11, 28 (R12) + MOVL 12(DX),CX + MOVL 12(R10),DX + MOVL 28(R10),R8 + MOVL 8(R10),AX + MOVL DX,32(R12) + MOVL CX, 36 (R12) + MOVL R8, 40 (R12) + MOVL AX, 44 (R12) + MOVQ $1634760805,DX + MOVQ $857760878,CX + MOVQ $2036477234,R8 + MOVQ $1797285236,AX + MOVL DX,48(R12) + MOVL CX, 52 (R12) + MOVL R8, 56 (R12) + MOVL AX, 60 (R12) + CMPQ R9,$256 + JB BYTESBETWEEN1AND255 + MOVOA 48(R12),X0 + PSHUFL $0X55,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X3 + PSHUFL $0X00,X0,X0 + MOVOA X1,64(R12) + MOVOA X2,80(R12) + MOVOA X3,96(R12) + MOVOA X0,112(R12) + MOVOA 0(R12),X0 + PSHUFL $0XAA,X0,X1 + PSHUFL $0XFF,X0,X2 + PSHUFL $0X00,X0,X3 + PSHUFL $0X55,X0,X0 + MOVOA X1,128(R12) + MOVOA X2,144(R12) + MOVOA X3,160(R12) + MOVOA X0,176(R12) + MOVOA 16(R12),X0 + PSHUFL $0XFF,X0,X1 + PSHUFL $0X55,X0,X2 + PSHUFL $0XAA,X0,X0 + MOVOA X1,192(R12) + MOVOA X2,208(R12) + MOVOA X0,224(R12) + MOVOA 32(R12),X0 + PSHUFL $0X00,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X0 + MOVOA X1,240(R12) + MOVOA X2,256(R12) + MOVOA X0,272(R12) + BYTESATLEAST256: + MOVL 16(R12),DX + MOVL 36 (R12),CX + MOVL DX,288(R12) + MOVL CX,304(R12) + SHLQ $32,CX + ADDQ CX,DX + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 292 (R12) + MOVL CX, 308 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 296 (R12) + MOVL CX, 312 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 300 (R12) + MOVL CX, 316 (R12) + ADDQ $1,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX,16(R12) + MOVL CX, 36 (R12) + MOVQ R9,352(R12) + MOVQ $20,DX + MOVOA 64(R12),X0 + MOVOA 80(R12),X1 + MOVOA 96(R12),X2 + MOVOA 256(R12),X3 + MOVOA 272(R12),X4 + MOVOA 128(R12),X5 + MOVOA 144(R12),X6 + MOVOA 176(R12),X7 + MOVOA 192(R12),X8 + MOVOA 208(R12),X9 + MOVOA 224(R12),X10 + MOVOA 304(R12),X11 + MOVOA 112(R12),X12 + MOVOA 160(R12),X13 + MOVOA 240(R12),X14 + MOVOA 288(R12),X15 + MAINLOOP1: + MOVOA X1,320(R12) + MOVOA X2,336(R12) + MOVOA X13,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X14 + PSRLL $25,X2 + PXOR X2,X14 + MOVOA X7,X1 + 
PADDL X0,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X11 + PSRLL $25,X2 + PXOR X2,X11 + MOVOA X12,X1 + PADDL X14,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X15 + PSRLL $23,X2 + PXOR X2,X15 + MOVOA X0,X1 + PADDL X11,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X9 + PSRLL $23,X2 + PXOR X2,X9 + MOVOA X14,X1 + PADDL X15,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X13 + PSRLL $19,X2 + PXOR X2,X13 + MOVOA X11,X1 + PADDL X9,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X7 + PSRLL $19,X2 + PXOR X2,X7 + MOVOA X15,X1 + PADDL X13,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA 320(R12),X1 + MOVOA X12,320(R12) + MOVOA X9,X2 + PADDL X7,X2 + MOVOA X2,X12 + PSLLL $18,X2 + PXOR X2,X0 + PSRLL $14,X12 + PXOR X12,X0 + MOVOA X5,X2 + PADDL X1,X2 + MOVOA X2,X12 + PSLLL $7,X2 + PXOR X2,X3 + PSRLL $25,X12 + PXOR X12,X3 + MOVOA 336(R12),X2 + MOVOA X0,336(R12) + MOVOA X6,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X4 + PSRLL $25,X12 + PXOR X12,X4 + MOVOA X1,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X10 + PSRLL $23,X12 + PXOR X12,X10 + MOVOA X2,X0 + PADDL X4,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X8 + PSRLL $23,X12 + PXOR X12,X8 + MOVOA X3,X0 + PADDL X10,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X5 + PSRLL $19,X12 + PXOR X12,X5 + MOVOA X4,X0 + PADDL X8,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X6 + PSRLL $19,X12 + PXOR X12,X6 + MOVOA X10,X0 + PADDL X5,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA 320(R12),X0 + MOVOA X1,320(R12) + MOVOA X4,X1 + PADDL X0,X1 + MOVOA X1,X12 + PSLLL $7,X1 + PXOR X1,X7 + PSRLL $25,X12 + PXOR X12,X7 + MOVOA X8,X1 + PADDL X6,X1 + MOVOA X1,X12 + PSLLL $18,X1 + PXOR X1,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 336(R12),X12 + MOVOA X2,336(R12) + MOVOA X14,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X5 + PSRLL $25,X2 + PXOR X2,X5 + MOVOA X0,X1 + PADDL X7,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X10 + PSRLL $23,X2 + PXOR X2,X10 + MOVOA X12,X1 + PADDL X5,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X8 + PSRLL $23,X2 + PXOR X2,X8 + MOVOA X7,X1 + PADDL X10,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X4 + PSRLL $19,X2 + PXOR X2,X4 + MOVOA X5,X1 + PADDL X8,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X14 + PSRLL $19,X2 + PXOR X2,X14 + MOVOA X10,X1 + PADDL X4,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X0 + PSRLL $14,X2 + PXOR X2,X0 + MOVOA 320(R12),X1 + MOVOA X0,320(R12) + MOVOA X8,X0 + PADDL X14,X0 + MOVOA X0,X2 + PSLLL $18,X0 + PXOR X0,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA X11,X0 + PADDL X1,X0 + MOVOA X0,X2 + PSLLL $7,X0 + PXOR X0,X6 + PSRLL $25,X2 + PXOR X2,X6 + MOVOA 336(R12),X2 + MOVOA X12,336(R12) + MOVOA X3,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X13 + PSRLL $25,X12 + PXOR X12,X13 + MOVOA X1,X0 + PADDL X6,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X15 + PSRLL $23,X12 + PXOR X12,X15 + MOVOA X2,X0 + PADDL X13,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X9 + PSRLL $23,X12 + PXOR X12,X9 + MOVOA X6,X0 + PADDL X15,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X11 + PSRLL $19,X12 + PXOR X12,X11 + MOVOA X13,X0 + PADDL X9,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X3 + PSRLL $19,X12 + PXOR X12,X3 + MOVOA X15,X0 + PADDL X11,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA X9,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 320(R12),X12 + MOVOA 336(R12),X0 + SUBQ $2,DX + JA MAINLOOP1 + PADDL 112(R12),X12 + PADDL 176(R12),X7 + PADDL 224(R12),X10 + PADDL 272(R12),X4 + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + 
PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 0(SI),DX + XORL 4(SI),CX + XORL 8(SI),R8 + XORL 12(SI),R9 + MOVL DX,0(DI) + MOVL CX,4(DI) + MOVL R8,8(DI) + MOVL R9,12(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 64(SI),DX + XORL 68(SI),CX + XORL 72(SI),R8 + XORL 76(SI),R9 + MOVL DX,64(DI) + MOVL CX,68(DI) + MOVL R8,72(DI) + MOVL R9,76(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 128(SI),DX + XORL 132(SI),CX + XORL 136(SI),R8 + XORL 140(SI),R9 + MOVL DX,128(DI) + MOVL CX,132(DI) + MOVL R8,136(DI) + MOVL R9,140(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + XORL 192(SI),DX + XORL 196(SI),CX + XORL 200(SI),R8 + XORL 204(SI),R9 + MOVL DX,192(DI) + MOVL CX,196(DI) + MOVL R8,200(DI) + MOVL R9,204(DI) + PADDL 240(R12),X14 + PADDL 64(R12),X0 + PADDL 128(R12),X5 + PADDL 192(R12),X8 + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 16(SI),DX + XORL 20(SI),CX + XORL 24(SI),R8 + XORL 28(SI),R9 + MOVL DX,16(DI) + MOVL CX,20(DI) + MOVL R8,24(DI) + MOVL R9,28(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 80(SI),DX + XORL 84(SI),CX + XORL 88(SI),R8 + XORL 92(SI),R9 + MOVL DX,80(DI) + MOVL CX,84(DI) + MOVL R8,88(DI) + MOVL R9,92(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 144(SI),DX + XORL 148(SI),CX + XORL 152(SI),R8 + XORL 156(SI),R9 + MOVL DX,144(DI) + MOVL CX,148(DI) + MOVL R8,152(DI) + MOVL R9,156(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + XORL 208(SI),DX + XORL 212(SI),CX + XORL 216(SI),R8 + XORL 220(SI),R9 + MOVL DX,208(DI) + MOVL CX,212(DI) + MOVL R8,216(DI) + MOVL R9,220(DI) + PADDL 288(R12),X15 + PADDL 304(R12),X11 + PADDL 80(R12),X1 + PADDL 144(R12),X6 + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 32(SI),DX + XORL 36(SI),CX + XORL 40(SI),R8 + XORL 44(SI),R9 + MOVL DX,32(DI) + MOVL CX,36(DI) + MOVL R8,40(DI) + MOVL R9,44(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 96(SI),DX + XORL 100(SI),CX + XORL 104(SI),R8 + XORL 108(SI),R9 + MOVL DX,96(DI) + MOVL CX,100(DI) + MOVL R8,104(DI) + MOVL R9,108(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 160(SI),DX + XORL 164(SI),CX + XORL 168(SI),R8 + XORL 172(SI),R9 + MOVL DX,160(DI) + MOVL CX,164(DI) + MOVL R8,168(DI) + MOVL R9,172(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + XORL 224(SI),DX + XORL 228(SI),CX + XORL 232(SI),R8 + XORL 236(SI),R9 + MOVL DX,224(DI) + MOVL CX,228(DI) + MOVL R8,232(DI) + MOVL R9,236(DI) + PADDL 160(R12),X13 + PADDL 208(R12),X9 + PADDL 256(R12),X3 + PADDL 96(R12),X2 + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 48(SI),DX + XORL 52(SI),CX + XORL 56(SI),R8 + XORL 60(SI),R9 + MOVL DX,48(DI) + MOVL CX,52(DI) + MOVL R8,56(DI) + 
MOVL R9,60(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 112(SI),DX + XORL 116(SI),CX + XORL 120(SI),R8 + XORL 124(SI),R9 + MOVL DX,112(DI) + MOVL CX,116(DI) + MOVL R8,120(DI) + MOVL R9,124(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 176(SI),DX + XORL 180(SI),CX + XORL 184(SI),R8 + XORL 188(SI),R9 + MOVL DX,176(DI) + MOVL CX,180(DI) + MOVL R8,184(DI) + MOVL R9,188(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + XORL 240(SI),DX + XORL 244(SI),CX + XORL 248(SI),R8 + XORL 252(SI),R9 + MOVL DX,240(DI) + MOVL CX,244(DI) + MOVL R8,248(DI) + MOVL R9,252(DI) + MOVQ 352(R12),R9 + SUBQ $256,R9 + ADDQ $256,SI + ADDQ $256,DI + CMPQ R9,$256 + JAE BYTESATLEAST256 + CMPQ R9,$0 + JBE DONE + BYTESBETWEEN1AND255: + CMPQ R9,$64 + JAE NOCOPY + MOVQ DI,DX + LEAQ 360(R12),DI + MOVQ R9,CX + REP; MOVSB + LEAQ 360(R12),DI + LEAQ 360(R12),SI + NOCOPY: + MOVQ R9,352(R12) + MOVOA 48(R12),X0 + MOVOA 0(R12),X1 + MOVOA 16(R12),X2 + MOVOA 32(R12),X3 + MOVOA X1,X4 + MOVQ $20,CX + MAINLOOP2: + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + SUBQ $4,CX + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PXOR X7,X7 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + JA MAINLOOP2 + PADDL 48(R12),X0 + PADDL 0(R12),X1 + PADDL 16(R12),X2 + PADDL 32(R12),X3 + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 0(SI),CX + XORL 48(SI),R8 + XORL 32(SI),R9 + XORL 16(SI),AX + MOVL CX,0(DI) + MOVL R8,48(DI) + MOVL R9,32(DI) + MOVL AX,16(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 20(SI),CX + XORL 
4(SI),R8 + XORL 52(SI),R9 + XORL 36(SI),AX + MOVL CX,20(DI) + MOVL R8,4(DI) + MOVL R9,52(DI) + MOVL AX,36(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 40(SI),CX + XORL 24(SI),R8 + XORL 8(SI),R9 + XORL 56(SI),AX + MOVL CX,40(DI) + MOVL R8,24(DI) + MOVL R9,8(DI) + MOVL AX,56(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + XORL 60(SI),CX + XORL 44(SI),R8 + XORL 28(SI),R9 + XORL 12(SI),AX + MOVL CX,60(DI) + MOVL R8,44(DI) + MOVL R9,28(DI) + MOVL AX,12(DI) + MOVQ 352(R12),R9 + MOVL 16(R12),CX + MOVL 36 (R12),R8 + ADDQ $1,CX + SHLQ $32,R8 + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $32,R8 + MOVL CX,16(R12) + MOVL R8, 36 (R12) + CMPQ R9,$64 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI,SI + MOVQ DX,DI + MOVQ R9,CX + REP; MOVSB + BYTESATLEAST64: + DONE: + RET + BYTESATLEAST65: + SUBQ $64,R9 + ADDQ $64,DI + ADDQ $64,SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go new file mode 100644 index 00000000000..9448760f26f --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_noasm.go @@ -0,0 +1,14 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc + +package salsa + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out must overlap entirely or not at all. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + genericXORKeyStream(out, in, counter, key) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 00000000000..e5cdb9a25be --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,233 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +import "math/bits" + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. 
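+// Each iteration of the loop below is one Salsa20 double round: the
+// quarter-round pattern (sketched here for reference only)
+//
+//	b ^= bits.RotateLeft32(a+d, 7)
+//	c ^= bits.RotateLeft32(b+a, 9)
+//	d ^= bits.RotateLeft32(c+b, 13)
+//	a ^= bits.RotateLeft32(d+c, 18)
+//
+// is applied first down the columns of the 4x4 state and then across its rows.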
+func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= bits.RotateLeft32(u, 7) + u = x4 + x0 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x4 + x12 ^= bits.RotateLeft32(u, 13) + u = x12 + x8 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x1 + x9 ^= bits.RotateLeft32(u, 7) + u = x9 + x5 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x9 + x1 ^= bits.RotateLeft32(u, 13) + u = x1 + x13 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x6 + x14 ^= bits.RotateLeft32(u, 7) + u = x14 + x10 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x14 + x6 ^= bits.RotateLeft32(u, 13) + u = x6 + x2 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x11 + x3 ^= bits.RotateLeft32(u, 7) + u = x3 + x15 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x3 + x11 ^= bits.RotateLeft32(u, 13) + u = x11 + x7 + x15 ^= bits.RotateLeft32(u, 18) + + u = x0 + x3 + x1 ^= bits.RotateLeft32(u, 7) + u = x1 + x0 + x2 ^= bits.RotateLeft32(u, 9) + u = x2 + x1 + x3 ^= bits.RotateLeft32(u, 13) + u = x3 + x2 + x0 ^= bits.RotateLeft32(u, 18) + + u = x5 + x4 + x6 ^= bits.RotateLeft32(u, 7) + u = x6 + x5 + x7 ^= bits.RotateLeft32(u, 9) + u = x7 + x6 + x4 ^= bits.RotateLeft32(u, 13) + u = x4 + x7 + x5 ^= bits.RotateLeft32(u, 18) + + u = x10 + x9 + x11 ^= bits.RotateLeft32(u, 7) + u = x11 + x10 + x8 ^= bits.RotateLeft32(u, 9) + u = x8 + x11 + x9 ^= bits.RotateLeft32(u, 13) + u = x9 + x8 + x10 ^= bits.RotateLeft32(u, 18) + + u = x15 + x14 + x12 ^= bits.RotateLeft32(u, 7) + u = x12 + x15 + x13 ^= bits.RotateLeft32(u, 9) + u = x13 + x12 + x14 ^= bits.RotateLeft32(u, 13) + u = x14 + x13 + x15 ^= bits.RotateLeft32(u, 18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] 
= byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// genericXORKeyStream is the generic implementation of XORKeyStream to be used +// when no assembly implementation is available. +func genericXORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/golang.org/x/crypto/scrypt/scrypt.go b/vendor/golang.org/x/crypto/scrypt/scrypt.go new file mode 100644 index 00000000000..c971a99fa67 --- /dev/null +++ b/vendor/golang.org/x/crypto/scrypt/scrypt.go @@ -0,0 +1,212 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scrypt implements the scrypt key derivation function as defined in +// Colin Percival's paper "Stronger Key Derivation via Sequential Memory-Hard +// Functions" (https://www.tarsnap.com/scrypt/scrypt.pdf). +package scrypt // import "golang.org/x/crypto/scrypt" + +import ( + "crypto/sha256" + "encoding/binary" + "errors" + "math/bits" + + "golang.org/x/crypto/pbkdf2" +) + +const maxInt = int(^uint(0) >> 1) + +// blockCopy copies n numbers from src into dst. +func blockCopy(dst, src []uint32, n int) { + copy(dst, src[:n]) +} + +// blockXOR XORs numbers from dst with n numbers from src. +func blockXOR(dst, src []uint32, n int) { + for i, v := range src[:n] { + dst[i] ^= v + } +} + +// salsaXOR applies Salsa20/8 to the XOR of 16 numbers from tmp and in, +// and puts the result into both tmp and out. 
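+// Conceptually (a sketch, not additional API), one call computes
+//
+//	tmp = Salsa20/8(tmp XOR in[0:16])
+//	out[0:16] = tmp
+//
+// which is the compression step of scrypt's BlockMix.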
+func salsaXOR(tmp *[16]uint32, in, out []uint32) { + w0 := tmp[0] ^ in[0] + w1 := tmp[1] ^ in[1] + w2 := tmp[2] ^ in[2] + w3 := tmp[3] ^ in[3] + w4 := tmp[4] ^ in[4] + w5 := tmp[5] ^ in[5] + w6 := tmp[6] ^ in[6] + w7 := tmp[7] ^ in[7] + w8 := tmp[8] ^ in[8] + w9 := tmp[9] ^ in[9] + w10 := tmp[10] ^ in[10] + w11 := tmp[11] ^ in[11] + w12 := tmp[12] ^ in[12] + w13 := tmp[13] ^ in[13] + w14 := tmp[14] ^ in[14] + w15 := tmp[15] ^ in[15] + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := w0, w1, w2, w3, w4, w5, w6, w7, w8 + x9, x10, x11, x12, x13, x14, x15 := w9, w10, w11, w12, w13, w14, w15 + + for i := 0; i < 8; i += 2 { + x4 ^= bits.RotateLeft32(x0+x12, 7) + x8 ^= bits.RotateLeft32(x4+x0, 9) + x12 ^= bits.RotateLeft32(x8+x4, 13) + x0 ^= bits.RotateLeft32(x12+x8, 18) + + x9 ^= bits.RotateLeft32(x5+x1, 7) + x13 ^= bits.RotateLeft32(x9+x5, 9) + x1 ^= bits.RotateLeft32(x13+x9, 13) + x5 ^= bits.RotateLeft32(x1+x13, 18) + + x14 ^= bits.RotateLeft32(x10+x6, 7) + x2 ^= bits.RotateLeft32(x14+x10, 9) + x6 ^= bits.RotateLeft32(x2+x14, 13) + x10 ^= bits.RotateLeft32(x6+x2, 18) + + x3 ^= bits.RotateLeft32(x15+x11, 7) + x7 ^= bits.RotateLeft32(x3+x15, 9) + x11 ^= bits.RotateLeft32(x7+x3, 13) + x15 ^= bits.RotateLeft32(x11+x7, 18) + + x1 ^= bits.RotateLeft32(x0+x3, 7) + x2 ^= bits.RotateLeft32(x1+x0, 9) + x3 ^= bits.RotateLeft32(x2+x1, 13) + x0 ^= bits.RotateLeft32(x3+x2, 18) + + x6 ^= bits.RotateLeft32(x5+x4, 7) + x7 ^= bits.RotateLeft32(x6+x5, 9) + x4 ^= bits.RotateLeft32(x7+x6, 13) + x5 ^= bits.RotateLeft32(x4+x7, 18) + + x11 ^= bits.RotateLeft32(x10+x9, 7) + x8 ^= bits.RotateLeft32(x11+x10, 9) + x9 ^= bits.RotateLeft32(x8+x11, 13) + x10 ^= bits.RotateLeft32(x9+x8, 18) + + x12 ^= bits.RotateLeft32(x15+x14, 7) + x13 ^= bits.RotateLeft32(x12+x15, 9) + x14 ^= bits.RotateLeft32(x13+x12, 13) + x15 ^= bits.RotateLeft32(x14+x13, 18) + } + x0 += w0 + x1 += w1 + x2 += w2 + x3 += w3 + x4 += w4 + x5 += w5 + x6 += w6 + x7 += w7 + x8 += w8 + x9 += w9 + x10 += w10 + x11 += w11 + x12 += w12 + x13 += w13 + x14 += w14 + x15 += w15 + + out[0], tmp[0] = x0, x0 + out[1], tmp[1] = x1, x1 + out[2], tmp[2] = x2, x2 + out[3], tmp[3] = x3, x3 + out[4], tmp[4] = x4, x4 + out[5], tmp[5] = x5, x5 + out[6], tmp[6] = x6, x6 + out[7], tmp[7] = x7, x7 + out[8], tmp[8] = x8, x8 + out[9], tmp[9] = x9, x9 + out[10], tmp[10] = x10, x10 + out[11], tmp[11] = x11, x11 + out[12], tmp[12] = x12, x12 + out[13], tmp[13] = x13, x13 + out[14], tmp[14] = x14, x14 + out[15], tmp[15] = x15, x15 +} + +func blockMix(tmp *[16]uint32, in, out []uint32, r int) { + blockCopy(tmp[:], in[(2*r-1)*16:], 16) + for i := 0; i < 2*r; i += 2 { + salsaXOR(tmp, in[i*16:], out[i*8:]) + salsaXOR(tmp, in[i*16+16:], out[i*8+r*16:]) + } +} + +func integer(b []uint32, r int) uint64 { + j := (2*r - 1) * 16 + return uint64(b[j]) | uint64(b[j+1])<<32 +} + +func smix(b []byte, r, N int, v, xy []uint32) { + var tmp [16]uint32 + R := 32 * r + x := xy + y := xy[R:] + + j := 0 + for i := 0; i < R; i++ { + x[i] = binary.LittleEndian.Uint32(b[j:]) + j += 4 + } + for i := 0; i < N; i += 2 { + blockCopy(v[i*R:], x, R) + blockMix(&tmp, x, y, r) + + blockCopy(v[(i+1)*R:], y, R) + blockMix(&tmp, y, x, r) + } + for i := 0; i < N; i += 2 { + j := int(integer(x, r) & uint64(N-1)) + blockXOR(x, v[j*R:], R) + blockMix(&tmp, x, y, r) + + j = int(integer(y, r) & uint64(N-1)) + blockXOR(y, v[j*R:], R) + blockMix(&tmp, y, x, r) + } + j = 0 + for _, v := range x[:R] { + binary.LittleEndian.PutUint32(b[j:], v) + j += 4 + } +} + +// Key derives a key from the password, salt, and cost parameters, returning 
+// a byte slice of length keyLen that can be used as a cryptographic key.
+//
+// N is a CPU/memory cost parameter, which must be a power of two greater than 1.
+// r and p must satisfy r * p < 2³⁰. If the parameters do not satisfy the
+// limits, the function returns a nil byte slice and an error.
+//
+// For example, you can get a derived key for AES-256 (which needs a
+// 32-byte key) by doing:
+//
+//	dk, err := scrypt.Key([]byte("some password"), salt, 32768, 8, 1, 32)
+//
+// The recommended parameters for interactive logins as of 2017 are N=32768, r=8
+// and p=1. The parameters N, r, and p should be increased as memory latency and
+// CPU parallelism increase; consider setting N to the highest power of 2 you
+// can derive within 100 milliseconds. Remember to get a good random salt.
+func Key(password, salt []byte, N, r, p, keyLen int) ([]byte, error) {
+	if N <= 1 || N&(N-1) != 0 {
+		return nil, errors.New("scrypt: N must be > 1 and a power of 2")
+	}
+	if uint64(r)*uint64(p) >= 1<<30 || r > maxInt/128/p || r > maxInt/256 || N > maxInt/128/r {
+		return nil, errors.New("scrypt: parameters are too large")
+	}
+
+	xy := make([]uint32, 64*r)
+	v := make([]uint32, 32*N*r)
+	b := pbkdf2.Key(password, salt, 1, p*128*r, sha256.New)
+
+	for i := 0; i < p; i++ {
+		smix(b[i*128*r:], r, N, v, xy)
+	}
+
+	return pbkdf2.Key(password, b, 1, keyLen, sha256.New), nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
new file mode 100644
index 00000000000..a4d1919a9e7
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -0,0 +1,76 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Deprecated: this package moved to golang.org/x/term.
+package terminal
+
+import (
+	"io"
+
+	"golang.org/x/term"
+)
+
+// EscapeCodes contains escape sequences that can be written to the terminal in
+// order to achieve different styles of text.
+type EscapeCodes = term.EscapeCodes
+
+// Terminal contains the state for running a VT100 terminal that is capable of
+// reading lines of input.
+type Terminal = term.Terminal
+
+// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
+// a local terminal, that terminal must first have been put into raw mode.
+// prompt is a string that is written at the start of each input line (e.g.
+// "> ").
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
+	return term.NewTerminal(c, prompt)
+}
+
+// ErrPasteIndicator may be returned from ReadLine as the error, in addition
+// to valid line data. It indicates that bracketed paste mode is enabled and
+// that the returned line consists only of pasted data. Programs may wish to
+// interpret pasted data more literally than typed data.
+var ErrPasteIndicator = term.ErrPasteIndicator
+
+// State contains the state of a terminal.
+type State = term.State
+
+// IsTerminal returns whether the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	return term.IsTerminal(fd)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo. This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
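+// A typical call site (a sketch only; assumes "fmt" and "os" are imported,
+// and zero is a hypothetical helper that wipes the slice after use):
+//
+//	fmt.Print("Password: ")
+//	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
+//	fmt.Println()
+//	if err != nil {
+//		return err
+//	}
+//	defer zero(pw)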
+func ReadPassword(fd int) ([]byte, error) { + return term.ReadPassword(fd) +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return term.MakeRaw(fd) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, oldState *State) error { + return term.Restore(fd, oldState) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return term.GetState(fd) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return term.GetSize(fd) +} diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go new file mode 100644 index 00000000000..6404aaf157d --- /dev/null +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -0,0 +1,367 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpproxy provides support for HTTP proxy determination +// based on environment variables, as provided by net/http's +// ProxyFromEnvironment function. +// +// The API is not subject to the Go 1 compatibility promise and may change at +// any time. +package httpproxy + +import ( + "errors" + "fmt" + "net" + "net/url" + "os" + "strings" + "unicode/utf8" + + "golang.org/x/net/idna" +) + +// Config holds configuration for HTTP proxy settings. See +// FromEnvironment for details. +type Config struct { + // HTTPProxy represents the value of the HTTP_PROXY or + // http_proxy environment variable. It will be used as the proxy + // URL for HTTP requests unless overridden by NoProxy. + HTTPProxy string + + // HTTPSProxy represents the HTTPS_PROXY or https_proxy + // environment variable. It will be used as the proxy URL for + // HTTPS requests unless overridden by NoProxy. + HTTPSProxy string + + // NoProxy represents the NO_PROXY or no_proxy environment + // variable. It specifies a string that contains comma-separated values + // specifying hosts that should be excluded from proxying. Each value is + // represented by an IP address prefix (1.2.3.4), an IP address prefix in + // CIDR notation (1.2.3.4/8), a domain name, or a special DNS label (*). + // An IP address prefix and domain name can also include a literal port + // number (1.2.3.4:80). + // A domain name matches that name and all subdomains. A domain name with + // a leading "." matches subdomains only. For example "foo.com" matches + // "foo.com" and "bar.foo.com"; ".y.com" matches "x.y.com" but not "y.com". + // A single asterisk (*) indicates that no proxying should be done. + // A best effort is made to parse the string and errors are + // ignored. + NoProxy string + + // CGI holds whether the current process is running + // as a CGI handler (FromEnvironment infers this from the + // presence of a REQUEST_METHOD environment variable). + // When this is set, ProxyForURL will return an error + // when HTTPProxy applies, because a client could be + // setting HTTP_PROXY maliciously. See https://golang.org/s/cgihttpproxy. + CGI bool +} + +// config holds the parsed configuration for HTTP proxy settings. +type config struct { + // Config represents the original configuration as defined above. 
+ Config + + // httpsProxy is the parsed URL of the HTTPSProxy if defined. + httpsProxy *url.URL + + // httpProxy is the parsed URL of the HTTPProxy if defined. + httpProxy *url.URL + + // ipMatchers represent all values in the NoProxy that are IP address + // prefixes or an IP address in CIDR notation. + ipMatchers []matcher + + // domainMatchers represent all values in the NoProxy that are a domain + // name or hostname & domain name + domainMatchers []matcher +} + +// FromEnvironment returns a Config instance populated from the +// environment variables HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the +// lowercase versions thereof). +// +// The environment values may be either a complete URL or a +// "host[:port]", in which case the "http" scheme is assumed. An error +// is returned if the value is a different form. +func FromEnvironment() *Config { + return &Config{ + HTTPProxy: getEnvAny("HTTP_PROXY", "http_proxy"), + HTTPSProxy: getEnvAny("HTTPS_PROXY", "https_proxy"), + NoProxy: getEnvAny("NO_PROXY", "no_proxy"), + CGI: os.Getenv("REQUEST_METHOD") != "", + } +} + +func getEnvAny(names ...string) string { + for _, n := range names { + if val := os.Getenv(n); val != "" { + return val + } + } + return "" +} + +// ProxyFunc returns a function that determines the proxy URL to use for +// a given request URL. Changing the contents of cfg will not affect +// proxy functions created earlier. +// +// A nil URL and nil error are returned if no proxy is defined in the +// environment, or a proxy should not be used for the given request, as +// defined by NO_PROXY. +// +// As a special case, if req.URL.Host is "localhost" or a loopback address +// (with or without a port number), then a nil URL and nil error will be returned. +func (cfg *Config) ProxyFunc() func(reqURL *url.URL) (*url.URL, error) { + // Preprocess the Config settings for more efficient evaluation. + cfg1 := &config{ + Config: *cfg, + } + cfg1.init() + return cfg1.proxyForURL +} + +func (cfg *config) proxyForURL(reqURL *url.URL) (*url.URL, error) { + var proxy *url.URL + if reqURL.Scheme == "https" { + proxy = cfg.httpsProxy + } else if reqURL.Scheme == "http" { + proxy = cfg.httpProxy + if proxy != nil && cfg.CGI { + return nil, errors.New("refusing to use HTTP_PROXY value in CGI environment; see golang.org/s/cgihttpproxy") + } + } + if proxy == nil { + return nil, nil + } + if !cfg.useProxy(canonicalAddr(reqURL)) { + return nil, nil + } + + return proxy, nil +} + +func parseProxy(proxy string) (*url.URL, error) { + if proxy == "" { + return nil, nil + } + + proxyURL, err := url.Parse(proxy) + if err != nil || proxyURL.Scheme == "" || proxyURL.Host == "" { + // proxy was bogus. Try prepending "http://" to it and + // see if that parses correctly. If not, we fall + // through and complain about the original one. + if proxyURL, err := url.Parse("http://" + proxy); err == nil { + return proxyURL, nil + } + } + if err != nil { + return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err) + } + return proxyURL, nil +} + +// useProxy reports whether requests to addr should use a proxy, +// according to the NO_PROXY or no_proxy environment variable. +// addr is always a canonicalAddr with a host and port. 
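+// For example, with NO_PROXY="foo.com,.y.com,10.0.0.0/8" (cases derived from
+// the Config.NoProxy rules documented above):
+//
+//	useProxy("foo.com:80")     // false: exact domain match
+//	useProxy("bar.foo.com:80") // false: subdomain of foo.com
+//	useProxy("y.com:80")       // true: a leading dot matches subdomains only
+//	useProxy("x.y.com:80")     // false
+//	useProxy("10.1.2.3:80")    // false: inside 10.0.0.0/8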
+func (cfg *config) useProxy(addr string) bool { + if len(addr) == 0 { + return true + } + host, port, err := net.SplitHostPort(addr) + if err != nil { + return false + } + if host == "localhost" { + return false + } + ip := net.ParseIP(host) + if ip != nil { + if ip.IsLoopback() { + return false + } + } + + addr = strings.ToLower(strings.TrimSpace(host)) + + if ip != nil { + for _, m := range cfg.ipMatchers { + if m.match(addr, port, ip) { + return false + } + } + } + for _, m := range cfg.domainMatchers { + if m.match(addr, port, ip) { + return false + } + } + return true +} + +func (c *config) init() { + if parsed, err := parseProxy(c.HTTPProxy); err == nil { + c.httpProxy = parsed + } + if parsed, err := parseProxy(c.HTTPSProxy); err == nil { + c.httpsProxy = parsed + } + + for _, p := range strings.Split(c.NoProxy, ",") { + p = strings.ToLower(strings.TrimSpace(p)) + if len(p) == 0 { + continue + } + + if p == "*" { + c.ipMatchers = []matcher{allMatch{}} + c.domainMatchers = []matcher{allMatch{}} + return + } + + // IPv4/CIDR, IPv6/CIDR + if _, pnet, err := net.ParseCIDR(p); err == nil { + c.ipMatchers = append(c.ipMatchers, cidrMatch{cidr: pnet}) + continue + } + + // IPv4:port, [IPv6]:port + phost, pport, err := net.SplitHostPort(p) + if err == nil { + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + if phost[0] == '[' && phost[len(phost)-1] == ']' { + phost = phost[1 : len(phost)-1] + } + } else { + phost = p + } + // IPv4, IPv6 + if pip := net.ParseIP(phost); pip != nil { + c.ipMatchers = append(c.ipMatchers, ipMatch{ip: pip, port: pport}) + continue + } + + if len(phost) == 0 { + // There is no host part, likely the entry is malformed; ignore. + continue + } + + // domain.com or domain.com:80 + // foo.com matches bar.foo.com + // .domain.com or .domain.com:port + // *.domain.com or *.domain.com:port + if strings.HasPrefix(phost, "*.") { + phost = phost[1:] + } + matchHost := false + if phost[0] != '.' { + matchHost = true + phost = "." + phost + } + if v, err := idnaASCII(phost); err == nil { + phost = v + } + c.domainMatchers = append(c.domainMatchers, domainMatch{host: phost, port: pport, matchHost: matchHost}) + } +} + +var portMap = map[string]string{ + "http": "80", + "https": "443", + "socks5": "1080", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +func canonicalAddr(url *url.URL) string { + addr := url.Hostname() + if v, err := idnaASCII(addr); err == nil { + addr = v + } + port := url.Port() + if port == "" { + port = portMap[url.Scheme] + } + return net.JoinHostPort(addr, port) +} + +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +func idnaASCII(v string) (string, error) { + // TODO: Consider removing this check after verifying performance is okay. + // Right now punycode verification, length checks, context checks, and the + // permissible character tests are all omitted. It also prevents the ToASCII + // call from salvaging an invalid IDN, when possible. As a result it may be + // possible to have two IDNs that appear identical to the user where the + // ASCII-only version causes an error downstream whereas the non-ASCII + // version does not. + // Note that for correct ASCII IDNs ToASCII will only do considerably more + // work, but it will not cause an allocation. 
+ if isASCII(v) { + return v, nil + } + return idna.Lookup.ToASCII(v) +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// matcher represents the matching rule for a given value in the NO_PROXY list +type matcher interface { + // match returns true if the host and optional port or ip and optional port + // are allowed + match(host, port string, ip net.IP) bool +} + +// allMatch matches on all possible inputs +type allMatch struct{} + +func (a allMatch) match(host, port string, ip net.IP) bool { + return true +} + +type cidrMatch struct { + cidr *net.IPNet +} + +func (m cidrMatch) match(host, port string, ip net.IP) bool { + return m.cidr.Contains(ip) +} + +type ipMatch struct { + ip net.IP + port string +} + +func (m ipMatch) match(host, port string, ip net.IP) bool { + if m.ip.Equal(ip) { + return m.port == "" || m.port == port + } + return false +} + +type domainMatch struct { + host string + port string + + matchHost bool +} + +func (m domainMatch) match(host, port string, ip net.IP) bool { + if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { + return m.port == "" || m.port == port + } + return false +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index e2b298d8593..43557ab7e97 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -1564,6 +1564,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if size > remainSize { hdec.SetEmitEnabled(false) mh.Truncated = true + remainSize = 0 return } remainSize -= size @@ -1576,6 +1577,36 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { var hc headersOrContinuation = hf for { frag := hc.HeaderBlockFragment() + + // Avoid parsing large amounts of headers that we will then discard. + // If the sender exceeds the max header list size by too much, + // skip parsing the fragment and close the connection. + // + // "Too much" is either any CONTINUATION frame after we've already + // exceeded the max header list size (in which case remainSize is 0), + // or a frame whose encoded size is more than twice the remaining + // header list bytes we're willing to accept. + if int64(len(frag)) > int64(2*remainSize) { + if VerboseLogs { + log.Printf("http2: header list too large") + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. + return nil, ConnectionError(ErrCodeProtocol) + } + + // Also close the connection after any CONTINUATION frame following an + // invalid header, since we stop tracking the size of the headers after + // an invalid one. + if invalid != nil { + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + // It would be nice to send a RST_STREAM before sending the GOAWAY, + // but the structure of the server's frame writer makes this difficult. 
+ return nil, ConnectionError(ErrCodeProtocol) + } + if _, err := hdec.Write(frag); err != nil { return nil, ConnectionError(ErrCodeCompression) } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 684d984fd96..3b9f06b9624 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -77,7 +77,10 @@ func (p *pipe) Read(d []byte) (n int, err error) { } } -var errClosedPipeWrite = errors.New("write on closed buffer") +var ( + errClosedPipeWrite = errors.New("write on closed buffer") + errUninitializedPipeWrite = errors.New("write on uninitialized buffer") +) // Write copies bytes from p into the buffer and wakes a reader. // It is an error to write more data than the buffer can hold. @@ -91,6 +94,12 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil || p.breakErr != nil { return 0, errClosedPipeWrite } + // pipe.setBuffer is never invoked, leaving the buffer uninitialized. + // We shouldn't try to write to an uninitialized pipe, + // but returning an error is better than panicking. + if p.b == nil { + return 0, errUninitializedPipeWrite + } return p.b.Write(d) } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index ae94c6408d5..ce2e8b40eee 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -124,6 +124,7 @@ type Server struct { // IdleTimeout specifies how long until idle clients should be // closed with a GOAWAY frame. PING frames are not considered // activity for the purposes of IdleTimeout. + // If zero or negative, there is no timeout. IdleTimeout time.Duration // MaxUploadBufferPerConnection is the size of the initial flow @@ -434,7 +435,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // passes the connection off to us with the deadline already set. // Write deadlines are set per stream in serverConn.newStream. // Disarm the net.Conn write deadline here. - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -924,7 +925,7 @@ func (sc *serverConn) serve() { sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } @@ -1637,7 +1638,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { delete(sc.streams, st.id) if len(sc.streams) == 0 { sc.setConnState(http.StateIdle) - if sc.srv.IdleTimeout != 0 { + if sc.srv.IdleTimeout > 0 { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { @@ -2017,7 +2018,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // similar to how the http1 server works. Here it's // technically more like the http1 Server's ReadHeaderTimeout // (in Go 1.8), though. That's a more sane option anyway. - if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } @@ -2038,7 +2039,7 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { // Disable any read deadline set by the net/http package // prior to the upgrade. 
- if sc.hs.ReadTimeout != 0 { + if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) } @@ -2116,7 +2117,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.srv.initialStreamRecvWindowSize()) - if sc.hs.WriteTimeout != 0 { + if sc.hs.WriteTimeout > 0 { st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } diff --git a/vendor/golang.org/x/net/http2/testsync.go b/vendor/golang.org/x/net/http2/testsync.go new file mode 100644 index 00000000000..61075bd16d3 --- /dev/null +++ b/vendor/golang.org/x/net/http2/testsync.go @@ -0,0 +1,331 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package http2 + +import ( + "context" + "sync" + "time" +) + +// testSyncHooks coordinates goroutines in tests. +// +// For example, a call to ClientConn.RoundTrip involves several goroutines, including: +// - the goroutine running RoundTrip; +// - the clientStream.doRequest goroutine, which writes the request; and +// - the clientStream.readLoop goroutine, which reads the response. +// +// Using testSyncHooks, a test can start a RoundTrip and identify when all these goroutines +// are blocked waiting for some condition such as reading the Request.Body or waiting for +// flow control to become available. +// +// The testSyncHooks also manage timers and synthetic time in tests. +// This permits us to, for example, start a request and cause it to time out waiting for +// response headers without resorting to time.Sleep calls. +type testSyncHooks struct { + // active/inactive act as a mutex and condition variable. + // + // - neither chan contains a value: testSyncHooks is locked. + // - active contains a value: unlocked, and at least one goroutine is not blocked + // - inactive contains a value: unlocked, and all goroutines are blocked + active chan struct{} + inactive chan struct{} + + // goroutine counts + total int // total goroutines + condwait map[*sync.Cond]int // blocked in sync.Cond.Wait + blocked []*testBlockedGoroutine // otherwise blocked + + // fake time + now time.Time + timers []*fakeTimer + + // Transport testing: Report various events. + newclientconn func(*ClientConn) + newstream func(*clientStream) +} + +// testBlockedGoroutine is a blocked goroutine. +type testBlockedGoroutine struct { + f func() bool // blocked until f returns true + ch chan struct{} // closed when unblocked +} + +func newTestSyncHooks() *testSyncHooks { + h := &testSyncHooks{ + active: make(chan struct{}, 1), + inactive: make(chan struct{}, 1), + condwait: map[*sync.Cond]int{}, + } + h.inactive <- struct{}{} + h.now = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + return h +} + +// lock acquires the testSyncHooks mutex. +func (h *testSyncHooks) lock() { + select { + case <-h.active: + case <-h.inactive: + } +} + +// waitInactive waits for all goroutines to become inactive. +func (h *testSyncHooks) waitInactive() { + for { + <-h.inactive + if !h.unlock() { + break + } + } +} + +// unlock releases the testSyncHooks mutex. +// It reports whether any goroutines are active. +func (h *testSyncHooks) unlock() (active bool) { + // Look for a blocked goroutine which can be unblocked. 
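+	// At most one blocked goroutine is released per call; the rest are
+	// carried over in h.blocked so later unlock calls can release them
+	// one at a time, keeping test execution deterministic.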
+	blocked := h.blocked[:0]
+	unblocked := false
+	for _, b := range h.blocked {
+		if !unblocked && b.f() {
+			unblocked = true
+			close(b.ch)
+		} else {
+			blocked = append(blocked, b)
+		}
+	}
+	h.blocked = blocked
+
+	// Count goroutines blocked on condition variables.
+	condwait := 0
+	for _, count := range h.condwait {
+		condwait += count
+	}
+
+	if h.total > condwait+len(blocked) {
+		h.active <- struct{}{}
+		return true
+	} else {
+		h.inactive <- struct{}{}
+		return false
+	}
+}
+
+// goRun starts a new goroutine.
+func (h *testSyncHooks) goRun(f func()) {
+	h.lock()
+	h.total++
+	h.unlock()
+	go func() {
+		defer func() {
+			h.lock()
+			h.total--
+			h.unlock()
+		}()
+		f()
+	}()
+}
+
+// blockUntil indicates that a goroutine is blocked waiting for some condition to become true.
+// It waits until f returns true before proceeding.
+//
+// Example usage:
+//
+//	h.blockUntil(func() bool {
+//		// Is the context done yet?
+//		select {
+//		case <-ctx.Done():
+//		default:
+//			return false
+//		}
+//		return true
+//	})
+//	// Wait for the context to become done.
+//	<-ctx.Done()
+//
+// The function f passed to blockUntil must be non-blocking and idempotent.
+func (h *testSyncHooks) blockUntil(f func() bool) {
+	if f() {
+		return
+	}
+	ch := make(chan struct{})
+	h.lock()
+	h.blocked = append(h.blocked, &testBlockedGoroutine{
+		f:  f,
+		ch: ch,
+	})
+	h.unlock()
+	<-ch
+}
+
+// condBroadcast is sync.Cond.Broadcast.
+func (h *testSyncHooks) condBroadcast(cond *sync.Cond) {
+	h.lock()
+	delete(h.condwait, cond)
+	h.unlock()
+	cond.Broadcast()
+}
+
+// condWait records that a goroutine is about to block in sync.Cond.Wait.
+func (h *testSyncHooks) condWait(cond *sync.Cond) {
+	h.lock()
+	h.condwait[cond]++
+	h.unlock()
+}
+
+// newTimer creates a new fake timer.
+func (h *testSyncHooks) newTimer(d time.Duration) timer {
+	h.lock()
+	defer h.unlock()
+	t := &fakeTimer{
+		hooks: h,
+		when:  h.now.Add(d),
+		c:     make(chan time.Time),
+	}
+	h.timers = append(h.timers, t)
+	return t
+}
+
+// afterFunc creates a new fake AfterFunc timer.
+func (h *testSyncHooks) afterFunc(d time.Duration, f func()) timer {
+	h.lock()
+	defer h.unlock()
+	t := &fakeTimer{
+		hooks: h,
+		when:  h.now.Add(d),
+		f:     f,
+	}
+	h.timers = append(h.timers, t)
+	return t
+}
+
+func (h *testSyncHooks) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
+	ctx, cancel := context.WithCancel(ctx)
+	t := h.afterFunc(d, cancel)
+	return ctx, func() {
+		t.Stop()
+		cancel()
+	}
+}
+
+func (h *testSyncHooks) timeUntilEvent() time.Duration {
+	h.lock()
+	defer h.unlock()
+	var next time.Time
+	for _, t := range h.timers {
+		if next.IsZero() || t.when.Before(next) {
+			next = t.when
+		}
+	}
+	if d := next.Sub(h.now); d > 0 {
+		return d
+	}
+	return 0
+}
+
+// advance advances time and causes synthetic timers to fire.
+func (h *testSyncHooks) advance(d time.Duration) {
+	h.lock()
+	defer h.unlock()
+	h.now = h.now.Add(d)
+	timers := h.timers[:0]
+	for _, t := range h.timers {
+		t := t // remove after go.mod depends on go1.22
+		t.mu.Lock()
+		switch {
+		case t.when.After(h.now):
+			timers = append(timers, t)
+		case t.when.IsZero():
+			// stopped timer
+		default:
+			t.when = time.Time{}
+			if t.c != nil {
+				close(t.c)
+			}
+			if t.f != nil {
+				h.total++
+				go func() {
+					defer func() {
+						h.lock()
+						h.total--
+						h.unlock()
+					}()
+					t.f()
+				}()
+			}
+		}
+		t.mu.Unlock()
+	}
+	h.timers = timers
+}
+
+// A timer wraps a time.Timer, or a synthetic equivalent in tests.
+// Unlike time.Timer, timer is single-use: The timer channel is closed when the timer expires.
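+// A minimal sketch of the contract (illustrative only):
+//
+//	t := newTimeTimer(50 * time.Millisecond)
+//	<-t.C()             // the channel is closed when the timer fires
+//	stopped := t.Stop() // false here: the timer already fired
+//	_ = stopped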
+type timer interface {
+	C() <-chan time.Time
+	Stop() bool
+	Reset(d time.Duration) bool
+}
+
+// timeTimer implements timer using real time.
+type timeTimer struct {
+	t *time.Timer
+	c chan time.Time
+}
+
+// newTimeTimer creates a new timer using real time.
+func newTimeTimer(d time.Duration) timer {
+	ch := make(chan time.Time)
+	t := time.AfterFunc(d, func() {
+		close(ch)
+	})
+	return &timeTimer{t, ch}
+}
+
+// newTimeAfterFunc creates an AfterFunc timer using real time.
+func newTimeAfterFunc(d time.Duration, f func()) timer {
+	return &timeTimer{
+		t: time.AfterFunc(d, f),
+	}
+}
+
+func (t timeTimer) C() <-chan time.Time        { return t.c }
+func (t timeTimer) Stop() bool                 { return t.t.Stop() }
+func (t timeTimer) Reset(d time.Duration) bool { return t.t.Reset(d) }
+
+// fakeTimer implements timer using fake time.
+type fakeTimer struct {
+	hooks *testSyncHooks
+
+	mu   sync.Mutex
+	when time.Time      // when the timer will fire
+	c    chan time.Time // closed when the timer fires; mutually exclusive with f
+	f    func()         // called when the timer fires; mutually exclusive with c
+}
+
+func (t *fakeTimer) C() <-chan time.Time { return t.c }
+
+func (t *fakeTimer) Stop() bool {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	stopped := t.when.IsZero()
+	t.when = time.Time{}
+	return stopped
+}
+
+func (t *fakeTimer) Reset(d time.Duration) bool {
+	if t.c != nil || t.f == nil {
+		panic("fakeTimer only supports Reset on AfterFunc timers")
+	}
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	t.hooks.lock()
+	defer t.hooks.unlock()
+	active := !t.when.IsZero()
+	t.when = t.hooks.now.Add(d)
+	if !active {
+		t.hooks.timers = append(t.hooks.timers, t)
+	}
+	return active
+}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index c2a5b44b3d6..ce375c8c753 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -147,6 +147,12 @@ type Transport struct {
 	// waiting for their turn.
 	StrictMaxConcurrentStreams bool
 
+	// IdleConnTimeout is the maximum amount of time an idle
+	// (keep-alive) connection will remain idle before closing
+	// itself.
+	// Zero means no limit.
+	IdleConnTimeout time.Duration
+
 	// ReadIdleTimeout is the timeout after which a health check using ping
 	// frame will be carried out if no frame is received on the connection.
 	// Note that a ping response will be considered a received frame, so if
@@ -178,6 +184,8 @@ type Transport struct {
 
 	connPoolOnce  sync.Once
 	connPoolOrDef ClientConnPool // non-nil version of ConnPool
+
+	syncHooks *testSyncHooks
 }
 
 func (t *Transport) maxHeaderListSize() uint32 {
@@ -302,7 +310,7 @@ type ClientConn struct {
 	readerErr  error // set before readerDone is closed
 
 	idleTimeout time.Duration // or 0 for never
-	idleTimer   *time.Timer
+	idleTimer   timer
 
 	mu              sync.Mutex // guards following
 	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -344,6 +352,60 @@ type ClientConn struct {
 	werr error        // first write error that has occurred
 	hbuf bytes.Buffer // HPACK encoder writes into this
 	henc *hpack.Encoder
+
+	syncHooks *testSyncHooks // can be nil
+}
+
+// Hook points used for testing.
+// Outside of tests, cc.syncHooks is nil and these all have minimal implementations.
+// Inside tests, see the testSyncHooks function docs.
+
+// goRun starts a new goroutine.
+func (cc *ClientConn) goRun(f func()) {
+	if cc.syncHooks != nil {
+		cc.syncHooks.goRun(f)
+		return
+	}
+	go f()
+}
+
+// condBroadcast is cc.cond.Broadcast.
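The ClientConn methods that follow (condBroadcast, condWait, newTimer, afterFunc, contextWithTimeout) all share one shape: if syncHooks is non-nil, route through the test hooks; otherwise fall back to the real primitive. A minimal standalone sketch of that nil-hook dispatch pattern, using invented names (testHooks, conn) rather than anything from the diff:

```go
package main

import "fmt"

// testHooks stands in for testSyncHooks: a seam that tests can use to
// intercept side effects. Outside of tests it stays nil.
type testHooks struct {
	goRun func(func())
}

type conn struct {
	hooks *testHooks // nil in production
}

// goRun matches the shape of (*ClientConn).goRun below: production code
// takes the plain-goroutine path, tests get full visibility.
func (c *conn) goRun(f func()) {
	if c.hooks != nil {
		c.hooks.goRun(f)
		return
	}
	go f()
}

func main() {
	done := make(chan struct{})
	c := &conn{} // no hooks installed: the real goroutine path
	c.goRun(func() { close(done) })
	<-done
	fmt.Println("done")
}
```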
+func (cc *ClientConn) condBroadcast() { + if cc.syncHooks != nil { + cc.syncHooks.condBroadcast(cc.cond) + } + cc.cond.Broadcast() +} + +// condWait is cc.cond.Wait. +func (cc *ClientConn) condWait() { + if cc.syncHooks != nil { + cc.syncHooks.condWait(cc.cond) + } + cc.cond.Wait() +} + +// newTimer creates a new time.Timer, or a synthetic timer in tests. +func (cc *ClientConn) newTimer(d time.Duration) timer { + if cc.syncHooks != nil { + return cc.syncHooks.newTimer(d) + } + return newTimeTimer(d) +} + +// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. +func (cc *ClientConn) afterFunc(d time.Duration, f func()) timer { + if cc.syncHooks != nil { + return cc.syncHooks.afterFunc(d, f) + } + return newTimeAfterFunc(d, f) +} + +func (cc *ClientConn) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { + if cc.syncHooks != nil { + return cc.syncHooks.contextWithTimeout(ctx, d) + } + return context.WithTimeout(ctx, d) } // clientStream is the state for a single HTTP/2 stream. One of these @@ -425,7 +487,7 @@ func (cs *clientStream) abortStreamLocked(err error) { // TODO(dneil): Clean up tests where cs.cc.cond is nil. if cs.cc.cond != nil { // Wake up writeRequestBody if it is waiting on flow control. - cs.cc.cond.Broadcast() + cs.cc.condBroadcast() } } @@ -435,7 +497,7 @@ func (cs *clientStream) abortRequestBodyWrite() { defer cc.mu.Unlock() if cs.reqBody != nil && cs.reqBodyClosed == nil { cs.closeReqBodyLocked() - cc.cond.Broadcast() + cc.condBroadcast() } } @@ -445,10 +507,10 @@ func (cs *clientStream) closeReqBodyLocked() { } cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed - go func() { + cs.cc.goRun(func() { cs.reqBody.Close() close(reqBodyClosed) - }() + }) } type stickyErrWriter struct { @@ -537,15 +599,6 @@ func authorityAddr(scheme string, authority string) (addr string) { return net.JoinHostPort(host, port) } -var retryBackoffHook func(time.Duration) *time.Timer - -func backoffNewTimer(d time.Duration) *time.Timer { - if retryBackoffHook != nil { - return retryBackoffHook(d) - } - return time.NewTimer(d) -} - // RoundTripOpt is like RoundTrip, but takes options. 
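The RoundTripOpt hunk that follows retries requests after retryable failures with exponential backoff plus up to 10% jitter, now driven through the timer abstraction. A hedged standalone sketch of just the delay arithmetic from that hunk; note that, as in the original, the float-to-Duration conversion truncates fractional seconds:

```go
package main

import (
	"fmt"
	mathrand "math/rand"
	"time"
)

// retryDelay reproduces the backoff computation from the hunk below:
// 2^(retry-1) seconds, inflated by up to 10% random jitter so that
// concurrent clients do not all retry in lockstep.
func retryDelay(retry uint) time.Duration {
	backoff := float64(uint(1) << (retry - 1))
	backoff += backoff * (0.1 * mathrand.Float64())
	return time.Second * time.Duration(backoff) // truncates to whole seconds
}

func main() {
	for retry := uint(1); retry <= 4; retry++ {
		fmt.Printf("retry %d: sleep %v\n", retry, retryDelay(retry))
	}
}
```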
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { @@ -573,13 +626,27 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - timer := backoffNewTimer(d) + var tm timer + if t.syncHooks != nil { + tm = t.syncHooks.newTimer(d) + t.syncHooks.blockUntil(func() bool { + select { + case <-tm.C(): + case <-req.Context().Done(): + default: + return false + } + return true + }) + } else { + tm = newTimeTimer(d) + } select { - case <-timer.C: + case <-tm.C(): t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): - timer.Stop() + tm.Stop() err = req.Context().Err() } } @@ -658,6 +725,9 @@ func canRetryError(err error) bool { } func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) { + if t.syncHooks != nil { + return t.newClientConn(nil, singleUse, t.syncHooks) + } host, _, err := net.SplitHostPort(addr) if err != nil { return nil, err @@ -666,7 +736,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b if err != nil { return nil, err } - return t.newClientConn(tconn, singleUse) + return t.newClientConn(tconn, singleUse, nil) } func (t *Transport) newTLSConfig(host string) *tls.Config { @@ -732,10 +802,10 @@ func (t *Transport) maxEncoderHeaderTableSize() uint32 { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, t.disableKeepAlives(), nil) } -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { +func (t *Transport) newClientConn(c net.Conn, singleUse bool, hooks *testSyncHooks) (*ClientConn, error) { cc := &ClientConn{ t: t, tconn: c, @@ -750,10 +820,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro wantSettingsAck: true, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), + syncHooks: hooks, + } + if hooks != nil { + hooks.newclientconn(cc) + c = cc.tconn } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + cc.idleTimer = cc.afterFunc(d, cc.onIdleTimeout) } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -818,7 +893,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro return nil, cc.werr } - go cc.readLoop() + cc.goRun(cc.readLoop) return cc, nil } @@ -826,7 +901,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.t.pingTimeout() // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
- ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) + ctx, cancel := cc.contextWithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1056,7 +1131,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { // Wait for all in-flight streams to complete or connection to close done := make(chan struct{}) cancelled := false // guarded by cc.mu - go func() { + cc.goRun(func() { cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1068,9 +1143,9 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { if cancelled { break } - cc.cond.Wait() + cc.condWait() } - }() + }) shutdownEnterWaitStateHook() select { case <-done: @@ -1080,7 +1155,7 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { cc.mu.Lock() // Free the goroutine above cancelled = true - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() return ctx.Err() } @@ -1118,7 +1193,7 @@ func (cc *ClientConn) closeForError(err error) { for _, cs := range cc.streams { cs.abortStreamLocked(err) } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() cc.closeConn() } @@ -1215,6 +1290,10 @@ func (cc *ClientConn) decrStreamReservationsLocked() { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + return cc.roundTrip(req, nil) +} + +func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) (*http.Response, error) { ctx := req.Context() cs := &clientStream{ cc: cc, @@ -1229,9 +1308,23 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { respHeaderRecv: make(chan struct{}), donec: make(chan struct{}), } - go cs.doRequest(req) + cc.goRun(func() { + cs.doRequest(req) + }) waitDone := func() error { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.donec: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.donec: return nil @@ -1292,7 +1385,24 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { return err } + if streamf != nil { + streamf(cs) + } + for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.respHeaderRecv: return handleResponseHeaders() @@ -1348,6 +1458,21 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { if cc.reqHeaderMu == nil { panic("RoundTrip on uninitialized ClientConn") // for tests } + var newStreamHook func(*clientStream) + if cc.syncHooks != nil { + newStreamHook = cc.syncHooks.newstream + cc.syncHooks.blockUntil(func() bool { + select { + case cc.reqHeaderMu <- struct{}{}: + <-cc.reqHeaderMu + case <-cs.reqCancel: + case <-ctx.Done(): + default: + return false + } + return true + }) + } select { case cc.reqHeaderMu <- struct{}{}: case <-cs.reqCancel: @@ -1372,6 +1497,10 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { } cc.mu.Unlock() + if newStreamHook != nil { + newStreamHook(cs) + } + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 
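Throughout the roundTrip and writeRequest hunks above, every select that can park a goroutine is preceded (when test hooks are installed) by a syncHooks.blockUntil whose callback is the same select with a default arm, turning it into a non-blocking readiness probe. A minimal sketch of that probe shape, with hypothetical done/abort channels:

```go
package main

import "fmt"

// wouldWake reports, without blocking, whether a select over the given
// channels would proceed immediately — the callback shape handed to
// syncHooks.blockUntil in the hunks above.
func wouldWake(done, abort <-chan struct{}) bool {
	select {
	case <-done:
	case <-abort:
	default:
		return false // still blocked: the test harness keeps waiting
	}
	return true
}

func main() {
	done, abort := make(chan struct{}), make(chan struct{})
	fmt.Println(wouldWake(done, abort)) // false: nothing ready yet
	close(abort)
	fmt.Println(wouldWake(done, abort)) // true: the select would not block now
}
```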
if !cc.t.disableCompression() && req.Header.Get("Accept-Encoding") == "" && @@ -1452,15 +1581,30 @@ func (cs *clientStream) writeRequest(req *http.Request) (err error) { var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) + timer := cc.newTimer(d) defer timer.Stop() - respHeaderTimer = timer.C + respHeaderTimer = timer.C() respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, // or until the request is aborted (via context, error, or otherwise), // whichever comes first. for { + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-cs.peerClosed: + case <-respHeaderTimer: + case <-respHeaderRecv: + case <-cs.abort: + case <-ctx.Done(): + case <-cs.reqCancel: + default: + return false + } + return true + }) + } select { case <-cs.peerClosed: return nil @@ -1609,7 +1753,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { return nil } cc.pendingRequests++ - cc.cond.Wait() + cc.condWait() cc.pendingRequests-- select { case <-cs.abort: @@ -1871,8 +2015,24 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) cs.flow.take(take) return take, nil } - cc.cond.Wait() + cc.condWait() + } +} + +func validateHeaders(hdrs http.Header) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } } + return "" } var errNilRequestURL = errors.New("http2: Request.URI is nil") @@ -1912,19 +2072,14 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail } } - // Check for any invalid headers and return an error before we + // Check for any invalid headers+trailers and return an error before we // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httpguts.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, because it may be sensitive. - return nil, fmt.Errorf("invalid HTTP header value for header %q", k) - } - } + if err := validateHeaders(req.Header); err != "" { + return nil, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return nil, fmt.Errorf("invalid HTTP trailer %s", err) } enumerateHeaders := func(f func(name, value string)) { @@ -2143,7 +2298,7 @@ func (cc *ClientConn) forgetStreamID(id uint32) { } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. 
- cc.cond.Broadcast() + cc.condBroadcast() closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil if closeOnIdle && cc.streamsReserved == 0 && len(cc.streams) == 0 { @@ -2231,7 +2386,7 @@ func (rl *clientConnReadLoop) cleanup() { cs.abortStreamLocked(err) } } - cc.cond.Broadcast() + cc.condBroadcast() cc.mu.Unlock() } @@ -2266,10 +2421,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.t.ReadIdleTimeout - var t *time.Timer + var t timer if readIdleTimeout != 0 { - t = time.AfterFunc(readIdleTimeout, cc.healthCheck) - defer t.Stop() + t = cc.afterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2684,7 +2838,7 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { }) return nil } - if !cs.firstByte { + if !cs.pastHeaders { cc.logf("protocol error: received DATA before a HEADERS frame") rl.endStreamError(cs, StreamError{ StreamID: f.StreamID, @@ -2867,7 +3021,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error { for _, cs := range cc.streams { cs.flow.add(delta) } - cc.cond.Broadcast() + cc.condBroadcast() cc.initialWindowSize = s.Val case SettingHeaderTableSize: @@ -2922,7 +3076,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { return ConnectionError(ErrCodeFlowControl) } - cc.cond.Broadcast() + cc.condBroadcast() return nil } @@ -2964,24 +3118,38 @@ func (cc *ClientConn) Ping(ctx context.Context) error { } cc.mu.Unlock() } - errc := make(chan error, 1) - go func() { + var pingError error + errc := make(chan struct{}) + cc.goRun(func() { cc.wmu.Lock() defer cc.wmu.Unlock() - if err := cc.fr.WritePing(false, p); err != nil { - errc <- err + if pingError = cc.fr.WritePing(false, p); pingError != nil { + close(errc) return } - if err := cc.bw.Flush(); err != nil { - errc <- err + if pingError = cc.bw.Flush(); pingError != nil { + close(errc) return } - }() + }) + if cc.syncHooks != nil { + cc.syncHooks.blockUntil(func() bool { + select { + case <-c: + case <-errc: + case <-ctx.Done(): + case <-cc.readerDone: + default: + return false + } + return true + }) + } select { case <-c: return nil - case err := <-errc: - return err + case <-errc: + return pingError case <-ctx.Done(): return ctx.Err() case <-cc.readerDone: @@ -3150,9 +3318,17 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err } func (t *Transport) idleConnTimeout() time.Duration { + // to keep things backwards compatible, we use non-zero values of + // IdleConnTimeout, followed by using the IdleConnTimeout on the underlying + // http1 transport, followed by 0 + if t.IdleConnTimeout != 0 { + return t.IdleConnTimeout + } + if t.t1 != nil { return t.t1.IdleConnTimeout } + return 0 } diff --git a/vendor/golang.org/x/net/publicsuffix/data/children b/vendor/golang.org/x/net/publicsuffix/data/children new file mode 100644 index 00000000000..08261bffd19 Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/children differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/nodes b/vendor/golang.org/x/net/publicsuffix/data/nodes new file mode 100644 index 00000000000..1dae6ede8f2 Binary files /dev/null and b/vendor/golang.org/x/net/publicsuffix/data/nodes differ diff --git a/vendor/golang.org/x/net/publicsuffix/data/text b/vendor/golang.org/x/net/publicsuffix/data/text new file mode 100644 index 00000000000..7e516413f6c --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/data/text @@ -0,0 +1 @@ 
+birkenesoddtangentinglogoweirbitbucketrzynishikatakayamatta-varjjatjomembersaltdalovepopartysfjordiskussionsbereichatinhlfanishikatsuragitappassenger-associationishikawazukamiokameokamakurazakitaurayasudabitternidisrechtrainingloomy-routerbjarkoybjerkreimdbalsan-suedtirololitapunkapsienamsskoganeibmdeveloperauniteroirmemorialombardiadempresashibetsukumiyamagasakinderoyonagunicloudevelopmentaxiijimarriottayninhaccanthobby-siteval-d-aosta-valleyoriikaracolognebinatsukigataiwanumatajimidsundgcahcesuolocustomer-ocimperiautoscanalytics-gatewayonagoyaveroykenflfanpachihayaakasakawaiishopitsitemasekd1kappenginedre-eikerimo-siemenscaledekaascolipicenoboribetsucks3-eu-west-3utilities-16-balestrandabergentappsseekloges3-eu-west-123paginawebcamauction-acornfshostrodawaraktyubinskaunicommbank123kotisivultrobjectselinogradimo-i-rana4u2-localhostrolekanieruchomoscientistordal-o-g-i-nikolaevents3-ap-northeast-2-ddnsking123homepagefrontappchizip61123saitamakawababia-goracleaningheannakadomarineat-urlimanowarudakuneustarostwodzislawdev-myqnapcloudcontrolledgesuite-stagingdyniamusementdllclstagehirnikonantomobelementorayokosukanoyakumoliserniaurland-4-salernord-aurdalipaywhirlimiteddnslivelanddnss3-ap-south-123siteweberlevagangaviikanonji234lima-cityeats3-ap-southeast-123webseiteambulancechireadmyblogspotaribeiraogakicks-assurfakefurniturealmpmninoheguribigawaurskog-holandinggfarsundds3-ap-southeast-20001wwwedeployokote123hjemmesidealerdalaheadjuegoshikibichuobiraustevollimombetsupplyokoze164-balena-devices3-ca-central-123websiteleaf-south-12hparliamentatsunobninsk8s3-eu-central-1337bjugnishimerablackfridaynightjxn--11b4c3ditchyouripatriabloombergretaijindustriesteinkjerbloxcmsaludivtasvuodnakaiwanairlinekobayashimodatecnologiablushakotanishinomiyashironomniwebview-assetsalvadorbmoattachmentsamegawabmsamnangerbmwellbeingzonebnrweatherchannelsdvrdnsamparalleluxenishinoomotegotsukishiwadavvenjargamvikarpaczest-a-la-maisondre-landivttasvuotnakamai-stagingloppennebomlocalzonebonavstackartuzybondigitaloceanspacesamsclubartowest1-usamsunglugsmall-webspacebookonlineboomlaakesvuemielecceboschristmasakilatiron-riopretoeidsvollovesickaruizawabostik-serverrankoshigayachtsandvikcoromantovalle-d-aostakinouebostonakijinsekikogentlentapisa-geekarumaifmemsetkmaxxn--12c1fe0bradescotksatmpaviancapitalonebouncemerckmsdscloudiybounty-fullensakerrypropertiesangovtoyosatoyokawaboutiquebecologialaichaugiangmbhartiengiangminakamichiharaboutireservdrangedalpusercontentoyotapfizerboyfriendoftheinternetflixn--12cfi8ixb8lublindesnesanjosoyrovnoticiasannanishinoshimattelemarkasaokamikitayamatsurinfinitigopocznore-og-uvdalucaniabozen-sudtiroluccanva-appstmnishiokoppegardray-dnsupdaterbozen-suedtirolukowesteuropencraftoyotomiyazakinsurealtypeformesswithdnsannohekinanporovigonohejinternationaluroybplacedogawarabikomaezakirunordkappgfoggiabrandrayddns5ybrasiliadboxoslockerbresciaogashimadachicappadovaapstemp-dnswatchest-mon-blogueurodirumagazinebrindisiciliabroadwaybroke-itvedestrandraydnsanokashibatakashimashikiyosatokigawabrokerbrothermesserlifestylebtimnetzpisdnpharmaciensantamariakebrowsersafetymarketingmodumetacentrumeteorappharmacymruovatlassian-dev-builderschaefflerbrumunddalutskashiharabrusselsantoandreclaimsanukintlon-2bryanskiptveterinaireadthedocsaobernardovre-eikerbrynebwestus2bzhitomirbzzwhitesnowflakecommunity-prochowicecomodalenissandoycompanyaarphdfcbankasumigaurawa-mazowszexn--1ck2e1bambinagisobetsuldalpha-myqnapcloudaccess3-us-east-2ixboxeroxfinityolasiteastus2comparemarkerryhotelsaves-the-whalessandria-trani-barletta-and
riatranibarlettaandriacomsecaasnesoddeno-stagingrondarcondoshifteditorxn--1ctwolominamatarnobrzegrongrossetouchijiwadedyn-berlincolnissayokoshibahikariyaltakazakinzais-a-bookkeepermarshallstatebankasuyalibabahccavuotnagaraholtaleniwaizumiotsurugashimaintenanceomutazasavonarviikaminoyamaxunispaceconferenceconstructionflashdrivefsncf-ipfsaxoconsuladobeio-static-accesscamdvrcampaniaconsultantranoyconsultingroundhandlingroznysaitohnoshookuwanakayamangyshlakdnepropetrovskanlandyndns-freeboxostrowwlkpmgrphilipsyno-dschokokekscholarshipschoolbusinessebycontactivetrailcontagematsubaravendbambleborkdalvdalcest-le-patron-rancherkasydneyukuhashimokawavoues3-sa-east-1contractorskenissedalcookingruecoolblogdnsfor-better-thanhhoarairforcentralus-1cooperativano-frankivskodjeephonefosschoolsztynsetransiphotographysiocoproductionschulplattforminamiechizenisshingucciprianiigatairaumalatvuopmicrolightinguidefinimaringatlancastercorsicafjschulservercosenzakopanecosidnshome-webservercellikescandypopensocialcouchpotatofrieschwarzgwangjuh-ohtawaramotoineppueblockbusternopilawacouncilcouponscrapper-sitecozoravennaharimalborkaszubytemarketscrappinguitarscrysecretrosnubananarepublic-inquiryurihonjoyenthickaragandaxarnetbankanzakiwielunnerepairbusanagochigasakishimabarakawaharaolbia-tempio-olbiatempioolbialowiezachpomorskiengiangjesdalolipopmcdirepbodyn53cqcxn--1lqs03niyodogawacrankyotobetsumidaknongujaratmallcrdyndns-homednscwhminamifuranocreditcardyndns-iphutholdingservehttpbincheonl-ams-1creditunionionjukujitawaravpagecremonashorokanaiecrewhoswholidaycricketnedalcrimeast-kazakhstanangercrotonecrowniphuyencrsvp4cruiseservehumourcuisinellair-traffic-controllagdenesnaaseinet-freakserveircasertainaircraftingvolloansnasaarlanduponthewifidelitypedreamhostersaotomeldaluxurycuneocupcakecuritibacgiangiangryggeecurvalled-aostargets-itranslatedyndns-mailcutegirlfriendyndns-office-on-the-webhoptogurafedoraprojectransurlfeirafembetsukuis-a-bruinsfanfermodenakasatsunairportrapaniizaferraraferraris-a-bulls-fanferrerotikagoshimalopolskanittedalfetsundyndns-wikimobetsumitakagildeskaliszkolamericanfamilydservemp3fgunmaniwamannorth-kazakhstanfhvalerfilegear-augustowiiheyakagefilegear-deatnuniversitysvardofilegear-gbizfilegear-iefilegear-jpmorgangwonporterfilegear-sg-1filminamiizukamiminefinalchikugokasellfyis-a-candidatefinancefinnoyfirebaseappiemontefirenetlifylkesbiblackbaudcdn-edgestackhero-networkinggroupowiathletajimabaria-vungtaudiopsysharpigboatshawilliamhillfirenzefirestonefireweblikes-piedmontravelersinsurancefirmdalegalleryfishingoldpoint2thisamitsukefitjarfitnessettsurugiminamimakis-a-catererfjalerfkatsushikabeebyteappilottonsberguovdageaidnunjargausdalflekkefjordyndns-workservep2phxn--1lqs71dyndns-remotewdyndns-picserveminecraftransporteflesbergushikamifuranorthflankatsuyamashikokuchuoflickragerokunohealthcareershellflierneflirfloginlinefloppythonanywherealtorfloraflorencefloripalmasfjordenfloristanohatajiris-a-celticsfanfloromskogxn--2m4a15eflowershimokitayamafltravinhlonganflynnhosting-clusterfncashgabadaddjabbottoyourafndyndns1fnwkzfolldalfoolfor-ourfor-somegurownproviderfor-theaterfordebianforexrotheworkpccwinbar0emmafann-arborlandd-dnsiskinkyowariasahikawarszawashtenawsmppl-wawsglobalacceleratorahimeshimakanegasakievennodebalancern4t3l3p0rtatarantours3-ap-northeast-123minsidaarborteaches-yogano-ipifony-123miwebaccelastx4432-b-datacenterprisesakijobservableusercontentateshinanomachintaifun-dnsdojournalistoloseyouriparisor-fronavuotnarashinoharaetnabudejjunipereggio-emilia-romagnaroyboltateyamajureggiocalabriakrehamna
yoro0o0forgotdnshimonitayanagithubpreviewsaikisarazure-mobileirfjordynnservepicservequakeforli-cesena-forlicesenaforlillehammerfeste-ipimientaketomisatoolshimonosekikawaforsalegoismailillesandefjordynservebbservesarcasmileforsandasuolodingenfortalfortefosneshimosuwalkis-a-chefashionstorebaseljordyndns-serverisignfotrdynulvikatowicefoxn--2scrj9casinordlandurbanamexnetgamersapporomurafozfr-1fr-par-1fr-par-2franamizuhoboleslawiecommerce-shoppingyeongnamdinhachijohanamakisofukushimaoris-a-conservativegarsheiheijis-a-cparachutingfredrikstadynv6freedesktopazimuthaibinhphuocelotenkawakayamagnetcieszynh-servebeero-stageiseiroumugifuchungbukharag-cloud-championshiphoplixn--30rr7yfreemyiphosteurovisionredumbrellangevagrigentobishimadridvagsoygardenebakkeshibechambagricoharugbydgoszczecin-berlindasdaburfreesitefreetlshimotsukefreisennankokubunjis-a-cubicle-slavellinodeobjectshimotsumafrenchkisshikindleikangerfreseniushinichinanfriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfroganshinjotelulubin-vpncateringebunkyonanaoshimamateramockashiwarafrognfrolandynvpnpluservicesevastopolitiendafrom-akamaized-stagingfrom-alfrom-arfrom-azurewebsiteshikagamiishibuyabukihokuizumobaragusabaerobaticketshinjukuleuvenicefrom-campobassociatest-iserveblogsytenrissadistdlibestadultrentin-sudtirolfrom-coachaseljeducationcillahppiacenzaganfrom-ctrentin-sued-tirolfrom-dcatfooddagestangefrom-decagliarikuzentakataikillfrom-flapymntrentin-suedtirolfrom-gap-east-1from-higashiagatsumagoianiafrom-iafrom-idyroyrvikingulenfrom-ilfrom-in-the-bandairtelebitbridgestonemurorangecloudplatform0from-kshinkamigototalfrom-kyfrom-langsonyantakahamalselveruminamiminowafrom-malvikaufentigerfrom-mdfrom-mein-vigorlicefrom-mifunefrom-mnfrom-modshinshinotsurgeryfrom-mshinshirofrom-mtnfrom-ncatholicurus-4from-ndfrom-nefrom-nhs-heilbronnoysundfrom-njshintokushimafrom-nminamioguni5from-nvalledaostargithubusercontentrentino-a-adigefrom-nycaxiaskvollpagesardegnarutolgaulardalvivanovoldafrom-ohdancefrom-okegawassamukawataris-a-democratrentino-aadigefrom-orfrom-panasonichernovtsykkylvenneslaskerrylogisticsardiniafrom-pratohmamurogawatsonrenderfrom-ris-a-designerimarugame-hostyhostingfrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--32vp30hachinoheavyfrom-utsiracusagaeroclubmedecin-addrammenuorodoyerfrom-val-daostavalleyfrom-vtrentino-alto-adigefrom-wafrom-wiardwebthingsjcbnpparibashkiriafrom-wvallee-aosteroyfrom-wyfrosinonefrostabackplaneapplebesbyengerdalp1froyal-commissionfruskydivingfujiiderafujikawaguchikonefujiminokamoenairtrafficplexus-2fujinomiyadapliefujiokazakinkobearalvahkikonaibetsubame-south-1fujisatoshoeshintomikasaharafujisawafujishiroishidakabiratoridediboxn--3bst00minamisanrikubetsupportrentino-altoadigefujitsuruokakamigaharafujiyoshidappnodearthainguyenfukayabeardubaikawagoefukuchiyamadatsunanjoburgfukudomigawafukuis-a-doctorfukumitsubishigakirkeneshinyoshitomiokamisatokamachippubetsuikitchenfukuokakegawafukuroishikariwakunigamigrationfukusakirovogradoyfukuyamagatakaharunusualpersonfunabashiriuchinadattorelayfunagatakahashimamakiryuohkurafunahashikamiamakusatsumasendaisenergyeongginowaniihamatamakinoharafundfunkfeuerfuoiskujukuriyamandalfuosskoczowindowskrakowinefurubirafurudonordreisa-hockeynutwentertainmentrentino-s-tirolfurukawajimangolffanshiojirishirifujiedafusoctrangfussagamiharafutabayamaguchinomihachimanagementrentino-stirolfutboldlygoingnowhere-for-more-og-romsdalfuttsurutashinais-a-fi
nancialadvisor-aurdalfuturecmshioyamelhushirahamatonbetsurnadalfuturehostingfuturemailingfvghakuis-a-gurunzenhakusandnessjoenhaldenhalfmoonscalebookinghostedpictetrentino-sud-tirolhalsakakinokiaham-radio-opinbar1hamburghammarfeastasiahamurakamigoris-a-hard-workershiraokamisunagawahanamigawahanawahandavvesiidanangodaddyn-o-saurealestatefarmerseinehandcrafteducatorprojectrentino-sudtirolhangglidinghangoutrentino-sued-tirolhannannestadhannosegawahanoipinkazohanyuzenhappouzshiratakahagianghasamap-northeast-3hasaminami-alpshishikuis-a-hunterhashbanghasudazaifudaigodogadobeioruntimedio-campidano-mediocampidanomediohasura-appinokokamikoaniikappudopaashisogndalhasvikazteleportrentino-suedtirolhatogayahoooshikamagayaitakamoriokakudamatsuehatoyamazakitahiroshimarcheapartmentshisuifuettertdasnetzhatsukaichikaiseiyoichipshitaramahattfjelldalhayashimamotobusells-for-lesshizukuishimoichilloutsystemscloudsitehazuminobushibukawahelplfinancialhelsinkitakamiizumisanofidonnakamurataitogliattinnhemneshizuokamitondabayashiogamagoriziahemsedalhepforgeblockshoujis-a-knightpointtokaizukamaishikshacknetrentinoa-adigehetemlbfanhigashichichibuzentsujiiehigashihiroshimanehigashiizumozakitakatakanabeautychyattorneyagawakkanaioirasebastopoleangaviikadenagahamaroyhigashikagawahigashikagurasoedahigashikawakitaaikitakyushunantankazunovecorebungoonow-dnshowahigashikurumeinforumzhigashimatsushimarnardalhigashimatsuyamakitaakitadaitoigawahigashimurayamamotorcycleshowtimeloyhigashinarusells-for-uhigashinehigashiomitamanoshiroomghigashiosakasayamanakakogawahigashishirakawamatakanezawahigashisumiyoshikawaminamiaikitamihamadahigashitsunospamproxyhigashiurausukitamotosunnydayhigashiyamatokoriyamanashiibaclieu-1higashiyodogawahigashiyoshinogaris-a-landscaperspectakasakitanakagusukumoldeliveryhippyhiraizumisatohokkaidontexistmein-iservschulecznakaniikawatanagurahirakatashinagawahiranais-a-lawyerhirarahiratsukaeruhirayaizuwakamatsubushikusakadogawahitachiomiyaginozawaonsensiositehitachiotaketakaokalmykiahitraeumtgeradegreehjartdalhjelmelandholyhomegoodshwinnersiiitesilkddiamondsimple-urlhomeipioneerhomelinkyard-cloudjiffyresdalhomelinuxn--3ds443ghomeofficehomesecuritymacaparecidahomesecuritypchiryukyuragiizehomesenseeringhomeskleppippugliahomeunixn--3e0b707ehondahonjyoitakarazukaluganskfh-muensterhornindalhorsells-itrentinoaadigehortendofinternet-dnsimplesitehospitalhotelwithflightsirdalhotmailhoyangerhoylandetakasagooglecodespotrentinoalto-adigehungyenhurdalhurumajis-a-liberalhyllestadhyogoris-a-libertarianhyugawarahyundaiwafuneis-very-evillasalleitungsenis-very-goodyearis-very-niceis-very-sweetpepperugiais-with-thebandoomdnstraceisk01isk02jenv-arubacninhbinhdinhktistoryjeonnamegawajetztrentinostiroljevnakerjewelryjgorajlljls-sto1jls-sto2jls-sto3jmpixolinodeusercontentrentinosud-tiroljnjcloud-ver-jpchitosetogitsuliguriajoyokaichibahcavuotnagaivuotnagaokakyotambabymilk3jozis-a-musicianjpnjprsolarvikhersonlanxessolundbeckhmelnitskiyamasoykosaigawakosakaerodromegalloabatobamaceratachikawafaicloudineencoreapigeekoseis-a-painterhostsolutionslupskhakassiakosheroykoshimizumakis-a-patsfankoshughesomakosugekotohiradomainstitutekotourakouhokumakogenkounosupersalevangerkouyamasudakouzushimatrixn--3pxu8khplaystation-cloudyclusterkozagawakozakis-a-personaltrainerkozowiosomnarviklabudhabikinokawachinaganoharamcocottekpnkppspbarcelonagawakepnord-odalwaysdatabaseballangenkainanaejrietisalatinabenogiehtavuoatnaamesjevuemielnombrendlyngen-rootaruibxos3-us-gov-west-1krasnikahokutokonamegatakatoris-a-photographerokussldkrasnodarkredstonekrelliankristia
nsandcatsoowitdkmpspawnextdirectrentinosudtirolkristiansundkrodsheradkrokstadelvaldaostavangerkropyvnytskyis-a-playershiftcryptonomichinomiyakekryminamiyamashirokawanabelaudnedalnkumamotoyamatsumaebashimofusakatakatsukis-a-republicanonoichinosekigaharakumanowtvaokumatorinokumejimatsumotofukekumenanyokkaichirurgiens-dentistes-en-francekundenkunisakis-a-rockstarachowicekunitachiaraisaijolsterkunitomigusukukis-a-socialistgstagekunneppubtlsopotrentinosued-tirolkuokgroupizzakurgankurobegetmyipirangalluplidlugolekagaminorddalkurogimimozaokinawashirosatochiokinoshimagentositempurlkuroisodegaurakuromatsunais-a-soxfankuronkurotakikawasakis-a-studentalkushirogawakustanais-a-teacherkassyncloudkusuppliesor-odalkutchanelkutnokuzumakis-a-techietipslzkvafjordkvalsundkvamsterdamnserverbaniakvanangenkvinesdalkvinnheradkviteseidatingkvitsoykwpspdnsor-varangermishimatsusakahogirlymisugitokorozawamitakeharamitourismartlabelingmitoyoakemiuramiyazurecontainerdpoliticaobangmiyotamatsukuris-an-actormjondalenmonzabrianzaramonzaebrianzamonzaedellabrianzamordoviamorenapolicemoriyamatsuuramoriyoshiminamiashigaramormonstermoroyamatsuzakis-an-actressmushcdn77-sslingmortgagemoscowithgoogleapiszmoseushimogosenmosjoenmoskenesorreisahayakawakamiichikawamisatottoris-an-anarchistjordalshalsenmossortlandmosviknx-serversusakiyosupabaseminemotegit-reposoruminanomoviemovimientokyotangotembaixadattowebhareidsbergmozilla-iotrentinosuedtirolmtranbytomaridagawalmartrentinsud-tirolmuikaminokawanishiaizubangemukoelnmunakatanemuosattemupkomatsushimassa-carrara-massacarraramassabuzzmurmanskomforbar2murotorcraftranakatombetsumy-gatewaymusashinodesakegawamuseumincomcastoripressorfoldmusicapetownnews-stagingmutsuzawamy-vigormy-wanggoupilemyactivedirectorymyamazeplaymyasustor-elvdalmycdmycloudnsoundcastorjdevcloudfunctionsokndalmydattolocalcertificationmyddnsgeekgalaxymydissentrentinsudtirolmydobissmarterthanyoumydrobofageometre-experts-comptablesowamydspectruminisitemyeffectrentinsued-tirolmyfastly-edgekey-stagingmyfirewalledreplittlestargardmyforuminterecifedextraspace-to-rentalstomakomaibaramyfritzmyftpaccesspeedpartnermyhome-servermyjinomykolaivencloud66mymailermymediapchoseikarugalsacemyokohamamatsudamypeplatformsharis-an-artistockholmestrandmypetsphinxn--41amyphotoshibajddarvodkafjordvaporcloudmypictureshinomypsxn--42c2d9amysecuritycamerakermyshopblockspjelkavikommunalforbundmyshopifymyspreadshopselectrentinsuedtirolmytabitordermythic-beastspydebergmytis-a-anarchistg-buildermytuleap-partnersquaresindevicenzamyvnchoshichikashukudoyamakeuppermywirecipescaracallypoivronpokerpokrovskommunepolkowicepoltavalle-aostavernpomorzeszowithyoutuberspacekitagawaponpesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-bykleclerchitachinakagawaltervistaipeigersundynamic-dnsarlpordenonepornporsangerporsangugeporsgrunnanpoznanpraxihuanprdprgmrprimetelprincipeprivatelinkomonowruzhgorodeoprivatizehealthinsuranceprofesionalprogressivegasrlpromonza-e-della-brianzaptokuyamatsushigepropertysnesrvarggatrevisogneprotectionprotonetroandindependent-inquest-a-la-masionprudentialpruszkowiwatsukiyonotaireserve-onlineprvcyonabarumbriaprzeworskogpunyufuelpupulawypussycatanzarowixsitepvhachirogatakahatakaishimojis-a-geekautokeinotteroypvtrogstadpwchowderpzqhadanorthwesternmutualqldqotoyohashimotoshimaqponiatowadaqslgbtroitskomorotsukagawaqualifioapplatter-applatterplcube-serverquangngais-certifiedugit-pagespeedmobilizeroticaltanissettailscaleforcequangninhthuanquangtritonoshonais-foundationquickconnectromsakuragawaquicksytestreamlitapplumbingouvaresearchitectesrhtren
toyonakagyokutoyakomakizunokunimimatakasugais-an-engineeringquipelementstrippertuscanytushungrytuvalle-daostamayukis-into-animeiwamizawatuxfamilytuyenquangbinhthuantwmailvestnesuzukis-gonevestre-slidreggio-calabriavestre-totennishiawakuravestvagoyvevelstadvibo-valentiaavibovalentiavideovinhphuchromedicinagatorogerssarufutsunomiyawakasaikaitakokonoevinnicarbonia-iglesias-carboniaiglesiascarboniavinnytsiavipsinaapplurinacionalvirginanmokurennebuvirtual-userveexchangevirtualservervirtualuserveftpodhalevisakurais-into-carsnoasakuholeckodairaviterboliviajessheimmobilienvivianvivoryvixn--45br5cylvlaanderennesoyvladikavkazimierz-dolnyvladimirvlogintoyonezawavmintsorocabalashovhachiojiyahikobierzycevologdanskoninjambylvolvolkswagencyouvolyngdalvoorlopervossevangenvotevotingvotoyonovps-hostrowiechungnamdalseidfjordynathomebuiltwithdarkhangelskypecorittogojomeetoystre-slidrettozawawmemergencyahabackdropalermochizukikirarahkkeravjuwmflabsvalbardunloppadualstackomvuxn--3hcrj9chonanbuskerudynamisches-dnsarpsborgripeeweeklylotterywoodsidellogliastradingworse-thanhphohochiminhadselbuyshouseshirakolobrzegersundongthapmircloudletshiranukamishihorowowloclawekonskowolawawpdevcloudwpenginepoweredwphostedmailwpmucdnipropetrovskygearappodlasiellaknoluoktagajobojis-an-entertainerwpmudevcdnaccessojamparaglidingwritesthisblogoipodzonewroclawmcloudwsseoullensvanguardianwtcp4wtfastlylbanzaicloudappspotagereporthruherecreationinomiyakonojorpelandigickarasjohkameyamatotakadawuozuerichardlillywzmiuwajimaxn--4it797konsulatrobeepsondriobranconagareyamaizuruhrxn--4pvxs4allxn--54b7fta0ccistrondheimpertrixcdn77-secureadymadealstahaugesunderxn--55qw42gxn--55qx5dxn--5dbhl8dxn--5js045dxn--5rtp49citadelhichisochimkentozsdell-ogliastraderxn--5rtq34kontuminamiuonumatsunoxn--5su34j936bgsgxn--5tzm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264citicarrdrobakamaiorigin-stagingmxn--12co0c3b4evalleaostaobaomoriguchiharaffleentrycloudflare-ipfstcgroupaaskimitsubatamibulsan-suedtirolkuszczytnoopscbgrimstadrrxn--80aaa0cvacationsvchoyodobashichinohealth-carereforminamidaitomanaustdalxn--80adxhksveioxn--80ao21axn--80aqecdr1axn--80asehdbarclaycards3-us-west-1xn--80aswgxn--80aukraanghkeliwebpaaskoyabeagleboardxn--8dbq2axn--8ltr62konyvelohmusashimurayamassivegridxn--8pvr4uxn--8y0a063axn--90a1affinitylotterybnikeisencowayxn--90a3academiamicable-modemoneyxn--90aeroportsinfolionetworkangerxn--90aishobaraxn--90amckinseyxn--90azhytomyrxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byanagawaxn--asky-iraxn--aurskog-hland-jnbarclays3-us-west-2xn--avery-yuasakurastoragexn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbsvelvikongsbergxn--bck1b9a5dre4civilaviationfabricafederation-webredirectmediatechnologyeongbukashiwazakiyosembokutamamuraxn--bdddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyanaizuxn--bjddar-ptarumizusawaxn--blt-elabcienciamallamaceiobbcn-north-1xn--bmlo-graingerxn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--brnny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investigation-aptibleadpagesquare7xn--brum-voagatrustkanazawaxn--btsfjord-9zaxn--bulsan-sdtirol-nsbarefootballooningjovikarasjoketokashikiyokawaraxn--c1avgxn--c2br7gxn--c3s14misakis-a-therapistoiaxn--cck2b3baremetalombardyn-vpndns3-website-ap-northeast-1xn--cckwcxetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-into-cartoonsokamitsuexn--ciqpnxn--clchc0ea0b2g2a9gcdxn--czr694bargainstantcloudfrontdoorestauranthuathienhuebinordre-landiherokuapparochernigovernmentjeldsundiscords
ays3-website-ap-southeast-1xn--czrs0trvaroyxn--czru2dxn--czrw28barrel-of-knowledgeapplinziitatebayashijonawatebizenakanojoetsumomodellinglassnillfjordiscordsezgoraxn--d1acj3barrell-of-knowledgecomputermezproxyzgorzeleccoffeedbackanagawarmiastalowa-wolayangroupars3-website-ap-southeast-2xn--d1alfaststacksevenassigdalxn--d1atrysiljanxn--d5qv7z876clanbibaiduckdnsaseboknowsitallxn--davvenjrga-y4axn--djrs72d6uyxn--djty4koobindalxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e1a4cldmail-boxaxn--eckvdtc9dxn--efvn9svn-repostuff-4-salexn--efvy88haebaruericssongdalenviknaklodzkochikushinonsenasakuchinotsuchiurakawaxn--ehqz56nxn--elqq16hagakhanhhoabinhduongxn--eveni-0qa01gaxn--f6qx53axn--fct429kooris-a-nascarfanxn--fhbeiarnxn--finny-yuaxn--fiq228c5hsbcleverappsassarinuyamashinazawaxn--fiq64barsycenterprisecloudcontrolappgafanquangnamasteigenoamishirasatochigifts3-website-eu-west-1xn--fiqs8swidnicaravanylvenetogakushimotoganexn--fiqz9swidnikitagatakkomaganexn--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesena-fcbsswiebodzindependent-commissionxn--forlcesena-c8axn--fpcrj9c3dxn--frde-granexn--frna-woaxn--frya-hraxn--fzc2c9e2clickrisinglesjaguarxn--fzys8d69uvgmailxn--g2xx48clinicasacampinagrandebungotakadaemongolianishitosashimizunaminamiawajikintuitoyotsukaidownloadrudtvsaogoncapooguyxn--gckr3f0fastvps-serveronakanotoddenxn--gecrj9cliniquedaklakasamatsudoesntexisteingeekasserversicherungroks-theatrentin-sud-tirolxn--ggaviika-8ya47hagebostadxn--gildeskl-g0axn--givuotna-8yandexcloudxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050is-into-gamessinamsosnowieconomiasadojin-dslattuminamitanexn--gmqw5axn--gnstigbestellen-zvbrplsbxn--45brj9churcharterxn--gnstigliefern-wobihirosakikamijimayfirstorfjordxn--h-2failxn--h1ahnxn--h1alizxn--h2breg3eveneswinoujsciencexn--h2brj9c8clothingdustdatadetectrani-andria-barletta-trani-andriaxn--h3cuzk1dienbienxn--hbmer-xqaxn--hcesuolo-7ya35barsyonlinehimejiiyamanouchikujoinvilleirvikarasuyamashikemrevistathellequipmentjmaxxxjavald-aostatics3-website-sa-east-1xn--hebda8basicserversejny-2xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-s4accident-prevention-k3swisstufftoread-booksnestudioxn--hnefoss-q1axn--hobl-iraxn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyaotsusonoxn--io0a7is-leetrentinoaltoadigexn--j1adpohlxn--j1aefauskedsmokorsetagayaseralingenovaraxn--j1ael8basilicataniaxn--j1amhaibarakisosakitahatakamatsukawaxn--j6w193gxn--jlq480n2rgxn--jlster-byasakaiminatoyookananiimiharuxn--jrpeland-54axn--jvr189misasaguris-an-accountantsmolaquilaocais-a-linux-useranishiaritabashikaoizumizakitashiobaraxn--k7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--45q11circlerkstagentsasayamaxn--koluokta-7ya57haiduongxn--kprw13dxn--kpry57dxn--kput3is-lostre-toteneis-a-llamarumorimachidaxn--krager-gyasugitlabbvieeexn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdfastly-terrariuminamiiseharaxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyasuokanmakiwakuratexn--kvnangen-k0axn--l-1fairwindsynology-diskstationxn--l1accentureklamborghinikkofuefukihabororosynology-dsuzakadnsaliastudynaliastrynxn--laheadju-7yatominamibosoftwarendalenugxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52basketballfinanzjaworznoticeableksvikaratsuginamikatagamilanotogawaxn--lesund-huaxn--lgbbat1ad8jejuxn--lgrd-poacctulaspeziaxn--lhppi-xqaxn--linds-pramericanexpresservegame-serverxn--loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacn-northwe
st-1xn--lten-granvindafjordxn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2ddesxn--mgb9awbfbsbxn--1qqw23axn--mgba3a3ejtunesuzukamogawaxn--mgba3a4f16axn--mgba3a4fra1-deloittexn--mgba7c0bbn0axn--mgbaakc7dvfsxn--mgbaam7a8haiphongonnakatsugawaxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordiscountry-snowplowiczeladzlgleezeu-2xn--mgbai9azgqp6jelasticbeanstalkharkovalleeaostexn--mgbayh7gparasitexn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgbcpq6gpa1axn--mgberp4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbpl2fhskopervikhmelnytskyivalleedaostexn--mgbqly7c0a67fbcngroks-thisayamanobeatsaudaxn--mgbqly7cvafricargoboavistanbulsan-sudtirolxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhauspostman-echofunatoriginstances3-website-us-east-1xn--mgbx4cd0abkhaziaxn--mix082fbx-osewienxn--mix891fbxosexyxn--mjndalen-64axn--mk0axindependent-inquiryxn--mk1bu44cnpyatigorskjervoyagexn--mkru45is-not-certifiedxn--mlatvuopmi-s4axn--mli-tlavagiskexn--mlselv-iuaxn--moreke-juaxn--mori-qsakuratanxn--mosjen-eyatsukannamihokksundxn--mot-tlavangenxn--mre-og-romsdal-qqbuservecounterstrikexn--msy-ula0hair-surveillancexn--mtta-vrjjat-k7aflakstadaokayamazonaws-cloud9guacuiababybluebiteckidsmynasushiobaracingrok-freeddnsfreebox-osascoli-picenogatabuseating-organicbcgjerdrumcprequalifymelbourneasypanelblagrarq-authgear-stagingjerstadeltaishinomakilovecollegefantasyleaguenoharauthgearappspacehosted-by-previderehabmereitattoolforgerockyombolzano-altoadigeorgeorgiauthordalandroideporteatonamidorivnebetsukubankanumazuryomitanocparmautocodebergamoarekembuchikumagayagawafflecelloisirs3-external-180reggioemiliaromagnarusawaustrheimbalsan-sudtirolivingitpagexlivornobserveregruhostingivestbyglandroverhalladeskjakamaiedge-stagingivingjemnes3-eu-west-2038xn--muost-0qaxn--mxtq1misawaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--4dbgdty6ciscofreakamaihd-stagingriwataraindroppdalxn--nit225koryokamikawanehonbetsuwanouchikuhokuryugasakis-a-nursellsyourhomeftpiwatexn--nmesjevuemie-tcbalatinord-frontierxn--nnx388axn--nodessakurawebsozais-savedxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn--nyqy26axn--o1achernivtsicilynxn--4dbrk0cexn--o3cw4hakatanortonkotsunndalxn--o3cyx2axn--od0algardxn--od0aq3beneventodayusuharaxn--ogbpf8fldrvelvetromsohuissier-justicexn--oppegrd-ixaxn--ostery-fyatsushiroxn--osyro-wuaxn--otu796dxn--p1acfedjeezxn--p1ais-slickharkivallee-d-aostexn--pgbs0dhlx3xn--porsgu-sta26fedorainfraclouderaxn--pssu33lxn--pssy2uxn--q7ce6axn--q9jyb4cnsauheradyndns-at-homedepotenzamamicrosoftbankasukabedzin-brbalsfjordietgoryoshiokanravocats3-fips-us-gov-west-1xn--qcka1pmcpenzapposxn--qqqt11misconfusedxn--qxa6axn--qxamunexus-3xn--rady-iraxn--rdal-poaxn--rde-ulazioxn--rdy-0nabaris-uberleetrentinos-tirolxn--rennesy-v1axn--rhkkervju-01afedorapeoplefrakkestadyndns-webhostingujogaszxn--rholt-mragowoltlab-democraciaxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--risa-5naturalxn--risr-iraxn--rland-uuaxn--rlingen-mxaxn--rmskog-byawaraxn--rny31hakodatexn--rovu88bentleyusuitatamotorsitestinglitchernihivgubs3-website-us-west-1xn--rros-graphicsxn--rskog-uuaxn--rst-0naturbruksgymnxn--rsta-framercanvasxn--rvc1e0am3exn--ryken-vuaxn--ryrvik-byawatahamaxn--s-1faitheshopwarezzoxn--s9brj9cntraniandriabarlettatraniandriaxn--sandnessjen-ogbentrendhostingliwiceu-3xn--sandy-yuaxn--sdtirol-n2axn--seral-lraxn--ses554gxn--sgne-graphoxn--4gbriminiserverxn--skierv-utazurestaticappspaceusercontentunkongsvingerxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-fxaxn--slat-5navigationxn--slt-elabogado
beaemcloud-fr1xn--smla-hraxn--smna-gratangenxn--snase-nraxn--sndre-land-0cbeppublishproxyuufcfanirasakindependent-panelomonza-brianzaporizhzhedmarkarelianceu-4xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbeskidyn-ip24xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bloggerxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbestbuyshoparenagasakikuchikuseihicampinashikiminohostfoldnavyuzawaxn--stre-toten-zcbetainaboxfuselfipartindependent-reviewegroweibolognagasukeu-north-1xn--t60b56axn--tckweddingxn--tiq49xqyjelenia-goraxn--tjme-hraxn--tn0agrocerydxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbhzc66xn--trentin-sdtirol-7vbialystokkeymachineu-south-1xn--trentino-sd-tirol-c3bielawakuyachimataharanzanishiazaindielddanuorrindigenamerikawauevje-og-hornnes3-website-us-west-2xn--trentino-sdtirol-szbiella-speziaxn--trentinosd-tirol-rzbieszczadygeyachiyodaeguamfamscompute-1xn--trentinosdtirol-7vbievat-band-campaignieznoorstaplesakyotanabellunordeste-idclkarlsoyxn--trentinsd-tirol-6vbifukagawalbrzycharitydalomzaporizhzhiaxn--trentinsdtirol-nsbigv-infolkebiblegnicalvinklein-butterhcloudiscoursesalangenishigotpantheonsitexn--trgstad-r1axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atventuresinstagingxn--uc0ay4axn--uist22hakonexn--uisz3gxn--unjrga-rtashkenturindalxn--unup4yxn--uuwu58axn--vads-jraxn--valle-aoste-ebbturystykaneyamazoexn--valle-d-aoste-ehboehringerikexn--valleaoste-e7axn--valledaoste-ebbvadsoccertmgreaterxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctb-hostingxn--vermgensberatung-pwbiharstadotsubetsugarulezajskiervaksdalondonetskarmoyxn--vestvgy-ixa6oxn--vg-yiabruzzombieidskogasawarackmazerbaijan-mayenbaidarmeniaxn--vgan-qoaxn--vgsy-qoa0jellybeanxn--vgu402coguchikuzenishiwakinvestmentsaveincloudyndns-at-workisboringsakershusrcfdyndns-blogsitexn--vhquvestfoldxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bihoronobeokagakikugawalesundiscoverdalondrinaplesknsalon-1xn--w4r85el8fhu5dnraxn--w4rs40lxn--wcvs22dxn--wgbh1communexn--wgbl6axn--xhq521bikedaejeonbuk0xn--xkc2al3hye2axn--xkc2dl3a5ee0hakubackyardshiraois-a-greenxn--y9a3aquarelleasingxn--yer-znavois-very-badxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn--4it168dxn--ystre-slidre-ujbiofficialorenskoglobodoes-itcouldbeworldishangrilamdongnairkitapps-audibleasecuritytacticsxn--0trq7p7nnishiharaxn--zbx025dxn--zf0ao64axn--zf0avxlxn--zfr164bipartsaloonishiizunazukindustriaxnbayernxz \ No newline at end of file diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go new file mode 100644 index 00000000000..d56e9e76244 --- /dev/null +++ b/vendor/golang.org/x/net/publicsuffix/list.go @@ -0,0 +1,203 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go + +// Package publicsuffix provides a public suffix list based on data from +// https://publicsuffix.org/ +// +// A public suffix is one under which Internet users can directly register +// names. It is related to, but different from, a TLD (top level domain). +// +// "com" is a TLD (top level domain). Top level means it has no dots. +// +// "com" is also a public suffix. Amazon and Google have registered different +// siblings under that domain: "amazon.com" and "google.com". +// +// "au" is another TLD, again because it has no dots. But it's not "amazon.au". +// Instead, it's "amazon.com.au". 
+// +// "com.au" isn't an actual TLD, because it's not at the top level (it has +// dots). But it is an eTLD (effective TLD), because that's the branching point +// for domain name registrars. +// +// Another name for "an eTLD" is "a public suffix". Often, what's more of +// interest is the eTLD+1, or one more label than the public suffix. For +// example, browsers partition read/write access to HTTP cookies according to +// the eTLD+1. Web pages served from "amazon.com.au" can't read cookies from +// "google.com.au", but web pages served from "maps.google.com" can share +// cookies from "www.google.com", so you don't have to sign into Google Maps +// separately from signing into Google Web Search. Note that all four of those +// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, +// the last two are not (but share the same eTLD+1: "google.com"). +// +// All of these domains have the same eTLD+1: +// - "www.books.amazon.co.uk" +// - "books.amazon.co.uk" +// - "amazon.co.uk" +// +// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". +// +// There is no closed form algorithm to calculate the eTLD of a domain. +// Instead, the calculation is data driven. This package provides a +// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at +// https://publicsuffix.org/ +package publicsuffix // import "golang.org/x/net/publicsuffix" + +// TODO: specify case sensitivity and leading/trailing dot behavior for +// func PublicSuffix and func EffectiveTLDPlusOne. + +import ( + "fmt" + "net/http/cookiejar" + "strings" +) + +// List implements the cookiejar.PublicSuffixList interface by calling the +// PublicSuffix function. +var List cookiejar.PublicSuffixList = list{} + +type list struct{} + +func (list) PublicSuffix(domain string) string { + ps, _ := PublicSuffix(domain) + return ps +} + +func (list) String() string { + return version +} + +// PublicSuffix returns the public suffix of the domain using a copy of the +// publicsuffix.org database compiled into the library. +// +// icann is whether the public suffix is managed by the Internet Corporation +// for Assigned Names and Numbers. If not, the public suffix is either a +// privately managed domain (and in practice, not a top level domain) or an +// unmanaged top level domain (and not explicitly mentioned in the +// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN +// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and +// "cromulent" is an unmanaged top level domain. 
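To ground the eTLD discussion in this doc comment, a hedged usage sketch of the package's exported API; PublicSuffix, EffectiveTLDPlusOne, and List are its actual entry points, and the expected outputs follow from the "amazon.co.uk" examples above:

```go
package main

import (
	"fmt"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	ps, icann := publicsuffix.PublicSuffix("books.amazon.co.uk")
	fmt.Println(ps, icann) // "co.uk" true: an ICANN-managed public suffix

	etld1, err := publicsuffix.EffectiveTLDPlusOne("books.amazon.co.uk")
	if err != nil {
		panic(err)
	}
	fmt.Println(etld1) // "amazon.co.uk"

	// List satisfies cookiejar.PublicSuffixList, so cookie access is
	// partitioned by eTLD+1 rather than by exact host.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		panic(err)
	}
	_ = jar
}
```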
+//
+// Use cases for distinguishing ICANN domains like "foo.com" from private
+// domains like "foo.appspot.com" can be found at
+// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
+func PublicSuffix(domain string) (publicSuffix string, icann bool) {
+	lo, hi := uint32(0), uint32(numTLD)
+	s, suffix, icannNode, wildcard := domain, len(domain), false, false
+loop:
+	for {
+		dot := strings.LastIndex(s, ".")
+		if wildcard {
+			icann = icannNode
+			suffix = 1 + dot
+		}
+		if lo == hi {
+			break
+		}
+		f := find(s[1+dot:], lo, hi)
+		if f == notFound {
+			break
+		}
+
+		u := uint32(nodes.get(f) >> (nodesBitsTextOffset + nodesBitsTextLength))
+		icannNode = u&(1<<nodesBitsICANN-1) != 0
+		u >>= nodesBitsICANN
+		u = children.get(u & (1<<nodesBitsChildren - 1))
+		lo = u & (1<<childrenBitsLo - 1)
+		u >>= childrenBitsLo
+		hi = u & (1<<childrenBitsHi - 1)
+		u >>= childrenBitsHi
+		switch u & (1<<childrenBitsNodeType - 1) {
+		case nodeTypeNormal:
+			suffix = 1 + dot
+		case nodeTypeException:
+			suffix = 1 + len(s)
+			break loop
+		}
+		u >>= childrenBitsNodeType
+		wildcard = u&(1<<childrenBitsWildcard-1) != 0
+
+		if dot == -1 {
+			break
+		}
+		s = s[:dot]
+	}
+	if suffix == len(domain) {
+		// If no rules match, the prevailing rule is "*".
+		return domain[1+strings.LastIndex(domain, "."):], icann
+	}
+	return domain[suffix:], icann
+}
+
+// EffectiveTLDPlusOne returns the effective top level domain plus one more
+// label. For example, the eTLD+1 for "foo.bar.golang.org" is "golang.org".
+func EffectiveTLDPlusOne(domain string) (string, error) {
+	if strings.HasPrefix(domain, ".") || strings.HasSuffix(domain, ".") || strings.Contains(domain, "..") {
+		return "", fmt.Errorf("publicsuffix: empty label in domain %q", domain)
+	}
+
+	suffix, _ := PublicSuffix(domain)
+	if len(domain) <= len(suffix) {
+		return "", fmt.Errorf("publicsuffix: cannot derive eTLD+1 for domain %q", domain)
+	}
+	i := len(domain) - len(suffix) - 1
+	if domain[i] != '.' {
+		return "", fmt.Errorf("publicsuffix: invalid public suffix %q for domain %q", suffix, domain)
+	}
+	return domain[:i], nil
+}
+
+// find returns the index of the node in the range [lo, hi) whose label equals
+// label, or notFound if there is no such node. The range is assumed to be in
+// strictly increasing node label order.
+func find(label string, lo, hi uint32) uint32 {
+	for lo < hi {
+		mid := lo + (hi-lo)/2
+		s := nodeLabel(mid)
+		if s < label {
+			lo = mid + 1
+		} else if s == label {
+			return mid
+		} else {
+			hi = mid
+		}
+	}
+	return notFound
+}
+
+// nodeLabel returns the label for the i'th node.
+func nodeLabel(i uint32) string {
+	x := nodes.get(i)
+	length := x & (1<<nodesBitsTextLength - 1)
+	x >>= nodesBitsTextLength
+	offset := x & (1<<nodesBitsTextOffset - 1)
+	return text[offset : offset+length]
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
new file mode 100644
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,38 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"context"
+	"time"
+
+	"golang.org/x/oauth2"
+)
+
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible.
+var appengineAppIDFunc func(c context.Context) string
+
+// AppEngineTokenSource returns a token source that fetches tokens from either
+// the current application's service account or from the metadata server,
+// depending on the App Engine environment. See below for environment-specific
+// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that
+// involves user accounts, see oauth2.Config instead.
+//
+// First generation App Engine runtimes (<= Go 1.9):
+// AppEngineTokenSource returns a token source that fetches tokens issued to the
+// current App Engine application's service account. The provided context must have
+// come from appengine.NewContext.
+//
+// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible:
+// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the
+// flexible environment. It delegates to ComputeTokenSource, and the provided
+// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource,
+// which DefaultTokenSource will use in this case) instead.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	return appEngineTokenSource(ctx, scope...)
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
new file mode 100644
index 00000000000..e61587945b0
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go
@@ -0,0 +1,77 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build appengine
+
+// This file applies to App Engine first generation runtimes (<= Go 1.9).
+
+package google
+
+import (
+	"context"
+	"sort"
+	"strings"
+	"sync"
+
+	"golang.org/x/oauth2"
+	"google.golang.org/appengine"
+)
+
+func init() {
+	appengineTokenFunc = appengine.AccessToken
+	appengineAppIDFunc = appengine.AppID
+}
+
+// See comment on AppEngineTokenSource in appengine.go.
+func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	scopes := append([]string{}, scope...)
+	sort.Strings(scopes)
+	return &gaeTokenSource{
+		ctx:    ctx,
+		scopes: scopes,
+		key:    strings.Join(scopes, " "),
+	}
+}
+
+// aeTokens helps the fetched tokens to be reused until their expiration.
+var (
+	aeTokensMu sync.Mutex
+	aeTokens   = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+	mu sync.Mutex // guards t; held while fetching or updating t
+	t  *oauth2.Token
+}
+
+type gaeTokenSource struct {
+	ctx    context.Context
+	scopes []string
+	key    string // to aeTokens map; space-separated scopes
+}
+
+func (ts *gaeTokenSource) Token() (*oauth2.Token, error) {
+	aeTokensMu.Lock()
+	tok, ok := aeTokens[ts.key]
+	if !ok {
+		tok = &tokenLock{}
+		aeTokens[ts.key] = tok
+	}
+	aeTokensMu.Unlock()
+
+	tok.mu.Lock()
+	defer tok.mu.Unlock()
+	if tok.t.Valid() {
+		return tok.t, nil
+	}
+	access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+ if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go new file mode 100644 index 00000000000..9c79aa0a0cc --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !appengine + +// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 00000000000..18f369851bf --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,327 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "path/filepath" + "runtime" + "sync" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/authhandler" +) + +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + defaultUniverseDomain = "googleapis.com" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +// Credentials from external accounts (workload identity federation) are used to +// identify a particular application from an on-prem or non-Google Cloud platform +// including Amazon Web Services (AWS), Microsoft Azure or any identity provider +// that supports OpenID Connect (OIDC). +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte + + udMu sync.Mutex // guards universeDomain + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// +// The default value is "googleapis.com". +// +// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports +// obtaining the universe domain when authenticating via the GCE metadata server. +// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the +// default value when authenticating via the GCE metadata server. 
+// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
+func (c *Credentials) UniverseDomain() string {
+ if c.universeDomain == "" {
+  return defaultUniverseDomain
+ }
+ return c.universeDomain
+}
+
+// GetUniverseDomain returns the default service domain for a given Cloud
+// universe.
+//
+// The default value is "googleapis.com".
+//
+// It obtains the universe domain from the attached service account on GCE when
+// authenticating via the GCE metadata server. See also [The attached service
+// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
+// If the GCE metadata server returns a 404 error, the default value is
+// returned. If the GCE metadata server returns an error other than 404, the
+// error is returned.
+func (c *Credentials) GetUniverseDomain() (string, error) {
+ c.udMu.Lock()
+ defer c.udMu.Unlock()
+ if c.universeDomain == "" && metadata.OnGCE() {
+  // If we're on Google Compute Engine, an App Engine standard second
+  // generation runtime, or App Engine flexible, use the metadata server.
+  err := c.computeUniverseDomain()
+  if err != nil {
+   return "", err
+  }
+ }
+ // If not on Google Compute Engine, or in case of any non-error path in
+ // computeUniverseDomain that did not set universeDomain, set the default
+ // universe domain.
+ if c.universeDomain == "" {
+  c.universeDomain = defaultUniverseDomain
+ }
+ return c.universeDomain, nil
+}
+
+// computeUniverseDomain fetches the default service domain for a given Cloud
+// universe from Google Compute Engine (GCE)'s metadata server. It's only valid
+// to use this method if your program is running on a GCE instance.
+func (c *Credentials) computeUniverseDomain() error {
+ var err error
+ c.universeDomain, err = metadata.Get("universe/universe_domain")
+ if err != nil {
+  if _, ok := err.(metadata.NotDefinedError); ok {
+   // http.StatusNotFound (404)
+   c.universeDomain = defaultUniverseDomain
+   return nil
+  } else {
+   return err
+  }
+ }
+ return nil
+}
+
+// DefaultCredentials is the old name of Credentials.
+//
+// Deprecated: use Credentials instead.
+type DefaultCredentials = Credentials
+
+// CredentialsParams holds user supplied parameters that are used together
+// with a credentials file for building a Credentials object.
+type CredentialsParams struct {
+ // Scopes is the list of OAuth scopes. Required.
+ // Example: https://www.googleapis.com/auth/cloud-platform
+ Scopes []string
+
+ // Subject is the user email used for domain wide delegation (see
+ // https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority).
+ // Optional.
+ Subject string
+
+ // AuthHandler is the AuthorizationHandler used for 3-legged OAuth flow. Required for 3LO flow.
+ AuthHandler authhandler.AuthorizationHandler
+
+ // State is a unique string used with AuthHandler. Required for 3LO flow.
+ State string
+
+ // PKCE is used to support PKCE flow. Optional for 3LO flow.
+ PKCE *authhandler.PKCEParams
+
+ // The OAuth2 TokenURL default override. This value overrides the default TokenURL,
+ // unless explicitly specified by the credentials config file. Optional.
+ TokenURL string
+
+ // EarlyTokenRefresh is the amount of time before a token expires that a new
+ // token will be preemptively fetched. If unset the default value is 10
+ // seconds.
+ //
+ // Note: This option is currently only respected when using credentials
+ // fetched from the GCE metadata server.
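A minimal sketch of how a caller combines FindDefaultCredentials with the GetUniverseDomain accessor defined above (the scope string is illustrative):

    func universeDomain(ctx context.Context) (string, error) {
    	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
    	if err != nil {
    		return "", err
    	}
    	// "googleapis.com" unless the attached service account reports another universe.
    	return creds.GetUniverseDomain()
    }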
+ EarlyTokenRefresh time.Duration + + // UniverseDomain is the default service domain for a given Cloud universe. + // Only supported in authentication flows that support universe domains. + // This value takes precedence over a universe domain explicitly specified + // in a credentials config file or by the GCE metadata server. Optional. + UniverseDomain string +} + +func (params CredentialsParams) deepCopy() CredentialsParams { + paramsCopy := params + paramsCopy.Scopes = make([]string, len(params.Scopes)) + copy(paramsCopy.Scopes, params.Scopes) + return paramsCopy +} + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentialsWithParams searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// For workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation on +// how to generate the JSON configuration file for on-prem/non-Google cloud +// platforms. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, params) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. + filename := wellKnownFile() + if b, err := os.ReadFile(filename); err == nil { + return CredentialsFromJSONWithParams(ctx, b, params) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) + // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) + // and App Engine flexible use ComputeTokenSource and the metadata server. 
+ if appengineTokenFunc != nil { + return &Credentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, params.Scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &Credentials{ + ProjectID: id, + TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...), + universeDomain: params.UniverseDomain, + }, nil + } + + // None are found; return helpful error. + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information", adcSetupURL) +} + +// FindDefaultCredentials invokes FindDefaultCredentialsWithParams with the specified scopes. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return FindDefaultCredentialsWithParams(ctx, params) +} + +// CredentialsFromJSONWithParams obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in ConfigFromJSON), +// a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh +// token JSON), or the JSON configuration file for workload identity federation in non-Google cloud +// platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). +func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { + // Make defensive copy of the slices in params. + params = params.deepCopy() + + // First, attempt to parse jsonData as a Google Developers Console client_credentials.json. + config, _ := ConfigFromJSON(jsonData, params.Scopes...) + if config != nil { + return &Credentials{ + ProjectID: "", + TokenSource: authhandler.TokenSourceWithPKCE(ctx, config, params.State, params.AuthHandler, params.PKCE), + JSON: jsonData, + }, nil + } + + // Otherwise, parse jsonData as one of the other supported credentials files. + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + + universeDomain := f.UniverseDomain + if params.UniverseDomain != "" { + universeDomain = params.UniverseDomain + } + // Authorized user credentials are only supported in the googleapis.com universe. + if f.Type == userCredentialsKey { + universeDomain = defaultUniverseDomain + } + + ts, err := f.tokenSource(ctx, params) + if err != nil { + return nil, err + } + ts = newErrWrappingTokenSource(ts) + return &Credentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, + }, nil +} + +// CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. 
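Putting the four-step ADC search order above to work: a minimal sketch that pins step 1 via the environment variable and then builds an authenticated client (paths and scope are placeholders):

    import (
    	"context"
    	"log"
    	"os"

    	"golang.org/x/oauth2/google"
    )

    func adcClient(ctx context.Context) {
    	// Step 1 of the search order: an explicit key file.
    	os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", "/path/to/service-account.json")
    	client, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/cloud-platform")
    	if err != nil {
    		log.Fatal(err)
    	}
    	_ = client // usable with any Google HTTP API
    }

Unset the variable and the same call falls through to the well-known gcloud file and, failing that, to the metadata server, exactly as the function above documents.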
+func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var params CredentialsParams + params.Scopes = scopes + return CredentialsFromJSONWithParams(ctx, jsonData, params) +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, params CredentialsParams) (*Credentials, error) { + b, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSONWithParams(ctx, b, params) +} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go new file mode 100644 index 00000000000..830d268c1e4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -0,0 +1,53 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, +// Google App Engine service accounts and workload identity federation +// from non-Google cloud platforms. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// For more information on using workload identity federation, refer to +// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation. +// +// # OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// # Workload and Workforce Identity Federation +// +// For information on how to use Workload and Workforce Identity Federation, see [golang.org/x/oauth2/google/externalaccount]. +// +// # Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// Application Default Credentials also support workload identity federation to +// access Google Cloud resources from non-Google Cloud platforms including Amazon +// Web Services (AWS), Microsoft Azure or any identity provider that supports +// OpenID Connect (OIDC). Workload identity federation is recommended for +// non-Google Cloud environments as it avoids the need to download, manage and +// store service account private keys locally. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. 
The TokenSource in the returned value is the
+// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or
+// JWTConfigFromJSON, but the Credentials may contain additional information
+// that is useful in some circumstances.
+package google // import "golang.org/x/oauth2/google"
diff --git a/vendor/golang.org/x/oauth2/google/error.go b/vendor/golang.org/x/oauth2/google/error.go
new file mode 100644
index 00000000000..d84dd004731
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/error.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "errors"
+
+ "golang.org/x/oauth2"
+)
+
+// AuthenticationError indicates there was an error in the authentication flow.
+//
+// Use (*AuthenticationError).Temporary to check if the error can be retried.
+type AuthenticationError struct {
+ err *oauth2.RetrieveError
+}
+
+func newAuthenticationError(err error) error {
+ re := &oauth2.RetrieveError{}
+ if !errors.As(err, &re) {
+  return err
+ }
+ return &AuthenticationError{
+  err: re,
+ }
+}
+
+// Temporary indicates that the network error has one of the following status codes and may be retried: 500, 503, 408, or 429.
+func (e *AuthenticationError) Temporary() bool {
+ if e.err.Response == nil {
+  return false
+ }
+ sc := e.err.Response.StatusCode
+ return sc == 500 || sc == 503 || sc == 408 || sc == 429
+}
+
+func (e *AuthenticationError) Error() string {
+ return e.err.Error()
+}
+
+func (e *AuthenticationError) Unwrap() error {
+ return e.err
+}
+
+type errWrappingTokenSource struct {
+ src oauth2.TokenSource
+}
+
+func newErrWrappingTokenSource(ts oauth2.TokenSource) oauth2.TokenSource {
+ return &errWrappingTokenSource{src: ts}
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (using r.Context for HTTP client
+// information) and return the new one.
+func (s *errWrappingTokenSource) Token() (*oauth2.Token, error) {
+ t, err := s.src.Token()
+ if err != nil {
+  return nil, newAuthenticationError(err)
+ }
+ return t, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go
new file mode 100644
index 00000000000..da61d0c0e84
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/externalaccount/aws.go
@@ -0,0 +1,578 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package externalaccount
+
+import (
+ "bytes"
+ "context"
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path"
+ "sort"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+)
+
+// AwsSecurityCredentials models AWS security credentials.
+type AwsSecurityCredentials struct {
+ // AccessKeyID is the AWS Access Key ID - Required.
+ AccessKeyID string `json:"AccessKeyID"`
+ // SecretAccessKey is the AWS Secret Access Key - Required.
+ SecretAccessKey string `json:"SecretAccessKey"`
+ // SessionToken is the AWS Session token. This should be provided for temporary AWS security credentials - Optional.
+ SessionToken string `json:"Token"`
+}
+
+// awsRequestSigner is a utility class to sign http requests using an AWS V4 signature.
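The errWrappingTokenSource above is how STS failures surface to callers as AuthenticationError. A caller-side sketch of the retry check (ts is assumed to be a TokenSource obtained via this package; the retry policy itself is illustrative):

    import (
    	"errors"

    	"golang.org/x/oauth2"
    	"golang.org/x/oauth2/google"
    )

    func fetchToken(ts oauth2.TokenSource) (*oauth2.Token, error) {
    	tok, err := ts.Token()
    	if err != nil {
    		var authErr *google.AuthenticationError
    		if errors.As(err, &authErr) && authErr.Temporary() {
    			// 408, 429, 500, 503: reasonable to retry with backoff.
    		}
    		return nil, err
    	}
    	return tok, nil
    }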
+type awsRequestSigner struct { + RegionName string + AwsSecurityCredentials *AwsSecurityCredentials +} + +// getenv aliases os.Getenv for testing +var getenv = os.Getenv + +const ( + defaultRegionalCredentialVerificationUrl = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" + + // AWS Signature Version 4 signing algorithm identifier. + awsAlgorithm = "AWS4-HMAC-SHA256" + + // The termination string for the AWS credential scope value as defined in + // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html + awsRequestType = "aws4_request" + + // The AWS authorization header name for the security session token if available. + awsSecurityTokenHeader = "x-amz-security-token" + + // The name of the header containing the session token for metadata endpoint calls + awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token" + + awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds" + + awsIMDSv2SessionTtl = "300" + + // The AWS authorization header name for the auto-generated date. + awsDateHeader = "x-amz-date" + + // Supported AWS configuration environment variables. + awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + + awsTimeFormatLong = "20060102T150405Z" + awsTimeFormatShort = "20060102" +) + +func getSha256(input []byte) (string, error) { + hash := sha256.New() + if _, err := hash.Write(input); err != nil { + return "", err + } + return hex.EncodeToString(hash.Sum(nil)), nil +} + +func getHmacSha256(key, input []byte) ([]byte, error) { + hash := hmac.New(sha256.New, key) + if _, err := hash.Write(input); err != nil { + return nil, err + } + return hash.Sum(nil), nil +} + +func cloneRequest(r *http.Request) *http.Request { + r2 := new(http.Request) + *r2 = *r + if r.Header != nil { + r2.Header = make(http.Header, len(r.Header)) + + // Find total number of values. + headerCount := 0 + for _, headerValues := range r.Header { + headerCount += len(headerValues) + } + copiedHeaders := make([]string, headerCount) // shared backing array for headers' values + + for headerKey, headerValues := range r.Header { + headerCount = copy(copiedHeaders, headerValues) + r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount] + copiedHeaders = copiedHeaders[headerCount:] + } + } + return r2 +} + +func canonicalPath(req *http.Request) string { + result := req.URL.EscapedPath() + if result == "" { + return "/" + } + return path.Clean(result) +} + +func canonicalQuery(req *http.Request) string { + queryValues := req.URL.Query() + for queryKey := range queryValues { + sort.Strings(queryValues[queryKey]) + } + return queryValues.Encode() +} + +func canonicalHeaders(req *http.Request) (string, string) { + // Header keys need to be sorted alphabetically. + var headers []string + lowerCaseHeaders := make(http.Header) + for k, v := range req.Header { + k := strings.ToLower(k) + if _, ok := lowerCaseHeaders[k]; ok { + // include additional values + lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...) 
+ } else { + headers = append(headers, k) + lowerCaseHeaders[k] = v + } + } + sort.Strings(headers) + + var fullHeaders bytes.Buffer + for _, header := range headers { + headerValue := strings.Join(lowerCaseHeaders[header], ",") + fullHeaders.WriteString(header) + fullHeaders.WriteRune(':') + fullHeaders.WriteString(headerValue) + fullHeaders.WriteRune('\n') + } + + return strings.Join(headers, ";"), fullHeaders.String() +} + +func requestDataHash(req *http.Request) (string, error) { + var requestData []byte + if req.Body != nil { + requestBody, err := req.GetBody() + if err != nil { + return "", err + } + defer requestBody.Close() + + requestData, err = ioutil.ReadAll(io.LimitReader(requestBody, 1<<20)) + if err != nil { + return "", err + } + } + + return getSha256(requestData) +} + +func requestHost(req *http.Request) string { + if req.Host != "" { + return req.Host + } + return req.URL.Host +} + +func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) { + dataHash, err := requestDataHash(req) + if err != nil { + return "", err + } + + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil +} + +// SignRequest adds the appropriate headers to an http.Request +// or returns an error if something prevented this. +func (rs *awsRequestSigner) SignRequest(req *http.Request) error { + signedRequest := cloneRequest(req) + timestamp := now() + + signedRequest.Header.Add("host", requestHost(req)) + + if rs.AwsSecurityCredentials.SessionToken != "" { + signedRequest.Header.Add(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken) + } + + if signedRequest.Header.Get("date") == "" { + signedRequest.Header.Add(awsDateHeader, timestamp.Format(awsTimeFormatLong)) + } + + authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp) + if err != nil { + return err + } + signedRequest.Header.Set("Authorization", authorizationCode) + + req.Header = signedRequest.Header + return nil +} + +func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) { + canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req) + + dateStamp := timestamp.Format(awsTimeFormatShort) + serviceName := "" + if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 { + serviceName = splitHost[0] + } + + credentialScope := fmt.Sprintf("%s/%s/%s/%s", dateStamp, rs.RegionName, serviceName, awsRequestType) + + requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData) + if err != nil { + return "", err + } + requestHash, err := getSha256([]byte(requestString)) + if err != nil { + return "", err + } + + stringToSign := fmt.Sprintf("%s\n%s\n%s\n%s", awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash) + + signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey) + for _, signingInput := range []string{ + dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign, + } { + signingKey, err = getHmacSha256(signingKey, []byte(signingInput)) + if err != nil { + return "", err + } + } + + return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil +} + +type awsCredentialSource struct { + environmentID string + regionURL string + regionalCredVerificationURL string + 
credVerificationURL string
+ imdsv2SessionTokenURL string
+ targetResource string
+ requestSigner *awsRequestSigner
+ region string
+ ctx context.Context
+ client *http.Client
+ awsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier
+ supplierOptions SupplierOptions
+}
+
+type awsRequestHeader struct {
+ Key string `json:"key"`
+ Value string `json:"value"`
+}
+
+type awsRequest struct {
+ URL string `json:"url"`
+ Method string `json:"method"`
+ Headers []awsRequestHeader `json:"headers"`
+}
+
+func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) {
+ if cs.client == nil {
+  cs.client = oauth2.NewClient(cs.ctx, nil)
+ }
+ return cs.client.Do(req.WithContext(cs.ctx))
+}
+
+func canRetrieveRegionFromEnvironment() bool {
+ // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is
+ // required.
+ return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != ""
+}
+
+func canRetrieveSecurityCredentialFromEnvironment() bool {
+ // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available.
+ return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != ""
+}
+
+func (cs awsCredentialSource) shouldUseMetadataServer() bool {
+ return cs.awsSecurityCredentialsSupplier == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment())
+}
+
+func (cs awsCredentialSource) credentialSourceType() string {
+ if cs.awsSecurityCredentialsSupplier != nil {
+  return "programmatic"
+ }
+ return "aws"
+}
+
+func (cs awsCredentialSource) subjectToken() (string, error) {
+ // Set defaults.
+ if cs.regionalCredVerificationURL == "" {
+  cs.regionalCredVerificationURL = defaultRegionalCredentialVerificationUrl
+ }
+ if cs.requestSigner == nil {
+  headers := make(map[string]string)
+  if cs.shouldUseMetadataServer() {
+   awsSessionToken, err := cs.getAWSSessionToken()
+   if err != nil {
+    return "", err
+   }
+
+   if awsSessionToken != "" {
+    headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
+   }
+  }
+
+  awsSecurityCredentials, err := cs.getSecurityCredentials(headers)
+  if err != nil {
+   return "", err
+  }
+  cs.region, err = cs.getRegion(headers)
+  if err != nil {
+   return "", err
+  }
+
+  cs.requestSigner = &awsRequestSigner{
+   RegionName: cs.region,
+   AwsSecurityCredentials: awsSecurityCredentials,
+  }
+ }
+
+ // Generate the signed request to AWS STS GetCallerIdentity API.
+ // Use the required regional endpoint. Otherwise, the request will fail.
+ req, err := http.NewRequest("POST", strings.Replace(cs.regionalCredVerificationURL, "{region}", cs.region, 1), nil)
+ if err != nil {
+  return "", err
+ }
+ // The full, canonical resource name of the workload identity pool
+ // provider, with or without the HTTPS prefix.
+ // Including this header as part of the signature is recommended to
+ // ensure data integrity.
+ if cs.targetResource != "" {
+  req.Header.Add("x-goog-cloud-target-resource", cs.targetResource)
+ }
+ if err := cs.requestSigner.SignRequest(req); err != nil {
+  return "", err
+ }
+
+ /*
+ The GCP STS endpoint expects the headers to be formatted as:
+ # [
+ #  {key: 'x-amz-date', value: '...'},
+ #  {key: 'Authorization', value: '...'},
+ #  ...
+ # ]
+ # And then serialized as:
+ # quote(json.dumps({
+ #  url: '...',
+ #  method: 'POST',
+ #  headers: [{key: 'x-amz-date', value: '...'}, ...]
+ # })) + */ + + awsSignedReq := awsRequest{ + URL: req.URL.String(), + Method: "POST", + } + for headerKey, headerList := range req.Header { + for _, headerValue := range headerList { + awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{ + Key: headerKey, + Value: headerValue, + }) + } + } + sort.Slice(awsSignedReq.Headers, func(i, j int) bool { + headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key) + if headerCompare == 0 { + return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0 + } + return headerCompare < 0 + }) + + result, err := json.Marshal(awsSignedReq) + if err != nil { + return "", err + } + return url.QueryEscape(string(result)), nil +} + +func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { + if cs.imdsv2SessionTokenURL == "" { + return "", nil + } + + req, err := http.NewRequest("PUT", cs.imdsv2SessionTokenURL, nil) + if err != nil { + return "", err + } + + req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl) + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS session token - %s", string(respBody)) + } + + return string(respBody), nil +} + +func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsRegion(cs.ctx, cs.supplierOptions) + } + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + cs.region = envAwsRegion + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil + } + + if cs.regionURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine AWS region") + } + + req, err := http.NewRequest("GET", cs.regionURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS region - %s", string(respBody)) + } + + // This endpoint will return the region in format: us-east-2b. + // Only the us-east-2 part should be used. 
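The generateAuthentication method above implements the standard SigV4 key derivation: the signing key is an HMAC-SHA256 chain over the date, region, service, and the aws4_request terminator, and the final Signature is the hex of one more HMAC over the string to sign. A standalone restatement of that chain (the function name is mine; the vendored code folds stringToSign into the same loop):

    import (
    	"crypto/hmac"
    	"crypto/sha256"
    )

    // deriveSigningKey mirrors the HMAC chain in generateAuthentication:
    // HMAC("AWS4"+secret, date) -> region -> service -> "aws4_request".
    func deriveSigningKey(secret, date, region, service string) []byte {
    	k := []byte("AWS4" + secret)
    	for _, part := range []string{date, region, service, "aws4_request"} {
    		mac := hmac.New(sha256.New, k)
    		mac.Write([]byte(part))
    		k = mac.Sum(nil)
    	}
    	return k
    }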
+ respBodyEnd := 0 + if len(respBody) > 1 { + respBodyEnd = len(respBody) - 1 + } + return string(respBody[:respBodyEnd]), nil +} + +func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result *AwsSecurityCredentials, err error) { + if cs.awsSecurityCredentialsSupplier != nil { + return cs.awsSecurityCredentialsSupplier.AwsSecurityCredentials(cs.ctx, cs.supplierOptions) + } + if canRetrieveSecurityCredentialFromEnvironment() { + return &AwsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SessionToken: getenv(awsSessionToken), + }, nil + } + + roleName, err := cs.getMetadataRoleName(headers) + if err != nil { + return + } + + credentials, err := cs.getMetadataSecurityCredentials(roleName, headers) + if err != nil { + return + } + + if credentials.AccessKeyID == "" { + return result, errors.New("oauth2/google/externalaccount: missing AccessKeyId credential") + } + + if credentials.SecretAccessKey == "" { + return result, errors.New("oauth2/google/externalaccount: missing SecretAccessKey credential") + } + + return &credentials, nil +} + +func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (AwsSecurityCredentials, error) { + var result AwsSecurityCredentials + + req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.credVerificationURL, roleName), nil) + if err != nil { + return result, err + } + req.Header.Add("Content-Type", "application/json") + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return result, err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return result, err + } + + if resp.StatusCode != 200 { + return result, fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS security credentials - %s", string(respBody)) + } + + err = json.Unmarshal(respBody, &result) + return result, err +} + +func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) { + if cs.credVerificationURL == "" { + return "", errors.New("oauth2/google/externalaccount: unable to determine the AWS metadata server security credentials endpoint") + } + + req, err := http.NewRequest("GET", cs.credVerificationURL, nil) + if err != nil { + return "", err + } + + for name, value := range headers { + req.Header.Add(name, value) + } + + resp, err := cs.doRequest(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("oauth2/google/externalaccount: unable to retrieve AWS role name - %s", string(respBody)) + } + + return string(respBody), nil +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go new file mode 100644 index 00000000000..400aa0a072a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -0,0 +1,484 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+/*
+Package externalaccount provides support for creating workload identity
+federation and workforce identity federation token sources that can be
+used to access Google Cloud resources from external identity providers.
+
+# Workload Identity Federation
+
+Using workload identity federation, your application can access Google Cloud
+resources from Amazon Web Services (AWS), Microsoft Azure or any identity
+provider that supports OpenID Connect (OIDC) or SAML 2.0.
+Traditionally, applications running outside Google Cloud have used service
+account keys to access Google Cloud resources. Using identity federation,
+you can allow your workload to impersonate a service account.
+This lets you access Google Cloud resources directly, eliminating the
+maintenance and security burden associated with service account keys.
+
+Follow the detailed instructions on how to configure Workload Identity Federation
+in various platforms:
+
+Amazon Web Services (AWS): https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#aws
+Microsoft Azure: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-clouds#azure
+OIDC identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#oidc
+SAML 2.0 identity provider: https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#saml
+
+For OIDC and SAML providers, the library can retrieve tokens in four ways:
+from a local file location (file-sourced credentials), from a server
+(URL-sourced credentials), from a local executable (executable-sourced
+credentials), or from a user defined function that returns an OIDC or SAML token.
+For file-sourced credentials, a background process needs to be continuously
+refreshing the file location with a new OIDC/SAML token prior to expiration.
+For tokens with one hour lifetimes, the token needs to be updated in the file
+every hour. The token can be stored directly as plain text or in JSON format.
+For URL-sourced credentials, a local server needs to host a GET endpoint to
+return the OIDC/SAML token. The response can be in plain text or JSON.
+Additional required request headers can also be specified.
+For executable-sourced credentials, an application needs to be available to
+output the OIDC/SAML token and other information in a JSON format.
+For more information on how these work (and how to implement
+executable-sourced credentials), please check out:
+https://cloud.google.com/iam/docs/workload-identity-federation-with-other-providers#create_a_credential_configuration
+
+To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers,
+or one that implements [AwsSecurityCredentialsSupplier] for AWS providers. This can then be used when building a [Config].
+The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google
+Cloud resources. For instance, you can create a new client from the
+[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource).
+
+Note that this library does not perform any validation on the token_url, token_info_url,
+or service_account_impersonation_url fields of the credential configuration.
+It is not recommended to use a credential configuration that you did not generate with
+the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
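To make the file-sourced flow just described concrete, a minimal sketch using the Config, CredentialSource, and Format types defined later in this file (the audience, pool, and file path are placeholders):

    import (
    	"context"

    	"golang.org/x/oauth2"
    	"golang.org/x/oauth2/google/externalaccount"
    )

    func fileSourcedTokenSource(ctx context.Context) (oauth2.TokenSource, error) {
    	return externalaccount.NewTokenSource(ctx, externalaccount.Config{
    		Audience:         "//iam.googleapis.com/projects/123/locations/global/workloadIdentityPools/pool/providers/provider",
    		SubjectTokenType: "urn:ietf:params:oauth:token-type:jwt",
    		CredentialSource: &externalaccount.CredentialSource{
    			File:   "/var/run/secrets/oidc/token", // refreshed externally before expiry
    			Format: externalaccount.Format{Type: "text"},
    		},
    		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
    	})
    }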
+
+# Workforce Identity Federation
+
+Workforce identity federation lets you use an external identity provider (IdP) to
+authenticate and authorize a workforce—a group of users, such as employees, partners,
+and contractors—using IAM, so that the users can access Google Cloud services.
+Workforce identity federation extends Google Cloud's identity capabilities to support
+syncless, attribute-based single sign on.
+
+With workforce identity federation, your workforce can access Google Cloud resources
+using an external identity provider (IdP) that supports OpenID Connect (OIDC) or
+SAML 2.0 such as Azure Active Directory (Azure AD), Active Directory Federation
+Services (AD FS), Okta, and others.
+
+Follow the detailed instructions on how to configure Workforce Identity Federation
+in various platforms:
+
+Azure AD: https://cloud.google.com/iam/docs/workforce-sign-in-azure-ad
+Okta: https://cloud.google.com/iam/docs/workforce-sign-in-okta
+OIDC identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#oidc
+SAML 2.0 identity provider: https://cloud.google.com/iam/docs/configuring-workforce-identity-federation#saml
+
+For workforce identity federation, the library can retrieve tokens in four ways:
+from a local file location (file-sourced credentials), from a server
+(URL-sourced credentials), from a local executable (executable-sourced
+credentials), or from a user supplied function that returns an OIDC or SAML token.
+For file-sourced credentials, a background process needs to be continuously
+refreshing the file location with a new OIDC/SAML token prior to expiration.
+For tokens with one hour lifetimes, the token needs to be updated in the file
+every hour. The token can be stored directly as plain text or in JSON format.
+For URL-sourced credentials, a local server needs to host a GET endpoint to
+return the OIDC/SAML token. The response can be in plain text or JSON.
+Additional required request headers can also be specified.
+For executable-sourced credentials, an application needs to be available to
+output the OIDC/SAML token and other information in a JSON format.
+For more information on how these work (and how to implement
+executable-sourced credentials), please check out:
+https://cloud.google.com/iam/docs/workforce-obtaining-short-lived-credentials#generate_a_configuration_file_for_non-interactive_sign-in
+
+To use a custom function to supply the token, define a struct that implements the [SubjectTokenSupplier] interface for OIDC/SAML providers.
+This can then be used when building a [Config].
+The [golang.org/x/oauth2.TokenSource] created from the config using [NewTokenSource] can then be used to access Google
+Cloud resources. For instance, you can create a new client from the
+[cloud.google.com/go/storage] package and pass in option.WithTokenSource(yourTokenSource).
+
+# Security considerations
+
+Note that this library does not perform any validation on the token_url, token_info_url,
+or service_account_impersonation_url fields of the credential configuration.
+It is not recommended to use a credential configuration that you did not generate with
+the gcloud CLI unless you verify that the URL fields point to a googleapis.com domain.
+*/
+package externalaccount
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google/internal/impersonate"
+ "golang.org/x/oauth2/google/internal/stsexchange"
+)
+
+const (
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+ defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token"
+ defaultUniverseDomain = "googleapis.com"
+)
+
+// now aliases time.Now for testing
+var now = func() time.Time {
+ return time.Now().UTC()
+}
+
+// Config stores the configuration for fetching tokens with external credentials.
+type Config struct {
+ // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
+ // identity pool or the workforce pool and the provider identifier in that pool. Required.
+ Audience string
+ // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec.
+ // Expected values include:
+ // “urn:ietf:params:oauth:token-type:jwt”
+ // “urn:ietf:params:oauth:token-type:id-token”
+ // “urn:ietf:params:oauth:token-type:saml2”
+ // “urn:ietf:params:aws:token-type:aws4_request”
+ // Required.
+ SubjectTokenType string
+ // TokenURL is the STS token exchange endpoint. If not provided, will default to
+ // https://sts.UNIVERSE_DOMAIN/v1/token, with UNIVERSE_DOMAIN set to the
+ // default service domain googleapis.com unless UniverseDomain is set.
+ // Optional.
+ TokenURL string
+ // TokenInfoURL is the token_info endpoint used to retrieve account-related information
+ // (user attributes like account identifier, e.g. email, username, uid, etc.). This is
+ // needed for gCloud session account identification. Optional.
+ TokenInfoURL string
+ // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
+ // required for workload identity pools when APIs to be accessed have not integrated with UberMint. Optional.
+ ServiceAccountImpersonationURL string
+ // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
+ // token will be valid for. If not provided, it will default to 3600. Optional.
+ ServiceAccountImpersonationLifetimeSeconds int
+ // ClientSecret is currently only required if token_info endpoint also
+ // needs to be called with the generated GCP access token. When provided, STS will be
+ // called with additional basic authentication using ClientId as username and ClientSecret as password. Optional.
+ ClientSecret string
+ // ClientID is only required in conjunction with ClientSecret, as described above. Optional.
+ ClientID string
+ // CredentialSource contains the necessary information to retrieve the token itself, as well
+ // as some environmental information. One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or
+ // CredentialSource must be provided. Optional.
+ CredentialSource *CredentialSource
+ // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
+ // will set the x-goog-user-project header which overrides the project associated with the credentials. Optional.
+ QuotaProjectID string
+ // Scopes contains the desired scopes for the returned access token. Optional.
+ Scopes []string
+ // WorkforcePoolUserProject is the workforce pool user project number when the credential
+ // corresponds to a workforce pool and not a workload identity pool.
+ // The underlying principal must still have serviceusage.services.use IAM
+ // permission to use the project for billing/quota. Optional.
+ WorkforcePoolUserProject string
+ // SubjectTokenSupplier is an optional token supplier for OIDC/SAML credentials.
+ // One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or CredentialSource must be provided. Optional.
+ SubjectTokenSupplier SubjectTokenSupplier
+ // AwsSecurityCredentialsSupplier is an AWS Security Credential supplier for AWS credentials.
+ // One of SubjectTokenSupplier, AwsSecurityCredentialsSupplier or CredentialSource must be provided. Optional.
+ AwsSecurityCredentialsSupplier AwsSecurityCredentialsSupplier
+ // UniverseDomain is the default service domain for a given Cloud universe.
+ // This value will be used in the default STS token URL. The default value
+ // is "googleapis.com". It will not be used if TokenURL is set. Optional.
+ UniverseDomain string
+}
+
+var (
+ validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
+)
+
+func validateWorkforceAudience(input string) bool {
+ return validWorkforceAudiencePattern.MatchString(input)
+}
+
+// NewTokenSource returns an external account TokenSource using the provided external account config.
+func NewTokenSource(ctx context.Context, conf Config) (oauth2.TokenSource, error) {
+ if conf.Audience == "" {
+  return nil, fmt.Errorf("oauth2/google/externalaccount: Audience must be set")
+ }
+ if conf.SubjectTokenType == "" {
+  return nil, fmt.Errorf("oauth2/google/externalaccount: Subject token type must be set")
+ }
+ if conf.WorkforcePoolUserProject != "" {
+  valid := validateWorkforceAudience(conf.Audience)
+  if !valid {
+   return nil, fmt.Errorf("oauth2/google/externalaccount: Workforce pool user project should not be set for non-workforce pool credentials")
+  }
+ }
+ count := 0
+ if conf.CredentialSource != nil {
+  count++
+ }
+ if conf.SubjectTokenSupplier != nil {
+  count++
+ }
+ if conf.AwsSecurityCredentialsSupplier != nil {
+  count++
+ }
+ if count == 0 {
+  return nil, fmt.Errorf("oauth2/google/externalaccount: One of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set")
+ }
+ if count > 1 {
+  return nil, fmt.Errorf("oauth2/google/externalaccount: Only one of CredentialSource, SubjectTokenSupplier, or AwsSecurityCredentialsSupplier must be set")
+ }
+ return conf.tokenSource(ctx, "https")
+}
+
+// tokenSource is a private function that's directly called by some of the tests,
+// because the unit test URLs are mocked, and would otherwise fail the
+// validity check.
+func (c *Config) tokenSource(ctx context.Context, scheme string) (oauth2.TokenSource, error) {
+ ts := tokenSource{
+  ctx: ctx,
+  conf: c,
+ }
+ if c.ServiceAccountImpersonationURL == "" {
+  return oauth2.ReuseTokenSource(nil, ts), nil
+ }
+ scopes := c.Scopes
+ ts.conf.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
+ imp := impersonate.ImpersonateTokenSource{
+  Ctx: ctx,
+  URL: c.ServiceAccountImpersonationURL,
+  Scopes: scopes,
+  Ts: oauth2.ReuseTokenSource(nil, ts),
+  TokenLifetimeSeconds: c.ServiceAccountImpersonationLifetimeSeconds,
+ }
+ return oauth2.ReuseTokenSource(nil, imp), nil
+}
+
+// Subject token file types.
+const (
+ fileTypeText = "text"
+ fileTypeJSON = "json"
+)
+
+// Format contains information needed to retrieve a subject token for URL or File sourced credentials.
+type Format struct {
+ // Type should be either "text" or "json". This determines whether the file or URL sourced credentials
+ // expect a simple text subject token or if the subject token will be contained in a JSON object.
+ // When not provided "text" type is assumed.
+ Type string `json:"type"`
+ // SubjectTokenFieldName is only required for JSON format. This is the field name that the credentials will check
+ // for the subject token in the file or URL response. This would be "access_token" for azure.
+ SubjectTokenFieldName string `json:"subject_token_field_name"`
+}
+
+// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
+type CredentialSource struct {
+ // File is the location for file sourced credentials.
+ // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ File string `json:"file"`
+
+ // URL is the URL to call for URL sourced credentials.
+ // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ URL string `json:"url"`
+ // Headers are the headers to attach to the request for URL sourced credentials.
+ Headers map[string]string `json:"headers"`
+
+ // Executable is the configuration object for executable sourced credentials.
+ // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ Executable *ExecutableConfig `json:"executable"`
+
+ // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "aws".
+ // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question.
+ EnvironmentID string `json:"environment_id"`
+ // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials.
+ RegionURL string `json:"region_url"`
+ // RegionalCredVerificationURL is the AWS regional credential verification URL, which will default to
+ // "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15" if not provided.
+ RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
+ // IMDSv2SessionTokenURL is the URL to retrieve the session token when using IMDSv2 in AWS.
+ IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
+ // Format is the format type for the subject token. Used for File and URL sourced credentials. Expected values are "text" or "json".
+ Format Format `json:"format"`
+}
+
+// ExecutableConfig contains information needed for executable sourced credentials.
+type ExecutableConfig struct {
+ // Command is the full command to run to retrieve the subject token.
+ // This can include arguments. Must be an absolute path for the program. Required.
+ Command string `json:"command"`
+ // TimeoutMillis is the timeout duration, in milliseconds. Defaults to 30000 milliseconds when not provided. Optional.
+ TimeoutMillis *int `json:"timeout_millis"`
+ // OutputFile is the absolute path to the output file where the executable will cache the response.
+ // If specified the auth libraries will first check this location before running the executable. Optional.
+ OutputFile string `json:"output_file"`
+}
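The json tags above map directly onto the credential configuration file. For instance, an EC2 source using IMDSv2 would unmarshal from JSON like the following sketch (the 169.254.169.254 endpoints are the standard AWS instance-metadata addresses; the snippet only illustrates the field mapping):

    var cs externalaccount.CredentialSource
    err := json.Unmarshal([]byte(`{
      "environment_id": "aws1",
      "region_url": "http://169.254.169.254/latest/meta-data/placement/availability-zone",
      "url": "http://169.254.169.254/latest/meta-data/iam/security-credentials",
      "regional_cred_verification_url": "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
      "imdsv2_session_token_url": "http://169.254.169.254/latest/api/token"
    }`), &cs)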
+// SubjectTokenSupplier can be used to supply a subject token to exchange for a GCP access token.
+type SubjectTokenSupplier interface {
+ // SubjectToken should return a valid subject token or an error.
+ // The external account token source does not cache the returned subject token, so caching
+ // logic should be implemented in the supplier to prevent multiple requests for the same subject token.
+ SubjectToken(ctx context.Context, options SupplierOptions) (string, error)
+}
+
+// AwsSecurityCredentialsSupplier can be used to supply AwsSecurityCredentials and an AWS Region to
+// exchange for a GCP access token.
+type AwsSecurityCredentialsSupplier interface {
+ // AwsRegion should return the AWS region or an error.
+ AwsRegion(ctx context.Context, options SupplierOptions) (string, error)
+ // AwsSecurityCredentials should return a valid set of AwsSecurityCredentials or an error.
+ // The external account token source does not cache the returned security credentials, so caching
+ // logic should be implemented in the supplier to prevent multiple requests for the same security credentials.
+ AwsSecurityCredentials(ctx context.Context, options SupplierOptions) (*AwsSecurityCredentials, error)
+}
+
+// SupplierOptions contains information about the requested subject token or AWS security credentials from the
+// Google external account credential.
+type SupplierOptions struct {
+ // Audience is the requested audience for the external account credential.
+ Audience string
+ // SubjectTokenType is the requested subject token type for the external account credential. Expected values include:
+ // “urn:ietf:params:oauth:token-type:jwt”
+ // “urn:ietf:params:oauth:token-type:id-token”
+ // “urn:ietf:params:oauth:token-type:saml2”
+ // “urn:ietf:params:aws:token-type:aws4_request”
+ SubjectTokenType string
+}
+
+// tokenURL returns the default STS token endpoint with the configured universe
+// domain.
+func (c *Config) tokenURL() string {
+ if c.UniverseDomain == "" {
+  return strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
+ }
+ return strings.Replace(defaultTokenURL, universeDomainPlaceholder, c.UniverseDomain, 1)
+}
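A minimal sketch of implementing the AwsSecurityCredentialsSupplier interface defined above. The static values are purely illustrative; a real supplier should fetch and cache credentials, since (as the doc comments note) the token source does not cache them:

    type staticAWSSupplier struct {
    	region string
    	creds  *externalaccount.AwsSecurityCredentials
    }

    func (s staticAWSSupplier) AwsRegion(ctx context.Context, _ externalaccount.SupplierOptions) (string, error) {
    	return s.region, nil
    }

    func (s staticAWSSupplier) AwsSecurityCredentials(ctx context.Context, _ externalaccount.SupplierOptions) (*externalaccount.AwsSecurityCredentials, error) {
    	return s.creds, nil
    }

It is wired into a Config in place of CredentialSource, e.g. cfg.AwsSecurityCredentialsSupplier = staticAWSSupplier{region: "us-east-1", creds: &externalaccount.AwsSecurityCredentials{...}}.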
+// parse determines the type of CredentialSource needed.
+func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) {
+ // Set defaults.
+ if c.TokenURL == "" {
+  c.TokenURL = c.tokenURL()
+ }
+ supplierOptions := SupplierOptions{Audience: c.Audience, SubjectTokenType: c.SubjectTokenType}
+
+ if c.AwsSecurityCredentialsSupplier != nil {
+  awsCredSource := awsCredentialSource{
+   awsSecurityCredentialsSupplier: c.AwsSecurityCredentialsSupplier,
+   targetResource: c.Audience,
+   supplierOptions: supplierOptions,
+   ctx: ctx,
+  }
+  return awsCredSource, nil
+ } else if c.SubjectTokenSupplier != nil {
+  return programmaticRefreshCredentialSource{subjectTokenSupplier: c.SubjectTokenSupplier, supplierOptions: supplierOptions, ctx: ctx}, nil
+ } else if len(c.CredentialSource.EnvironmentID) > 3 && c.CredentialSource.EnvironmentID[:3] == "aws" {
+  if awsVersion, err := strconv.Atoi(c.CredentialSource.EnvironmentID[3:]); err == nil {
+   if awsVersion != 1 {
+    return nil, fmt.Errorf("oauth2/google/externalaccount: aws version '%d' is not supported in the current build", awsVersion)
+   }
+
+   awsCredSource := awsCredentialSource{
+    environmentID: c.CredentialSource.EnvironmentID,
+    regionURL: c.CredentialSource.RegionURL,
+    regionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL,
+    credVerificationURL: c.CredentialSource.URL,
+    targetResource: c.Audience,
+    ctx: ctx,
+   }
+   if c.CredentialSource.IMDSv2SessionTokenURL != "" {
+    awsCredSource.imdsv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL
+   }
+
+   return awsCredSource, nil
+  }
+ } else if c.CredentialSource.File != "" {
+  return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil
+ } else if c.CredentialSource.URL != "" {
+  return urlCredentialSource{URL: c.CredentialSource.URL, Headers: c.CredentialSource.Headers, Format: c.CredentialSource.Format, ctx: ctx}, nil
+ } else if c.CredentialSource.Executable != nil {
+  return createExecutableCredential(ctx, c.CredentialSource.Executable, c)
+ }
+ return nil, fmt.Errorf("oauth2/google/externalaccount: unable to parse credential source")
+}
+
+type baseCredentialSource interface {
+ credentialSourceType() string
+ subjectToken() (string, error)
+}
+
+// tokenSource is the source that handles external credentials. It is used to retrieve Tokens.
+type tokenSource struct {
+ ctx context.Context
+ conf *Config
+}
+
+func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string {
+ return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
+  goVersion(),
+  "unknown",
+  credSource.credentialSourceType(),
+  conf.ServiceAccountImpersonationURL != "",
+  conf.ServiceAccountImpersonationLifetimeSeconds != 0)
+}
+
+// Token allows tokenSource to conform to the oauth2.TokenSource interface.
+func (ts tokenSource) Token() (*oauth2.Token, error) {
+ conf := ts.conf
+
+ credSource, err := conf.parse(ts.ctx)
+ if err != nil {
+  return nil, err
+ }
+ subjectToken, err := credSource.subjectToken()
+ if err != nil {
+  return nil, err
+ }
+ stsRequest := stsexchange.TokenExchangeRequest{
+  GrantType: "urn:ietf:params:oauth:grant-type:token-exchange",
+  Audience: conf.Audience,
+  Scope: conf.Scopes,
+  RequestedTokenType: "urn:ietf:params:oauth:token-type:access_token",
+  SubjectToken: subjectToken,
+  SubjectTokenType: conf.SubjectTokenType,
+ }
+ header := make(http.Header)
+ header.Add("Content-Type", "application/x-www-form-urlencoded")
+ header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource))
+ clientAuth := stsexchange.ClientAuthentication{
+  AuthStyle: oauth2.AuthStyleInHeader,
+  ClientID: conf.ClientID,
+  ClientSecret: conf.ClientSecret,
+ }
+ var options map[string]interface{}
+ // Do not pass workforce_pool_user_project when client authentication is used.
+ // The client ID is sufficient for determining the user project.
+ if conf.WorkforcePoolUserProject != "" && conf.ClientID == "" {
+  options = map[string]interface{}{
+   "userProject": conf.WorkforcePoolUserProject,
+  }
+ }
+ stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options)
+ if err != nil {
+  return nil, err
+ }
+
+ accessToken := &oauth2.Token{
+  AccessToken: stsResp.AccessToken,
+  TokenType: stsResp.TokenType,
+ }
+ if stsResp.ExpiresIn < 0 {
+  return nil, fmt.Errorf("oauth2/google/externalaccount: got invalid expiry from security token service")
+ }
+ accessToken.Expiry = now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
+
+ if stsResp.RefreshToken != "" {
+  accessToken.RefreshToken = stsResp.RefreshToken
+ }
+ return accessToken, nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go
new file mode 100644
index 00000000000..dca5681a46b
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/externalaccount/executablecredsource.go
@@ -0,0 +1,313 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
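Once Token above succeeds, the source plugs into the standard oauth2 plumbing. A usage sketch (cfg is a Config as shown earlier; the storage URL is an illustrative endpoint):

    ts, err := externalaccount.NewTokenSource(ctx, cfg)
    if err != nil {
    	return err
    }
    // NewTokenSource already wraps the source in oauth2.ReuseTokenSource,
    // so tokens are cached and refreshed only when they expire.
    client := oauth2.NewClient(ctx, ts)
    resp, err := client.Get("https://storage.googleapis.com/storage/v1/b/my-bucket")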
+ +package externalaccount + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "regexp" + "strings" + "time" +) + +var serviceAccountImpersonationRE = regexp.MustCompile("https://iamcredentials\\..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken") + +const ( + executableSupportedMaxVersion = 1 + defaultTimeout = 30 * time.Second + timeoutMinimum = 5 * time.Second + timeoutMaximum = 120 * time.Second + executableSource = "response" + outputFileSource = "output file" +) + +type nonCacheableError struct { + message string +} + +func (nce nonCacheableError) Error() string { + return nce.message +} + +func missingFieldError(source, field string) error { + return fmt.Errorf("oauth2/google/externalaccount: %v missing `%q` field", source, field) +} + +func jsonParsingError(source, data string) error { + return fmt.Errorf("oauth2/google/externalaccount: unable to parse %v\nResponse: %v", source, data) +} + +func malformedFailureError() error { + return nonCacheableError{"oauth2/google/externalaccount: response must include `error` and `message` fields when unsuccessful"} +} + +func userDefinedError(code, message string) error { + return nonCacheableError{fmt.Sprintf("oauth2/google/externalaccount: response contains unsuccessful response: (%v) %v", code, message)} +} + +func unsupportedVersionError(source string, version int) error { + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported version: %v", source, version) +} + +func tokenExpiredError() error { + return nonCacheableError{"oauth2/google/externalaccount: the token returned by the executable is expired"} +} + +func tokenTypeError(source string) error { + return fmt.Errorf("oauth2/google/externalaccount: %v contains unsupported token type", source) +} + +func exitCodeError(exitCode int) error { + return fmt.Errorf("oauth2/google/externalaccount: executable command failed with exit code %v", exitCode) +} + +func executableError(err error) error { + return fmt.Errorf("oauth2/google/externalaccount: executable command failed: %v", err) +} + +func executablesDisallowedError() error { + return errors.New("oauth2/google/externalaccount: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run") +} + +func timeoutRangeError() error { + return errors.New("oauth2/google/externalaccount: invalid `timeout_millis` field — executable timeout must be between 5 and 120 seconds") +} + +func commandMissingError() error { + return errors.New("oauth2/google/externalaccount: missing `command` field — executable command must be provided") +} + +type environment interface { + existingEnv() []string + getenv(string) string + run(ctx context.Context, command string, env []string) ([]byte, error) + now() time.Time +} + +type runtimeEnvironment struct{} + +func (r runtimeEnvironment) existingEnv() []string { + return os.Environ() +} + +func (r runtimeEnvironment) getenv(key string) string { + return os.Getenv(key) +} + +func (r runtimeEnvironment) now() time.Time { + return time.Now().UTC() +} + +func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) { + splitCommand := strings.Fields(command) + cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...) 
+ cmd.Env = env + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + if ctx.Err() == context.DeadlineExceeded { + return nil, context.DeadlineExceeded + } + + if exitError, ok := err.(*exec.ExitError); ok { + return nil, exitCodeError(exitError.ExitCode()) + } + + return nil, executableError(err) + } + + bytesStdout := bytes.TrimSpace(stdout.Bytes()) + if len(bytesStdout) > 0 { + return bytesStdout, nil + } + return bytes.TrimSpace(stderr.Bytes()), nil +} + +type executableCredentialSource struct { + Command string + Timeout time.Duration + OutputFile string + ctx context.Context + config *Config + env environment +} + +// CreateExecutableCredential creates an executableCredentialSource given an ExecutableConfig. +// It also performs defaulting and type conversions. +func createExecutableCredential(ctx context.Context, ec *ExecutableConfig, config *Config) (executableCredentialSource, error) { + if ec.Command == "" { + return executableCredentialSource{}, commandMissingError() + } + + result := executableCredentialSource{} + result.Command = ec.Command + if ec.TimeoutMillis == nil { + result.Timeout = defaultTimeout + } else { + result.Timeout = time.Duration(*ec.TimeoutMillis) * time.Millisecond + if result.Timeout < timeoutMinimum || result.Timeout > timeoutMaximum { + return executableCredentialSource{}, timeoutRangeError() + } + } + result.OutputFile = ec.OutputFile + result.ctx = ctx + result.config = config + result.env = runtimeEnvironment{} + return result, nil +} + +type executableResponse struct { + Version int `json:"version,omitempty"` + Success *bool `json:"success,omitempty"` + TokenType string `json:"token_type,omitempty"` + ExpirationTime int64 `json:"expiration_time,omitempty"` + IdToken string `json:"id_token,omitempty"` + SamlResponse string `json:"saml_response,omitempty"` + Code string `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) { + var result executableResponse + if err := json.Unmarshal(response, &result); err != nil { + return "", jsonParsingError(source, string(response)) + } + + if result.Version == 0 { + return "", missingFieldError(source, "version") + } + + if result.Success == nil { + return "", missingFieldError(source, "success") + } + + if !*result.Success { + if result.Code == "" || result.Message == "" { + return "", malformedFailureError() + } + return "", userDefinedError(result.Code, result.Message) + } + + if result.Version > executableSupportedMaxVersion || result.Version < 0 { + return "", unsupportedVersionError(source, result.Version) + } + + if result.ExpirationTime == 0 && cs.OutputFile != "" { + return "", missingFieldError(source, "expiration_time") + } + + if result.TokenType == "" { + return "", missingFieldError(source, "token_type") + } + + if result.ExpirationTime != 0 && result.ExpirationTime < now { + return "", tokenExpiredError() + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:jwt" || result.TokenType == "urn:ietf:params:oauth:token-type:id_token" { + if result.IdToken == "" { + return "", missingFieldError(source, "id_token") + } + return result.IdToken, nil + } + + if result.TokenType == "urn:ietf:params:oauth:token-type:saml2" { + if result.SamlResponse == "" { + return "", missingFieldError(source, "saml_response") + } + return result.SamlResponse, nil + } + + return "", tokenTypeError(source) 
+} + +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + +func (cs executableCredentialSource) subjectToken() (string, error) { + if token, err := cs.getTokenFromOutputFile(); token != "" || err != nil { + return token, err + } + + return cs.getTokenFromExecutableCommand() +} + +func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err error) { + if cs.OutputFile == "" { + // This ExecutableCredentialSource doesn't use an OutputFile. + return "", nil + } + + file, err := os.Open(cs.OutputFile) + if err != nil { + // No OutputFile found. Hasn't been created yet, so skip it. + return "", nil + } + defer file.Close() + + data, err := ioutil.ReadAll(io.LimitReader(file, 1<<20)) + if err != nil || len(data) == 0 { + // Cachefile exists, but no data found. Get new credential. + return "", nil + } + + token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix()) + if err != nil { + if _, ok := err.(nonCacheableError); ok { + // If the cached token is expired we need a new token, + // and if the cache contains a failure, we need to try again. + return "", nil + } + + // There was an error in the cached token, and the developer should be aware of it. + return "", err + } + // Token parsing succeeded. Use found token. + return token, nil +} + +func (cs executableCredentialSource) executableEnvironment() []string { + result := cs.env.existingEnv() + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", cs.config.Audience)) + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", cs.config.SubjectTokenType)) + result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0") + if cs.config.ServiceAccountImpersonationURL != "" { + matches := serviceAccountImpersonationRE.FindStringSubmatch(cs.config.ServiceAccountImpersonationURL) + if matches != nil { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1])) + } + } + if cs.OutputFile != "" { + result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", cs.OutputFile)) + } + return result +} + +func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, error) { + // For security reasons, we need our consumers to set this environment variable to allow executables to be run. + if cs.env.getenv("GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES") != "1" { + return "", executablesDisallowedError() + } + + ctx, cancel := context.WithDeadline(cs.ctx, cs.env.now().Add(cs.Timeout)) + defer cancel() + + output, err := cs.env.run(ctx, cs.Command, cs.executableEnvironment()) + if err != nil { + return "", err + } + return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix()) +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go new file mode 100644 index 00000000000..33766b97226 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/filecredsource.go @@ -0,0 +1,61 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
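Reviewer note: the executable flow above refuses to run unless the consumer exports GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES=1, and it expects the command to print one JSON document matching executableResponse. A hedged sketch of a conforming executable; the id_token value is hypothetical:

package main

import (
	"encoding/json"
	"os"
	"time"
)

func main() {
	// Shape validated by parseSubjectTokenFromSource above: version must be 1
	// and success must be present; expiration_time is mandatory only when an
	// output_file is configured.
	resp := map[string]interface{}{
		"version":         1,
		"success":         true,
		"token_type":      "urn:ietf:params:oauth:token-type:jwt",
		"id_token":        "eyJhbGciOi...", // hypothetical subject token
		"expiration_time": time.Now().Add(time.Hour).Unix(),
	}
	if err := json.NewEncoder(os.Stdout).Encode(resp); err != nil {
		os.Exit(1)
	}
}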
+ +package externalaccount + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" +) + +type fileCredentialSource struct { + File string + Format Format +} + +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + +func (cs fileCredentialSource) subjectToken() (string, error) { + tokenFile, err := os.Open(cs.File) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to open credential file %q", cs.File) + } + defer tokenFile.Close() + tokenBytes, err := ioutil.ReadAll(io.LimitReader(tokenFile, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to read credential file: %v", err) + } + tokenBytes = bytes.TrimSpace(tokenBytes) + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(tokenBytes, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(tokenBytes), nil + case "": + return string(tokenBytes), nil + default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/externalaccount/header.go new file mode 100644 index 00000000000..1d5aad2e2d9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go new file mode 100644 index 00000000000..6c1abdf2da4 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/programmaticrefreshcredsource.go @@ -0,0 +1,21 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import "context" + +type programmaticRefreshCredentialSource struct { + supplierOptions SupplierOptions + subjectTokenSupplier SubjectTokenSupplier + ctx context.Context +} + +func (cs programmaticRefreshCredentialSource) credentialSourceType() string { + return "programmatic" +} + +func (cs programmaticRefreshCredentialSource) subjectToken() (string, error) { + return cs.subjectTokenSupplier.SubjectToken(cs.ctx, cs.supplierOptions) +} diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go new file mode 100644 index 00000000000..71a7184e01a --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/externalaccount/urlcredsource.go @@ -0,0 +1,79 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "golang.org/x/oauth2" +) + +type urlCredentialSource struct { + URL string + Headers map[string]string + Format Format + ctx context.Context +} + +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + +func (cs urlCredentialSource) subjectToken() (string, error) { + client := oauth2.NewClient(cs.ctx, nil) + req, err := http.NewRequest("GET", cs.URL, nil) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: HTTP request for URL-sourced credential failed: %v", err) + } + req = req.WithContext(cs.ctx) + + for key, val := range cs.Headers { + req.Header.Add(key, val) + } + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid response when retrieving subject token: %v", err) + } + defer resp.Body.Close() + + respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: invalid body in subject token URL query: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return "", fmt.Errorf("oauth2/google/externalaccount: status code %d: %s", c, respBody) + } + + switch cs.Format.Type { + case "json": + jsonData := make(map[string]interface{}) + err = json.Unmarshal(respBody, &jsonData) + if err != nil { + return "", fmt.Errorf("oauth2/google/externalaccount: failed to unmarshal subject token file: %v", err) + } + val, ok := jsonData[cs.Format.SubjectTokenFieldName] + if !ok { + return "", errors.New("oauth2/google/externalaccount: provided subject_token_field_name not found in credentials") + } + token, ok := val.(string) + if !ok { + return "", errors.New("oauth2/google/externalaccount: improperly formatted subject token") + } + return token, nil + case "text": + return string(respBody), nil + case "": + return 
string(respBody), nil + default: + return "", errors.New("oauth2/google/externalaccount: invalid credential_source file format type") + } + +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go new file mode 100644 index 00000000000..ba931c2c3de --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -0,0 +1,309 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" + "golang.org/x/oauth2/google/internal/impersonate" + "golang.org/x/oauth2/jwt" +) + +// Endpoint is Google's OAuth 2.0 default endpoint. +var Endpoint = oauth2.Endpoint{ + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, +} + +// MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. +const MTLSTokenURL = "https://oauth2.mtls.googleapis.com/token" + +// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow. +const JWTTokenURL = "https://oauth2.googleapis.com/token" + +// ConfigFromJSON uses a Google Developers Console client_credentials.json +// file to construct a config. +// client_credentials.json can be downloaded from +// https://console.developers.google.com, under "Credentials". Download the Web +// application credentials in the JSON format and provide the contents of the +// file as jsonKey. +func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) 
// copy + return f.jwtConfig(scope, ""), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` + + // External Account fields + Audience string `json:"audience"` + SubjectTokenType string `json:"subject_token_type"` + TokenURLExternal string `json:"token_url"` + TokenInfoURL string `json:"token_info_url"` + ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"` + ServiceAccountImpersonation serviceAccountImpersonationInfo `json:"service_account_impersonation"` + Delegates []string `json:"delegates"` + CredentialSource externalaccount.CredentialSource `json:"credential_source"` + QuotaProjectID string `json:"quota_project_id"` + WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + + // Service account impersonation + SourceCredentials *credentialsFile `json:"source_credentials"` +} + +type serviceAccountImpersonationInfo struct { + TokenLifetimeSeconds int `json:"token_lifetime_seconds"` +} + +func (f *credentialsFile) jwtConfig(scopes []string, subject string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + Subject: subject, // This is the user email to impersonate + Audience: f.Audience, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsParams) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(params.Scopes, params.Subject) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: params.Scopes, + Endpoint: oauth2.Endpoint{ + AuthURL: f.AuthURL, + TokenURL: f.TokenURL, + AuthStyle: oauth2.AuthStyleInParams, + }, + } + if cfg.Endpoint.AuthURL == "" { + cfg.Endpoint.AuthURL = Endpoint.AuthURL + } + if cfg.Endpoint.TokenURL == "" { + if params.TokenURL != "" { + cfg.Endpoint.TokenURL = params.TokenURL + } else { + cfg.Endpoint.TokenURL = Endpoint.TokenURL + } + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case externalAccountKey: + cfg := &externalaccount.Config{ + Audience: f.Audience, + SubjectTokenType: f.SubjectTokenType, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL, + ServiceAccountImpersonationLifetimeSeconds: f.ServiceAccountImpersonation.TokenLifetimeSeconds, + 
ClientSecret: f.ClientSecret, + ClientID: f.ClientID, + CredentialSource: &f.CredentialSource, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + WorkforcePoolUserProject: f.WorkforcePoolUserProject, + } + return externalaccount.NewTokenSource(ctx, *cfg) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) + case impersonatedServiceAccount: + if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { + return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") + } + + ts, err := f.SourceCredentials.tokenSource(ctx, params) + if err != nil { + return nil, err + } + imp := impersonate.ImpersonateTokenSource{ + Ctx: ctx, + URL: f.ServiceAccountImpersonationURL, + Scopes: params.Scopes, + Ts: ts, + Delegates: f.Delegates, + } + return oauth2.ReuseTokenSource(nil, imp), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// If no scopes are specified, a set of default scopes are automatically granted. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { + return computeTokenSource(account, 0, scope...) +} + +func computeTokenSource(account string, earlyExpiry time.Duration, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSourceWithExpiry(nil, computeSource{account: account, scopes: scope}, earlyExpiry) +} + +type computeSource struct { + account string + scopes []string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenURI := "instance/service-accounts/" + acct + "/token" + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI = tokenURI + "?" + v.Encode() + } + tokenJSON, err := metadata.Get(tokenURI) + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + tok := &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + } + // NOTE(cbro): add hidden metadata about where the token is from. 
+ // This is needed for detection by client libraries to know that credentials come from the metadata server. + // This may be removed in a future version of this library. + return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 00000000000..cb582070746 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. + ClientSecret string + // Token is the OAuth2.0 access token. Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. 
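Reviewer note: the computeSource path, whose Token method closes just above, is reached through google.ComputeTokenSource. A minimal sketch; it can only succeed on a GCE instance:

package main

import (
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// "" selects the "default" service account, per computeSource.Token above.
	ts := google.ComputeTokenSource("", "https://www.googleapis.com/auth/cloud-platform")
	tok, err := ts.Token() // off GCE this fails with "not running on GCE"
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.Extra("oauth2.google.serviceAccount")) // "default"
}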
+ QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields needed to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go new file mode 100644 index 00000000000..6bc3af11033 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/impersonate/impersonate.go @@ -0,0 +1,105 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// generateAccessTokenReq is used for service account impersonation +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type impersonateTokenResponse struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +// ImpersonateTokenSource uses a source credential, stored in Ts, to request an access token to the provided URL. +// Scopes can be defined when the access token is requested. +type ImpersonateTokenSource struct { + // Ctx is the execution context of the impersonation process + // used to perform the http call to the URL. Required. + Ctx context.Context + // Ts is the source credential used to generate a token on the + // impersonated service account. Required. + Ts oauth2.TokenSource + + // URL is the endpoint to call to generate a token + // on behalf of the service account. Required.
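Reviewer note: the externalaccountauthorizeduser package closing above is internal; callers normally reach it by handing google.CredentialsFromJSON (defined in this package's default.go, outside this hunk) a credential of type external_account_authorized_user. A sketch with hypothetical values:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

// Hypothetical credential routed to the externalAccountAuthorizedUserKey
// branch of credentialsFile.tokenSource (earlier in this diff).
const credJSON = `{
  "type": "external_account_authorized_user",
  "audience": "//iam.googleapis.com/locations/global/workforcePools/pool/providers/provider",
  "refresh_token": "refresh",
  "token_url": "https://sts.googleapis.com/v1/oauthtoken",
  "client_id": "id",
  "client_secret": "secret"
}`

func main() {
	creds, err := google.CredentialsFromJSON(context.Background(), []byte(credJSON),
		"https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(creds.TokenSource != nil) // refreshable because canRefresh() above holds
}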
+ URL string + // Scopes that the impersonated credential should have. Required. + Scopes []string + // Delegates are the service account email addresses in a delegation chain. + // Each service account must be granted roles/iam.serviceAccountTokenCreator + // on the next service account in the chain. Optional. + Delegates []string + // TokenLifetimeSeconds is the number of seconds the impersonation token will + // be valid for. + TokenLifetimeSeconds int +} + +// Token performs the exchange to get a temporary service account token to allow access to GCP. +func (its ImpersonateTokenSource) Token() (*oauth2.Token, error) { + lifetimeString := "3600s" + if its.TokenLifetimeSeconds != 0 { + lifetimeString = fmt.Sprintf("%ds", its.TokenLifetimeSeconds) + } + reqBody := generateAccessTokenReq{ + Lifetime: lifetimeString, + Scope: its.Scopes, + Delegates: its.Delegates, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to marshal request: %v", err) + } + client := oauth2.NewClient(its.Ctx, its.Ts) + req, err := http.NewRequest("POST", its.URL, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to create impersonation request: %v", err) + } + req = req.WithContext(its.Ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + + var accessTokenResp impersonateTokenResponse + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("oauth2/google: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + TokenType: "Bearer", + }, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go new file mode 100644 index 00000000000..ebd520eace5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -0,0 +1,45 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stsexchange + +import ( + "encoding/base64" + "net/http" + "net/url" + + "golang.org/x/oauth2" +) + +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { + // AuthStyle can be either basic or request-body + AuthStyle oauth2.AuthStyle + ClientID string + ClientSecret string +} + +// InjectAuthentication is used to add authentication to a Secure Token Service exchange +// request. It modifies either the passed url.Values or http.Header depending on the desired +// authentication format. 
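Reviewer note: ImpersonateTokenSource, which closes just above, is also internal, so the observable contract is the JSON body its Token method POSTs to the :generateAccessToken endpoint. A sketch mirroring generateAccessTokenReq; the delegate email is hypothetical:

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

func main() {
	// Token() above defaults lifetime to "3600s" when TokenLifetimeSeconds is zero.
	body, err := json.Marshal(map[string]interface{}{
		"lifetime":  "3600s",
		"scope":     []string{"https://www.googleapis.com/auth/cloud-platform"},
		"delegates": []string{"projects/-/serviceAccounts/delegate@example.iam.gserviceaccount.com"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}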
+func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { + if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { + return + } + + switch c.AuthStyle { + case oauth2.AuthStyleInHeader: // AuthStyleInHeader corresponds to basic authentication as defined in rfc7617#2 + plainHeader := c.ClientID + ":" + c.ClientSecret + headers.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader))) + case oauth2.AuthStyleInParams: // AuthStyleInParams corresponds to request-body authentication with ClientID and ClientSecret in the message body. + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + case oauth2.AuthStyleAutoDetect: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + default: + values.Set("client_id", c.ClientID) + values.Set("client_secret", c.ClientSecret) + } +} diff --git a/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go new file mode 100644 index 00000000000..1a0bebd1595 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -0,0 +1,125 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package stsexchange + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + + "golang.org/x/oauth2" +) + +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. +// The first 4 fields are all mandatory. headers can be used to pass additional +// headers beyond the bare minimum required by the token exchange. options can +// be used to pass additional JSON-structured options to the remote server. 
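Reviewer note: for the AuthStyleInHeader branch of InjectAuthentication above, the result is plain RFC 7617 basic auth over the client id/secret pair. A quick sketch with hypothetical credentials:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	id, secret := "client-id", "client-secret" // hypothetical
	// Same encoding as the AuthStyleInHeader case above.
	fmt.Println("Authorization: Basic " +
		base64.StdEncoding.EncodeToString([]byte(id+":"+secret)))
}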
+func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { + data := url.Values{} + data.Set("audience", request.Audience) + data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") + data.Set("requested_token_type", "urn:ietf:params:oauth:token-type:access_token") + data.Set("subject_token_type", request.SubjectTokenType) + data.Set("subject_token", request.SubjectToken) + data.Set("scope", strings.Join(request.Scope, " ")) + if options != nil { + opts, err := json.Marshal(options) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to marshal additional options: %v", err) + } + data.Set("options", string(opts)) + } + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) + authentication.InjectAuthentication(data, headers) + encodedData := data.Encode() + + req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) + } + req = req.WithContext(ctx) + for key, list := range headers { + for _, val := range list { + req.Header.Add(key, val) + } + } + req.Header.Add("Content-Length", strconv.Itoa(len(encodedData))) + + resp, err := client.Do(req) + + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid response from Secure Token Server: %v", err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, err + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) + } + var stsResp Response + err = json.Unmarshal(body, &stsResp) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) + + } + + return &stsResp, nil +} + +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { + ActingParty struct { + ActorToken string + ActorTokenType string + } + GrantType string + Resource string + Audience string + Scope []string + RequestedTokenType string + SubjectToken string + SubjectTokenType string +} + +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { + AccessToken string `json:"access_token"` + IssuedTokenType string `json:"issued_token_type"` + TokenType string `json:"token_type"` + ExpiresIn int `json:"expires_in"` + Scope string `json:"scope"` + RefreshToken string `json:"refresh_token"` +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 00000000000..e89e6ae17bc --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,102 @@ +// Copyright 2015 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, audience, nil) +} + +// JWTAccessTokenSourceWithScope uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The scope is typically a list of URLs that specifies the scope of the +// credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. +func JWTAccessTokenSourceWithScope(jsonKey []byte, scope ...string) (oauth2.TokenSource, error) { + return newJWTSource(jsonKey, "", scope) +} + +func newJWTSource(jsonKey []byte, audience string, scopes []string) (oauth2.TokenSource, error) { + if len(scopes) == 0 && audience == "" { + return nil, fmt.Errorf("google: missing scope/audience for JWT access token") + } + + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + scopes: scopes, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + rts := newErrWrappingTokenSource(oauth2.ReuseTokenSource(tok, ts)) + return rts, nil +} + +type jwtAccessTokenSource struct { + email, audience string + scopes []string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + scope := strings.Join(ts.scopes, " ") + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Scope: scope, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 00000000000..456224bc789 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. +func NewSDKConfig(account string) (*SDKConfig, error) { + configPath, err := sdkConfigPath() + if err != nil { + return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err) + } + credentialsPath := filepath.Join(configPath, "credentials") + f, err := os.Open(credentialsPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err) + } + defer f.Close() + + var c sdkCredentials + if err := json.NewDecoder(f).Decode(&c); err != nil { + return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err) + } + if len(c.Data) == 0 { + return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath) + } + if account == "" { + propertiesPath := filepath.Join(configPath, "properties") + f, err := os.Open(propertiesPath) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err) + } + defer f.Close() + ini, err := parseINI(f) + if err != nil { + return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err) + } + core, ok := ini["core"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini) + } + active, ok := core["account"] + if !ok { + return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core) + } + account = active + } + + for _, d := range c.Data { + if account == "" || d.Key.Account == account { + if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" { + return nil, fmt.Errorf("oauth2/google: no token available for account %q", account) + } + var expiry time.Time + if d.Credential.TokenExpiry != nil { + expiry = *d.Credential.TokenExpiry + } + return &SDKConfig{ + conf: oauth2.Config{ + ClientID: d.Credential.ClientID, + ClientSecret: d.Credential.ClientSecret, + Scopes: strings.Split(d.Key.Scope, " "), + Endpoint: Endpoint, + RedirectURL: "oob", + }, + initialToken: &oauth2.Token{ + AccessToken: d.Credential.AccessToken, + RefreshToken: d.Credential.RefreshToken, + Expiry: expiry, + }, + }, nil + } + } + return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account) +} + +// Client returns an HTTP 
client using Google Cloud SDK credentials to +// authorize requests. The token will auto-refresh as necessary. The +// underlying http.RoundTripper will be obtained using the provided +// context. The returned client and its Transport should not be +// modified. +func (c *SDKConfig) Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &oauth2.Transport{ + Source: c.TokenSource(ctx), + }, + } +} + +// TokenSource returns an oauth2.TokenSource that retrieve tokens from +// Google Cloud SDK credentials using the provided context. +// It will returns the current access token stored in the credentials, +// and refresh it when it expires, but it won't update the credentials +// with the new access token. +func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource { + return c.conf.TokenSource(ctx, c.initialToken) +} + +// Scopes are the OAuth 2.0 scopes the current account is authorized for. +func (c *SDKConfig) Scopes() []string { + return c.conf.Scopes +} + +func parseINI(ini io.Reader) (map[string]map[string]string, error) { + result := map[string]map[string]string{ + "": {}, // root section + } + scanner := bufio.NewScanner(ini) + currentSection := "" + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if strings.HasPrefix(line, ";") { + // comment. + continue + } + if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") { + currentSection = strings.TrimSpace(line[1 : len(line)-1]) + result[currentSection] = map[string]string{} + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) == 2 && parts[0] != "" { + result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1]) + } + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning ini: %v", err) + } + return result, nil +} + +// sdkConfigPath tries to guess where the gcloud config is located. +// It can be overridden during tests. +var sdkConfigPath = func() (string, error) { + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil + } + homeDir := guessUnixHomeDir() + if homeDir == "" { + return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty") + } + return filepath.Join(homeDir, ".config", "gcloud"), nil +} + +func guessUnixHomeDir() string { + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if v := os.Getenv("HOME"); v != "" { + return v + } + // Else, fall back to user.Current: + if u, err := user.Current(); err == nil { + return u.HomeDir + } + return "" +} diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go new file mode 100644 index 00000000000..95015648b43 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jws/jws.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jws provides a partial implementation +// of JSON Web Signature encoding and decoding. +// It exists to support the golang.org/x/oauth2 package. +// +// See RFC 7515. +// +// Deprecated: this package is not intended for public use and might be +// removed in the future. It exists for internal use only. +// Please switch to another JWS package or copy this package into your own +// source tree. 
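Reviewer note: the SDK credential path closing above reads gcloud's legacy credentials store, so it only works after `gcloud auth login`. A minimal consumption sketch:

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	cfg, err := google.NewSDKConfig("") // "" selects the active gcloud account
	if err != nil {
		log.Fatal(err) // e.g. no ~/.config/gcloud/credentials yet
	}
	client := cfg.Client(context.Background()) // auto-refreshing *http.Client
	fmt.Println(cfg.Scopes(), client != nil)
}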
+package jws // import "golang.org/x/oauth2/jws" + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// ClaimSet contains information about the JWT signature including the +// permissions being requested (scopes), the target of the token, the issuer, +// the time the token was issued, and the lifetime of the token. +type ClaimSet struct { + Iss string `json:"iss"` // email address of the client_id of the application making the access token request + Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests + Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional). + Exp int64 `json:"exp"` // the expiration time of the assertion (seconds since Unix epoch) + Iat int64 `json:"iat"` // the time the assertion was issued (seconds since Unix epoch) + Typ string `json:"typ,omitempty"` // token type (Optional). + + // Email for which the application is requesting delegated access (Optional). + Sub string `json:"sub,omitempty"` + + // The old name of Sub. Client keeps setting Prn to be + // complaint with legacy OAuth 2.0 providers. (Optional) + Prn string `json:"prn,omitempty"` + + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + // This array is marshalled using custom code (see (c *ClaimSet) encode()). + PrivateClaims map[string]interface{} `json:"-"` +} + +func (c *ClaimSet) encode() (string, error) { + // Reverting time back for machines whose time is not perfectly in sync. + // If client machine's time is in the future according + // to Google servers, an access token will not be issued. + now := time.Now().Add(-10 * time.Second) + if c.Iat == 0 { + c.Iat = now.Unix() + } + if c.Exp == 0 { + c.Exp = now.Add(time.Hour).Unix() + } + if c.Exp < c.Iat { + return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat) + } + + b, err := json.Marshal(c) + if err != nil { + return "", err + } + + if len(c.PrivateClaims) == 0 { + return base64.RawURLEncoding.EncodeToString(b), nil + } + + // Marshal private claim set and then append it to b. + prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. 
+ return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 00000000000..b2bf18298b0 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. 
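Reviewer note: a self-contained round trip for the jws helpers closing above, using a throwaway RSA key; Encode signs with RS256 and Verify checks the signature against the matching public key:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/oauth2/jws"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	token, err := jws.Encode(
		&jws.Header{Algorithm: "RS256", Typ: "JWT"},
		&jws.ClaimSet{Iss: "svc@example.com", Aud: "https://oauth2.googleapis.com/token"},
		key,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(jws.Verify(token, &key.PublicKey)) // <nil> on success
}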
+ // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration + + // Audience optionally specifies the intended audience of the + // request. If empty, the value of TokenURL is used as the + // intended audience. + Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := js.conf.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. 
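+	// The fields below follow the RFC 6749 access token response;
+	// id_token comes from OpenID Connect and is only set by providers
+	// that return an identity token alongside the access token.
+	//
+	// A minimal sketch of driving this flow from client code (the
+	// endpoint and scope values are illustrative only):
+	//
+	//	cfg := &jwt.Config{
+	//		Email:      "svc@example.iam.gserviceaccount.com",
+	//		PrivateKey: pemBytes, // PEM-encoded RSA private key
+	//		TokenURL:   "https://oauth2.googleapis.com/token",
+	//		Scopes:     []string{"https://www.googleapis.com/auth/cloud-platform"},
+	//	}
+	//	client := cfg.Client(ctx) // *http.Client that attaches Bearer tokens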
+ var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 00000000000..fd8632444ec --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. 
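+// The predefined root keys above are always open; in practice Close is
+// only needed for handles obtained from OpenKey, OpenRemoteKey or
+// CreateKey.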
+func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. 
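+// For example, a sketch of printing a key's statistics:
+//
+//	ki, err := k.Stat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%d values, last written %v\n", ki.ValueCount, ki.ModTime())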
+type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 00000000000..bbf86ccf0c0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 00000000000..f533091c19e --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//go:build windows
+
+package registry
+
+import "syscall"
+
+const (
+	_REG_OPTION_NON_VOLATILE = 0
+
+	_REG_CREATED_NEW_KEY     = 1
+	_REG_OPENED_EXISTING_KEY = 2
+
+	_ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
+func LoadRegLoadMUIString() error {
+	return procRegLoadMUIStringW.Find()
+}
+
+//sys	regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys	regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys	regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys	regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys	regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys	regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys	regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys	expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 00000000000..74db26b94df
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,386 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import (
+	"errors"
+	"io"
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	// Registry value types.
+	NONE                       = 0
+	SZ                         = 1
+	EXPAND_SZ                  = 2
+	BINARY                     = 3
+	DWORD                      = 4
+	DWORD_BIG_ENDIAN           = 5
+	LINK                       = 6
+	MULTI_SZ                   = 7
+	RESOURCE_LIST              = 8
+	FULL_RESOURCE_DESCRIPTOR   = 9
+	RESOURCE_REQUIREMENTS_LIST = 10
+	QWORD                      = 11
+)
+
+var (
+	// ErrShortBuffer is returned when the buffer was too short for the operation.
+	ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+	// ErrNotExist is returned when a registry key or value does not exist.
+	ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+	// ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+	ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value it returns
+// an ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size in n without copying any data.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
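+// A sketch of the usual two-call pattern, probing for the required size
+// first (the value name is illustrative):
+//
+//	n, _, err := k.GetValue("MyValue", nil)
+//	if err != nil {
+//		return err
+//	}
+//	buf := make([]byte, n)
+//	n, valtype, err := k.GetValue("MyValue", buf)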
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. 
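+	// (Callers can probe for RegLoadMUIString support up front with
+	// LoadRegLoadMUIString, which fails if the procedure cannot be
+	// found in advapi32.dll.)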
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
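+// The typed getters all share this shape; for example (the value name
+// is illustrative):
+//
+//	if data, _, err := k.GetBinaryValue("Blob"); err == nil {
+//		fmt.Printf("%d bytes\n", len(data))
+//	}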
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
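+				// (buf was sized from MaxValueNameLen at the top, so
+				// ERROR_MORE_DATA here usually means another writer
+				// added a longer name after Stat; growing and retrying
+				// keeps the enumeration going.)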
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 00000000000..fc1835d8a23 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) 
(regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/google.golang.org/api/AUTHORS b/vendor/google.golang.org/api/AUTHORS new file mode 100644 index 00000000000..f07029059d2 --- /dev/null +++ b/vendor/google.golang.org/api/AUTHORS @@ -0,0 +1,11 @@ +# This is the official list of authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. +Google Inc. +LightStep Inc. diff --git a/vendor/google.golang.org/api/CONTRIBUTORS b/vendor/google.golang.org/api/CONTRIBUTORS new file mode 100644 index 00000000000..788677b8f04 --- /dev/null +++ b/vendor/google.golang.org/api/CONTRIBUTORS @@ -0,0 +1,56 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# https://cla.developers.google.com/about/google-individual +# https://cla.developers.google.com/about/google-corporate +# +# The CLA can be filled out on the web: +# +# https://cla.developers.google.com/ +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. 
+ +# Names should be added to this file like so: +# Name +# +# An entry with two email addresses specifies that the +# first address should be used in the submit logs and +# that the second address should be recognized as the +# same person when interacting with Rietveld. + +# Please keep the list sorted. + +Alain Vongsouvanhalainv +Andrew Gerrand +Brad Fitzpatrick +Eric Koleda +Francesc Campoy +Garrick Evans +Glenn Lewis +Ivan Krasin +Jason Hall +Johan Euphrosine +Kostik Shtoyk +Kunpei Sakai +Matthew Dolan +Matthew Whisenhunt +Michael McGreevy +Nick Craig-Wood +Robbie Trencheny +Ross Light +Sarah Adams +Scott Van Woudenberg +Takashi Matsuo diff --git a/vendor/google.golang.org/api/LICENSE b/vendor/google.golang.org/api/LICENSE new file mode 100644 index 00000000000..263aa7a0c12 --- /dev/null +++ b/vendor/google.golang.org/api/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go new file mode 100644 index 00000000000..985f5d1feb5 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -0,0 +1,447 @@ +// Copyright 2011 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package googleapi contains the common code shared by all Google API +// libraries. +package googleapi // import "google.golang.org/api/googleapi" + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "google.golang.org/api/internal/third_party/uritemplates" +) + +// ContentTyper is an interface for Readers which know (or would like +// to override) their Content-Type. If a media body doesn't implement +// ContentTyper, the type is sniffed from the content using +// http.DetectContentType. +type ContentTyper interface { + ContentType() string +} + +// A SizeReaderAt is a ReaderAt with a Size method. +// An io.SectionReader implements SizeReaderAt. 
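+// For example, io.NewSectionReader wraps any io.ReaderAt into a
+// SizeReaderAt:
+//
+//	var ra io.ReaderAt = bytes.NewReader(data)
+//	sra := io.NewSectionReader(ra, 0, int64(len(data))) // has ReadAt and Size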
+type SizeReaderAt interface { + io.ReaderAt + Size() int64 +} + +// ServerResponse is embedded in each Do response and +// provides the HTTP status code and header sent by the server. +type ServerResponse struct { + // HTTPStatusCode is the server's response status code. When using a + // resource method's Do call, this will always be in the 2xx range. + HTTPStatusCode int + // Header contains the response header fields from the server. + Header http.Header +} + +const ( + // Version defines the gax version being used. This is typically sent + // in an HTTP header to services. + Version = "0.5" + + // UserAgent is the header string used to identify this package. + UserAgent = "google-api-go-client/" + Version + + // DefaultUploadChunkSize is the default chunk size to use for resumable + // uploads if not specified by the user. + DefaultUploadChunkSize = 16 * 1024 * 1024 + + // MinUploadChunkSize is the minimum chunk size that can be used for + // resumable uploads. All user-specified chunk sizes must be multiple of + // this value. + MinUploadChunkSize = 256 * 1024 +) + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code and will always be populated. + Code int `json:"code"` + // Message is the server response message and is only populated when + // explicitly referenced by the JSON server response. + Message string `json:"message"` + // Details provide more context to an error. + Details []interface{} `json:"details"` + // Body is the raw response returned by the server. + // It is often but not always JSON, depending on how the request fails. + Body string + // Header contains the response header fields from the server. + Header http.Header + + Errors []ErrorItem +} + +// ErrorItem is a detailed error code & message from the Google API frontend. +type ErrorItem struct { + // Reason is the typed error code. For example: "some_example". + Reason string `json:"reason"` + // Message is the human-readable description of the error. + Message string `json:"message"` +} + +func (e *Error) Error() string { + if len(e.Errors) == 0 && e.Message == "" { + return fmt.Sprintf("googleapi: got HTTP response code %d with body: %v", e.Code, e.Body) + } + var buf bytes.Buffer + fmt.Fprintf(&buf, "googleapi: Error %d: ", e.Code) + if e.Message != "" { + fmt.Fprintf(&buf, "%s", e.Message) + } + if len(e.Details) > 0 { + var detailBuf bytes.Buffer + enc := json.NewEncoder(&detailBuf) + enc.SetIndent("", " ") + if err := enc.Encode(e.Details); err == nil { + fmt.Fprint(&buf, "\nDetails:") + fmt.Fprintf(&buf, "\n%s", detailBuf.String()) + + } + } + if len(e.Errors) == 0 { + return strings.TrimSpace(buf.String()) + } + if len(e.Errors) == 1 && e.Errors[0].Message == e.Message { + fmt.Fprintf(&buf, ", %s", e.Errors[0].Reason) + return buf.String() + } + fmt.Fprintln(&buf, "\nMore details:") + for _, v := range e.Errors { + fmt.Fprintf(&buf, "Reason: %s, Message: %s\n", v.Reason, v.Message) + } + return buf.String() +} + +type errorReply struct { + Error *Error `json:"error"` +} + +// CheckResponse returns an error (of type *Error) if the response +// status code is not 2xx. 
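+// A sketch of typical use after issuing a request manually:
+//
+//	res, err := http.DefaultClient.Do(req)
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close()
+//	if err := googleapi.CheckResponse(res); err != nil {
+//		return err // a *googleapi.Error carrying Code, Message and Body
+//	}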
+func CheckResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, err := ioutil.ReadAll(res.Body)
+	if err == nil {
+		jerr := new(errorReply)
+		err = json.Unmarshal(slurp, jerr)
+		if err == nil && jerr.Error != nil {
+			if jerr.Error.Code == 0 {
+				jerr.Error.Code = res.StatusCode
+			}
+			jerr.Error.Body = string(slurp)
+			return jerr.Error
+		}
+	}
+	return &Error{
+		Code:   res.StatusCode,
+		Body:   string(slurp),
+		Header: res.Header,
+	}
+}
+
+// IsNotModified reports whether err is the result of the
+// server replying with http.StatusNotModified.
+// Such error values are sometimes returned by "Do" methods
+// on calls when If-None-Match is used.
+func IsNotModified(err error) bool {
+	if err == nil {
+		return false
+	}
+	ae, ok := err.(*Error)
+	return ok && ae.Code == http.StatusNotModified
+}
+
+// CheckMediaResponse returns an error (of type *Error) if the response
+// status code is not 2xx. Unlike CheckResponse it does not assume the
+// body is a JSON error document.
+// It is the caller's responsibility to close res.Body.
+func CheckMediaResponse(res *http.Response) error {
+	if res.StatusCode >= 200 && res.StatusCode <= 299 {
+		return nil
+	}
+	slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
+	return &Error{
+		Code: res.StatusCode,
+		Body: string(slurp),
+	}
+}
+
+// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper.
+type MarshalStyle bool
+
+// WithDataWrapper marshals JSON with a {"data": ...} wrapper.
+var WithDataWrapper = MarshalStyle(true)
+
+// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper.
+var WithoutDataWrapper = MarshalStyle(false)
+
+func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
+	buf := new(bytes.Buffer)
+	if wrap {
+		buf.Write([]byte(`{"data": `))
+	}
+	err := json.NewEncoder(buf).Encode(v)
+	if err != nil {
+		return nil, err
+	}
+	if wrap {
+		buf.Write([]byte(`}`))
+	}
+	return buf, nil
+}
+
+// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
+// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
+// The remaining usable pieces of resumable uploads are exposed in each auto-generated API.
+type ProgressUpdater func(current, total int64)
+
+// MediaOption defines the interface for setting media options.
+type MediaOption interface {
+	setOptions(o *MediaOptions)
+}
+
+type noopOption struct{}
+
+func (bp noopOption) setOptions(o *MediaOptions) {
+}
+
+type bufferOption []byte
+
+func (bp bufferOption) setOptions(o *MediaOptions) {
+	o.Buffer = bp
+}
+
+// WithBuffer returns a MediaOption which sets the buffer used for media uploads.
+// The buffer's capacity must be at least MinUploadChunkSize; if it is not,
+// this option is a no-op.
+// If used together with ChunkSize, the buffer needs at least ChunkSize capacity.
+// If not set, each upload will allocate its own memory.
+// The buffer can be reused only after the request completes; using the same
+// buffer in concurrent calls will lead to a data race.
+func WithBuffer(buffer []byte) MediaOption {
+	if cap(buffer) < MinUploadChunkSize {
+		return noopOption{}
+	}
+	return bufferOption(buffer)
+}
+
+type contentTypeOption string
+
+func (ct contentTypeOption) setOptions(o *MediaOptions) {
+	o.ContentType = string(ct)
+	if o.ContentType == "" {
+		o.ForceEmptyContentType = true
+	}
+}
+
+// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
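+// Without this option the type is sniffed from the media itself via
+// http.DetectContentType (unless the reader implements ContentTyper).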
+// If ctype is empty, the Content-Type header will be omitted.
+func ContentType(ctype string) MediaOption {
+	return contentTypeOption(ctype)
+}
+
+type chunkSizeOption int
+
+func (cs chunkSizeOption) setOptions(o *MediaOptions) {
+	size := int(cs)
+	if size%MinUploadChunkSize != 0 {
+		size += MinUploadChunkSize - (size % MinUploadChunkSize)
+	}
+	o.ChunkSize = size
+}
+
+// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
+// size will be rounded up to the nearest multiple of 256K.
+// Media which contains fewer than size bytes will be uploaded in a single request.
+// Media which contains size bytes or more will be uploaded in separate chunks.
+// If size is zero, media will be uploaded in a single request.
+func ChunkSize(size int) MediaOption {
+	return chunkSizeOption(size)
+}
+
+// MediaOptions stores options for customizing media upload. It is not used by developers directly.
+type MediaOptions struct {
+	ContentType           string
+	ForceEmptyContentType bool
+
+	ChunkSize int
+
+	Buffer []byte
+}
+
+// ProcessMediaOptions stores options from opts in a MediaOptions.
+// It is not used by developers directly.
+func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
+	mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
+	for _, o := range opts {
+		o.setOptions(mo)
+	}
+	return mo
+}
+
+// ResolveRelative resolves relatives such as "http://www.golang.org/" and
+// "topics/myproject/mytopic" into a single string, such as
+// "http://www.golang.org/topics/myproject/mytopic". It strips all parent
+// references (e.g. ../..) as well as anything after the host
+// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz).
+//
+// ResolveRelative panics if either basestr or relstr is not able to be parsed.
+func ResolveRelative(basestr, relstr string) string {
+	u, err := url.Parse(basestr)
+	if err != nil {
+		panic(fmt.Sprintf("failed to parse %q", basestr))
+	}
+	afterColonPath := ""
+	if i := strings.IndexRune(relstr, ':'); i > 0 {
+		afterColonPath = relstr[i+1:]
+		relstr = relstr[:i]
+	}
+	rel, err := url.Parse(relstr)
+	if err != nil {
+		panic(fmt.Sprintf("failed to parse %q", relstr))
+	}
+	u = u.ResolveReference(rel)
+	us := u.String()
+	if afterColonPath != "" {
+		us = fmt.Sprintf("%s:%s", us, afterColonPath)
+	}
+	us = strings.Replace(us, "%7B", "{", -1)
+	us = strings.Replace(us, "%7D", "}", -1)
+	us = strings.Replace(us, "%2A", "*", -1)
+	return us
+}
+
+// Expand substitutes any {encoded} strings in the URL passed in using
+// the map supplied.
+//
+// It stores the escaped and unescaped expansions in u.RawPath and u.Path
+// to avoid re-encoding of the expanded parameters in the URL path.
+func Expand(u *url.URL, expansions map[string]string) {
+	escaped, unescaped, err := uritemplates.Expand(u.Path, expansions)
+	if err == nil {
+		u.Path = unescaped
+		u.RawPath = escaped
+	}
+}
+
+// CloseBody is used to close res.Body.
+// Prior to calling Close, it also tries to Read a small amount to see an EOF.
+// Not seeing an EOF can prevent HTTP Transports from reusing connections.
+func CloseBody(res *http.Response) {
+	if res == nil || res.Body == nil {
+		return
+	}
+	// Justification for 3 byte reads: two for up to "\r\n" after
+	// a JSON/XML document, and then 1 to see EOF if we haven't yet.
+	// TODO(bradfitz): detect Go 1.3+ and skip these reads.
+ // See https://codereview.appspot.com/58240043 + // and https://codereview.appspot.com/49570044 + buf := make([]byte, 1) + for i := 0; i < 3; i++ { + _, err := res.Body.Read(buf) + if err != nil { + break + } + } + res.Body.Close() + +} + +// VariantType returns the type name of the given variant. +// If the map doesn't contain the named key or the value is not a []interface{}, "" is returned. +// This is used to support "variant" APIs that can return one of a number of different types. +func VariantType(t map[string]interface{}) string { + s, _ := t["type"].(string) + return s +} + +// ConvertVariant uses the JSON encoder/decoder to fill in the struct 'dst' with the fields found in variant 'v'. +// This is used to support "variant" APIs that can return one of a number of different types. +// It reports whether the conversion was successful. +func ConvertVariant(v map[string]interface{}, dst interface{}) bool { + var buf bytes.Buffer + err := json.NewEncoder(&buf).Encode(v) + if err != nil { + return false + } + return json.Unmarshal(buf.Bytes(), dst) == nil +} + +// A Field names a field to be retrieved with a partial response. +// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance +// +// Partial responses can dramatically reduce the amount of data that must be sent to your application. +// In order to request partial responses, you can specify the full list of fields +// that your application needs by adding the Fields option to your request. +// +// Field strings use camelCase with leading lower-case characters to identify fields within the response. +// +// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields, +// you could request just those fields like this: +// +// svc.Events.List().Fields("nextPageToken", "items/id").Do() +// +// or if you were also interested in each Item's "Updated" field, you can combine them like this: +// +// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do() +// +// Another way to find field names is through the Google API explorer: +// https://developers.google.com/apis-explorer/#p/ +type Field string + +// CombineFields combines fields into a single string. +func CombineFields(s []Field) string { + r := make([]string, len(s)) + for i, v := range s { + r[i] = string(v) + } + return strings.Join(r, ",") +} + +// A CallOption is an optional argument to an API call. +// It should be treated as an opaque value by users of Google APIs. +// +// A CallOption is something that configures an API call in a way that is +// not specific to that API; for instance, controlling the quota user for +// an API call is common across many APIs, and is thus a CallOption. +type CallOption interface { + Get() (key, value string) +} + +// QuotaUser returns a CallOption that will set the quota user for a call. +// The quota user can be used by server-side applications to control accounting. +// It can be an arbitrary string up to 40 characters, and will override UserIP +// if both are provided. +func QuotaUser(u string) CallOption { return quotaUser(u) } + +type quotaUser string + +func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) } + +// UserIP returns a CallOption that will set the "userIp" parameter of a call. +// This should be the IP address of the originating request. 
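+// CallOptions are passed at the end of a generated call's Do method;
+// for example (the service and method here are hypothetical):
+//
+//	resp, err := svc.Objects.List("bucket").Do(googleapi.QuotaUser("user-1234"))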
+func UserIP(ip string) CallOption { return userIP(ip) } + +type userIP string + +func (i userIP) Get() (string, string) { return "userIp", string(i) } + +// Trace returns a CallOption that enables diagnostic tracing for a call. +// traceToken is an ID supplied by Google support. +func Trace(traceToken string) CallOption { return traceTok(traceToken) } + +type traceTok string + +func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) } + +// TODO: Fields too diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 00000000000..61720ec2ea1 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,44 @@ +// Copyright 2012 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +// +// This package is DEPRECATED. Users should instead use, +// +// service, err := NewService(..., option.WithAPIKey(...)) +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +// +// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/googleapi/types.go b/vendor/google.golang.org/api/googleapi/types.go new file mode 100644 index 00000000000..fabf74d50d0 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/types.go @@ -0,0 +1,202 @@ +// Copyright 2013 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package googleapi + +import ( + "encoding/json" + "errors" + "strconv" +) + +// Int64s is a slice of int64s that marshal as quoted strings in JSON. +type Int64s []int64 + +func (q *Int64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, int64(v)) + } + return nil +} + +// Int32s is a slice of int32s that marshal as quoted strings in JSON. +type Int32s []int32 + +func (q *Int32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, int32(v)) + } + return nil +} + +// Uint64s is a slice of uint64s that marshal as quoted strings in JSON. 
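+// As with Int64s above, the values round-trip as quoted strings; a sketch:
+//
+//	b, _ := json.Marshal(googleapi.Uint64s{1, 2})
+//	fmt.Println(string(b)) // ["1","2"]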
+type Uint64s []uint64 + +func (q *Uint64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return err + } + *q = append(*q, uint64(v)) + } + return nil +} + +// Uint32s is a slice of uint32s that marshal as quoted strings in JSON. +type Uint32s []uint32 + +func (q *Uint32s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return err + } + *q = append(*q, uint32(v)) + } + return nil +} + +// Float64s is a slice of float64s that marshal as quoted strings in JSON. +type Float64s []float64 + +func (q *Float64s) UnmarshalJSON(raw []byte) error { + *q = (*q)[:0] + var ss []string + if err := json.Unmarshal(raw, &ss); err != nil { + return err + } + for _, s := range ss { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + *q = append(*q, float64(v)) + } + return nil +} + +func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) { + dst := make([]byte, 0, 2+n*10) // somewhat arbitrary + dst = append(dst, '[') + for i := 0; i < n; i++ { + if i > 0 { + dst = append(dst, ',') + } + dst = append(dst, '"') + dst = fn(dst, i) + dst = append(dst, '"') + } + dst = append(dst, ']') + return dst, nil +} + +func (q Int64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, q[i], 10) + }) +} + +func (q Int32s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendInt(dst, int64(q[i]), 10) + }) +} + +func (q Uint64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, q[i], 10) + }) +} + +func (q Uint32s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendUint(dst, uint64(q[i]), 10) + }) +} + +func (q Float64s) MarshalJSON() ([]byte, error) { + return quotedList(len(q), func(dst []byte, i int) []byte { + return strconv.AppendFloat(dst, q[i], 'g', -1, 64) + }) +} + +// RawMessage is a raw encoded JSON value. +// It is identical to json.RawMessage, except it does not suffer from +// https://golang.org/issue/14493. +type RawMessage []byte + +// MarshalJSON returns m. +func (m RawMessage) MarshalJSON() ([]byte, error) { + return m, nil +} + +// UnmarshalJSON sets *m to a copy of data. +func (m *RawMessage) UnmarshalJSON(data []byte) error { + if m == nil { + return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer") + } + *m = append((*m)[:0], data...) + return nil +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. 
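+// Generated API structs use pointer fields so that an unset field can be
+// told apart from its zero value; for example (the field is hypothetical):
+//
+//	req.Enabled = googleapi.Bool(false) // explicitly false, not merely unset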
+func Float64(v float64) *float64 { return &v } + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/api/internal/conn_pool.go b/vendor/google.golang.org/api/internal/conn_pool.go new file mode 100644 index 00000000000..fedcce15b46 --- /dev/null +++ b/vendor/google.golang.org/api/internal/conn_pool.go @@ -0,0 +1,30 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "google.golang.org/grpc" +) + +// ConnPool is a pool of grpc.ClientConns. +type ConnPool interface { + // Conn returns a ClientConn from the pool. + // + // Conns aren't returned to the pool. + Conn() *grpc.ClientConn + + // Num returns the number of connections in the pool. + // + // It will always return the same value. + Num() int + + // Close closes every ClientConn in the pool. + // + // The error returned by Close may be a single error or multiple errors. + Close() error + + // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs. + grpc.ClientConnInterface +} diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go new file mode 100644 index 00000000000..dc6d50e96aa --- /dev/null +++ b/vendor/google.golang.org/api/internal/creds.go @@ -0,0 +1,131 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + + "golang.org/x/oauth2" + "google.golang.org/api/internal/impersonate" + + "golang.org/x/oauth2/google" +) + +// Creds returns credential information obtained from DialSettings, or if none, then +// it returns default credential information. +func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + creds, err := baseCreds(ctx, ds) + if err != nil { + return nil, err + } + if ds.ImpersonationConfig != nil { + return impersonateCredentials(ctx, creds, ds) + } + return creds, nil +} + +func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) { + if ds.Credentials != nil { + return ds.Credentials, nil + } + if ds.CredentialsJSON != nil { + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.CredentialsFile != "" { + data, err := ioutil.ReadFile(ds.CredentialsFile) + if err != nil { + return nil, fmt.Errorf("cannot read credentials file: %v", err) + } + return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + } + if ds.TokenSource != nil { + return &google.Credentials{TokenSource: ds.TokenSource}, nil + } + cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) + if err != nil { + return nil, err + } + if len(cred.JSON) > 0 { + return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + } + // For GAE and GCE, the JSON is empty so return the default credentials directly. + return cred, nil +} + +// JSON key file type. 
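+// When the JSON key is a service account and the caller supplied no
+// scopes, credentialsFromJSON below swaps in a self-signed JWT token
+// source, avoiding a round trip to the OAuth token endpoint.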
+const ( + serviceAccountKey = "service_account" +) + +// credentialsFromJSON returns a google.Credentials based on the input. +// +// - If the JSON is a service account and no scopes are provided, returns a self-signed JWT auth flow +// - Otherwise, returns an OAuth 2.0 flow. +func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { + cred, err := google.CredentialsFromJSON(ctx, data, scopes...) + if err != nil { + return nil, err + } + if len(data) > 0 && len(scopes) == 0 { + var f struct { + Type string `json:"type"` + // The remaining JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(cred.JSON, &f); err != nil { + return nil, err + } + if f.Type == serviceAccountKey { + ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) + if err != nil { + return nil, err + } + cred.TokenSource = ts + } + } + return cred, err +} + +func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { + // Use the API endpoint as the default audience. + audience := endpoint + if len(audiences) > 0 { + // TODO(shinfan): Update golang oauth to support multiple audiences. + if len(audiences) > 1 { + return nil, fmt.Errorf("multiple audiences support is not implemented") + } + audience = audiences[0] + } + return google.JWTAccessTokenSourceFromJSON(data, audience) +} + +// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials. +// +// NOTE(cbro): consider promoting this to a field on google.Credentials. +func QuotaProjectFromCreds(cred *google.Credentials) string { + var v struct { + QuotaProject string `json:"quota_project_id"` + } + if err := json.Unmarshal(cred.JSON, &v); err != nil { + return "" + } + return v.QuotaProject +} + +func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { + if len(ds.ImpersonationConfig.Scopes) == 0 { + ds.ImpersonationConfig.Scopes = ds.Scopes + } + ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) + if err != nil { + return nil, err + } + return &google.Credentials{ + TokenSource: ts, + ProjectID: creds.ProjectID, + }, nil +} diff --git a/vendor/google.golang.org/api/internal/gensupport/buffer.go b/vendor/google.golang.org/api/internal/gensupport/buffer.go new file mode 100644 index 00000000000..8c060cf0bf4 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/buffer.go @@ -0,0 +1,91 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "io" + + "google.golang.org/api/googleapi" +) + +// MediaBuffer buffers data from an io.Reader to support uploading media in +// retryable chunks. It should be created with NewMediaBuffer. +type MediaBuffer struct { + media io.Reader + + chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. + err error // Any error generated when populating chunk by reading media. + + // The absolute position of chunk in the underlying media. + off int64 +} + +// NewMediaBuffer initializes a MediaBuffer. +func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { + return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +}
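credentialsFromJSON above decodes only the top-level "type" field to decide whether a key file is a service account; every other field is ignored at this stage. A self-contained sketch of that probe (the key literal is a trimmed-down stand-in for a real key file):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	key := []byte(`{"type": "service_account", "project_id": "demo"}`)
	var f struct {
		Type string `json:"type"` // the only field the check needs
	}
	if err := json.Unmarshal(key, &f); err != nil {
		panic(err)
	}
	// true: take the self-signed JWT path when no scopes were given.
	fmt.Println(f.Type == "service_account")
}
```

+// NewMediaBufferWithBuffer initializes a MediaBuffer, reusing buffer as the chunk storage when its capacity is at least chunkSize.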
+func NewMediaBufferWithBuffer(media io.Reader, chunkSize int, buffer []byte) *MediaBuffer { + // If buffer isn't long enough, allocate new one. + if cap(buffer) < chunkSize { + return NewMediaBuffer(media, chunkSize) + } + + // Implementation expects buffer of zero length. + buffer = buffer[:0] + return &MediaBuffer{media: media, chunk: buffer} +} + +// Chunk returns the current buffered chunk, the offset in the underlying media +// from which the chunk is drawn, and the size of the chunk. +// Successive calls to Chunk return the same chunk between calls to Next. +func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { + // There may already be data in chunk if Next has not been called since the previous call to Chunk. + if mb.err == nil && len(mb.chunk) == 0 { + mb.err = mb.loadChunk() + } + return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err +} + +// loadChunk will read from media into chunk, up to the capacity of chunk. +func (mb *MediaBuffer) loadChunk() error { + bufSize := cap(mb.chunk) + mb.chunk = mb.chunk[:bufSize] + + read := 0 + var err error + for err == nil && read < bufSize { + var n int + n, err = mb.media.Read(mb.chunk[read:]) + read += n + } + mb.chunk = mb.chunk[:read] + return err +} + +// Next advances to the next chunk, which will be returned by the next call to Chunk. +// Calls to Next without a corresponding prior call to Chunk will have no effect. +func (mb *MediaBuffer) Next() { + mb.off += int64(len(mb.chunk)) + mb.chunk = mb.chunk[0:0] +} + +type readerTyper struct { + io.Reader + googleapi.ContentTyper +} + +// ReaderAtToReader adapts a ReaderAt to be used as a Reader. +// If ra implements googleapi.ContentTyper, then the returned reader +// will also implement googleapi.ContentTyper, delegating to ra. +func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader { + r := io.NewSectionReader(ra, 0, size) + if typer, ok := ra.(googleapi.ContentTyper); ok { + return readerTyper{r, typer} + } + return r +} diff --git a/vendor/google.golang.org/api/internal/gensupport/doc.go b/vendor/google.golang.org/api/internal/gensupport/doc.go new file mode 100644 index 00000000000..752c4b411b2 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/doc.go @@ -0,0 +1,10 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gensupport is an internal implementation detail used by code +// generated by the google-api-go-generator tool. +// +// This package may be modified at any time without regard for backwards +// compatibility. It should not be used directly by API users. +package gensupport diff --git a/vendor/google.golang.org/api/internal/gensupport/json.go b/vendor/google.golang.org/api/internal/gensupport/json.go new file mode 100644 index 00000000000..c01e32189f4 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/json.go @@ -0,0 +1,211 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" +) + +// MarshalJSON returns a JSON encoding of schema containing only selected fields. 
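Chunk and Next are consumed as a pair: Chunk is stable until Next advances, which is what makes each chunk independently retryable. A sketch of the calling pattern (gensupport is internal to this module, so this illustrates usage rather than code importable from outside):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"google.golang.org/api/internal/gensupport"
)

func main() {
	mb := gensupport.NewMediaBuffer(strings.NewReader("hello world"), 4)
	for {
		chunk, off, size, err := mb.Chunk()
		if err != nil && err != io.EOF {
			log.Fatal(err) // a genuine read error from the media
		}
		// A failed upload could call Chunk again and get the same data;
		// Next is only called once the chunk is safely delivered.
		fmt.Printf("uploading %d bytes at offset %d\n", size, off)
		if _, cerr := io.Copy(io.Discard, chunk); cerr != nil {
			log.Fatal(cerr)
		}
		if err == io.EOF {
			break // that was the final chunk
		}
		mb.Next()
	}
}
```

With a 4-byte chunk size this prints chunks of 4, 4, and 3 bytes at offsets 0, 4, and 8.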
+// A field is selected if any of the following is true: +// * it has a non-empty value +// * its field name is present in forceSendFields and it is not a nil pointer or nil interface +// * its field name is present in nullFields. +// The JSON key for each selected field is taken from the field's json: struct tag. +func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) { + if len(forceSendFields) == 0 && len(nullFields) == 0 { + return json.Marshal(schema) + } + + mustInclude := make(map[string]bool) + for _, f := range forceSendFields { + mustInclude[f] = true + } + useNull := make(map[string]bool) + useNullMaps := make(map[string]map[string]bool) + for _, nf := range nullFields { + parts := strings.SplitN(nf, ".", 2) + field := parts[0] + if len(parts) == 1 { + useNull[field] = true + } else { + if useNullMaps[field] == nil { + useNullMaps[field] = map[string]bool{} + } + useNullMaps[field][parts[1]] = true + } + } + + dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps) + if err != nil { + return nil, err + } + return json.Marshal(dataMap) +} + +func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) { + m := make(map[string]interface{}) + s := reflect.ValueOf(schema) + st := s.Type() + + for i := 0; i < s.NumField(); i++ { + jsonTag := st.Field(i).Tag.Get("json") + if jsonTag == "" { + continue + } + tag, err := parseJSONTag(jsonTag) + if err != nil { + return nil, err + } + if tag.ignore { + continue + } + + v := s.Field(i) + f := st.Field(i) + + if useNull[f.Name] { + if !isEmptyValue(v) { + return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name) + } + m[tag.apiName] = nil + continue + } + + if !includeField(v, f, mustInclude) { + continue + } + + // If map fields are explicitly set to null, use a map[string]interface{}. + if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil { + ms, ok := v.Interface().(map[string]string) + if !ok { + return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name) + } + mi := map[string]interface{}{} + for k, v := range ms { + mi[k] = v + } + for k := range useNullMaps[f.Name] { + mi[k] = nil + } + m[tag.apiName] = mi + continue + } + + // nil maps are treated as empty maps. + if f.Type.Kind() == reflect.Map && v.IsNil() { + m[tag.apiName] = map[string]string{} + continue + } + + // nil slices are treated as empty slices. + if f.Type.Kind() == reflect.Slice && v.IsNil() { + m[tag.apiName] = []bool{} + continue + } + + if tag.stringFormat { + m[tag.apiName] = formatAsString(v, f.Type.Kind()) + } else { + m[tag.apiName] = v.Interface() + } + } + return m, nil +} + +// formatAsString returns a string representation of v, dereferencing it first if possible. +func formatAsString(v reflect.Value, kind reflect.Kind) string { + if kind == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + + return fmt.Sprintf("%v", v.Interface()) +} + +// jsonTag represents a restricted version of the struct tag format used by encoding/json. +// It is used to describe the JSON encoding of fields in a Schema struct. +type jsonTag struct { + apiName string + stringFormat bool + ignore bool +} + +// parseJSONTag parses a restricted version of the struct tag format used by encoding/json. +// The format of the tag must match that generated by the Schema.writeSchemaStruct method +// in the api generator. 
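This selector logic is what backs the ForceSendFields/NullFields convention of generated clients: zero values are normally omitted, ForceSendFields keeps them, and NullFields emits explicit nulls (useful for PATCH semantics). A sketch with a hypothetical generated struct (gensupport is internal, so this is illustrative):

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/api/internal/gensupport"
)

// Firewall mimics the shape of a generated API struct.
type Firewall struct {
	Name        string `json:"name,omitempty"`
	Disabled    bool   `json:"disabled,omitempty"`
	Description string `json:"description,omitempty"`

	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

func main() {
	f := Firewall{
		Name:            "allow-ssh",
		Disabled:        false,                   // zero value: dropped by default
		ForceSendFields: []string{"Disabled"},    // Go field names, not JSON keys
		NullFields:      []string{"Description"}, // the field must be empty in the struct
	}
	b, err := gensupport.MarshalJSON(f, f.ForceSendFields, f.NullFields)
	if err != nil {
		log.Fatal(err)
	}
	// Map keys come out sorted:
	// {"description":null,"disabled":false,"name":"allow-ssh"}
	fmt.Println(string(b))
}
```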
+func parseJSONTag(val string) (jsonTag, error) { + if val == "-" { + return jsonTag{ignore: true}, nil + } + + var tag jsonTag + + i := strings.Index(val, ",") + if i == -1 || val[:i] == "" { + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + tag = jsonTag{ + apiName: val[:i], + } + + switch val[i+1:] { + case "omitempty": + case "omitempty,string": + tag.stringFormat = true + default: + return tag, fmt.Errorf("malformed json tag: %s", val) + } + + return tag, nil +} + +// Reports whether the struct field "f" with value "v" should be included in JSON output. +func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool { + // The regular JSON encoding of a nil pointer is "null", which means "delete this field". + // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set. + // However, many fields are not pointers, so there would be no way to delete these fields. + // Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields. + // Deletion will be handled by a separate mechanism. + if f.Type.Kind() == reflect.Ptr && v.IsNil() { + return false + } + + // The "any" type is represented as an interface{}. If this interface + // is nil, there is no reasonable representation to send. We ignore + // these fields, for the same reasons as given above for pointers. + if f.Type.Kind() == reflect.Interface && v.IsNil() { + return false + } + + return mustInclude[f.Name] || !isEmptyValue(v) +} + +// isEmptyValue reports whether v is the empty value for its type. This +// implementation is based on that of the encoding/json package, but its +// correctness does not depend on it being identical. What's important is that +// this function return false in situations where v should not be sent as part +// of a PATCH operation. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go new file mode 100644 index 00000000000..13c2f930207 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/jsonfloat.go @@ -0,0 +1,47 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "encoding/json" + "errors" + "fmt" + "math" +) + +// JSONFloat64 is a float64 that supports proper unmarshaling of special float +// values in JSON, according to +// https://developers.google.com/protocol-buffers/docs/proto3#json. Although +// that is a proto-to-JSON spec, it applies to all Google APIs. +// +// The jsonpb package +// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has +// similar functionality, but only for direct translation from proto messages +// to JSON. 
+type JSONFloat64 float64 + +func (f *JSONFloat64) UnmarshalJSON(data []byte) error { + var ff float64 + if err := json.Unmarshal(data, &ff); err == nil { + *f = JSONFloat64(ff) + return nil + } + var s string + if err := json.Unmarshal(data, &s); err == nil { + switch s { + case "NaN": + ff = math.NaN() + case "Infinity": + ff = math.Inf(1) + case "-Infinity": + ff = math.Inf(-1) + default: + return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s) + } + *f = JSONFloat64(ff) + return nil + } + return errors.New("google.golang.org/api/internal: data not float or string") +} diff --git a/vendor/google.golang.org/api/internal/gensupport/media.go b/vendor/google.golang.org/api/internal/gensupport/media.go new file mode 100644 index 00000000000..e34ef3740ff --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/media.go @@ -0,0 +1,377 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net/http" + "net/textproto" + "strings" + "sync" + + "google.golang.org/api/googleapi" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. + + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +}
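JSONFloat64 accepts plain numbers as well as the three quoted special values from the proto3 JSON mapping. A usage sketch (illustrative, since gensupport is internal):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"math"

	"google.golang.org/api/internal/gensupport"
)

func main() {
	var v struct {
		Score gensupport.JSONFloat64 `json:"score"`
	}
	// A bare number and the quoted special forms all decode.
	for _, in := range []string{`{"score":1.5}`, `{"score":"NaN"}`, `{"score":"-Infinity"}`} {
		if err := json.Unmarshal([]byte(in), &v); err != nil {
			log.Fatal(err)
		}
		f := float64(v.Score)
		fmt.Println(f, math.IsNaN(f)) // 1.5 false, then NaN true, then -Inf false
	}
}
```

+// DetermineContentType determines the content type of the supplied reader. +// If the content type is already known, it can be specified via ctype. +// Otherwise, the content of media will be sniffed to determine the content type. +// If media implements googleapi.ContentTyper (deprecated), this will be used +// instead of sniffing the content. +// After calling DetermineContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned.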
+func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) { + // Note: callers could avoid calling DetermineContentType if ctype != "", + // but doing the check inside this function reduces the amount of + // generated code. + if ctype != "" { + return media, ctype + } + + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + if typer, ok := media.(googleapi.ContentTyper); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} + +type typeReader struct { + io.Reader + typ string +} + +// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body. +// Close must be called if reads from the multipartReader are abandoned before reaching EOF. +type multipartReader struct { + pr *io.PipeReader + ctype string + mu sync.Mutex + pipeOpen bool +} + +// boundary optionally specifies the MIME boundary; if it is empty, a random boundary is generated. +func newMultipartReader(parts []typeReader, boundary string) *multipartReader { + mp := &multipartReader{pipeOpen: true} + var pw *io.PipeWriter + mp.pr, pw = io.Pipe() + mpw := multipart.NewWriter(pw) + if boundary != "" { + mpw.SetBoundary(boundary) + } + mp.ctype = "multipart/related; boundary=" + mpw.Boundary() + go func() { + for _, part := range parts { + w, err := mpw.CreatePart(typeHeader(part.typ)) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err)) + return + } + _, err = io.Copy(w, part.Reader) + if err != nil { + mpw.Close() + pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err)) + return + } + } + + mpw.Close() + pw.Close() + }() + return mp +} + +func (mp *multipartReader) Read(data []byte) (n int, err error) { + return mp.pr.Read(data) +} + +func (mp *multipartReader) Close() error { + mp.mu.Lock() + if !mp.pipeOpen { + mp.mu.Unlock() + return nil + } + mp.pipeOpen = false + mp.mu.Unlock() + return mp.pr.Close() +} + +// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body. +// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary. +// +// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF. +func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) { + return combineBodyMedia(body, bodyContentType, media, mediaContentType, "") +} + +// combineBodyMedia is CombineBodyMedia but with an optional mimeBoundary field. +func combineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType, mimeBoundary string) (io.ReadCloser, string) { + mp := newMultipartReader([]typeReader{ + {body, bodyContentType}, + {media, mediaContentType}, + }, mimeBoundary) + return mp, mp.ctype +} + +func typeHeader(contentType string) textproto.MIMEHeader { + h := make(textproto.MIMEHeader) + if contentType != "" { + h.Set("Content-Type", contentType) + } + return h +}
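CombineBodyMedia is how "multipart" uploads stitch the JSON metadata and the raw media into one multipart/related body. A sketch of the calling pattern (gensupport is internal; uploadURL and jpegBytes are placeholders supplied by the caller):

```go
package upload

import (
	"bytes"
	"net/http"
	"strings"

	"google.golang.org/api/internal/gensupport"
)

// newMultipartUpload is a hypothetical helper showing the calling pattern.
func newMultipartUpload(uploadURL string, jpegBytes []byte) (*http.Request, error) {
	meta := strings.NewReader(`{"name": "photo.jpg"}`)
	media := bytes.NewReader(jpegBytes)
	rc, ctype := gensupport.CombineBodyMedia(meta, "application/json", media, "image/jpeg")
	req, err := http.NewRequest("POST", uploadURL+"?uploadType=multipart", rc)
	if err != nil {
		rc.Close() // Close is required when the body is abandoned before EOF
		return nil, err
	}
	// ctype carries the generated boundary: "multipart/related; boundary=...".
	req.Header.Set("Content-Type", ctype)
	return req, nil
}
```

+// PrepareUpload determines whether the data in the supplied reader should be +// uploaded in a single request, or in sequential chunks. +// chunkSize is the size of the chunk that media should be split into.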
+// +// If chunkSize is zero, media is returned as the first value, and the other +// two return values are nil, true. +// +// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the +// contents of media fit in a single chunk. +// +// After PrepareUpload has been called, media should no longer be used: the +// media content should be accessed via one of the return values. +func PrepareUpload(media io.Reader, chunkSize int, buffer []byte) (r io.Reader, mb *MediaBuffer, singleChunk bool) { + if chunkSize == 0 { // do not chunk + return media, nil, true + } + if buffer != nil { + mb = NewMediaBufferWithBuffer(media, chunkSize, buffer) + } else { + mb = NewMediaBuffer(media, chunkSize) + } + + _, _, _, err := mb.Chunk() + // If err is io.EOF, we can upload this in a single request. Otherwise, err is + // either nil or a non-EOF error. If it is the latter, then the next call to + // mb.Chunk will return the same error. Returning a MediaBuffer ensures that this + // error will be handled at some point. + return nil, mb, err == io.EOF +} + +// MediaInfo holds information for media uploads. It is intended for use by generated +// code only. +type MediaInfo struct { + // At most one of Media and MediaBuffer will be set. + media io.Reader + buffer *MediaBuffer + singleChunk bool + mType string + size int64 // mediaSize, if known. Used only for calls to progressUpdater_. + progressUpdater googleapi.ProgressUpdater +} + +// NewInfoFromMedia should be invoked from the Media method of a call. It returns a +// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer +// if needed. +func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo { + mi := &MediaInfo{} + opts := googleapi.ProcessMediaOptions(options) + if !opts.ForceEmptyContentType { + r, mi.mType = DetermineContentType(r, opts.ContentType) + } + mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize, opts.Buffer) + return mi +} + +// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a +// call. It returns a MediaInfo using the given reader, size and media type. +func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo { + rdr := ReaderAtToReader(r, size) + rdr, mType := DetermineContentType(rdr, mediaType) + return &MediaInfo{ + size: size, + mType: mType, + buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize), + media: nil, + singleChunk: false, + } +} + +// SetProgressUpdater sets the progress updater for the media info. +func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) { + if mi != nil { + mi.progressUpdater = pu + } +} + +// UploadType determines the type of upload: a single request, or a resumable +// series of requests. +func (mi *MediaInfo) UploadType() string { + if mi.singleChunk { + return "multipart" + } + return "resumable" +} + +// UploadRequest sets up an HTTP request for media upload. It adds headers +// as necessary, and returns a replacement for the body and a function for http.Request.GetBody. +func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) { + cleanup = func() {} + if mi == nil { + return body, nil, cleanup + } + var media io.Reader + if mi.media != nil { + // This only happens when the caller has turned off chunking. In that + // case, we write all of media in a single non-retryable request. 
+ media = mi.media + } else if mi.singleChunk { + // The data fits in a single chunk, which has now been read into the MediaBuffer. + // We obtain that chunk so we can write it in a single request. The request can + // be retried because the data is stored in the MediaBuffer. + media, _, _, _ = mi.buffer.Chunk() + } + if media != nil { + fb := readerFunc(body) + fm := readerFunc(media) + combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType) + toCleanup := []io.Closer{ + combined, + } + if fb != nil && fm != nil { + getBody = func() (io.ReadCloser, error) { + rb := ioutil.NopCloser(fb()) + rm := ioutil.NopCloser(fm()) + var mimeBoundary string + if _, params, err := mime.ParseMediaType(ctype); err == nil { + mimeBoundary = params["boundary"] + } + r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary) + toCleanup = append(toCleanup, r) + return r, nil + } + } + cleanup = func() { + for _, closer := range toCleanup { + _ = closer.Close() + } + + } + reqHeaders.Set("Content-Type", ctype) + body = combined + } + if mi.buffer != nil && mi.mType != "" && !mi.singleChunk { + reqHeaders.Set("X-Upload-Content-Type", mi.mType) + } + return body, getBody, cleanup +} + +// readerFunc returns a function that always returns an io.Reader that has the same +// contents as r, provided that can be done without consuming r. Otherwise, it +// returns nil. +// See http.NewRequest (in net/http/request.go). +func readerFunc(r io.Reader) func() io.Reader { + switch r := r.(type) { + case *bytes.Buffer: + buf := r.Bytes() + return func() io.Reader { return bytes.NewReader(buf) } + case *bytes.Reader: + snapshot := *r + return func() io.Reader { r := snapshot; return &r } + case *strings.Reader: + snapshot := *r + return func() io.Reader { r := snapshot; return &r } + default: + return nil + } +} + +// ResumableUpload returns an appropriately configured ResumableUpload value if the +// upload is resumable, or nil otherwise. +func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload { + if mi == nil || mi.singleChunk { + return nil + } + return &ResumableUpload{ + URI: locURI, + Media: mi.buffer, + MediaType: mi.mType, + Callback: func(curr int64) { + if mi.progressUpdater != nil { + mi.progressUpdater(curr, mi.size) + } + }, + } +} + +// SetGetBody sets the GetBody field of req to f. This was once needed +// to gracefully support Go 1.7 and earlier which didn't have that +// field. +// +// Deprecated: the code generator no longer uses this as of +// 2019-02-19. Nothing else should be calling this anyway, but we +// won't delete this immediately; it will be deleted in as early as 6 +// months. +func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) { + req.GetBody = f +} diff --git a/vendor/google.golang.org/api/internal/gensupport/params.go b/vendor/google.golang.org/api/internal/gensupport/params.go new file mode 100644 index 00000000000..0e878a4255b --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/params.go @@ -0,0 +1,51 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "net/url" + + "google.golang.org/api/googleapi" +) + +// URLParams is a simplified replacement for url.Values +// that safely builds up URL parameters for encoding. +type URLParams map[string][]string + +// Get returns the first value for the given key, or "". 
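NewInfoFromMedia funnels everything through PrepareUpload, whose eager first-chunk read is what lets UploadType pick between a one-shot "multipart" request and a "resumable" session. A sketch of the two outcomes (internal package, illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"google.golang.org/api/internal/gensupport"
)

func main() {
	// chunkSize == 0 disables chunking: the original reader passes
	// through and the upload is a single non-retryable request.
	r, mb, single := gensupport.PrepareUpload(strings.NewReader("payload"), 0, nil)
	fmt.Println(r != nil, mb == nil, single) // true true true

	// With a chunk size, the first chunk is buffered immediately. If that
	// read already hit io.EOF, everything fits in one chunk ("multipart");
	// otherwise the upload proceeds in resumable chunks.
	_, mb, single = gensupport.PrepareUpload(strings.NewReader("tiny"), 8<<20, nil)
	fmt.Println(mb != nil, single) // true true: 4 bytes fit in an 8 MiB chunk
}
```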
+func (u URLParams) Get(key string) string { + vs := u[key] + if len(vs) == 0 { + return "" + } + return vs[0] +} + +// Set sets the key to value. +// It replaces any existing values. +func (u URLParams) Set(key, value string) { + u[key] = []string{value} +} + +// SetMulti sets the key to an array of values. +// It replaces any existing values. +// Note that values must not be modified after calling SetMulti +// so the caller is responsible for making a copy if necessary. +func (u URLParams) SetMulti(key string, values []string) { + u[key] = values +} + +// Encode encodes the values into ``URL encoded'' form +// ("bar=baz&foo=quux") sorted by key. +func (u URLParams) Encode() string { + return url.Values(u).Encode() +} + +// SetOptions sets the URL params and any additional call options. +func SetOptions(u URLParams, opts ...googleapi.CallOption) { + for _, o := range opts { + u.Set(o.Get()) + } +} diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go new file mode 100644 index 00000000000..edc87ec24f6 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go @@ -0,0 +1,258 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "sync" + "time" + + gax "github.com/googleapis/gax-go/v2" +) + +// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their +// own implementation. +type Backoff interface { + Pause() time.Duration +} + +// These are declared as global variables so that tests can overwrite them. +var ( + retryDeadline = 32 * time.Second + backoff = func() Backoff { + return &gax.Backoff{Initial: 100 * time.Millisecond} + } + // isRetryable is a platform-specific hook, specified in retryable_linux.go + syscallRetryable func(error) bool = func(err error) bool { return false } +) + +const ( + // statusTooManyRequests is returned by the storage API if the + // per-project limits have been temporarily exceeded. The request + // should be retried. + // https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes + statusTooManyRequests = 429 +) + +// ResumableUpload is used by the generated APIs to provide resumable uploads. +// It is not used by developers directly. +type ResumableUpload struct { + Client *http.Client + // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable". + URI string + UserAgent string // User-Agent for header of the request + // Media is the object being uploaded. + Media *MediaBuffer + // MediaType defines the media type, e.g. "image/jpeg". + MediaType string + + mu sync.Mutex // guards progress + progress int64 // number of bytes uploaded so far + + // Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded. + Callback func(int64) +} + +// Progress returns the number of bytes uploaded at this point. +func (rx *ResumableUpload) Progress() int64 { + rx.mu.Lock() + defer rx.mu.Unlock() + return rx.progress +} + +// doUploadRequest performs a single HTTP request to upload data. +// off specifies the offset in rx.Media from which data is drawn. +// size is the number of bytes in data. +// final specifies whether data is the final chunk to be uploaded. 
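URLParams keeps url.Values semantics, so Encode yields a deterministic, key-sorted query string with repeated keys preserved. Sketch (internal package):

```go
package main

import (
	"fmt"

	"google.golang.org/api/internal/gensupport"
)

func main() {
	params := gensupport.URLParams{}
	params.Set("pageSize", "50")
	params.SetMulti("fields", []string{"id", "name"}) // caller must not mutate the slice afterwards
	params.Set("alt", "json")
	// Keys are sorted; multi-valued keys repeat.
	fmt.Println(params.Encode()) // alt=json&fields=id&fields=name&pageSize=50
}
```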
+func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) { + req, err := http.NewRequest("POST", rx.URI, data) + if err != nil { + return nil, err + } + + req.ContentLength = size + var contentRange string + if final { + if size == 0 { + contentRange = fmt.Sprintf("bytes */%v", off) + } else { + contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size) + } + } else { + contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1) + } + req.Header.Set("Content-Range", contentRange) + req.Header.Set("Content-Type", rx.MediaType) + req.Header.Set("User-Agent", rx.UserAgent) + + // Google's upload endpoint uses status code 308 for a + // different purpose than the "308 Permanent Redirect" + // since-standardized in RFC 7238. Because of the conflict in + // semantics, Google added this new request header which + // causes it to not use "308" and instead reply with 200 OK + // and sets the upload-specific "X-HTTP-Status-Code-Override: + // 308" response header. + req.Header.Set("X-GUploader-No-308", "yes") + + return SendRequest(ctx, rx.Client, req) +} + +func statusResumeIncomplete(resp *http.Response) bool { + // This is how the server signals "status resume incomplete" + // when X-GUploader-No-308 is set to "yes": + return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308" +} + +// reportProgress calls a user-supplied callback to report upload progress. +// If old==updated, the callback is not called. +func (rx *ResumableUpload) reportProgress(old, updated int64) { + if updated-old == 0 { + return + } + rx.mu.Lock() + rx.progress = updated + rx.mu.Unlock() + if rx.Callback != nil { + rx.Callback(updated) + } +} + +// transferChunk performs a single HTTP request to upload a single chunk from rx.Media. +func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) { + chunk, off, size, err := rx.Media.Chunk() + + done := err == io.EOF + if !done && err != nil { + return nil, err + } + + res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done) + if err != nil { + return res, err + } + + // We sent "X-GUploader-No-308: yes" (see comment elsewhere in + // this file), so we don't expect to get a 308. + if res.StatusCode == 308 { + return nil, errors.New("unexpected 308 response status code") + } + + if res.StatusCode == http.StatusOK { + rx.reportProgress(off, off+int64(size)) + } + + if statusResumeIncomplete(res) { + rx.Media.Next() + } + return res, nil +} + +// Upload starts the process of a resumable upload with a cancellable context. +// It retries using the provided back off strategy until cancelled or the +// strategy indicates to stop retrying. +// It is called from the auto-generated API code and is not visible to the user. +// Before sending an HTTP request, Upload calls any registered hook functions, +// and calls the returned functions after the request returns (see send.go). +// rx is private to the auto-generated API code. +// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. +func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { + + // There are a couple of cases where it's possible for err and resp to both + // be non-nil. However, we expose a simpler contract to our callers: exactly + // one of resp and err will be non-nil. This means that any response body + // must be closed here before returning a non-nil error. 
+ var prepareReturn = func(resp *http.Response, err error) (*http.Response, error) { + if err != nil { + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return nil, err + } + return resp, nil + } + + // Send all chunks. + for { + var pause time.Duration + + // Each chunk gets its own initialized-at-zero retry. + bo := backoff() + quitAfter := time.After(retryDeadline) + + // Retry loop for a single chunk. + for { + select { + case <-ctx.Done(): + if err == nil { + err = ctx.Err() + } + return prepareReturn(resp, err) + case <-time.After(pause): + case <-quitAfter: + return prepareReturn(resp, err) + } + + resp, err = rx.transferChunk(ctx) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we should retry the request. + if !shouldRetry(status, err) { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + + // If the chunk was uploaded successfully, but there's still + // more to go, upload the next chunk without any delay. + if statusResumeIncomplete(resp) { + resp.Body.Close() + continue + } + + return prepareReturn(resp, err) + } +} + +// shouldRetry indicates whether an error is retryable for the purposes of this +// package, following guidance from +// https://cloud.google.com/storage/docs/exponential-backoff . +func shouldRetry(status int, err error) bool { + if 500 <= status && status <= 599 { + return true + } + if status == statusTooManyRequests { + return true + } + if err == io.ErrUnexpectedEOF { + return true + } + // Transient network errors should be retried. + if syscallRetryable(err) { + return true + } + if err, ok := err.(interface{ Temporary() bool }); ok { + if err.Temporary() { + return true + } + } + // If Go 1.13 error unwrapping is available, use this to examine wrapped + // errors. + if err, ok := err.(interface{ Unwrap() error }); ok { + return shouldRetry(status, err.Unwrap()) + } + return false +} diff --git a/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go new file mode 100644 index 00000000000..fed998b5d07 --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/retryable_linux.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package gensupport + +import "syscall" + +func init() { + // Initialize syscallRetryable to return true on transient socket-level + // errors. These errors are specific to Linux. + syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } +} diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go new file mode 100644 index 00000000000..3338c8d193a --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/send.go @@ -0,0 +1,172 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "time" +) + +// Hook is the type of a function that is called once before each HTTP request +// that is sent by a generated API. It returns a function that is called after +// the request returns. +// Hooks are not called if the context is nil. 
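The inner loop of Upload is a textbook truncated-exponential-backoff shape: attempt, classify the failure, sleep for a gax.Backoff pause, and give up once the per-chunk deadline lapses. A standalone sketch of that shape (doChunk and isRetryable are placeholders standing in for transferChunk and shouldRetry):

```go
package retrysketch

import (
	"time"

	gax "github.com/googleapis/gax-go/v2"
)

// retryChunk retries one chunk until success, a non-retryable error,
// or exhaustion of the retry deadline.
func retryChunk(doChunk func() error, isRetryable func(error) bool) error {
	bo := &gax.Backoff{Initial: 100 * time.Millisecond}
	deadline := time.After(32 * time.Second) // mirrors retryDeadline above
	for {
		err := doChunk()
		if err == nil || !isRetryable(err) {
			return err
		}
		select {
		case <-time.After(bo.Pause()): // pause grows on each retry
		case <-deadline:
			return err // retry budget for this chunk is spent
		}
	}
}
```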
+type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) + +var hooks []Hook + +// RegisterHook registers a Hook to be called before each HTTP request by a +// generated API. Hooks are called in the order they are registered. Each +// hook can return a function; if it is non-nil, it is called after the HTTP +// request returns. These functions are called in the reverse order. +// RegisterHook should not be called concurrently with itself or SendRequest. +func RegisterHook(h Hook) { + hooks = append(hooks, h) +} + +// SendRequest sends a single HTTP request using the given client. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request. + resp, err := send(ctx, client, req) + + // Call returned funcs in reverse order. + for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// SendRequestWithRetry sends a single HTTP request using the given client, +// with retries if a retryable error is returned. +// If ctx is non-nil, it calls all hooks, then sends the request with +// req.WithContext, then calls any functions returned by the hooks in +// reverse order. +func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + // Disallow Accept-Encoding because it interferes with the automatic gzip handling + // done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219. + if _, ok := req.Header["Accept-Encoding"]; ok { + return nil, errors.New("google api: custom Accept-Encoding headers not allowed") + } + if ctx == nil { + return client.Do(req) + } + // Call hooks in order of registration, store returned funcs. + post := make([]func(resp *http.Response), len(hooks)) + for i, h := range hooks { + fn := h(ctx, req) + post[i] = fn + } + + // Send request with retry. + resp, err := sendAndRetry(ctx, client, req) + + // Call returned funcs in reverse order. + for i := len(post) - 1; i >= 0; i-- { + if fn := post[i]; fn != nil { + fn(resp) + } + } + return resp, err +} + +func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + var resp *http.Response + var err error + + // Loop to retry the request, up to the context deadline. 
+ var pause time.Duration + bo := backoff() + + for { + select { + case <-ctx.Done(): + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err == nil { + err = ctx.Err() + } + return resp, err + case <-time.After(pause): + } + + resp, err = client.Do(req.WithContext(ctx)) + + var status int + if resp != nil { + status = resp.StatusCode + } + + // Check if we can retry the request. A retry can only be done if the error + // is retryable and the request body can be re-created using GetBody (this + // will not be possible if the body was unbuffered). + if req.GetBody == nil || !shouldRetry(status, err) { + break + } + var errBody error + req.Body, errBody = req.GetBody() + if errBody != nil { + break + } + + pause = bo.Pause() + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + } + return resp, err +} + +// DecodeResponse decodes the body of res into target. If there is no body, +// target is unchanged. +func DecodeResponse(target interface{}, res *http.Response) error { + if res.StatusCode == http.StatusNoContent { + return nil + } + return json.NewDecoder(res.Body).Decode(target) +} diff --git a/vendor/google.golang.org/api/internal/gensupport/version.go b/vendor/google.golang.org/api/internal/gensupport/version.go new file mode 100644 index 00000000000..23f6aa24eaf --- /dev/null +++ b/vendor/google.golang.org/api/internal/gensupport/version.go @@ -0,0 +1,53 @@ +// Copyright 2020 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "runtime" + "strings" + "unicode" +) + +// GoVersion returns the Go runtime version. The returned string +// has no whitespace. +func GoVersion() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/google.golang.org/api/internal/impersonate/impersonate.go b/vendor/google.golang.org/api/internal/impersonate/impersonate.go new file mode 100644 index 00000000000..b465bbcd12e --- /dev/null +++ b/vendor/google.golang.org/api/internal/impersonate/impersonate.go @@ -0,0 +1,128 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package impersonate is used to impersonate Google Credentials. +package impersonate + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "golang.org/x/oauth2" +) + +// Config for generating impersonated credentials. +type Config struct { + // Target is the service account to impersonate. Required. + Target string + // Scopes the impersonated credential should have. Required. + Scopes []string + // Delegates are the service accounts in a delegation chain. 
Each service + // account must be granted roles/iam.serviceAccountTokenCreator on the next + // service account in the chain. Optional. + Delegates []string +} + +// TokenSource returns an impersonated TokenSource configured with the provided +// config using ts as the base credential provider for making requests. +func TokenSource(ctx context.Context, ts oauth2.TokenSource, config *Config) (oauth2.TokenSource, error) { + if len(config.Scopes) == 0 { + return nil, fmt.Errorf("impersonate: scopes must be provided") + } + its := impersonatedTokenSource{ + ctx: ctx, + ts: ts, + name: formatIAMServiceAccountName(config.Target), + // Default to the longest acceptable value of one hour as the token will + // be refreshed automatically. + lifetime: "3600s", + } + + its.delegates = make([]string, len(config.Delegates)) + for i, v := range config.Delegates { + its.delegates[i] = formatIAMServiceAccountName(v) + } + its.scopes = make([]string, len(config.Scopes)) + copy(its.scopes, config.Scopes) + + return oauth2.ReuseTokenSource(nil, its), nil +} + +func formatIAMServiceAccountName(name string) string { + return fmt.Sprintf("projects/-/serviceAccounts/%s", name) +} + +type generateAccessTokenReq struct { + Delegates []string `json:"delegates,omitempty"` + Lifetime string `json:"lifetime,omitempty"` + Scope []string `json:"scope,omitempty"` +} + +type generateAccessTokenResp struct { + AccessToken string `json:"accessToken"` + ExpireTime string `json:"expireTime"` +} + +type impersonatedTokenSource struct { + ctx context.Context + ts oauth2.TokenSource + + name string + lifetime string + scopes []string + delegates []string +} + +// Token returns an impersonated Token. +func (i impersonatedTokenSource) Token() (*oauth2.Token, error) { + hc := oauth2.NewClient(i.ctx, i.ts) + reqBody := generateAccessTokenReq{ + Delegates: i.delegates, + Lifetime: i.lifetime, + Scope: i.scopes, + } + b, err := json.Marshal(reqBody) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to marshal request: %v", err) + } + url := fmt.Sprintf("https://iamcredentials.googleapis.com/v1/%s:generateAccessToken", i.name) + req, err := http.NewRequest("POST", url, bytes.NewReader(b)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to create request: %v", err) + } + req = req.WithContext(i.ctx) + req.Header.Set("Content-Type", "application/json") + + resp, err := hc.Do(req) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to generate access token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to read body: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, fmt.Errorf("impersonate: status code %d: %s", c, body) + } + + var accessTokenResp generateAccessTokenResp + if err := json.Unmarshal(body, &accessTokenResp); err != nil { + return nil, fmt.Errorf("impersonate: unable to parse response: %v", err) + } + expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime) + if err != nil { + return nil, fmt.Errorf("impersonate: unable to parse expiry: %v", err) + } + return &oauth2.Token{ + AccessToken: accessTokenResp.AccessToken, + Expiry: expiry, + }, nil +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go new file mode 100644 index 00000000000..26259b82abb --- /dev/null +++ b/vendor/google.golang.org/api/internal/settings.go @@ -0,0 +1,114 @@ +// Copyright 2017 Google LLC. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal supports the options and transport packages. +package internal + +import ( + "crypto/tls" + "errors" + "net/http" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/internal/impersonate" + "google.golang.org/grpc" +) + +// DialSettings holds information needed to establish a connection with a +// Google API service. +type DialSettings struct { + Endpoint string + DefaultEndpoint string + DefaultMTLSEndpoint string + Scopes []string + TokenSource oauth2.TokenSource + Credentials *google.Credentials + CredentialsFile string // if set, Token Source is ignored. + CredentialsJSON []byte + UserAgent string + APIKey string + Audiences []string + HTTPClient *http.Client + GRPCDialOpts []grpc.DialOption + GRPCConn *grpc.ClientConn + GRPCConnPool ConnPool + GRPCConnPoolSize int + NoAuth bool + TelemetryDisabled bool + ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + CustomClaims map[string]interface{} + SkipValidation bool + ImpersonationConfig *impersonate.Config + + // Google API system parameters. For more information please read: + // https://cloud.google.com/apis/docs/system-parameters + QuotaProject string + RequestReason string +} + +// Validate reports an error if ds is invalid. +func (ds *DialSettings) Validate() error { + if ds.SkipValidation { + return nil + } + hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil + if ds.NoAuth && hasCreds { + return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials") + } + // Credentials should not appear with other options. + // We currently allow TokenSource and CredentialsFile to coexist. + // TODO(jba): make TokenSource & CredentialsFile an error (breaking change). + nCreds := 0 + if ds.Credentials != nil { + nCreds++ + } + if ds.CredentialsJSON != nil { + nCreds++ + } + if ds.CredentialsFile != "" { + nCreds++ + } + if ds.APIKey != "" { + nCreds++ + } + if ds.TokenSource != nil { + nCreds++ + } + if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 { + return errors.New("WithScopes is incompatible with WithAudience") + } + // Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility. 
+ if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") { + return errors.New("multiple credential options provided") + } + if ds.GRPCConn != nil && ds.GRPCConnPool != nil { + return errors.New("WithGRPCConn is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConnPool != nil { + return errors.New("WithHTTPClient is incompatible with WithConnPool") + } + if ds.HTTPClient != nil && ds.GRPCConn != nil { + return errors.New("WithHTTPClient is incompatible with WithGRPCConn") + } + if ds.HTTPClient != nil && ds.GRPCDialOpts != nil { + return errors.New("WithHTTPClient is incompatible with gRPC dial options") + } + if ds.HTTPClient != nil && ds.QuotaProject != "" { + return errors.New("WithHTTPClient is incompatible with QuotaProject") + } + if ds.HTTPClient != nil && ds.RequestReason != "" { + return errors.New("WithHTTPClient is incompatible with RequestReason") + } + if ds.HTTPClient != nil && ds.ClientCertSource != nil { + return errors.New("WithHTTPClient is incompatible with WithClientCertSource") + } + if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) { + return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible") + } + if ds.ImpersonationConfig != nil && len(ds.ImpersonationConfig.Scopes) == 0 && len(ds.Scopes) == 0 { + return errors.New("WithImpersonatedCredentials requires scopes being provided") + } + return nil +} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE new file mode 100644 index 00000000000..7109c6ef932 --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Joshua Tacoma. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
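Validate's central rule is that at most one credential-bearing option may be supplied (with the TokenSource+CredentialsFile exception noted above). An illustrative sketch of a rejected combination (internal package; the token is a dummy):

```go
package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"google.golang.org/api/internal"
)

func main() {
	ds := &internal.DialSettings{
		APIKey:      "dummy-api-key",
		TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "dummy"}),
	}
	// Two credential sources at once: the combination is rejected.
	fmt.Println(ds.Validate()) // multiple credential options provided
}
```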
diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA new file mode 100644 index 00000000000..c7f86fcd5fd --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA @@ -0,0 +1,14 @@ +name: "uritemplates" +description: + "Package uritemplates is a level 4 implementation of RFC 6570 (URI " + "Template, http://tools.ietf.org/html/rfc6570)." + +third_party { + url { + type: GIT + value: "https://github.com/jtacoma/uritemplates" + } + version: "0.1" + last_upgrade_date { year: 2014 month: 8 day: 18 } + license_type: NOTICE +} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go new file mode 100644 index 00000000000..8c27d19d752 --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/uritemplates.go @@ -0,0 +1,248 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 3 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// uritemplates does not support composite values (in Go: slices or maps) +// and so does not qualify as a level 4 implementation. +package uritemplates + +import ( + "bytes" + "errors" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +// pairWriter is a convenience struct which allows escaped and unescaped +// versions of the template to be written in parallel. +type pairWriter struct { + escaped, unescaped bytes.Buffer +} + +// Write writes the provided string directly without any escaping. +func (w *pairWriter) Write(s string) { + w.escaped.WriteString(s) + w.unescaped.WriteString(s) +} + +// Escape writes the provided string, escaping the string for the +// escaped output. +func (w *pairWriter) Escape(s string, allowReserved bool) { + w.unescaped.WriteString(s) + if allowReserved { + w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } +} + +// Escaped returns the escaped string. +func (w *pairWriter) Escaped() string { + return w.escaped.String() +} + +// Unescaped returns the unescaped string. +func (w *pairWriter) Unescaped() string { + return w.unescaped.String() +} + +// A uriTemplate is a parsed representation of a URI template. +type uriTemplate struct { + raw string + parts []templatePart +} + +// parse parses a URI template string into a uriTemplate object. 
+func parse(rawTemplate string) (*uriTemplate, error) { + split := strings.Split(rawTemplate, "{") + parts := make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + return nil, errors.New("unexpected }") + } + parts[i].raw = s + continue + } + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + return nil, errors.New("malformed template") + } + expression := subsplit[0] + var err error + parts[i*2-1], err = parseExpression(expression) + if err != nil { + return nil, err + } + parts[i*2].raw = subsplit[1] + } + return &uriTemplate{ + raw: rawTemplate, + parts: parts, + }, nil +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." + expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + // TODO(djd): Remove "*" suffix parsing once we check that no APIs have + // mistakenly used that attribute. + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifiers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce the +// resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. 
+func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) { + var w pairWriter + for _, p := range t.parts { + p.expand(&w, values) + } + return w.Escaped(), w.Unescaped() +} + +func (tp *templatePart) expand(w *pairWriter, values map[string]string) { + if len(tp.raw) > 0 { + w.Write(tp.raw) + return + } + var first = true + for _, term := range tp.terms { + value, exists := values[term.name] + if !exists { + continue + } + if first { + w.Write(tp.first) + first = false + } else { + w.Write(tp.sep) + } + tp.expandString(w, term, value) + } +} + +func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) { + if tp.named { + w.Write(name) + if empty { + w.Write(tp.ifemp) + } else { + w.Write("=") + } + } +} + +func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + tp.expandName(w, t.name, len(s) == 0) + w.Escape(s, tp.allowReserved) +} diff --git a/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go new file mode 100644 index 00000000000..2e70b81543d --- /dev/null +++ b/vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go @@ -0,0 +1,17 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uritemplates + +// Expand parses then expands a URI template with a set of values to produce +// the resultant URI. Two forms of the result are returned: one with all the +// elements escaped, and one with the elements unescaped. +func Expand(path string, values map[string]string) (escaped, unescaped string, err error) { + template, err := parse(path) + if err != nil { + return "", "", err + } + escaped, unescaped = template.Expand(values) + return escaped, unescaped, nil +} diff --git a/vendor/google.golang.org/api/option/credentials_go19.go b/vendor/google.golang.org/api/option/credentials_go19.go new file mode 100644 index 00000000000..d06f918b0e6 --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_go19.go @@ -0,0 +1,23 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.Credentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.Credentials)(w) +} + +// WithCredentials returns a ClientOption that authenticates API calls. +func WithCredentials(creds *google.Credentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/credentials_notgo19.go b/vendor/google.golang.org/api/option/credentials_notgo19.go new file mode 100644 index 00000000000..0ce107a624a --- /dev/null +++ b/vendor/google.golang.org/api/option/credentials_notgo19.go @@ -0,0 +1,22 @@ +// Copyright 2018 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
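For orientation, a short sketch of how the Expand helper in utils.go above behaves; the package lives under internal/third_party, so it cannot be imported from outside this module, and the snippet is illustrative only, written as if it sat inside the package itself:

	// escaped percent-encodes every byte outside the RFC 6570 unreserved
	// set, including the "/" and space in the object value; unescaped
	// substitutes the raw values.
	escaped, unescaped, err := Expand("b/{bucket}/o/{object}",
		map[string]string{"bucket": "my-bucket", "object": "a/b c"})
	if err != nil {
		// a malformed template such as "b/{bucket" would surface here
	}
	// escaped:   "b/my-bucket/o/a%2Fb%20c"
	// unescaped: "b/my-bucket/o/a/b c"
	_, _ = escaped, unescaped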
+ +// +build !go1.9 + +package option + +import ( + "golang.org/x/oauth2/google" + "google.golang.org/api/internal" +) + +type withCreds google.DefaultCredentials + +func (w *withCreds) Apply(o *internal.DialSettings) { + o.Credentials = (*google.DefaultCredentials)(w) +} + +func WithCredentials(creds *google.DefaultCredentials) ClientOption { + return (*withCreds)(creds) +} diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go new file mode 100644 index 00000000000..b4d78a830ae --- /dev/null +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -0,0 +1,52 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internaloption contains options used internally by Google client code. +package internaloption + +import ( + "google.golang.org/api/internal" + "google.golang.org/api/option" +) + +type defaultEndpointOption string + +func (o defaultEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultEndpoint = string(o) +} + +// WithDefaultEndpoint is an option that indicates the default endpoint. +// +// It should only be used internally by generated clients. +// +// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint. +func WithDefaultEndpoint(url string) option.ClientOption { + return defaultEndpointOption(url) +} + +type defaultMTLSEndpointOption string + +func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) { + settings.DefaultMTLSEndpoint = string(o) +} + +// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint. +// +// It should only be used internally by generated clients. +func WithDefaultMTLSEndpoint(url string) option.ClientOption { + return defaultMTLSEndpointOption(url) +} + +// SkipDialSettingsValidation bypasses validation on ClientOptions. +// +// It should only be used internally. +func SkipDialSettingsValidation() option.ClientOption { + return skipDialSettingsValidation{} +} + +type skipDialSettingsValidation struct{} + +func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) { + settings.SkipValidation = true +} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go new file mode 100644 index 00000000000..686476f9cbb --- /dev/null +++ b/vendor/google.golang.org/api/option/option.go @@ -0,0 +1,326 @@ +// Copyright 2017 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package option contains options for Google API clients. +package option + +import ( + "crypto/tls" + "net/http" + + "golang.org/x/oauth2" + "google.golang.org/api/internal" + "google.golang.org/api/internal/impersonate" + "google.golang.org/grpc" +) + +// A ClientOption is an option for a Google API client. +type ClientOption interface { + Apply(*internal.DialSettings) +} + +// WithTokenSource returns a ClientOption that specifies an OAuth2 token +// source to be used as the basis for authentication. 
+func WithTokenSource(s oauth2.TokenSource) ClientOption {
+	return withTokenSource{s}
+}
+
+type withTokenSource struct{ ts oauth2.TokenSource }
+
+func (w withTokenSource) Apply(o *internal.DialSettings) {
+	o.TokenSource = w.ts
+}
+
+type withCredFile string
+
+func (w withCredFile) Apply(o *internal.DialSettings) {
+	o.CredentialsFile = string(w)
+}
+
+// WithCredentialsFile returns a ClientOption that authenticates
+// API calls with the given service account or refresh token JSON
+// credentials file.
+func WithCredentialsFile(filename string) ClientOption {
+	return withCredFile(filename)
+}
+
+// WithServiceAccountFile returns a ClientOption that uses a Google service
+// account credentials file to authenticate.
+//
+// Deprecated: Use WithCredentialsFile instead.
+func WithServiceAccountFile(filename string) ClientOption {
+	return WithCredentialsFile(filename)
+}
+
+// WithCredentialsJSON returns a ClientOption that authenticates
+// API calls with the given service account or refresh token JSON
+// credentials.
+func WithCredentialsJSON(p []byte) ClientOption {
+	return withCredentialsJSON(p)
+}
+
+type withCredentialsJSON []byte
+
+func (w withCredentialsJSON) Apply(o *internal.DialSettings) {
+	o.CredentialsJSON = make([]byte, len(w))
+	copy(o.CredentialsJSON, w)
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+	return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Apply(o *internal.DialSettings) {
+	o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
+func WithScopes(scope ...string) ClientOption {
+	return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Apply(o *internal.DialSettings) {
+	o.Scopes = make([]string, len(w))
+	copy(o.Scopes, w)
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+	return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
+
+// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
+// as the basis of communications. This option may only be used with services
+// that support HTTP as their communication transport. When used, the
+// WithHTTPClient option takes precedence over all other supplied options.
+func WithHTTPClient(client *http.Client) ClientOption {
+	return withHTTPClient{client}
+}
+
+type withHTTPClient struct{ client *http.Client }
+
+func (w withHTTPClient) Apply(o *internal.DialSettings) {
+	o.HTTPClient = w.client
+}
+
+// WithGRPCConn returns a ClientOption that specifies the gRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport. When
+// used, the WithGRPCConn option takes precedence over all other supplied
+// options.
+func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
+	return withGRPCConn{conn}
+}
+
+type withGRPCConn struct{ conn *grpc.ClientConn }
+
+func (w withGRPCConn) Apply(o *internal.DialSettings) {
+	o.GRPCConn = w.conn
+}
+
+// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
+// to an underlying gRPC dial. It does not work with WithGRPCConn.
+func WithGRPCDialOption(opt grpc.DialOption) ClientOption { + return withGRPCDialOption{opt} +} + +type withGRPCDialOption struct{ opt grpc.DialOption } + +func (w withGRPCDialOption) Apply(o *internal.DialSettings) { + o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt) +} + +// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC +// connections that requests will be balanced between. +// +// This is an EXPERIMENTAL API and may be changed or removed in the future. +func WithGRPCConnectionPool(size int) ClientOption { + return withGRPCConnectionPool(size) +} + +type withGRPCConnectionPool int + +func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) { + o.GRPCConnPoolSize = int(w) +} + +// WithAPIKey returns a ClientOption that specifies an API key to be used +// as the basis for authentication. +// +// API Keys can only be used for JSON-over-HTTP APIs, including those under +// the import path google.golang.org/api/.... +func WithAPIKey(apiKey string) ClientOption { + return withAPIKey(apiKey) +} + +type withAPIKey string + +func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) } + +// WithAudiences returns a ClientOption that specifies an audience to be used +// as the audience field ("aud") for the JWT token authentication. +func WithAudiences(audience ...string) ClientOption { + return withAudiences(audience) +} + +type withAudiences []string + +func (w withAudiences) Apply(o *internal.DialSettings) { + o.Audiences = make([]string, len(w)) + copy(o.Audiences, w) +} + +// WithoutAuthentication returns a ClientOption that specifies that no +// authentication should be used. It is suitable only for testing and for +// accessing public resources, like public Google Cloud Storage buckets. +// It is an error to provide both WithoutAuthentication and any of WithAPIKey, +// WithTokenSource, WithCredentialsFile or WithServiceAccountFile. +func WithoutAuthentication() ClientOption { + return withoutAuthentication{} +} + +type withoutAuthentication struct{} + +func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true } + +// WithQuotaProject returns a ClientOption that specifies the project used +// for quota and billing purposes. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithQuotaProject(quotaProject string) ClientOption { + return withQuotaProject(quotaProject) +} + +type withQuotaProject string + +func (w withQuotaProject) Apply(o *internal.DialSettings) { + o.QuotaProject = string(w) +} + +// WithRequestReason returns a ClientOption that specifies a reason for +// making the request, which is intended to be recorded in audit logging. +// An example reason would be a support-case ticket number. +// +// For more information please read: +// https://cloud.google.com/apis/docs/system-parameters +func WithRequestReason(requestReason string) ClientOption { + return withRequestReason(requestReason) +} + +type withRequestReason string + +func (w withRequestReason) Apply(o *internal.DialSettings) { + o.RequestReason = string(w) +} + +// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus) +// settings on gRPC and HTTP clients. +// An example reason would be to bind custom telemetry that overrides the defaults. 
+func WithTelemetryDisabled() ClientOption {
+	return withTelemetryDisabled{}
+}
+
+type withTelemetryDisabled struct{}
+
+func (w withTelemetryDisabled) Apply(o *internal.DialSettings) {
+	o.TelemetryDisabled = true
+}
+
+// ClientCertSource is a function that returns a TLS client certificate to be used
+// when opening TLS connections.
+//
+// It follows the same semantics as crypto/tls.Config.GetClientCertificate.
+//
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
+
+// WithClientCertSource returns a ClientOption that specifies a
+// callback function for obtaining a TLS client certificate.
+//
+// This option is used for supporting mTLS authentication, where the
+// server validates the client certificate when establishing a connection.
+//
+// The callback function will be invoked whenever the server requests a
+// certificate from the client. Implementations of the callback function
+// should try to ensure that a valid certificate can be repeatedly returned
+// on demand for the entire life cycle of the transport client. If a nil
+// Certificate is returned (i.e. no Certificate can be obtained), an error
+// should be returned.
+//
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func WithClientCertSource(s ClientCertSource) ClientOption {
+	return withClientCertSource{s}
+}
+
+type withClientCertSource struct{ s ClientCertSource }
+
+func (w withClientCertSource) Apply(o *internal.DialSettings) {
+	o.ClientCertSource = w.s
+}
+
+// ImpersonateCredentials returns a ClientOption that will impersonate the
+// target service account.
+//
+// In order to impersonate the target service account
+// the base service account must have the Service Account Token Creator role,
+// roles/iam.serviceAccountTokenCreator, on the target service account.
+// See https://cloud.google.com/iam/docs/understanding-service-accounts.
+//
+// Optionally, delegates can be used during impersonation if the base service
+// account lacks the token creator role on the target. When using delegates,
+// each service account must be granted roles/iam.serviceAccountTokenCreator
+// on the next service account in the chain.
+//
+// For example, if a base service account of SA1 is trying to impersonate target
+// service account SA2 while using delegate service accounts DSA1 and DSA2,
+// the following must be true:
+//
+//   1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
+//      DSA1.
+//   2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
+//   3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
+//
+// The resulting impersonated credential will either have the default scopes of
+// the client being instantiated or the scopes from WithScopes if provided.
+// Scopes are required for creating impersonated credentials, so if this option
+// is used while not using a NewClient/NewService function, WithScopes must also
+// be explicitly passed in as well.
+//
+// If the base credential is an authorized user and not a service account, or if
+// the option WithQuotaProject is set, the target service account must have a
+// role that grants the serviceusage.services.use permission such as
+// roles/serviceusage.serviceUsageConsumer.
+//
+// This is an EXPERIMENTAL API and may be changed or removed in the future.
+func ImpersonateCredentials(target string, delegates ...string) ClientOption { + return impersonateServiceAccount{ + target: target, + delegates: delegates, + } +} + +type impersonateServiceAccount struct { + target string + delegates []string +} + +func (i impersonateServiceAccount) Apply(o *internal.DialSettings) { + o.ImpersonationConfig = &impersonate.Config{ + Target: i.target, + } + o.ImpersonationConfig.Delegates = make([]string, len(i.delegates)) + copy(o.ImpersonationConfig.Delegates, i.delegates) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json new file mode 100644 index 00000000000..1e076ab66d4 --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -0,0 +1,4455 @@ +{ + "auth": { + "oauth2": { + "scopes": { + "https://www.googleapis.com/auth/cloud-platform": { + "description": "View and manage your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/cloud-platform.read-only": { + "description": "View your data across Google Cloud Platform services" + }, + "https://www.googleapis.com/auth/devstorage.full_control": { + "description": "Manage your data and permissions in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_only": { + "description": "View your data in Google Cloud Storage" + }, + "https://www.googleapis.com/auth/devstorage.read_write": { + "description": "Manage your data in Google Cloud Storage" + } + } + } + }, + "basePath": "/storage/v1/", + "baseUrl": "https://storage.googleapis.com/storage/v1/", + "batchPath": "batch/storage/v1", + "description": "Stores and retrieves potentially large, immutable data objects.", + "discoveryVersion": "v1", + "documentationLink": "https://developers.google.com/storage/docs/json_api/", + "etag": "\"3133373531323239383338313531333236393038\"", + "icons": { + "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", + "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" + }, + "id": "storage:v1", + "kind": "discovery#restDescription", + "labels": [ + "labs" + ], + "name": "storage", + "ownerDomain": "google.com", + "ownerName": "Google", + "parameters": { + "alt": { + "default": "json", + "description": "Data format for the response.", + "enum": [ + "json" + ], + "enumDescriptions": [ + "Responses with Content-Type of application/json" + ], + "location": "query", + "type": "string" + }, + "fields": { + "description": "Selector specifying which fields to include in a partial response.", + "location": "query", + "type": "string" + }, + "key": { + "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.", + "location": "query", + "type": "string" + }, + "oauth_token": { + "description": "OAuth 2.0 token for the current user.", + "location": "query", + "type": "string" + }, + "prettyPrint": { + "default": "true", + "description": "Returns response with indentations and line breaks.", + "location": "query", + "type": "boolean" + }, + "quotaUser": { + "description": "An opaque string that represents a user for quota purposes. Must not exceed 40 characters.", + "location": "query", + "type": "string" + }, + "userIp": { + "description": "Deprecated. 
Please use quotaUser instead.", + "location": "query", + "type": "string" + } + }, + "protocol": "rest", + "resources": { + "bucketAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.", + "httpMethod": "DELETE", + "id": "storage.bucketAccessControls.delete", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the ACL entry for the specified entity on the specified bucket.", + "httpMethod": "GET", + "id": "storage.bucketAccessControls.get", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new ACL entry on the specified bucket.", + "httpMethod": "POST", + "id": "storage.bucketAccessControls.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves ACL entries on the specified bucket.", + "httpMethod": "GET", + "id": "storage.bucketAccessControls.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl", + "response": { + "$ref": "BucketAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches an ACL entry on the specified bucket.", + "httpMethod": "PATCH", + "id": "storage.bucketAccessControls.patch", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates an ACL entry on the specified bucket.", + "httpMethod": "PUT", + "id": "storage.bucketAccessControls.update", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/acl/{entity}", + "request": { + "$ref": "BucketAccessControl" + }, + "response": { + "$ref": "BucketAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "buckets": { + "methods": { + "delete": { + "description": "Permanently deletes an empty bucket.", + "httpMethod": "DELETE", + "id": "storage.buckets.delete", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the bucket if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the bucket if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Returns metadata for the specified bucket.", + "httpMethod": "GET", + "id": "storage.buckets.get", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified bucket.", + "httpMethod": "GET", + "id": "storage.buckets.getIamPolicy", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new bucket.", + "httpMethod": "POST", + "id": "storage.buckets.insert", + "parameterOrder": [ + "project" + ], + "parameters": { + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "project": { + "description": "A valid API project identifier.", + "location": "query", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. 
Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "b", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Retrieves a list of buckets for a given project.", + "httpMethod": "GET", + "id": "storage.buckets.list", + "parameterOrder": [ + "project" + ], + "parameters": { + "maxResults": { + "default": "1000", + "description": "Maximum number of buckets to return in a single response. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to buckets whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "project": { + "description": "A valid API project identifier.", + "location": "query", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." 
+ ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "b", + "response": { + "$ref": "Buckets" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "lockRetentionPolicy": { + "description": "Locks retention policy on a bucket.", + "httpMethod": "POST", + "id": "storage.buckets.lockRetentionPolicy", + "parameterOrder": [ + "bucket", + "ifMetagenerationMatch" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/lockRetentionPolicy", + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "patch": { + "description": "Patches a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + "httpMethod": "PATCH", + "id": "storage.buckets.patch", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." 
+ ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified bucket.", + "httpMethod": "PUT", + "id": "storage.buckets.setIamPolicy", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.buckets.testIamPermissions", + "parameterOrder": [ + "bucket", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates a bucket. Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + "httpMethod": "PUT", + "id": "storage.buckets.update", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this bucket.", + "enum": [ + "authenticatedRead", + "private", + "projectPrivate", + "publicRead", + "publicReadWrite" + ], + "enumDescriptions": [ + "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + "Project team owners get OWNER access.", + "Project team members get access according to their roles.", + "Project team owners get OWNER access, and allUsers get READER access.", + "Project team owners get OWNER access, and allUsers get WRITER access." + ], + "location": "query", + "type": "string" + }, + "predefinedDefaultObjectAcl": { + "description": "Apply a predefined set of default object access controls to this bucket.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit owner, acl and defaultObjectAcl properties." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}", + "request": { + "$ref": "Bucket" + }, + "response": { + "$ref": "Bucket" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "channels": { + "methods": { + "stop": { + "description": "Stop watching resources through this channel", + "httpMethod": "POST", + "id": "storage.channels.stop", + "path": "channels/stop", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "defaultObjectAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + "httpMethod": "DELETE", + "id": "storage.defaultObjectAccessControls.delete", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + "httpMethod": "GET", + "id": "storage.defaultObjectAccessControls.get", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new default object ACL entry on the specified bucket.", + "httpMethod": "POST", + "id": "storage.defaultObjectAccessControls.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves default object ACL entries on the specified bucket.", + "httpMethod": "GET", + "id": "storage.defaultObjectAccessControls.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl", + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches a default object ACL entry on the specified bucket.", + "httpMethod": "PATCH", + "id": "storage.defaultObjectAccessControls.patch", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates a default object ACL entry on the specified bucket.", + "httpMethod": "PUT", + "id": "storage.defaultObjectAccessControls.update", + "parameterOrder": [ + "bucket", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/defaultObjectAcl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "notifications": { + "methods": { + "delete": { + "description": "Permanently deletes a notification subscription.", + "httpMethod": "DELETE", + "id": "storage.notifications.delete", + "parameterOrder": [ + "bucket", + "notification" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "notification": { + "description": "ID of the notification to delete.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs/{notification}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "View a notification configuration.", + "httpMethod": "GET", + "id": "storage.notifications.get", + "parameterOrder": [ + "bucket", + "notification" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "notification": { + "description": "Notification ID", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs/{notification}", + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a notification subscription for a given bucket.", + "httpMethod": "POST", + "id": "storage.notifications.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the notification.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs", + "request": { + "$ref": "Notification" + }, + "response": { + "$ref": "Notification" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Retrieves a list of notification subscriptions for a given bucket.", + "httpMethod": "GET", + "id": "storage.notifications.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of a Google Cloud Storage bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/notificationConfigs", + "response": { + "$ref": "Notifications" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, + "objectAccessControls": { + "methods": { + "delete": { + "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + "httpMethod": "DELETE", + "id": "storage.objectAccessControls.delete", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "get": { + "description": "Returns the ACL entry for the specified entity on the specified object.", + "httpMethod": "GET", + "id": "storage.objectAccessControls.get", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "insert": { + "description": "Creates a new ACL entry on the specified object.", + "httpMethod": "POST", + "id": "storage.objectAccessControls.insert", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "list": { + "description": "Retrieves ACL entries on the specified object.", + "httpMethod": "GET", + "id": "storage.objectAccessControls.list", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl", + "response": { + "$ref": "ObjectAccessControls" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "patch": { + "description": "Patches an ACL entry on the specified object.", + "httpMethod": "PATCH", + "id": "storage.objectAccessControls.patch", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "update": { + "description": "Updates an ACL entry on the specified object.", + "httpMethod": "PUT", + "id": "storage.objectAccessControls.update", + "parameterOrder": [ + "bucket", + "object", + "entity" + ], + "parameters": { + "bucket": { + "description": "Name of a bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "entity": { + "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/acl/{entity}", + "request": { + "$ref": "ObjectAccessControl" + }, + "response": { + "$ref": "ObjectAccessControl" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "objects": { + "methods": { + "compose": { + "description": "Concatenates a list of existing objects into a new object in the same bucket.", + "httpMethod": "POST", + "id": "storage.objects.compose", + "parameterOrder": [ + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "kmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{destinationBucket}/o/{destinationObject}/compose", + "request": { + "$ref": "ComposeRequest" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "copy": { + "description": "Copies a source object to a destination object. Optionally overrides metadata.", + "httpMethod": "POST", + "id": "storage.objects.copy", + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationKmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. Required when the object metadata is not otherwise provided. 
Overrides the object metadata's name value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." 
+ ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "sourceBucket": { + "description": "Name of the bucket in which to find the source object.", + "location": "path", + "required": true, + "type": "string" + }, + "sourceGeneration": { + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "delete": { + "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + "httpMethod": "DELETE", + "id": "storage.objects.delete", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Retrieves an object or its metadata.", + "httpMethod": "GET", + "id": "storage.objects.get", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaDownload": true, + "useMediaDownloadService": true + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified object.", + "httpMethod": "GET", + "id": "storage.objects.getIamPolicy", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Stores a new object and metadata.", + "httpMethod": "POST", + "id": "storage.objects.insert", + "mediaUpload": { + "accept": [ + "*/*" + ], + "protocols": { + "resumable": { + "multipart": true, + "path": "/resumable/upload/storage/v1/b/{bucket}/o" + }, + "simple": { + "multipart": true, + "path": "/upload/storage/v1/b/{bucket}/o" + } + } + }, + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "contentEncoding": { + "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "kmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "name": { + "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "query", + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsMediaUpload": true + }, + "list": { + "description": "Retrieves a list of objects matching the criteria.", + "httpMethod": "GET", + "id": "storage.objects.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for objects.", + "location": "path", + "required": true, + "type": "string" + }, + "delimiter": { + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query", + "type": "string" + }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "includeTrailingDelimiter": { + "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. 
For more information, see Object Versioning.", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o", + "response": { + "$ref": "Objects" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + }, + "patch": { + "description": "Patches an object's metadata.", + "httpMethod": "PATCH", + "id": "storage.objects.patch", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." 
+ ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "rewrite": { + "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + "httpMethod": "POST", + "id": "storage.objects.rewrite", + "parameterOrder": [ + "sourceBucket", + "sourceObject", + "destinationBucket", + "destinationObject" + ], + "parameters": { + "destinationBucket": { + "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationKmsKeyName": { + "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + "location": "query", + "type": "string" + }, + "destinationObject": { + "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "destinationPredefinedAcl": { + "description": "Apply a predefined set of access controls to the destination object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceGenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifSourceMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "maxBytesRewrittenPerCall": { + "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + "format": "int64", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "rewriteToken": { + "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + "location": "query", + "type": "string" + }, + "sourceBucket": { + "description": "Name of the bucket in which to find the source object.", + "location": "path", + "required": true, + "type": "string" + }, + "sourceGeneration": { + "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "sourceObject": { + "description": "Name of the source object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "RewriteResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified object.", + "httpMethod": "PUT", + "id": "storage.objects.setIamPolicy", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.objects.testIamPermissions", + "parameterOrder": [ + "bucket", + "object", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "update": { + "description": "Updates an object's metadata.", + "httpMethod": "PUT", + "id": "storage.objects.update", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "generation": { + "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "predefinedAcl": { + "description": "Apply a predefined set of access controls to this object.", + "enum": [ + "authenticatedRead", + "bucketOwnerFullControl", + "bucketOwnerRead", + "private", + "projectPrivate", + "publicRead" + ], + "enumDescriptions": [ + "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + "Object owner gets OWNER access, and project team owners get OWNER access.", + "Object owner gets OWNER access, and project team owners get READER access.", + "Object owner gets OWNER access.", + "Object owner gets OWNER access, and project team members get access according to their roles.", + "Object owner gets OWNER access, and allUsers get READER access." + ], + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." 
+ ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "watchAll": { + "description": "Watch for changes on all objects in a bucket.", + "httpMethod": "POST", + "id": "storage.objects.watchAll", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for objects.", + "location": "path", + "required": true, + "type": "string" + }, + "delimiter": { + "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + "location": "query", + "type": "string" + }, + "endOffset": { + "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "includeTrailingDelimiter": { + "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + "location": "query", + "type": "boolean" + }, + "maxResults": { + "default": "1000", + "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "Filter results to objects whose names begin with this prefix.", + "location": "query", + "type": "string" + }, + "projection": { + "description": "Set of properties to return. Defaults to noAcl.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "startOffset": { + "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + }, + "versions": { + "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + "location": "query", + "type": "boolean" + } + }, + "path": "b/{bucket}/o/watch", + "request": { + "$ref": "Channel", + "parameterName": "resource" + }, + "response": { + "$ref": "Channel" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ], + "supportsSubscription": true + } + } + }, + "projects": { + "resources": { + "hmacKeys": { + "methods": { + "create": { + "description": "Creates a new HMAC key for the specified service account.", + "httpMethod": "POST", + "id": "storage.projects.hmacKeys.create", + "parameterOrder": [ + "projectId", + "serviceAccountEmail" + ], + "parameters": { + "projectId": { + "description": "Project ID owning the service account.", + "location": "path", + "required": true, + "type": "string" + }, + "serviceAccountEmail": { + "description": "Email address of the service account.", + "location": "query", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys", + "response": { + "$ref": "HmacKey" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "delete": { + "description": "Deletes an HMAC key.", + "httpMethod": "DELETE", + "id": "storage.projects.hmacKeys.delete", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key to be deleted.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the requested key", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Retrieves an HMAC key's metadata", + "httpMethod": "GET", + "id": "storage.projects.hmacKeys.get", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account of the requested key.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "response": { + "$ref": "HmacKeyMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + 
"https://www.googleapis.com/auth/devstorage.read_only" + ] + }, + "list": { + "description": "Retrieves a list of HMAC keys matching the criteria.", + "httpMethod": "GET", + "id": "storage.projects.hmacKeys.list", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "maxResults": { + "default": "250", + "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. If the number of service accounts in a single response is too high, the page will truncated and a next page token will be returned.", + "format": "uint32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "projectId": { + "description": "Name of the project in which to look for HMAC keys.", + "location": "path", + "required": true, + "type": "string" + }, + "serviceAccountEmail": { + "description": "If present, only keys for the given service account are returned.", + "location": "query", + "type": "string" + }, + "showDeletedKeys": { + "description": "Whether or not to show keys in the DELETED state.", + "location": "query", + "type": "boolean" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys", + "response": { + "$ref": "HmacKeysMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only" + ] + }, + "update": { + "description": "Updates the state of an HMAC key. 
See the HMAC Key resource descriptor for valid states.", + "httpMethod": "PUT", + "id": "storage.projects.hmacKeys.update", + "parameterOrder": [ + "projectId", + "accessId" + ], + "parameters": { + "accessId": { + "description": "Name of the HMAC key being updated.", + "location": "path", + "required": true, + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account of the updated key.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/hmacKeys/{accessId}", + "request": { + "$ref": "HmacKeyMetadata" + }, + "response": { + "$ref": "HmacKeyMetadata" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + } + } + }, + "serviceAccount": { + "methods": { + "get": { + "description": "Get the email address of this project's Google Cloud Storage service account.", + "httpMethod": "GET", + "id": "storage.projects.serviceAccount.get", + "parameterOrder": [ + "projectId" + ], + "parameters": { + "projectId": { + "description": "Project ID", + "location": "path", + "required": true, + "type": "string" + }, + "provisionalUserProject": { + "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request.", + "location": "query", + "type": "string" + } + }, + "path": "projects/{projectId}/serviceAccount", + "response": { + "$ref": "ServiceAccount" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + } + } + } + }, + "revision": "20200927", + "rootUrl": "https://storage.googleapis.com/", + "schemas": { + "Bucket": { + "description": "A bucket.", + "id": "Bucket", + "properties": { + "acl": { + "annotations": { + "required": [ + "storage.buckets.update" + ] + }, + "description": "Access controls on the bucket.", + "items": { + "$ref": "BucketAccessControl" + }, + "type": "array" + }, + "billing": { + "description": "The bucket's billing configuration.", + "properties": { + "requesterPays": { + "description": "When set to true, Requester Pays is enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "cors": { + "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.", + "items": { + "properties": { + "maxAgeSeconds": { + "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.", + "format": "int32", + "type": "integer" + }, + "method": { + "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "origin": { + "description": "The list of Origins eligible to receive CORS response headers. 
Note: \"*\" is permitted in the list of origins, and means \"any Origin\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "responseHeader": { + "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "type": "array" + }, + "defaultEventBasedHold": { + "description": "The default value for event-based hold on newly created objects in this bucket. Event-based hold is a way to retain objects indefinitely until an event occurs, signified by the hold's release. After being released, such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false. Objects under event-based hold cannot be deleted, overwritten or archived until the hold is removed.", + "type": "boolean" + }, + "defaultObjectAcl": { + "description": "Default access controls to apply to new objects when no ACL is provided.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "encryption": { + "description": "Encryption configuration for a bucket.", + "properties": { + "defaultKmsKeyName": { + "description": "A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified.", + "type": "string" + } + }, + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the bucket.", + "type": "string" + }, + "iamConfiguration": { + "description": "The bucket's IAM configuration.", + "properties": { + "bucketPolicyOnly": { + "description": "The bucket's uniform bucket-level access configuration. The feature was formerly known as Bucket Policy Only. For backward compatibility, this field will be populated with identical information as the uniformBucketLevelAccess field. We recommend using the uniformBucketLevelAccess field to enable and disable the feature.", + "properties": { + "enabled": { + "description": "If set, access is controlled only by bucket-level or above IAM policies.", + "type": "boolean" + }, + "lockedTime": { + "description": "The deadline for changing iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed from true to false until the locked time, after which the field is immutable.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "uniformBucketLevelAccess": { + "description": "The bucket's uniform bucket-level access configuration.", + "properties": { + "enabled": { + "description": "If set, access is controlled only by bucket-level or above IAM policies.", + "type": "boolean" + }, + "lockedTime": { + "description": "The deadline for changing iamConfiguration.uniformBucketLevelAccess.enabled from true to false in RFC 3339 format. 
iamConfiguration.uniformBucketLevelAccess.enabled may be changed from true to false until the locked time, after which the field is immutable.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "id": { + "description": "The ID of the bucket. For buckets, the id and name properties are the same.", + "type": "string" + }, + "kind": { + "default": "storage#bucket", + "description": "The kind of item this is. For buckets, this is always storage#bucket.", + "type": "string" + }, + "labels": { + "additionalProperties": { + "description": "An individual label entry.", + "type": "string" + }, + "description": "User-provided labels, in key/value pairs.", + "type": "object" + }, + "lifecycle": { + "description": "The bucket's lifecycle configuration. See lifecycle management for more information.", + "properties": { + "rule": { + "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.", + "items": { + "properties": { + "action": { + "description": "The action to take.", + "properties": { + "storageClass": { + "description": "Target storage class. Required iff the type of the action is SetStorageClass.", + "type": "string" + }, + "type": { + "description": "Type of the action. Currently, only Delete and SetStorageClass are supported.", + "type": "string" + } + }, + "type": "object" + }, + "condition": { + "description": "The condition(s) under which the action will be taken.", + "properties": { + "age": { + "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.", + "format": "int32", + "type": "integer" + }, + "createdBefore": { + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.", + "format": "date", + "type": "string" + }, + "customTimeBefore": { + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the custom time on an object is before this date in UTC.", + "format": "date", + "type": "string" + }, + "daysSinceCustomTime": { + "description": "Number of days elapsed since the user-specified timestamp set on an object. The condition is satisfied if the days elapsed is at least this number. If no custom timestamp is specified on an object, the condition does not apply.", + "format": "int32", + "type": "integer" + }, + "daysSinceNoncurrentTime": { + "description": "Number of days elapsed since the noncurrent timestamp of an object. The condition is satisfied if the days elapsed is at least this number. This condition is relevant only for versioned objects. The value of the field must be a nonnegative integer. If it's zero, the object version will become eligible for Lifecycle action as soon as it becomes noncurrent.", + "format": "int32", + "type": "integer" + }, + "isLive": { + "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects.", + "type": "boolean" + }, + "matchesPattern": { + "description": "A regular expression that satisfies the RE2 syntax. This condition is satisfied when the name of the object matches the RE2 pattern. 
Note: This feature is currently in the \"Early Access\" launch stage and is only available to a whitelisted set of users; that means that this feature may be changed in backward-incompatible ways and that it is not guaranteed to be released.", + "type": "string" + }, + "matchesStorageClass": { + "description": "Objects having any of the storage classes specified by this condition will be matched. Values include MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and DURABLE_REDUCED_AVAILABILITY.", + "items": { + "type": "string" + }, + "type": "array" + }, + "noncurrentTimeBefore": { + "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when the noncurrent time on an object is before this date in UTC. This condition is relevant only for versioned objects.", + "format": "date", + "type": "string" + }, + "numNewerVersions": { + "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "location": { + "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list.", + "type": "string" + }, + "locationType": { + "description": "The type of the bucket location.", + "type": "string" + }, + "logging": { + "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.", + "properties": { + "logBucket": { + "description": "The destination bucket where the current bucket's logs should be placed.", + "type": "string" + }, + "logObjectPrefix": { + "description": "A prefix for log object names.", + "type": "string" + } + }, + "type": "object" + }, + "metageneration": { + "description": "The metadata generation of this bucket.", + "format": "int64", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "storage.buckets.insert" + ] + }, + "description": "The name of the bucket.", + "type": "string" + }, + "owner": { + "description": "The owner of the bucket. This is always the project team's owner group.", + "properties": { + "entity": { + "description": "The entity, in the form project-owner-projectId.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity.", + "type": "string" + } + }, + "type": "object" + }, + "projectNumber": { + "description": "The project number of the project the bucket belongs to.", + "format": "uint64", + "type": "string" + }, + "retentionPolicy": { + "description": "The bucket's retention policy. The retention policy enforces a minimum retention time for all objects contained in the bucket, based on their creation time. Any attempt to overwrite or delete objects younger than the retention period will result in a PERMISSION_DENIED error. An unlocked retention policy can be modified or removed from the bucket via a storage.buckets.update operation. A locked retention policy cannot be removed or shortened in duration for the lifetime of the bucket. 
Attempting to remove or decrease period of a locked retention policy will result in a PERMISSION_DENIED error.", + "properties": { + "effectiveTime": { + "description": "Server-determined value that indicates the time from which policy was enforced and effective. This value is in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "isLocked": { + "description": "Once locked, an object retention policy cannot be modified.", + "type": "boolean" + }, + "retentionPeriod": { + "description": "The duration in seconds that objects need to be retained. Retention duration must be greater than zero and less than 100 years. Note that enforcement of retention periods less than a day is not guaranteed. Such periods should only be used for testing purposes.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "selfLink": { + "description": "The URI of this bucket.", + "type": "string" + }, + "storageClass": { + "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", + "type": "string" + }, + "timeCreated": { + "description": "The creation time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The modification time of the bucket in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "versioning": { + "description": "The bucket's versioning configuration.", + "properties": { + "enabled": { + "description": "While set to true, versioning is fully enabled for this bucket.", + "type": "boolean" + } + }, + "type": "object" + }, + "website": { + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. See the Static Website Examples for more information.", + "properties": { + "mainPageSuffix": { + "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages.", + "type": "string" + }, + "notFoundPage": { + "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result.", + "type": "string" + } + }, + "type": "object" + }, + "zoneAffinity": { + "description": "The zone or zones from which the bucket is intended to use zonal quota. Requests for data from outside the specified affinities are still allowed but won't be able to use zonal quota. 
The zone or zones need to be within the bucket location otherwise the requests will fail with a 400 Bad Request response.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "BucketAccessControl": { + "description": "An access-control entry.", + "id": "BucketAccessControl", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "domain": { + "description": "The domain associated with the entity, if any.", + "type": "string" + }, + "email": { + "description": "The email address associated with the entity, if any.", + "type": "string" + }, + "entity": { + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + }, + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity, if any.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the access-control entry.", + "type": "string" + }, + "id": { + "description": "The ID of the access-control entry.", + "type": "string" + }, + "kind": { + "default": "storage#bucketAccessControl", + "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.", + "type": "string" + }, + "projectTeam": { + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "description": "The project number.", + "type": "string" + }, + "team": { + "description": "The team.", + "type": "string" + } + }, + "type": "object" + }, + "role": { + "annotations": { + "required": [ + "storage.bucketAccessControls.insert" + ] + }, + "description": "The access permission for the entity.", + "type": "string" + }, + "selfLink": { + "description": "The link to this access-control entry.", + "type": "string" + } + }, + "type": "object" + }, + "BucketAccessControls": { + "description": "An access-control list.", + "id": "BucketAccessControls", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "BucketAccessControl" + }, + "type": "array" + }, + "kind": { + "default": "storage#bucketAccessControls", + "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.", + "type": "string" + } + }, + "type": "object" + }, + "Buckets": { + "description": "A list of buckets.", + "id": "Buckets", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Bucket" + }, + "type": "array" + }, + "kind": { + "default": "storage#buckets", + "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. 
Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "Channel": { + "description": "A notification channel used to watch for resource changes.", + "id": "Channel", + "properties": { + "address": { + "description": "The address where notifications are delivered for this channel.", + "type": "string" + }, + "expiration": { + "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "A UUID or similar unique string that identifies this channel.", + "type": "string" + }, + "kind": { + "default": "api#channel", + "description": "Identifies this as a notification channel used to watch for changes to a resource, which is \"api#channel\".", + "type": "string" + }, + "params": { + "additionalProperties": { + "description": "Declares a new parameter by name.", + "type": "string" + }, + "description": "Additional parameters controlling delivery channel behavior. Optional.", + "type": "object" + }, + "payload": { + "description": "A Boolean value to indicate whether payload is wanted. Optional.", + "type": "boolean" + }, + "resourceId": { + "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions.", + "type": "string" + }, + "resourceUri": { + "description": "A version-specific identifier for the watched resource.", + "type": "string" + }, + "token": { + "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional.", + "type": "string" + }, + "type": { + "description": "The type of delivery mechanism used for this channel.", + "type": "string" + } + }, + "type": "object" + }, + "ComposeRequest": { + "description": "A Compose request.", + "id": "ComposeRequest", + "properties": { + "destination": { + "$ref": "Object", + "description": "Properties of the resulting object." + }, + "kind": { + "default": "storage#composeRequest", + "description": "The kind of item this is.", + "type": "string" + }, + "sourceObjects": { + "annotations": { + "required": [ + "storage.objects.compose" + ] + }, + "description": "The list of source objects that will be concatenated into a single object.", + "items": { + "properties": { + "generation": { + "description": "The generation of this object to use as the source.", + "format": "int64", + "type": "string" + }, + "name": { + "annotations": { + "required": [ + "storage.objects.compose" + ] + }, + "description": "The source object's name. All source objects must reside in the same bucket.", + "type": "string" + }, + "objectPreconditions": { + "description": "Conditions that must be met for this operation to execute.", + "properties": { + "ifGenerationMatch": { + "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "type": "array" + } + }, + "type": "object" + }, + "Expr": { + "description": "Represents an expression text. 
Example: title: \"User account presence\" description: \"Determines whether the request has a user account\" expression: \"size(request.user) \u003e 0\"", + "id": "Expr", + "properties": { + "description": { + "description": "An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.", + "type": "string" + }, + "expression": { + "description": "Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported.", + "type": "string" + }, + "location": { + "description": "An optional string indicating the location of the expression for error reporting, e.g. a file name and a position in the file.", + "type": "string" + }, + "title": { + "description": "An optional title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.", + "type": "string" + } + }, + "type": "object" + }, + "HmacKey": { + "description": "JSON template to produce a JSON-style HMAC Key resource for Create responses.", + "id": "HmacKey", + "properties": { + "kind": { + "default": "storage#hmacKey", + "description": "The kind of item this is. For HMAC keys, this is always storage#hmacKey.", + "type": "string" + }, + "metadata": { + "$ref": "HmacKeyMetadata", + "description": "Key metadata." + }, + "secret": { + "description": "HMAC secret key material.", + "type": "string" + } + }, + "type": "object" + }, + "HmacKeyMetadata": { + "description": "JSON template to produce a JSON-style HMAC Key metadata resource.", + "id": "HmacKeyMetadata", + "properties": { + "accessId": { + "description": "The ID of the HMAC Key.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the HMAC key.", + "type": "string" + }, + "id": { + "description": "The ID of the HMAC key, including the Project ID and the Access ID.", + "type": "string" + }, + "kind": { + "default": "storage#hmacKeyMetadata", + "description": "The kind of item this is. For HMAC Key metadata, this is always storage#hmacKeyMetadata.", + "type": "string" + }, + "projectId": { + "description": "Project ID owning the service account to which the key authenticates.", + "type": "string" + }, + "selfLink": { + "description": "The link to this resource.", + "type": "string" + }, + "serviceAccountEmail": { + "description": "The email address of the key's associated service account.", + "type": "string" + }, + "state": { + "description": "The state of the key. Can be one of ACTIVE, INACTIVE, or DELETED.", + "type": "string" + }, + "timeCreated": { + "description": "The creation time of the HMAC key in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The last modification time of the HMAC key metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "HmacKeysMetadata": { + "description": "A list of hmacKeys.", + "id": "HmacKeysMetadata", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "HmacKeyMetadata" + }, + "type": "array" + }, + "kind": { + "default": "storage#hmacKeysMetadata", + "description": "The kind of item this is. For lists of hmacKeys, this is always storage#hmacKeysMetadata.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. 
Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, + "Notification": { + "description": "A subscription to receive Google PubSub notifications.", + "id": "Notification", + "properties": { + "custom_attributes": { + "additionalProperties": { + "type": "string" + }, + "description": "An optional list of additional attributes to attach to each Cloud PubSub message published for this notification subscription.", + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for this subscription notification.", + "type": "string" + }, + "event_types": { + "description": "If present, only send notifications about listed event types. If empty, send notifications for all event types.", + "items": { + "type": "string" + }, + "type": "array" + }, + "id": { + "description": "The ID of the notification.", + "type": "string" + }, + "kind": { + "default": "storage#notification", + "description": "The kind of item this is. For notifications, this is always storage#notification.", + "type": "string" + }, + "object_name_prefix": { + "description": "If present, only apply this notification configuration to object names that begin with this prefix.", + "type": "string" + }, + "payload_format": { + "annotations": { + "required": [ + "storage.notifications.insert" + ] + }, + "default": "JSON_API_V1", + "description": "The desired content of the Payload.", + "type": "string" + }, + "selfLink": { + "description": "The canonical URL of this notification.", + "type": "string" + }, + "topic": { + "annotations": { + "required": [ + "storage.notifications.insert" + ] + }, + "description": "The Cloud PubSub topic to which this subscription publishes. Formatted as: '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'", + "type": "string" + } + }, + "type": "object" + }, + "Notifications": { + "description": "A list of notification subscriptions.", + "id": "Notifications", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Notification" + }, + "type": "array" + }, + "kind": { + "default": "storage#notifications", + "description": "The kind of item this is. For lists of notifications, this is always storage#notifications.", + "type": "string" + } + }, + "type": "object" + }, + "Object": { + "description": "An object.", + "id": "Object", + "properties": { + "acl": { + "annotations": { + "required": [ + "storage.objects.update" + ] + }, + "description": "Access controls on the object.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "bucket": { + "description": "The name of the bucket containing this object.", + "type": "string" + }, + "cacheControl": { + "description": "Cache-Control directive for the object data. If omitted, and the object is accessible to all anonymous users, the default will be public, max-age=3600.", + "type": "string" + }, + "componentCount": { + "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.", + "format": "int32", + "type": "integer" + }, + "contentDisposition": { + "description": "Content-Disposition of the object data.", + "type": "string" + }, + "contentEncoding": { + "description": "Content-Encoding of the object data.", + "type": "string" + }, + "contentLanguage": { + "description": "Content-Language of the object data.", + "type": "string" + }, + "contentType": { + "description": "Content-Type of the object data. 
If an object is stored without a Content-Type, it is served as application/octet-stream.", + "type": "string" + }, + "crc32c": { + "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices.", + "type": "string" + }, + "customTime": { + "description": "A timestamp in RFC 3339 format specified by the user for an object.", + "format": "date-time", + "type": "string" + }, + "customerEncryption": { + "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.", + "properties": { + "encryptionAlgorithm": { + "description": "The encryption algorithm.", + "type": "string" + }, + "keySha256": { + "description": "SHA256 hash value of the encryption key.", + "type": "string" + } + }, + "type": "object" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the object.", + "type": "string" + }, + "eventBasedHold": { + "description": "Whether an object is under event-based hold. Event-based hold is a way to retain objects until an event occurs, which is signified by the hold's release (i.e. this value is set to false). After being released (set to false), such objects will be subject to bucket-level retention (if any). One sample use case of this flag is for banks to hold loan documents for at least 3 years after loan is paid in full. Here, bucket-level retention is 3 years and the event is the loan being paid in full. In this example, these objects will be held intact for any number of years until the event has occurred (event-based hold on the object is released) and then 3 more years after that. That means retention duration of the objects begins from the moment event-based hold transitioned from true to false.", + "type": "boolean" + }, + "generation": { + "description": "The content generation of this object. Used for object versioning.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "The ID of the object, including the bucket name, object name, and generation number.", + "type": "string" + }, + "kind": { + "default": "storage#object", + "description": "The kind of item this is. For objects, this is always storage#object.", + "type": "string" + }, + "kmsKeyName": { + "description": "Not currently supported. Specifying the parameter causes the request to fail with status code 400 - Bad Request.", + "type": "string" + }, + "md5Hash": { + "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices.", + "type": "string" + }, + "mediaLink": { + "description": "Media download link.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "description": "An individual metadata entry.", + "type": "string" + }, + "description": "User-provided metadata, in key/value pairs.", + "type": "object" + }, + "metageneration": { + "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the object. Required if not specified by URL parameter.", + "type": "string" + }, + "owner": { + "description": "The owner of the object. 
This will always be the uploader of the object.", + "properties": { + "entity": { + "description": "The entity, in the form user-userId.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity.", + "type": "string" + } + }, + "type": "object" + }, + "retentionExpirationTime": { + "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).", + "format": "date-time", + "type": "string" + }, + "selfLink": { + "description": "The link to this object.", + "type": "string" + }, + "size": { + "description": "Content-Length of the data in bytes.", + "format": "uint64", + "type": "string" + }, + "storageClass": { + "description": "Storage class of the object.", + "type": "string" + }, + "temporaryHold": { + "description": "Whether an object is under temporary hold. While this flag is set to true, the object is protected against deletion and overwrites. A common use case of this flag is regulatory investigations where objects need to be retained while the investigation is ongoing. Note that unlike event-based hold, temporary hold does not impact retention expiration time of an object.", + "type": "boolean" + }, + "timeCreated": { + "description": "The creation time of the object in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "timeDeleted": { + "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "format": "date-time", + "type": "string" + }, + "timeStorageClassUpdated": { + "description": "The time at which the object's storage class was last changed. When the object is initially created, it will be set to timeCreated.", + "format": "date-time", + "type": "string" + }, + "updated": { + "description": "The modification time of the object metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "ObjectAccessControl": { + "description": "An access-control entry.", + "id": "ObjectAccessControl", + "properties": { + "bucket": { + "description": "The name of the bucket.", + "type": "string" + }, + "domain": { + "description": "The domain associated with the entity, if any.", + "type": "string" + }, + "email": { + "description": "The email address associated with the entity, if any.", + "type": "string" + }, + "entity": { + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + }, + "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. 
\n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.", + "type": "string" + }, + "entityId": { + "description": "The ID for the entity, if any.", + "type": "string" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the access-control entry.", + "type": "string" + }, + "generation": { + "description": "The content generation of the object, if applied to an object.", + "format": "int64", + "type": "string" + }, + "id": { + "description": "The ID of the access-control entry.", + "type": "string" + }, + "kind": { + "default": "storage#objectAccessControl", + "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.", + "type": "string" + }, + "object": { + "description": "The name of the object, if applied to an object.", + "type": "string" + }, + "projectTeam": { + "description": "The project team associated with the entity, if any.", + "properties": { + "projectNumber": { + "description": "The project number.", + "type": "string" + }, + "team": { + "description": "The team.", + "type": "string" + } + }, + "type": "object" + }, + "role": { + "annotations": { + "required": [ + "storage.defaultObjectAccessControls.insert", + "storage.objectAccessControls.insert" + ] + }, + "description": "The access permission for the entity.", + "type": "string" + }, + "selfLink": { + "description": "The link to this access-control entry.", + "type": "string" + } + }, + "type": "object" + }, + "ObjectAccessControls": { + "description": "An access-control list.", + "id": "ObjectAccessControls", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "ObjectAccessControl" + }, + "type": "array" + }, + "kind": { + "default": "storage#objectAccessControls", + "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.", + "type": "string" + } + }, + "type": "object" + }, + "Objects": { + "description": "A list of objects.", + "id": "Objects", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "Object" + }, + "type": "array" + }, + "kind": { + "default": "storage#objects", + "description": "The kind of item this is. For lists of objects, this is always storage#objects.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + }, + "prefixes": { + "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "Policy": { + "description": "A bucket/object IAM policy.", + "id": "Policy", + "properties": { + "bindings": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + }, + "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", + "items": { + "properties": { + "condition": { + "$ref": "Expr", + "description": "The condition that is associated with this binding. NOTE: an unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently." 
+ }, + "members": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + }, + "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project", + "items": { + "type": "string" + }, + "type": "array" + }, + "role": { + "annotations": { + "required": [ + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy" + ] + }, + "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. \n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "etag": { + "description": "HTTP 1.1 Entity tag for the policy.", + "format": "byte", + "type": "string" + }, + "kind": { + "default": "storage#policy", + "description": "The kind of item this is. For policies, this is always storage#policy. This field is ignored on input.", + "type": "string" + }, + "resourceId": { + "description": "The ID of the resource to which this policy belongs. 
Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", + "type": "string" + }, + "version": { + "description": "The IAM policy format version.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "RewriteResponse": { + "description": "A rewrite response.", + "id": "RewriteResponse", + "properties": { + "done": { + "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response.", + "type": "boolean" + }, + "kind": { + "default": "storage#rewriteResponse", + "description": "The kind of item this is.", + "type": "string" + }, + "objectSize": { + "description": "The total size of the object being copied in bytes. This property is always present in the response.", + "format": "int64", + "type": "string" + }, + "resource": { + "$ref": "Object", + "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes." + }, + "rewriteToken": { + "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy.", + "type": "string" + }, + "totalBytesRewritten": { + "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, + "ServiceAccount": { + "description": "The Google Cloud Storage service account for a project.", + "id": "ServiceAccount", + "properties": { + "email_address": { + "description": "The email address of the service account.", + "type": "string" + }, + "kind": { + "default": "storage#serviceAccount", + "description": "The kind of item this is. For service accounts, this is always storage#serviceAccount.", + "type": "string" + } + }, + "type": "object" + }, + "TestIamPermissionsResponse": { + "description": "A storage.(buckets|objects).testIamPermissions response.", + "id": "TestIamPermissionsResponse", + "properties": { + "kind": { + "default": "storage#testIamPermissionsResponse", + "description": "The kind of item this is.", + "type": "string" + }, + "permissions": { + "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. 
\n- storage.objects.update — Update object metadata.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + } + }, + "servicePath": "storage/v1/", + "title": "Cloud Storage JSON API", + "version": "v1" +} \ No newline at end of file diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go new file mode 100644 index 00000000000..6c80946b217 --- /dev/null +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -0,0 +1,13407 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated file. DO NOT EDIT. + +// Package storage provides access to the Cloud Storage JSON API. +// +// This package is DEPRECATED. Use package cloud.google.com/go/storage instead. +// +// For product documentation, see: https://developers.google.com/storage/docs/json_api/ +// +// Creating a client +// +// Usage example: +// +// import "google.golang.org/api/storage/v1" +// ... +// ctx := context.Background() +// storageService, err := storage.NewService(ctx) +// +// In this example, Google Application Default Credentials are used for authentication. +// +// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// +// Other authentication options +// +// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// +// storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) +// +// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// +// storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) +// +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// +// config := &oauth2.Config{...} +// // ... +// token, err := config.Exchange(ctx, ...) +// storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) +// +// See https://godoc.org/google.golang.org/api/option/ for details on options. +package storage // import "google.golang.org/api/storage/v1" + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + googleapi "google.golang.org/api/googleapi" + gensupport "google.golang.org/api/internal/gensupport" + option "google.golang.org/api/option" + internaloption "google.golang.org/api/option/internaloption" + htransport "google.golang.org/api/transport/http" +) + +// Always reference these packages, just in case the auto-generated code +// below doesn't. +var _ = bytes.NewBuffer +var _ = strconv.Itoa +var _ = fmt.Sprintf +var _ = json.NewDecoder +var _ = io.Copy +var _ = url.Parse +var _ = gensupport.MarshalJSON +var _ = googleapi.Version +var _ = errors.New +var _ = strings.Replace +var _ = context.Canceled +var _ = internaloption.WithDefaultEndpoint + +const apiId = "storage:v1" +const apiName = "storage" +const apiVersion = "v1" +const basePath = "https://storage.googleapis.com/storage/v1/" + +// OAuth2 scopes used by this API. 
+const ( + // View and manage your data across Google Cloud Platform services + CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" + + // View your data across Google Cloud Platform services + CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" + + // Manage your data and permissions in Google Cloud Storage + DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control" + + // View your data in Google Cloud Storage + DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only" + + // Manage your data in Google Cloud Storage + DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write" +) + +// NewService creates a new Service. +func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { + scopesOption := option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write", + ) + // NOTE: prepend, so we don't override user-specified scopes. + opts = append([]option.ClientOption{scopesOption}, opts...) + opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + client, endpoint, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + s, err := New(client) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return s, nil +} + +// New creates a new Service. It uses the provided http.Client for requests. +// +// Deprecated: please use NewService instead. +// To provide a custom HTTP client, use option.WithHTTPClient. +// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. 
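+//
+// A minimal sketch of the legacy pattern, assuming the caller supplies its
+// own *http.Client (for example, one already carrying OAuth2 credentials):
+//
+//   client := &http.Client{}
+//   svc, err := storage.New(client)
+//   if err != nil {
+//       // handle the error
+//   }
+//   _ = svc.Buckets // per-resource services are initialized by New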
+func New(client *http.Client) (*Service, error) { + if client == nil { + return nil, errors.New("client is nil") + } + s := &Service{client: client, BasePath: basePath} + s.BucketAccessControls = NewBucketAccessControlsService(s) + s.Buckets = NewBucketsService(s) + s.Channels = NewChannelsService(s) + s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.Notifications = NewNotificationsService(s) + s.ObjectAccessControls = NewObjectAccessControlsService(s) + s.Objects = NewObjectsService(s) + s.Projects = NewProjectsService(s) + return s, nil +} + +type Service struct { + client *http.Client + BasePath string // API endpoint base URL + UserAgent string // optional additional User-Agent fragment + + BucketAccessControls *BucketAccessControlsService + + Buckets *BucketsService + + Channels *ChannelsService + + DefaultObjectAccessControls *DefaultObjectAccessControlsService + + Notifications *NotificationsService + + ObjectAccessControls *ObjectAccessControlsService + + Objects *ObjectsService + + Projects *ProjectsService +} + +func (s *Service) userAgent() string { + if s.UserAgent == "" { + return googleapi.UserAgent + } + return googleapi.UserAgent + " " + s.UserAgent +} + +func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService { + rs := &BucketAccessControlsService{s: s} + return rs +} + +type BucketAccessControlsService struct { + s *Service +} + +func NewBucketsService(s *Service) *BucketsService { + rs := &BucketsService{s: s} + return rs +} + +type BucketsService struct { + s *Service +} + +func NewChannelsService(s *Service) *ChannelsService { + rs := &ChannelsService{s: s} + return rs +} + +type ChannelsService struct { + s *Service +} + +func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService { + rs := &DefaultObjectAccessControlsService{s: s} + return rs +} + +type DefaultObjectAccessControlsService struct { + s *Service +} + +func NewNotificationsService(s *Service) *NotificationsService { + rs := &NotificationsService{s: s} + return rs +} + +type NotificationsService struct { + s *Service +} + +func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService { + rs := &ObjectAccessControlsService{s: s} + return rs +} + +type ObjectAccessControlsService struct { + s *Service +} + +func NewObjectsService(s *Service) *ObjectsService { + rs := &ObjectsService{s: s} + return rs +} + +type ObjectsService struct { + s *Service +} + +func NewProjectsService(s *Service) *ProjectsService { + rs := &ProjectsService{s: s} + rs.HmacKeys = NewProjectsHmacKeysService(s) + rs.ServiceAccount = NewProjectsServiceAccountService(s) + return rs +} + +type ProjectsService struct { + s *Service + + HmacKeys *ProjectsHmacKeysService + + ServiceAccount *ProjectsServiceAccountService +} + +func NewProjectsHmacKeysService(s *Service) *ProjectsHmacKeysService { + rs := &ProjectsHmacKeysService{s: s} + return rs +} + +type ProjectsHmacKeysService struct { + s *Service +} + +func NewProjectsServiceAccountService(s *Service) *ProjectsServiceAccountService { + rs := &ProjectsServiceAccountService{s: s} + return rs +} + +type ProjectsServiceAccountService struct { + s *Service +} + +// Bucket: A bucket. +type Bucket struct { + // Acl: Access controls on the bucket. + Acl []*BucketAccessControl `json:"acl,omitempty"` + + // Billing: The bucket's billing configuration. + Billing *BucketBilling `json:"billing,omitempty"` + + // Cors: The bucket's Cross-Origin Resource Sharing (CORS) + // configuration. 
+ Cors []*BucketCors `json:"cors,omitempty"` + + // DefaultEventBasedHold: The default value for event-based hold on + // newly created objects in this bucket. Event-based hold is a way to + // retain objects indefinitely until an event occurs, signified by the + // hold's release. After being released, such objects will be subject to + // bucket-level retention (if any). One sample use case of this flag is + // for banks to hold loan documents for at least 3 years after loan is + // paid in full. Here, bucket-level retention is 3 years and the event + // is loan being paid in full. In this example, these objects will be + // held intact for any number of years until the event has occurred + // (event-based hold on the object is released) and then 3 more years + // after that. That means retention duration of the objects begins from + // the moment event-based hold transitioned from true to false. Objects + // under event-based hold cannot be deleted, overwritten or archived + // until the hold is removed. + DefaultEventBasedHold bool `json:"defaultEventBasedHold,omitempty"` + + // DefaultObjectAcl: Default access controls to apply to new objects + // when no ACL is provided. + DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` + + // Encryption: Encryption configuration for a bucket. + Encryption *BucketEncryption `json:"encryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the bucket. + Etag string `json:"etag,omitempty"` + + // IamConfiguration: The bucket's IAM configuration. + IamConfiguration *BucketIamConfiguration `json:"iamConfiguration,omitempty"` + + // Id: The ID of the bucket. For buckets, the id and name properties are + // the same. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For buckets, this is always + // storage#bucket. + Kind string `json:"kind,omitempty"` + + // Labels: User-provided labels, in key/value pairs. + Labels map[string]string `json:"labels,omitempty"` + + // Lifecycle: The bucket's lifecycle configuration. See lifecycle + // management for more information. + Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"` + + // Location: The location of the bucket. Object data for objects in the + // bucket resides in physical storage within this region. Defaults to + // US. See the developer's guide for the authoritative list. + Location string `json:"location,omitempty"` + + // LocationType: The type of the bucket location. + LocationType string `json:"locationType,omitempty"` + + // Logging: The bucket's logging configuration, which defines the + // destination bucket and optional name prefix for the current bucket's + // logs. + Logging *BucketLogging `json:"logging,omitempty"` + + // Metageneration: The metadata generation of this bucket. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the bucket. + Name string `json:"name,omitempty"` + + // Owner: The owner of the bucket. This is always the project team's + // owner group. + Owner *BucketOwner `json:"owner,omitempty"` + + // ProjectNumber: The project number of the project the bucket belongs + // to. + ProjectNumber uint64 `json:"projectNumber,omitempty,string"` + + // RetentionPolicy: The bucket's retention policy. The retention policy + // enforces a minimum retention time for all objects contained in the + // bucket, based on their creation time. Any attempt to overwrite or + // delete objects younger than the retention period will result in a + // PERMISSION_DENIED error. 
An unlocked retention policy can be modified + // or removed from the bucket via a storage.buckets.update operation. A + // locked retention policy cannot be removed or shortened in duration + // for the lifetime of the bucket. Attempting to remove or decrease + // period of a locked retention policy will result in a + // PERMISSION_DENIED error. + RetentionPolicy *BucketRetentionPolicy `json:"retentionPolicy,omitempty"` + + // SelfLink: The URI of this bucket. + SelfLink string `json:"selfLink,omitempty"` + + // StorageClass: The bucket's default storage class, used whenever no + // storageClass is specified for a newly-created object. This defines + // how objects in the bucket are stored and determines the SLA and the + // cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, + // NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If + // this value is not specified when the bucket is created, it will + // default to STANDARD. For more information, see storage classes. + StorageClass string `json:"storageClass,omitempty"` + + // TimeCreated: The creation time of the bucket in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Updated: The modification time of the bucket in RFC 3339 format. + Updated string `json:"updated,omitempty"` + + // Versioning: The bucket's versioning configuration. + Versioning *BucketVersioning `json:"versioning,omitempty"` + + // Website: The bucket's website configuration, controlling how the + // service behaves when accessing bucket contents as a web site. See the + // Static Website Examples for more information. + Website *BucketWebsite `json:"website,omitempty"` + + // ZoneAffinity: The zone or zones from which the bucket is intended to + // use zonal quota. Requests for data from outside the specified + // affinities are still allowed but won't be able to use zonal quota. + // The zone or zones need to be within the bucket location otherwise the + // requests will fail with a 400 Bad Request response. + ZoneAffinity []string `json:"zoneAffinity,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Acl") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Bucket) MarshalJSON() ([]byte, error) { + type NoMethod Bucket + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketBilling: The bucket's billing configuration. +type BucketBilling struct { + // RequesterPays: When set to true, Requester Pays is enabled for this + // bucket. 
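+//
+// Because this field is tagged omitempty, an explicit false would be
+// dropped from the request JSON; listing it in ForceSendFields (below)
+// forces "requesterPays": false to be serialized. An editorial sketch,
+// not generated code:
+//
+//	billing := &storage.BucketBilling{
+//		RequesterPays:   false,
+//		ForceSendFields: []string{"RequesterPays"},
+//	}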
+ RequesterPays bool `json:"requesterPays,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RequesterPays") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "RequesterPays") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketBilling) MarshalJSON() ([]byte, error) { + type NoMethod BucketBilling + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BucketCors struct { + // MaxAgeSeconds: The value, in seconds, to return in the + // Access-Control-Max-Age header used in preflight responses. + MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"` + + // Method: The list of HTTP methods on which to include CORS response + // headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list + // of methods, and means "any method". + Method []string `json:"method,omitempty"` + + // Origin: The list of Origins eligible to receive CORS response + // headers. Note: "*" is permitted in the list of origins, and means + // "any Origin". + Origin []string `json:"origin,omitempty"` + + // ResponseHeader: The list of HTTP headers other than the simple + // response headers to give permission for the user-agent to share + // across domains. + ResponseHeader []string `json:"responseHeader,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MaxAgeSeconds") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketCors) MarshalJSON() ([]byte, error) { + type NoMethod BucketCors + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketEncryption: Encryption configuration for a bucket. +type BucketEncryption struct { + // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt + // objects inserted into this bucket, if no encryption method is + // specified. + DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"DefaultKmsKeyName") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DefaultKmsKeyName") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BucketEncryption) MarshalJSON() ([]byte, error) { + type NoMethod BucketEncryption + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketIamConfiguration: The bucket's IAM configuration. +type BucketIamConfiguration struct { + // BucketPolicyOnly: The bucket's uniform bucket-level access + // configuration. The feature was formerly known as Bucket Policy Only. + // For backward compatibility, this field will be populated with + // identical information as the uniformBucketLevelAccess field. We + // recommend using the uniformBucketLevelAccess field to enable and + // disable the feature. + BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` + + // UniformBucketLevelAccess: The bucket's uniform bucket-level access + // configuration. + UniformBucketLevelAccess *BucketIamConfigurationUniformBucketLevelAccess `json:"uniformBucketLevelAccess,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BucketPolicyOnly") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "BucketPolicyOnly") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BucketIamConfiguration) MarshalJSON() ([]byte, error) { + type NoMethod BucketIamConfiguration + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketIamConfigurationBucketPolicyOnly: The bucket's uniform +// bucket-level access configuration. The feature was formerly known as +// Bucket Policy Only. For backward compatibility, this field will be +// populated with identical information as the uniformBucketLevelAccess +// field. We recommend using the uniformBucketLevelAccess field to +// enable and disable the feature. +type BucketIamConfigurationBucketPolicyOnly struct { + // Enabled: If set, access is controlled only by bucket-level or above + // IAM policies. 
+ Enabled bool `json:"enabled,omitempty"` + + // LockedTime: The deadline for changing + // iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC + // 3339 format. iamConfiguration.bucketPolicyOnly.enabled may be changed + // from true to false until the locked time, after which the field is + // immutable. + LockedTime string `json:"lockedTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketIamConfigurationBucketPolicyOnly) MarshalJSON() ([]byte, error) { + type NoMethod BucketIamConfigurationBucketPolicyOnly + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketIamConfigurationUniformBucketLevelAccess: The bucket's uniform +// bucket-level access configuration. +type BucketIamConfigurationUniformBucketLevelAccess struct { + // Enabled: If set, access is controlled only by bucket-level or above + // IAM policies. + Enabled bool `json:"enabled,omitempty"` + + // LockedTime: The deadline for changing + // iamConfiguration.uniformBucketLevelAccess.enabled from true to false + // in RFC 3339 format. + // iamConfiguration.uniformBucketLevelAccess.enabled may be changed from + // true to false until the locked time, after which the field is + // immutable. + LockedTime string `json:"lockedTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketIamConfigurationUniformBucketLevelAccess) MarshalJSON() ([]byte, error) { + type NoMethod BucketIamConfigurationUniformBucketLevelAccess + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle +// management for more information. 
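+//
+// Editorial sketch, not generated code: a single rule that deletes
+// objects once they are 30 days old, built from the types that follow:
+//
+//	lifecycle := &storage.BucketLifecycle{
+//		Rule: []*storage.BucketLifecycleRule{{
+//			Action:    &storage.BucketLifecycleRuleAction{Type: "Delete"},
+//			Condition: &storage.BucketLifecycleRuleCondition{Age: 30},
+//		}},
+//	}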
+type BucketLifecycle struct { + // Rule: A lifecycle management rule, which is made of an action to take + // and the condition(s) under which the action will be taken. + Rule []*BucketLifecycleRule `json:"rule,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Rule") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Rule") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycle) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycle + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type BucketLifecycleRule struct { + // Action: The action to take. + Action *BucketLifecycleRuleAction `json:"action,omitempty"` + + // Condition: The condition(s) under which the action will be taken. + Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Action") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Action") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRule + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleAction: The action to take. +type BucketLifecycleRuleAction struct { + // StorageClass: Target storage class. Required iff the type of the + // action is SetStorageClass. + StorageClass string `json:"storageClass,omitempty"` + + // Type: Type of the action. Currently, only Delete and SetStorageClass + // are supported. + Type string `json:"type,omitempty"` + + // ForceSendFields is a list of field names (e.g. "StorageClass") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "StorageClass") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRuleAction + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLifecycleRuleCondition: The condition(s) under which the action +// will be taken. +type BucketLifecycleRuleCondition struct { + // Age: Age of an object (in days). This condition is satisfied when an + // object reaches the specified age. + Age int64 `json:"age,omitempty"` + + // CreatedBefore: A date in RFC 3339 format with only the date part (for + // instance, "2013-01-15"). This condition is satisfied when an object + // is created before midnight of the specified date in UTC. + CreatedBefore string `json:"createdBefore,omitempty"` + + // CustomTimeBefore: A date in RFC 3339 format with only the date part + // (for instance, "2013-01-15"). This condition is satisfied when the + // custom time on an object is before this date in UTC. + CustomTimeBefore string `json:"customTimeBefore,omitempty"` + + // DaysSinceCustomTime: Number of days elapsed since the user-specified + // timestamp set on an object. The condition is satisfied if the days + // elapsed is at least this number. If no custom timestamp is specified + // on an object, the condition does not apply. + DaysSinceCustomTime int64 `json:"daysSinceCustomTime,omitempty"` + + // DaysSinceNoncurrentTime: Number of days elapsed since the noncurrent + // timestamp of an object. The condition is satisfied if the days + // elapsed is at least this number. This condition is relevant only for + // versioned objects. The value of the field must be a nonnegative + // integer. If it's zero, the object version will become eligible for + // Lifecycle action as soon as it becomes noncurrent. + DaysSinceNoncurrentTime int64 `json:"daysSinceNoncurrentTime,omitempty"` + + // IsLive: Relevant only for versioned objects. If the value is true, + // this condition matches live objects; if the value is false, it + // matches archived objects. + IsLive *bool `json:"isLive,omitempty"` + + // MatchesPattern: A regular expression that satisfies the RE2 syntax. + // This condition is satisfied when the name of the object matches the + // RE2 pattern. Note: This feature is currently in the "Early Access" + // launch stage and is only available to a whitelisted set of users; + // that means that this feature may be changed in backward-incompatible + // ways and that it is not guaranteed to be released. + MatchesPattern string `json:"matchesPattern,omitempty"` + + // MatchesStorageClass: Objects having any of the storage classes + // specified by this condition will be matched. Values include + // MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, and + // DURABLE_REDUCED_AVAILABILITY. + MatchesStorageClass []string `json:"matchesStorageClass,omitempty"` + + // NoncurrentTimeBefore: A date in RFC 3339 format with only the date + // part (for instance, "2013-01-15"). 
This condition is satisfied when + // the noncurrent time on an object is before this date in UTC. This + // condition is relevant only for versioned objects. + NoncurrentTimeBefore string `json:"noncurrentTimeBefore,omitempty"` + + // NumNewerVersions: Relevant only for versioned objects. If the value + // is N, this condition is satisfied when there are at least N versions + // (including the live version) newer than this version of the object. + NumNewerVersions int64 `json:"numNewerVersions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Age") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Age") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) { + type NoMethod BucketLifecycleRuleCondition + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketLogging: The bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // LogBucket: The destination bucket where the current bucket's logs + // should be placed. + LogBucket string `json:"logBucket,omitempty"` + + // LogObjectPrefix: A prefix for log object names. + LogObjectPrefix string `json:"logObjectPrefix,omitempty"` + + // ForceSendFields is a list of field names (e.g. "LogBucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "LogBucket") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketLogging) MarshalJSON() ([]byte, error) { + type NoMethod BucketLogging + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketOwner: The owner of the bucket. This is always the project +// team's owner group. +type BucketOwner struct { + // Entity: The entity, in the form project-owner-projectId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketOwner) MarshalJSON() ([]byte, error) { + type NoMethod BucketOwner + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketRetentionPolicy: The bucket's retention policy. The retention +// policy enforces a minimum retention time for all objects contained in +// the bucket, based on their creation time. Any attempt to overwrite or +// delete objects younger than the retention period will result in a +// PERMISSION_DENIED error. An unlocked retention policy can be modified +// or removed from the bucket via a storage.buckets.update operation. A +// locked retention policy cannot be removed or shortened in duration +// for the lifetime of the bucket. Attempting to remove or decrease +// period of a locked retention policy will result in a +// PERMISSION_DENIED error. +type BucketRetentionPolicy struct { + // EffectiveTime: Server-determined value that indicates the time from + // which policy was enforced and effective. This value is in RFC 3339 + // format. + EffectiveTime string `json:"effectiveTime,omitempty"` + + // IsLocked: Once locked, an object retention policy cannot be modified. + IsLocked bool `json:"isLocked,omitempty"` + + // RetentionPeriod: The duration in seconds that objects need to be + // retained. Retention duration must be greater than zero and less than + // 100 years. Note that enforcement of retention periods less than a day + // is not guaranteed. Such periods should only be used for testing + // purposes. + RetentionPeriod int64 `json:"retentionPeriod,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "EffectiveTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EffectiveTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { + type NoMethod BucketRetentionPolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketVersioning: The bucket's versioning configuration. +type BucketVersioning struct { + // Enabled: While set to true, versioning is fully enabled for this + // bucket. + Enabled bool `json:"enabled,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Enabled") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Enabled") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketVersioning) MarshalJSON() ([]byte, error) { + type NoMethod BucketVersioning + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketWebsite: The bucket's website configuration, controlling how +// the service behaves when accessing bucket contents as a web site. See +// the Static Website Examples for more information. +type BucketWebsite struct { + // MainPageSuffix: If the requested object path is missing, the service + // will ensure the path has a trailing '/', append this suffix, and + // attempt to retrieve the resulting object. This allows the creation of + // index.html objects to represent directory pages. + MainPageSuffix string `json:"mainPageSuffix,omitempty"` + + // NotFoundPage: If the requested object path is missing, and any + // mainPageSuffix object is missing, if applicable, the service will + // return the named object from this bucket as the content for a 404 Not + // Found result. + NotFoundPage string `json:"notFoundPage,omitempty"` + + // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "MainPageSuffix") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. 
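+//
+// Editorial sketch, not generated code: a typical static-site setup where
+// index.html is appended to directory-style requests and 404.html is
+// served for missing objects:
+//
+//	website := &storage.BucketWebsite{
+//		MainPageSuffix: "index.html",
+//		NotFoundPage:   "404.html",
+//	}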
+ NullFields []string `json:"-"` +} + +func (s *BucketWebsite) MarshalJSON() ([]byte, error) { + type NoMethod BucketWebsite + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControl: An access-control entry. +type BucketAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For bucket access control entries, + // this is always storage#bucketAccessControl. + Kind string `json:"kind,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControl) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControl + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControlProjectTeam: The project team associated with the +// entity, if any. +type BucketAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectNumber") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControlProjectTeam + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// BucketAccessControls: An access-control list. +type BucketAccessControls struct { + // Items: The list of items. + Items []*BucketAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of bucket access control + // entries, this is always storage#bucketAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketAccessControls) MarshalJSON() ([]byte, error) { + type NoMethod BucketAccessControls + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Buckets: A list of buckets. +type Buckets struct { + // Items: The list of items. + Items []*Bucket `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of buckets, this is always + // storage#buckets. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Buckets) MarshalJSON() ([]byte, error) { + type NoMethod Buckets + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Channel: A notification channel used to watch for resource changes. +type Channel struct { + // Address: The address where notifications are delivered for this + // channel. + Address string `json:"address,omitempty"` + + // Expiration: Date and time of notification channel expiration, + // expressed as a Unix timestamp, in milliseconds. Optional. + Expiration int64 `json:"expiration,omitempty,string"` + + // Id: A UUID or similar unique string that identifies this channel. + Id string `json:"id,omitempty"` + + // Kind: Identifies this as a notification channel used to watch for + // changes to a resource, which is "api#channel". + Kind string `json:"kind,omitempty"` + + // Params: Additional parameters controlling delivery channel behavior. + // Optional. + Params map[string]string `json:"params,omitempty"` + + // Payload: A Boolean value to indicate whether payload is wanted. + // Optional. + Payload bool `json:"payload,omitempty"` + + // ResourceId: An opaque ID that identifies the resource being watched + // on this channel. Stable across different API versions. + ResourceId string `json:"resourceId,omitempty"` + + // ResourceUri: A version-specific identifier for the watched resource. + ResourceUri string `json:"resourceUri,omitempty"` + + // Token: An arbitrary string delivered to the target address with each + // notification delivered over this channel. Optional. + Token string `json:"token,omitempty"` + + // Type: The type of delivery mechanism used for this channel. + Type string `json:"type,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Address") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Address") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests.
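+//
+// Editorial sketch, not generated code: a webhook channel of the kind
+// that might be passed to a watch request. Id and Address are
+// placeholders; Expiration is Unix milliseconds:
+//
+//	ch := &storage.Channel{
+//		Id:         "my-channel-id",
+//		Type:       "web_hook",
+//		Address:    "https://example.com/notifications",
+//		Expiration: time.Now().Add(time.Hour).UnixMilli(),
+//	}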
+ NullFields []string `json:"-"` +} + +func (s *Channel) MarshalJSON() ([]byte, error) { + type NoMethod Channel + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ComposeRequest: A Compose request. +type ComposeRequest struct { + // Destination: Properties of the resulting object. + Destination *Object `json:"destination,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // SourceObjects: The list of source objects that will be concatenated + // into a single object. + SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Destination") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Destination") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ComposeRequest) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type ComposeRequestSourceObjects struct { + // Generation: The generation of this object to use as the source. + Generation int64 `json:"generation,omitempty,string"` + + // Name: The source object's name. All source objects must reside in the + // same bucket. + Name string `json:"name,omitempty"` + + // ObjectPreconditions: Conditions that must be met for this operation + // to execute. + ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Generation") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Generation") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequestSourceObjects + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must +// be met for this operation to execute. 
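+//
+// Editorial sketch, not generated code: composing two source objects into
+// one destination object, with a generation precondition on the first
+// source (all names and the generation number are placeholders):
+//
+//	precond := &storage.ComposeRequestSourceObjectsObjectPreconditions{
+//		IfGenerationMatch: 1234,
+//	}
+//	req := &storage.ComposeRequest{
+//		Destination: &storage.Object{Bucket: "my-bucket", Name: "combined.log"},
+//		SourceObjects: []*storage.ComposeRequestSourceObjects{
+//			{Name: "part-1.log", ObjectPreconditions: precond},
+//			{Name: "part-2.log"},
+//		},
+//	}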
+type ComposeRequestSourceObjectsObjectPreconditions struct { + // IfGenerationMatch: Only perform the composition if the generation of + // the source object that would be used matches this value. If this + // value and a generation are both specified, they must be the same + // value or the call will fail. + IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "IfGenerationMatch") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "IfGenerationMatch") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) { + type NoMethod ComposeRequestSourceObjectsObjectPreconditions + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Expr: Represents an expression text. Example: title: "User account +// presence" description: "Determines whether the request has a user +// account" expression: "size(request.user) > 0" +type Expr struct { + // Description: An optional description of the expression. This is a + // longer text which describes the expression, e.g. when hovered over it + // in a UI. + Description string `json:"description,omitempty"` + + // Expression: Textual representation of an expression in Common + // Expression Language syntax. The application context of the containing + // message determines which well-known feature set of CEL is supported. + Expression string `json:"expression,omitempty"` + + // Location: An optional string indicating the location of the + // expression for error reporting, e.g. a file name and a position in + // the file. + Location string `json:"location,omitempty"` + + // Title: An optional title for the expression, i.e. a short string + // describing its purpose. This can be used e.g. in UIs which allow to + // enter the expression. + Title string `json:"title,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Description") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Description") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. 
+ // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Expr) MarshalJSON() ([]byte, error) { + type NoMethod Expr + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HmacKey: JSON template to produce a JSON-style HMAC Key resource for +// Create responses. +type HmacKey struct { + // Kind: The kind of item this is. For HMAC keys, this is always + // storage#hmacKey. + Kind string `json:"kind,omitempty"` + + // Metadata: Key metadata. + Metadata *HmacKeyMetadata `json:"metadata,omitempty"` + + // Secret: HMAC secret key material. + Secret string `json:"secret,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Kind") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HmacKey) MarshalJSON() ([]byte, error) { + type NoMethod HmacKey + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HmacKeyMetadata: JSON template to produce a JSON-style HMAC Key +// metadata resource. +type HmacKeyMetadata struct { + // AccessId: The ID of the HMAC Key. + AccessId string `json:"accessId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the HMAC key. + Etag string `json:"etag,omitempty"` + + // Id: The ID of the HMAC key, including the Project ID and the Access + // ID. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For HMAC Key metadata, this is always + // storage#hmacKeyMetadata. + Kind string `json:"kind,omitempty"` + + // ProjectId: Project ID owning the service account to which the key + // authenticates. + ProjectId string `json:"projectId,omitempty"` + + // SelfLink: The link to this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServiceAccountEmail: The email address of the key's associated + // service account. + ServiceAccountEmail string `json:"serviceAccountEmail,omitempty"` + + // State: The state of the key. Can be one of ACTIVE, INACTIVE, or + // DELETED. + State string `json:"state,omitempty"` + + // TimeCreated: The creation time of the HMAC key in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // Updated: The last modification time of the HMAC key metadata in RFC + // 3339 format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "AccessId") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AccessId") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HmacKeyMetadata) MarshalJSON() ([]byte, error) { + type NoMethod HmacKeyMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// HmacKeysMetadata: A list of hmacKeys. +type HmacKeysMetadata struct { + // Items: The list of items. + Items []*HmacKeyMetadata `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of hmacKeys, this is always + // storage#hmacKeysMetadata. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { + type NoMethod HmacKeysMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notification: A subscription to receive Google PubSub notifications. +type Notification struct { + // CustomAttributes: An optional list of additional attributes to attach + // to each Cloud PubSub message published for this notification + // subscription. + CustomAttributes map[string]string `json:"custom_attributes,omitempty"` + + // Etag: HTTP 1.1 Entity tag for this subscription notification. + Etag string `json:"etag,omitempty"` + + // EventTypes: If present, only send notifications about listed event + // types. If empty, send notifications for all event types. + EventTypes []string `json:"event_types,omitempty"` + + // Id: The ID of the notification. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For notifications, this is always + // storage#notification.
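+//
+// Editorial sketch, not generated code: a configuration that publishes
+// JSON payloads for newly finalized objects; the project and topic are
+// placeholders (see the Topic format documented below):
+//
+//	n := &storage.Notification{
+//		Topic:         "//pubsub.googleapis.com/projects/my-project/topics/my-topic",
+//		PayloadFormat: "JSON_API_V1",
+//		EventTypes:    []string{"OBJECT_FINALIZE"},
+//	}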
+ Kind string `json:"kind,omitempty"` + + // ObjectNamePrefix: If present, only apply this notification + // configuration to object names that begin with this prefix. + ObjectNamePrefix string `json:"object_name_prefix,omitempty"` + + // PayloadFormat: The desired content of the Payload. + PayloadFormat string `json:"payload_format,omitempty"` + + // SelfLink: The canonical URL of this notification. + SelfLink string `json:"selfLink,omitempty"` + + // Topic: The Cloud PubSub topic to which this subscription publishes. + // Formatted as: + // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topi + // c}' + Topic string `json:"topic,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "CustomAttributes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CustomAttributes") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *Notification) MarshalJSON() ([]byte, error) { + type NoMethod Notification + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Notifications: A list of notification subscriptions. +type Notifications struct { + // Items: The list of items. + Items []*Notification `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of notifications, this is + // always storage#notifications. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Notifications) MarshalJSON() ([]byte, error) { + type NoMethod Notifications + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Object: An object. +type Object struct { + // Acl: Access controls on the object. 
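+
+// A short, illustrative sketch of wiring up a notification configuration
+// such as the Notification type above, assuming a *Service value svc and
+// the NotificationsService generated elsewhere in this file (bucket,
+// project, and topic names are placeholders):
+//
+//	n := &Notification{
+//		Topic:            "//pubsub.googleapis.com/projects/my-project/topics/my-topic",
+//		PayloadFormat:    "JSON_API_V1",
+//		EventTypes:       []string{"OBJECT_FINALIZE"},
+//		CustomAttributes: map[string]string{"env": "prod"},
+//	}
+//	created, err := svc.Notifications.Insert("my-bucket", n).Do()
+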
+ Acl []*ObjectAccessControl `json:"acl,omitempty"` + + // Bucket: The name of the bucket containing this object. + Bucket string `json:"bucket,omitempty"` + + // CacheControl: Cache-Control directive for the object data. If + // omitted, and the object is accessible to all anonymous users, the + // default will be public, max-age=3600. + CacheControl string `json:"cacheControl,omitempty"` + + // ComponentCount: Number of underlying components that make up this + // object. Components are accumulated by compose operations. + ComponentCount int64 `json:"componentCount,omitempty"` + + // ContentDisposition: Content-Disposition of the object data. + ContentDisposition string `json:"contentDisposition,omitempty"` + + // ContentEncoding: Content-Encoding of the object data. + ContentEncoding string `json:"contentEncoding,omitempty"` + + // ContentLanguage: Content-Language of the object data. + ContentLanguage string `json:"contentLanguage,omitempty"` + + // ContentType: Content-Type of the object data. If an object is stored + // without a Content-Type, it is served as application/octet-stream. + ContentType string `json:"contentType,omitempty"` + + // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; + // encoded using base64 in big-endian byte order. For more information + // about using the CRC32c checksum, see Hashes and ETags: Best + // Practices. + Crc32c string `json:"crc32c,omitempty"` + + // CustomTime: A timestamp in RFC 3339 format specified by the user for + // an object. + CustomTime string `json:"customTime,omitempty"` + + // CustomerEncryption: Metadata of customer-supplied encryption key, if + // the object is encrypted by such a key. + CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the object. + Etag string `json:"etag,omitempty"` + + // EventBasedHold: Whether an object is under event-based hold. + // Event-based hold is a way to retain objects until an event occurs, + // which is signified by the hold's release (i.e. this value is set to + // false). After being released (set to false), such objects will be + // subject to bucket-level retention (if any). One sample use case of + // this flag is for banks to hold loan documents for at least 3 years + // after loan is paid in full. Here, bucket-level retention is 3 years + // and the event is the loan being paid in full. In this example, these + // objects will be held intact for any number of years until the event + // has occurred (event-based hold on the object is released) and then 3 + // more years after that. That means retention duration of the objects + // begins from the moment event-based hold transitioned from true to + // false. + EventBasedHold bool `json:"eventBasedHold,omitempty"` + + // Generation: The content generation of this object. Used for object + // versioning. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the object, including the bucket name, object name, and + // generation number. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For objects, this is always + // storage#object. + Kind string `json:"kind,omitempty"` + + // KmsKeyName: Not currently supported. Specifying the parameter causes + // the request to fail with status code 400 - Bad Request. + KmsKeyName string `json:"kmsKeyName,omitempty"` + + // Md5Hash: MD5 hash of the data; encoded using base64. For more + // information about using the MD5 hash, see Hashes and ETags: Best + // Practices. 
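+	//
+	// A minimal sketch of producing the Crc32c and Md5Hash values above
+	// from a byte slice, using only the standard library (crypto/md5,
+	// encoding/base64, encoding/binary, hash/crc32); illustrative, not
+	// part of the generated API:
+	//
+	//	sum := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
+	//	buf := make([]byte, 4)
+	//	binary.BigEndian.PutUint32(buf, sum) // big-endian, as documented
+	//	crc32c := base64.StdEncoding.EncodeToString(buf)
+	//	d := md5.Sum(data)
+	//	md5Hash := base64.StdEncoding.EncodeToString(d[:])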
+ Md5Hash string `json:"md5Hash,omitempty"` + + // MediaLink: Media download link. + MediaLink string `json:"mediaLink,omitempty"` + + // Metadata: User-provided metadata, in key/value pairs. + Metadata map[string]string `json:"metadata,omitempty"` + + // Metageneration: The version of the metadata for this object at this + // generation. Used for preconditions and for detecting changes in + // metadata. A metageneration number is only meaningful in the context + // of a particular generation of a particular object. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the object. Required if not specified by URL + // parameter. + Name string `json:"name,omitempty"` + + // Owner: The owner of the object. This will always be the uploader of + // the object. + Owner *ObjectOwner `json:"owner,omitempty"` + + // RetentionExpirationTime: A server-determined value that specifies the + // earliest time that the object's retention period expires. This value + // is in RFC 3339 format. Note 1: This field is not provided for objects + // with an active event-based hold, since retention expiration is + // unknown until the hold is removed. Note 2: This value can be provided + // even when temporary hold is set (so that the user can reason about + // policy without having to first unset the temporary hold). + RetentionExpirationTime string `json:"retentionExpirationTime,omitempty"` + + // SelfLink: The link to this object. + SelfLink string `json:"selfLink,omitempty"` + + // Size: Content-Length of the data in bytes. + Size uint64 `json:"size,omitempty,string"` + + // StorageClass: Storage class of the object. + StorageClass string `json:"storageClass,omitempty"` + + // TemporaryHold: Whether an object is under temporary hold. While this + // flag is set to true, the object is protected against deletion and + // overwrites. A common use case of this flag is regulatory + // investigations where objects need to be retained while the + // investigation is ongoing. Note that unlike event-based hold, + // temporary hold does not impact retention expiration time of an + // object. + TemporaryHold bool `json:"temporaryHold,omitempty"` + + // TimeCreated: The creation time of the object in RFC 3339 format. + TimeCreated string `json:"timeCreated,omitempty"` + + // TimeDeleted: The deletion time of the object in RFC 3339 format. Will + // be returned if and only if this version of the object has been + // deleted. + TimeDeleted string `json:"timeDeleted,omitempty"` + + // TimeStorageClassUpdated: The time at which the object's storage class + // was last changed. When the object is initially created, it will be + // set to timeCreated. + TimeStorageClassUpdated string `json:"timeStorageClassUpdated,omitempty"` + + // Updated: The modification time of the object metadata in RFC 3339 + // format. + Updated string `json:"updated,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Acl") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. 
"Acl") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Object) MarshalJSON() ([]byte, error) { + type NoMethod Object + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectCustomerEncryption: Metadata of customer-supplied encryption +// key, if the object is encrypted by such a key. +type ObjectCustomerEncryption struct { + // EncryptionAlgorithm: The encryption algorithm. + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + + // KeySha256: SHA256 hash value of the encryption key. + KeySha256 string `json:"keySha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EncryptionAlgorithm") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) { + type NoMethod ObjectCustomerEncryption + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectOwner: The owner of the object. This will always be the +// uploader of the object. +type ObjectOwner struct { + // Entity: The entity, in the form user-userId. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity. + EntityId string `json:"entityId,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Entity") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Entity") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *ObjectOwner) MarshalJSON() ([]byte, error) { + type NoMethod ObjectOwner + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControl: An access-control entry. +type ObjectAccessControl struct { + // Bucket: The name of the bucket. + Bucket string `json:"bucket,omitempty"` + + // Domain: The domain associated with the entity, if any. + Domain string `json:"domain,omitempty"` + + // Email: The email address associated with the entity, if any. + Email string `json:"email,omitempty"` + + // Entity: The entity holding the permission, in one of the following + // forms: + // - user-userId + // - user-email + // - group-groupId + // - group-email + // - domain-domain + // - project-team-projectId + // - allUsers + // - allAuthenticatedUsers Examples: + // - The user liz@example.com would be user-liz@example.com. + // - The group example@googlegroups.com would be + // group-example@googlegroups.com. + // - To refer to all members of the Google Apps for Business domain + // example.com, the entity would be domain-example.com. + Entity string `json:"entity,omitempty"` + + // EntityId: The ID for the entity, if any. + EntityId string `json:"entityId,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the access-control entry. + Etag string `json:"etag,omitempty"` + + // Generation: The content generation of the object, if applied to an + // object. + Generation int64 `json:"generation,omitempty,string"` + + // Id: The ID of the access-control entry. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For object access control entries, + // this is always storage#objectAccessControl. + Kind string `json:"kind,omitempty"` + + // Object: The name of the object, if applied to an object. + Object string `json:"object,omitempty"` + + // ProjectTeam: The project team associated with the entity, if any. + ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"` + + // Role: The access permission for the entity. + Role string `json:"role,omitempty"` + + // SelfLink: The link to this access-control entry. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControl + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControlProjectTeam: The project team associated with the +// entity, if any. 
+type ObjectAccessControlProjectTeam struct { + // ProjectNumber: The project number. + ProjectNumber string `json:"projectNumber,omitempty"` + + // Team: The team. + Team string `json:"team,omitempty"` + + // ForceSendFields is a list of field names (e.g. "ProjectNumber") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "ProjectNumber") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControlProjectTeam + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ObjectAccessControls: An access-control list. +type ObjectAccessControls struct { + // Items: The list of items. + Items []*ObjectAccessControl `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of object access control + // entries, this is always storage#objectAccessControls. + Kind string `json:"kind,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) { + type NoMethod ObjectAccessControls + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Objects: A list of objects. +type Objects struct { + // Items: The list of items. + Items []*Object `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of objects, this is always + // storage#objects. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Prefixes: The list of prefixes of objects matching-but-not-listed up + // to and including the requested delimiter. 
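+
+// A minimal paging sketch for list results such as this Objects type: the
+// NextPageToken of one response becomes the PageToken of the next request.
+// Illustrative only; it assumes a *Service value svc, the ObjectsService
+// generated elsewhere in this file, and a surrounding function that can
+// return an error:
+//
+//	var all []*Object
+//	tok := ""
+//	for {
+//		page, err := svc.Objects.List("my-bucket").PageToken(tok).Do()
+//		if err != nil {
+//			return err
+//		}
+//		all = append(all, page.Items...)
+//		tok = page.NextPageToken
+//		if tok == "" {
+//			break // no more pages
+//		}
+//	}
+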
+ Prefixes []string `json:"prefixes,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *Objects) MarshalJSON() ([]byte, error) { + type NoMethod Objects + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// Policy: A bucket/object IAM policy. +type Policy struct { + // Bindings: An association between a role, which comes with a set of + // permissions, and members who may assume that role. + Bindings []*PolicyBindings `json:"bindings,omitempty"` + + // Etag: HTTP 1.1 Entity tag for the policy. + Etag string `json:"etag,omitempty"` + + // Kind: The kind of item this is. For policies, this is always + // storage#policy. This field is ignored on input. + Kind string `json:"kind,omitempty"` + + // ResourceId: The ID of the resource to which this policy belongs. Will + // be of the form projects/_/buckets/bucket for buckets, and + // projects/_/buckets/bucket/objects/object for objects. A specific + // generation may be specified by appending #generationNumber to the end + // of the object name, e.g. + // projects/_/buckets/my-bucket/objects/data.txt#17. The current + // generation can be denoted with #0. This field is ignored on input. + ResourceId string `json:"resourceId,omitempty"` + + // Version: The IAM policy format version. + Version int64 `json:"version,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bindings") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bindings") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
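+
+// A Policy (this type) is typically read, modified, and written back. A
+// minimal sketch of granting a role through the bindings described above,
+// assuming a *Service value svc, the BucketsService generated elsewhere in
+// this file, and placeholder bucket and member names:
+//
+//	policy, err := svc.Buckets.GetIamPolicy("my-bucket").Do()
+//	if err != nil {
+//		return err
+//	}
+//	policy.Bindings = append(policy.Bindings, &PolicyBindings{
+//		Role:    "roles/storage.objectViewer",
+//		Members: []string{"user:alice@example.com"},
+//	})
+//	_, err = svc.Buckets.SetIamPolicy("my-bucket", policy).Do()
+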
+ NullFields []string `json:"-"` +} + +func (s *Policy) MarshalJSON() ([]byte, error) { + type NoMethod Policy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +type PolicyBindings struct { + // Condition: The condition that is associated with this binding. NOTE: + // an unsatisfied condition will not allow user access via current + // binding. Different bindings, including their conditions, are examined + // independently. + Condition *Expr `json:"condition,omitempty"` + + // Members: A collection of identifiers for members who may assume the + // provided role. Recognized identifiers are as follows: + // - allUsers — A special identifier that represents anyone on the + // internet; with or without a Google account. + // - allAuthenticatedUsers — A special identifier that represents + // anyone who is authenticated with a Google account or a service + // account. + // - user:emailid — An email address that represents a specific + // account. For example, user:alice@gmail.com or user:joe@example.com. + // + // - serviceAccount:emailid — An email address that represents a + // service account. For example, + // serviceAccount:my-other-app@appspot.gserviceaccount.com . + // - group:emailid — An email address that represents a Google group. + // For example, group:admins@example.com. + // - domain:domain — A Google Apps domain name that represents all the + // users of that domain. For example, domain:google.com or + // domain:example.com. + // - projectOwner:projectid — Owners of the given project. For + // example, projectOwner:my-example-project + // - projectEditor:projectid — Editors of the given project. For + // example, projectEditor:my-example-project + // - projectViewer:projectid — Viewers of the given project. For + // example, projectViewer:my-example-project + Members []string `json:"members,omitempty"` + + // Role: The role to which members belong. Two types of roles are + // supported: new IAM roles, which grant permissions that do not map + // directly to those provided by ACLs, and legacy IAM roles, which do + // map directly to ACL permissions. All roles are of the format + // roles/storage.specificRole. + // The new IAM roles are: + // - roles/storage.admin — Full control of Google Cloud Storage + // resources. + // - roles/storage.objectViewer — Read-Only access to Google Cloud + // Storage objects. + // - roles/storage.objectCreator — Access to create objects in Google + // Cloud Storage. + // - roles/storage.objectAdmin — Full control of Google Cloud Storage + // objects. The legacy IAM roles are: + // - roles/storage.legacyObjectReader — Read-only access to objects + // without listing. Equivalent to an ACL entry on an object with the + // READER role. + // - roles/storage.legacyObjectOwner — Read/write access to existing + // objects without listing. Equivalent to an ACL entry on an object with + // the OWNER role. + // - roles/storage.legacyBucketReader — Read access to buckets with + // object listing. Equivalent to an ACL entry on a bucket with the + // READER role. + // - roles/storage.legacyBucketWriter — Read access to buckets with + // object listing/creation/deletion. Equivalent to an ACL entry on a + // bucket with the WRITER role. + // - roles/storage.legacyBucketOwner — Read and write access to + // existing buckets with object listing/creation/deletion. Equivalent to + // an ACL entry on a bucket with the OWNER role. + Role string `json:"role,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"Condition") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Condition") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *PolicyBindings) MarshalJSON() ([]byte, error) { + type NoMethod PolicyBindings + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// RewriteResponse: A rewrite response. +type RewriteResponse struct { + // Done: true if the copy is finished; otherwise, false if the copy is + // in progress. This property is always present in the response. + Done bool `json:"done,omitempty"` + + // Kind: The kind of item this is. + Kind string `json:"kind,omitempty"` + + // ObjectSize: The total size of the object being copied in bytes. This + // property is always present in the response. + ObjectSize int64 `json:"objectSize,omitempty,string"` + + // Resource: A resource containing the metadata for the copied-to + // object. This property is present in the response only when copying + // completes. + Resource *Object `json:"resource,omitempty"` + + // RewriteToken: A token to use in subsequent requests to continue + // copying data. This token is present in the response only when there + // is more data to copy. + RewriteToken string `json:"rewriteToken,omitempty"` + + // TotalBytesRewritten: The total bytes written so far, which can be + // used to provide a waiting user with a progress indicator. This + // property is always present in the response. + TotalBytesRewritten int64 `json:"totalBytesRewritten,omitempty,string"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *RewriteResponse) MarshalJSON() ([]byte, error) { + type NoMethod RewriteResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ServiceAccount: A subscription to receive Google PubSub +// notifications. 
+// ServiceAccount: A service account, owned by Cloud Storage, that is
+// used to perform operations on behalf of a given project, for example
+// to publish Cloud PubSub notifications.
+type ServiceAccount struct {
+	// EmailAddress: The email address of the service account.
+	EmailAddress string `json:"email_address,omitempty"`
+
+	// Kind: The kind of item this is. For service accounts, this is always
+	// storage#serviceAccount.
+	Kind string `json:"kind,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "EmailAddress") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "EmailAddress") to include
+	// in API requests with the JSON null value. By default, fields with
+	// empty values are omitted from API requests. However, any field with
+	// an empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *ServiceAccount) MarshalJSON() ([]byte, error) {
+	type NoMethod ServiceAccount
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// TestIamPermissionsResponse: A
+// storage.(buckets|objects).testIamPermissions response.
+type TestIamPermissionsResponse struct {
+	// Kind: The kind of item this is.
+	Kind string `json:"kind,omitempty"`
+
+	// Permissions: The permissions held by the caller. Permissions are
+	// always of the format storage.resource.capability, where resource is
+	// one of buckets or objects. The supported permissions are as follows:
+	//
+	// - storage.buckets.delete — Delete bucket.
+	// - storage.buckets.get — Read bucket metadata.
+	// - storage.buckets.getIamPolicy — Read bucket IAM policy.
+	// - storage.buckets.create — Create bucket.
+	// - storage.buckets.list — List buckets.
+	// - storage.buckets.setIamPolicy — Update bucket IAM policy.
+	// - storage.buckets.update — Update bucket metadata.
+	// - storage.objects.delete — Delete object.
+	// - storage.objects.get — Read object data and metadata.
+	// - storage.objects.getIamPolicy — Read object IAM policy.
+	// - storage.objects.create — Create object.
+	// - storage.objects.list — List objects.
+	// - storage.objects.setIamPolicy — Update object IAM policy.
+	// - storage.objects.update — Update object metadata.
+	Permissions []string `json:"permissions,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Kind") to
+	// unconditionally include in API requests. By default, fields with
+	// empty values are omitted from API requests. However, any non-pointer,
+	// non-interface field appearing in ForceSendFields will be sent to the
+	// server regardless of whether the field is empty or not. This may be
+	// used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Kind") to include in API
+	// requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests.
However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { + type NoMethod TestIamPermissionsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// method id "storage.bucketAccessControls.delete": + +type BucketAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified bucket. +func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall { + c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsDeleteCall) UserProject(userProject string) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.delete" call. 
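+//
+// A minimal, illustrative call site (bucket, entity, and billing-project
+// names are placeholders; svc is a *Service):
+//
+//	err := svc.BucketAccessControls.
+//		Delete("my-bucket", "user-alice@example.com").
+//		UserProject("my-billing-project").
+//		Context(ctx).
+//		Do()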
+func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return err
+	}
+	return nil
+	// {
+	//   "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
+	//   "httpMethod": "DELETE",
+	//   "id": "storage.bucketAccessControls.delete",
+	//   "parameterOrder": [
+	//     "bucket",
+	//     "entity"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of a bucket.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "entity": {
+	//       "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "provisionalUserProject": {
+	//       "description": "The project to be billed for this request if the target bucket is requester-pays bucket.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "userProject": {
+	//       "description": "The project to be billed for this request. Required for Requester Pays buckets.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/acl/{entity}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/devstorage.full_control"
+	//   ]
+	// }
+
+}
+
+// method id "storage.bucketAccessControls.get":
+
+type BucketAccessControlsGetCall struct {
+	s            *Service
+	bucket       string
+	entity       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Returns the ACL entry for the specified entity on the specified
+// bucket.
+func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
+	c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.entity = entity
+	return c
+}
+
+// ProvisionalUserProject sets the optional parameter
+// "provisionalUserProject": The project to be billed for this request
+// if the target bucket is requester-pays bucket.
+func (c *BucketAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsGetCall {
+	c.urlParams_.Set("provisionalUserProject", provisionalUserProject)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketAccessControlsGetCall) UserProject(userProject string) *BucketAccessControlsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
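+//
+// A minimal, illustrative use of this with googleapi.IsNotModified,
+// assuming a *Service value svc and a previously fetched, cached entry:
+//
+//	acl, err := svc.BucketAccessControls.
+//		Get("my-bucket", "user-alice@example.com").
+//		IfNoneMatch(cached.Etag).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		acl = cached // server returned 304 Not Modified; reuse the cache
+//	}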
+func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.get" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.insert": + +type BucketAccessControlsInsertCall struct { + s *Service + bucket string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall { + c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsInsertCall) UserProject(userProject string) *BucketAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
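+
+// A minimal, illustrative call site for the Insert method above, assuming
+// a *Service value svc (bucket and entity names are placeholders):
+//
+//	acl := &BucketAccessControl{
+//		Entity: "user-alice@example.com",
+//		Role:   "READER",
+//	}
+//	created, err := svc.BucketAccessControls.Insert("my-bucket", acl).Do()
+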
+func (c *BucketAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.insert" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.bucketAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/acl",
+	//   "request": {
+	//     "$ref": "BucketAccessControl"
+	//   },
+	//   "response": {
+	//     "$ref": "BucketAccessControl"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/devstorage.full_control"
+	//   ]
+	// }
+
+}
+
+// method id "storage.bucketAccessControls.list":
+
+type BucketAccessControlsListCall struct {
+	s            *Service
+	bucket       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Retrieves ACL entries on the specified bucket.
+func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
+	c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	return c
+}
+
+// ProvisionalUserProject sets the optional parameter
+// "provisionalUserProject": The project to be billed for this request
+// if the target bucket is requester-pays bucket.
+func (c *BucketAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsListCall {
+	c.urlParams_.Set("provisionalUserProject", provisionalUserProject)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketAccessControlsListCall) UserProject(userProject string) *BucketAccessControlsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketAccessControlsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023")
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
+	urls += "?"
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.list" call. +// Exactly one of *BucketAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.bucketAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl", + // "response": { + // "$ref": "BucketAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.patch": + +type BucketAccessControlsPatchCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall { + c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
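+
+// Patch (below) sends only the fields that are set on the request body, so
+// it suits small in-place changes; combine it with ForceSendFields or
+// NullFields (documented on BucketAccessControl) to send empty or null
+// values. An illustrative sketch, assuming a *Service value svc and
+// placeholder bucket and entity names:
+//
+//	patch := &BucketAccessControl{Role: "OWNER"}
+//	updated, err := svc.BucketAccessControls.
+//		Patch("my-bucket", "user-alice@example.com", patch).
+//		Do()
+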
+func (c *BucketAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsPatchCall) UserProject(userProject string) *BucketAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.patch" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.bucketAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.bucketAccessControls.update": + +type BucketAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + bucketaccesscontrol *BucketAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified bucket. +func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall { + c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.bucketaccesscontrol = bucketaccesscontrol + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketAccessControlsUpdateCall) UserProject(userProject string) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
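+//
+// Editor's note (illustrative sketch, not generated code): Fields can
+// trim the response to selected attributes. The field names below are
+// assumed from the BucketAccessControl schema, and svc/ctx are assumed
+// as in the sketches above:
+//
+//	acl, err := svc.BucketAccessControls.Patch("my-bucket", "allUsers",
+//		&BucketAccessControl{Role: "READER"}).
+//		Fields("entity", "role"). // partial response: only these fields
+//		Context(ctx).Do()
+//	_, _ = acl, err
+//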
+func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.bucketAccessControls.update" call. +// Exactly one of *BucketAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *BucketAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &BucketAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.bucketAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/acl/{entity}", + // "request": { + // "$ref": "BucketAccessControl" + // }, + // "response": { + // "$ref": "BucketAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.delete": + +type BucketsDeleteCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes an empty bucket. +func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall { + c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the bucket if its +// metageneration matches this value. +func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the bucket if its +// metageneration does not match this value. +func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *BucketsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsDeleteCall) UserProject(userProject string) *BucketsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
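+//
+// Editor's note (illustrative sketch, not generated code): Delete
+// returns only an error, and the metageneration preconditions above
+// guard against concurrent metadata changes; svc and ctx are assumed:
+//
+//	err := svc.Buckets.Delete("my-bucket").
+//		IfMetagenerationMatch(42). // hypothetical metageneration value
+//		Context(ctx).Do()
+//	if err != nil {
+//		// bucket not empty, precondition failed, etc.
+//	}
+//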
+func (c *BucketsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.delete" call. +func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes an empty bucket.", + // "httpMethod": "DELETE", + // "id": "storage.buckets.delete", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the bucket if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the bucket if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.get": + +type BucketsGetCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns metadata for the specified bucket. +func (r *BucketsService) Get(bucket string) *BucketsGetCall { + c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. 
+func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration does not
+// match the given value.
+func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit owner, acl and defaultObjectAcl properties.
+func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// ProvisionalUserProject sets the optional parameter
+// "provisionalUserProject": The project to be billed for this request
+// if the target bucket is requester-pays bucket.
+func (c *BucketsGetCall) ProvisionalUserProject(provisionalUserProject string) *BucketsGetCall {
+	c.urlParams_.Set("provisionalUserProject", provisionalUserProject)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketsGetCall) UserProject(userProject string) *BucketsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023")
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+	urls += "?"
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.get" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns metadata for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.get", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}",
+	//   "response": {
+	//     "$ref": "Bucket"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_only",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ]
+	// }
+
+}
+
+// method id "storage.buckets.getIamPolicy":
+
+type BucketsGetIamPolicyCall struct {
+	s            *Service
+	bucket       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// GetIamPolicy: Returns an IAM policy for the specified bucket.
+func (r *BucketsService) GetIamPolicy(bucket string) *BucketsGetIamPolicyCall {
+	c := &BucketsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	return c
+}
+
+// OptionsRequestedPolicyVersion sets the optional parameter
+// "optionsRequestedPolicyVersion": The IAM policy format version to be
+// returned. If the optionsRequestedPolicyVersion is for an older
+// version that doesn't support part of the requested IAM policy, the
+// request fails.
+func (c *BucketsGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *BucketsGetIamPolicyCall {
+	c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion))
+	return c
+}
+
+// ProvisionalUserProject sets the optional parameter
+// "provisionalUserProject": The project to be billed for this request
+// if the target bucket is requester-pays bucket.
+func (c *BucketsGetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsGetIamPolicyCall {
+	c.urlParams_.Set("provisionalUserProject", provisionalUserProject)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *BucketsGetIamPolicyCall) UserProject(userProject string) *BucketsGetIamPolicyCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsGetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsGetIamPolicyCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetIamPolicyCall) IfNoneMatch(entityTag string) *BucketsGetIamPolicyCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetIamPolicyCall) Context(ctx context.Context) *BucketsGetIamPolicyCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
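+//
+// Editor's note (illustrative sketch, not generated code): the
+// If-None-Match / googleapi.IsNotModified pattern documented above,
+// with svc, ctx and a previously cached lastETag assumed:
+//
+//	policy, err := svc.Buckets.GetIamPolicy("my-bucket").
+//		IfNoneMatch(lastETag).Context(ctx).Do()
+//	if googleapi.IsNotModified(err) {
+//		// cached policy is still current; reuse it
+//	} else if err != nil {
+//		// handle other errors
+//	}
+//	_ = policy
+//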
+func (c *BucketsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns an IAM policy for the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.buckets.getIamPolicy", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.insert": + +type BucketsInsertCall struct { + s *Service + bucket *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new bucket. +func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall { + c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + c.bucket = bucket + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the bucket resource +// specifies acl or defaultObjectAcl properties, when it defaults to +// full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsInsertCall) ProvisionalUserProject(provisionalUserProject string) *BucketsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. 
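+//
+// Editor's note (illustrative sketch, not generated code): creating a
+// bucket with one of the predefined ACLs listed above; svc, ctx and the
+// Bucket.Name field (defined earlier in this file) are assumed:
+//
+//	b, err := svc.Buckets.Insert("my-project", &Bucket{Name: "my-bucket"}).
+//		PredefinedAcl("private").Context(ctx).Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = b // the created bucket as returned by the server
+//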
+func (c *BucketsInsertCall) UserProject(userProject string) *BucketsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.insert" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.insert", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." 
+ // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.list": + +type BucketsListCall struct { + s *Service + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of buckets for a given project. +func (r *BucketsService) List(projectid string) *BucketsListCall { + c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.urlParams_.Set("project", projectid) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of buckets to return in a single response. The service will use this +// parameter or 1,000 items, whichever is smaller. +func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// buckets whose names begin with this prefix. +func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsListCall) Projection(projection string) *BucketsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsListCall) ProvisionalUserProject(provisionalUserProject string) *BucketsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *BucketsListCall) UserProject(userProject string) *BucketsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023")
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.list" call.
+// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Buckets.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &Buckets{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves a list of buckets for a given project.",
+	//   "httpMethod": "GET",
+	//   "id": "storage.buckets.list",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "maxResults": {
+	//       "default": "1000",
+	//       "description": "Maximum number of buckets to return in a single response.
The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to buckets whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "A valid API project identifier.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b", + // "response": { + // "$ref": "Buckets" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.buckets.lockRetentionPolicy": + +type BucketsLockRetentionPolicyCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// LockRetentionPolicy: Locks retention policy on a bucket. +func (r *BucketsService) LockRetentionPolicy(bucket string, ifMetagenerationMatch int64) *BucketsLockRetentionPolicyCall { + c := &BucketsLockRetentionPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsLockRetentionPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
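+//
+// Editor's note (illustrative sketch, not generated code): the Pages
+// helper defined above drives pageToken iteration for buckets.list;
+// svc and ctx are assumed, and the Items field is assumed from the
+// Buckets schema defined earlier in this file:
+//
+//	err := svc.Buckets.List("my-project").Prefix("logs-").
+//		Pages(ctx, func(page *Buckets) error {
+//			for _, b := range page.Items {
+//				_ = b // process each bucket in this page
+//			}
+//			return nil // returning a non-nil error halts iteration
+//		})
+//	_ = err
+//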
+func (c *BucketsLockRetentionPolicyCall) UserProject(userProject string) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsLockRetentionPolicyCall) Fields(s ...googleapi.Field) *BucketsLockRetentionPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsLockRetentionPolicyCall) Context(ctx context.Context) *BucketsLockRetentionPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsLockRetentionPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/lockRetentionPolicy") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.lockRetentionPolicy" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsLockRetentionPolicyCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Locks retention policy on a bucket.", + // "httpMethod": "POST", + // "id": "storage.buckets.lockRetentionPolicy", + // "parameterOrder": [ + // "bucket", + // "ifMetagenerationMatch" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/lockRetentionPolicy", + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.patch": + +type BucketsPatchCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. +func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall { + c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. +func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. 
+// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsPatchCall) ProvisionalUserProject(provisionalUserProject string) *BucketsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsPatchCall) UserProject(userProject string) *BucketsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
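+//
+// Editor's note (illustrative sketch, not generated code): a guarded
+// bucket patch using the setters above; svc, ctx and the metageneration
+// value are assumed:
+//
+//	b, err := svc.Buckets.Patch("my-bucket", &Bucket{}).
+//		PredefinedAcl("projectPrivate").
+//		IfMetagenerationMatch(42). // hypothetical precondition
+//		Context(ctx).Do()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = b // the patched bucket as returned by the server
+//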
+func (c *BucketsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.patch" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PATCH", + // "id": "storage.buckets.patch", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.setIamPolicy": + +type BucketsSetIamPolicyCall struct { + s *Service + bucket string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified bucket. +func (r *BucketsService) SetIamPolicy(bucket string, policy *Policy) *BucketsSetIamPolicyCall { + c := &BucketsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.policy = policy + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsSetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *BucketsSetIamPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsSetIamPolicyCall) UserProject(userProject string) *BucketsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsSetIamPolicyCall) Fields(s ...googleapi.Field) *BucketsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *BucketsSetIamPolicyCall) Context(ctx context.Context) *BucketsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. 
Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an IAM policy for the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.buckets.setIamPolicy", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.buckets.testIamPermissions": + +type BucketsTestIamPermissionsCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given bucket to +// see which, if any, are held by the caller. +func (r *BucketsService) TestIamPermissions(bucket string, permissions []string) *BucketsTestIamPermissionsCall { + c := &BucketsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsTestIamPermissionsCall) ProvisionalUserProject(provisionalUserProject string) *BucketsTestIamPermissionsCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsTestIamPermissionsCall) UserProject(userProject string) *BucketsTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsTestIamPermissionsCall) Fields(s ...googleapi.Field) *BucketsTestIamPermissionsCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsTestIamPermissionsCall) IfNoneMatch(entityTag string) *BucketsTestIamPermissionsCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsTestIamPermissionsCall) Context(ctx context.Context) *BucketsTestIamPermissionsCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *BucketsTestIamPermissionsCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023")
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/iam/testPermissions")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.buckets.testIamPermissions" call.
+// Exactly one of *TestIamPermissionsResponse or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *TestIamPermissionsResponse.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
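+	// A minimal caller-side sketch of this call (illustrative only, not part
+	// of the generated API surface; svc is assumed to be an initialized
+	// *Service with this package imported as storage, and the bucket name
+	// and permission strings are made-up examples):
+	//
+	//	resp, err := svc.Buckets.TestIamPermissions("example-bucket",
+	//		[]string{"storage.buckets.get"}).Context(ctx).Do()
+	//	if err == nil {
+	//		fmt.Println(resp.Permissions) // the subset of permissions the caller holds
+	//	}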
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Tests a set of permissions on the given bucket to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.buckets.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "permissions" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/iam/testPermissions", + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.update": + +type BucketsUpdateCall struct { + s *Service + bucket string + bucket2 *Bucket + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a bucket. Changes to the bucket will be readable +// immediately after writing, but configuration changes may take time to +// propagate. +func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall { + c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bucket2 = bucket2 + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration matches +// the given value. +func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the bucket metadata +// conditional on whether the bucket's current metageneration does not +// match the given value. 
+func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Project team owners get OWNER access, and +// allAuthenticatedUsers get READER access. +// "private" - Project team owners get OWNER access. +// "projectPrivate" - Project team members get access according to +// their roles. +// "publicRead" - Project team owners get OWNER access, and allUsers +// get READER access. +// "publicReadWrite" - Project team owners get OWNER access, and +// allUsers get WRITER access. +func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// PredefinedDefaultObjectAcl sets the optional parameter +// "predefinedDefaultObjectAcl": Apply a predefined set of default +// object access controls to this bucket. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall { + c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. +func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *BucketsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *BucketsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *BucketsUpdateCall) UserProject(userProject string) *BucketsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *BucketsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.update" call. +// Exactly one of *Bucket or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Bucket.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Bucket{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a bucket. 
Changes to the bucket will be readable immediately after writing, but configuration changes may take time to propagate.", + // "httpMethod": "PUT", + // "id": "storage.buckets.update", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "private", + // "projectPrivate", + // "publicRead", + // "publicReadWrite" + // ], + // "enumDescriptions": [ + // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.", + // "Project team owners get OWNER access.", + // "Project team members get access according to their roles.", + // "Project team owners get OWNER access, and allUsers get READER access.", + // "Project team owners get OWNER access, and allUsers get WRITER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "predefinedDefaultObjectAcl": { + // "description": "Apply a predefined set of default object access controls to this bucket.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit owner, acl and defaultObjectAcl properties." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}", + // "request": { + // "$ref": "Bucket" + // }, + // "response": { + // "$ref": "Bucket" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.channels.stop": + +type ChannelsStopCall struct { + s *Service + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Stop: Stop watching resources through this channel +func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall { + c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.channel = channel + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ChannelsStopCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.channels.stop" call. +func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) 
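+	// A minimal caller-side sketch (illustrative only; svc is an assumed
+	// initialized *Service, and the channel/resource IDs are made-up values
+	// that would normally come from a previously returned watch Channel):
+	//
+	//	err := svc.Channels.Stop(&storage.Channel{
+	//		Id:         "example-channel-id",
+	//		ResourceId: "example-resource-id",
+	//	}).Do()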
+ res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Stop watching resources through this channel", + // "httpMethod": "POST", + // "id": "storage.channels.stop", + // "path": "channels/stop", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.delete": + +type DefaultObjectAccessControlsDeleteCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the default object ACL entry for the +// specified entity on the specified bucket. +func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall { + c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsDeleteCall) UserProject(userProject string) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.delete" call. +func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "DELETE", + // "id": "storage.defaultObjectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.get": + +type DefaultObjectAccessControlsGetCall struct { + s *Service + bucket string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the default object ACL entry for the specified entity on +// the specified bucket. +func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall { + c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *DefaultObjectAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsGetCall {
+	c.urlParams_.Set("provisionalUserProject", provisionalUserProject)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *DefaultObjectAccessControlsGetCall) UserProject(userProject string) *DefaultObjectAccessControlsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023")
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"entity": c.entity,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.get" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
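+	// A minimal caller-side sketch (illustrative only; svc is an assumed
+	// initialized *Service and the bucket name is a made-up example;
+	// "allUsers" is one of the entity forms listed in the method docs):
+	//
+	//	acl, err := svc.DefaultObjectAccessControls.Get("example-bucket", "allUsers").Do()
+	//	if err == nil {
+	//		fmt.Println(acl.Role)
+	//	}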
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.insert": + +type DefaultObjectAccessControlsInsertCall struct { + s *Service + bucket string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new default object ACL entry on the specified +// bucket. +func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall { + c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsInsertCall) UserProject(userProject string) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
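+	// A minimal caller-side sketch (illustrative only; svc and the names
+	// below are assumptions; Entity and Role are fields of the
+	// ObjectAccessControl request body):
+	//
+	//	acl, err := svc.DefaultObjectAccessControls.Insert("example-bucket",
+	//		&storage.ObjectAccessControl{Entity: "allUsers", Role: "READER"}).Do()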
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new default object ACL entry on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.defaultObjectAccessControls.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.list": + +type DefaultObjectAccessControlsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves default object ACL entries on the specified bucket. +func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall { + c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If present, only return default ACL listing +// if the bucket's current metageneration matches this value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If present, only return default ACL +// listing if the bucket's current metageneration does not match the +// given value. +func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *DefaultObjectAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsListCall {
+	c.urlParams_.Set("provisionalUserProject", provisionalUserProject)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *DefaultObjectAccessControlsListCall) UserProject(userProject string) *DefaultObjectAccessControlsListCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023")
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.list" call.
+// Exactly one of *ObjectAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
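+	// A minimal caller-side sketch (illustrative only; svc and the bucket
+	// name are assumptions). Fields restricts the response to a partial
+	// projection, as described on the Fields method above:
+	//
+	//	acls, err := svc.DefaultObjectAccessControls.List("example-bucket").
+	//		Fields("items(entity,role)").Do()
+	//	if err == nil {
+	//		for _, item := range acls.Items {
+	//			fmt.Println(item.Entity, item.Role)
+	//		}
+	//	}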
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves default object ACL entries on the specified bucket.", + // "httpMethod": "GET", + // "id": "storage.defaultObjectAccessControls.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.patch": + +type DefaultObjectAccessControlsPatchCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches a default object ACL entry on the specified bucket. +func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall { + c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
+func (c *DefaultObjectAccessControlsPatchCall) UserProject(userProject string) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
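+	// A minimal caller-side sketch (illustrative only; svc and the bucket
+	// and project names are assumptions; UserProject applies only to
+	// Requester Pays buckets, per the option docs above):
+	//
+	//	acl, err := svc.DefaultObjectAccessControls.Patch("example-bucket", "allUsers",
+	//		&storage.ObjectAccessControl{Role: "OWNER"}).UserProject("example-project").Do()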
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches a default object ACL entry on the specified bucket.", + // "httpMethod": "PATCH", + // "id": "storage.defaultObjectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.defaultObjectAccessControls.update": + +type DefaultObjectAccessControlsUpdateCall struct { + s *Service + bucket string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates a default object ACL entry on the specified bucket. +func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall { + c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *DefaultObjectAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *DefaultObjectAccessControlsUpdateCall) UserProject(userProject string) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.defaultObjectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates a default object ACL entry on the specified bucket.", + // "httpMethod": "PUT", + // "id": "storage.defaultObjectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/defaultObjectAcl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.notifications.delete": + +type NotificationsDeleteCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a notification subscription. +func (r *NotificationsService) Delete(bucket string, notification string) *NotificationsDeleteCall { + c := &NotificationsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsDeleteCall) UserProject(userProject string) *NotificationsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
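+//
+// As a hedged usage sketch (svc, ctx, and the identifiers are assumed
+// placeholders), note that Do for this delete call returns only an error:
+//
+//	err := svc.Notifications.
+//		Delete("example-bucket", "example-notification-id").
+//		Context(ctx).
+//		Do()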
+func (c *NotificationsDeleteCall) Fields(s ...googleapi.Field) *NotificationsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsDeleteCall) Context(ctx context.Context) *NotificationsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.delete" call. +func (c *NotificationsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes a notification subscription.", + // "httpMethod": "DELETE", + // "id": "storage.notifications.delete", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "ID of the notification to delete.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.get": + +type NotificationsGetCall struct { + s *Service + bucket string + notification string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: View a notification configuration. 
+func (r *NotificationsService) Get(bucket string, notification string) *NotificationsGetCall { + c := &NotificationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsGetCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsGetCall) UserProject(userProject string) *NotificationsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsGetCall) Fields(s ...googleapi.Field) *NotificationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *NotificationsGetCall) IfNoneMatch(entityTag string) *NotificationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsGetCall) Context(ctx context.Context) *NotificationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs/{notification}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "notification": c.notification, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.get" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned.
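+//
+// For example (a sketch with placeholder names; lastEtag would come from a
+// previous response's ETag header):
+//
+//	n, err := svc.Notifications.
+//		Get("example-bucket", "example-notification-id").
+//		IfNoneMatch(lastEtag).
+//		Context(ctx).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// the cached copy is still current
+//	}
+//	_ = n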
+func (c *NotificationsGetCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "View a notification configuration.", + // "httpMethod": "GET", + // "id": "storage.notifications.get", + // "parameterOrder": [ + // "bucket", + // "notification" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "notification": { + // "description": "Notification ID", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs/{notification}", + // "response": { + // "$ref": "Notification" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.insert": + +type NotificationsInsertCall struct { + s *Service + bucket string + notification *Notification + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a notification subscription for a given bucket. +func (r *NotificationsService) Insert(bucket string, notification *Notification) *NotificationsInsertCall { + c := &NotificationsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.notification = notification + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsInsertCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsInsertCall) UserProject(userProject string) *NotificationsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
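+//
+// A minimal creation sketch (placeholder names; Topic and PayloadFormat are
+// fields of the Notification schema in this package):
+//
+//	n := &Notification{
+//		Topic:         "//pubsub.googleapis.com/projects/example-project/topics/example-topic",
+//		PayloadFormat: "JSON_API_V1",
+//	}
+//	created, err := svc.Notifications.Insert("example-bucket", n).Context(ctx).Do()
+//	if err != nil {
+//		// handle the *googleapi.Error
+//	}
+//	_ = created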
+func (c *NotificationsInsertCall) Fields(s ...googleapi.Field) *NotificationsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsInsertCall) Context(ctx context.Context) *NotificationsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.notification) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.insert" call. +// Exactly one of *Notification or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notification.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *NotificationsInsertCall) Do(opts ...googleapi.CallOption) (*Notification, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notification{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a notification subscription for a given bucket.", + // "httpMethod": "POST", + // "id": "storage.notifications.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the notification.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "request": { + // "$ref": "Notification" + // }, + // "response": { + // "$ref": "Notification" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.notifications.list": + +type NotificationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of notification subscriptions for a given +// bucket. +func (r *NotificationsService) List(bucket string) *NotificationsListCall { + c := &NotificationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *NotificationsListCall) ProvisionalUserProject(provisionalUserProject string) *NotificationsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *NotificationsListCall) UserProject(userProject string) *NotificationsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *NotificationsListCall) Fields(s ...googleapi.Field) *NotificationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *NotificationsListCall) IfNoneMatch(entityTag string) *NotificationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *NotificationsListCall) Context(ctx context.Context) *NotificationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *NotificationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/notificationConfigs") + urls += "?"
+ c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.notifications.list" call. +// Exactly one of *Notifications or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Notifications.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *NotificationsListCall) Do(opts ...googleapi.CallOption) (*Notifications, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Notifications{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of notification subscriptions for a given bucket.", + // "httpMethod": "GET", + // "id": "storage.notifications.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a Google Cloud Storage bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/notificationConfigs", + // "response": { + // "$ref": "Notifications" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objectAccessControls.delete": + +type ObjectAccessControlsDeleteCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes the ACL entry for the specified entity on +// the specified object. +func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall { + c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). 
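+//
+// For instance (sketch only; the bucket, object, entity, and generation
+// values are placeholders):
+//
+//	err := svc.ObjectAccessControls.
+//		Delete("example-bucket", "example-object", "user-test@example.com").
+//		Generation(1234567890). // target a specific object revision
+//		Context(ctx).
+//		Do()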
+func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsDeleteCall) UserProject(userProject string) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.delete" call. +func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.", + // "httpMethod": "DELETE", + // "id": "storage.objectAccessControls.delete", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. 
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.get": + +type ObjectAccessControlsGetCall struct { + s *Service + bucket string + object string + entity string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns the ACL entry for the specified entity on the specified +// object. +func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall { + c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsGetCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsGetCall) UserProject(userProject string) *ObjectAccessControlsGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.get" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the ACL entry for the specified entity on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.get", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission.
Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.insert": + +type ObjectAccessControlsInsertCall struct { + s *Service + bucket string + object string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new ACL entry on the specified object. +func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall { + c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsInsertCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsInsertCall) UserProject(userProject string) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.insert" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new ACL entry on the specified object.", + // "httpMethod": "POST", + // "id": "storage.objectAccessControls.insert", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.list": + +type ObjectAccessControlsListCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves ACL entries on the specified object. +func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall { + c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsListCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsListCall) UserProject(userProject string) *ObjectAccessControlsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled.
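+//
+// Putting the builder together (illustrative sketch; names are placeholders):
+//
+//	acls, err := svc.ObjectAccessControls.
+//		List("example-bucket", "example-object").
+//		Context(ctx).
+//		Do()
+//	if err == nil {
+//		for _, item := range acls.Items {
+//			_ = item // inspect, e.g., item.Entity and item.Role
+//		}
+//	}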
+func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectAccessControlsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.list" call. +// Exactly one of *ObjectAccessControls or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControls.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControls{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves ACL entries on the specified object.", + // "httpMethod": "GET", + // "id": "storage.objectAccessControls.list", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl", + // "response": { + // "$ref": "ObjectAccessControls" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.patch": + +type ObjectAccessControlsPatchCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an ACL entry on the specified object. +func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall { + c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsPatchCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsPatchCall) UserProject(userProject string) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectAccessControlsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.patch" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an ACL entry on the specified object.", + // "httpMethod": "PATCH", + // "id": "storage.objectAccessControls.patch", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objectAccessControls.update": + +type ObjectAccessControlsUpdateCall struct { + s *Service + bucket string + object string + entity string + objectaccesscontrol *ObjectAccessControl + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an ACL entry on the specified object. +func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall { + c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.entity = entity + c.objectaccesscontrol = objectaccesscontrol + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectAccessControlsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectAccessControlsUpdateCall) UserProject(userProject string) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectAccessControlsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + "entity": c.entity, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objectAccessControls.update" call. +// Exactly one of *ObjectAccessControl or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *ObjectAccessControl.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ObjectAccessControl{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an ACL entry on the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objectAccessControls.update", + // "parameterOrder": [ + // "bucket", + // "object", + // "entity" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of a bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "entity": { + // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/acl/{entity}", + // "request": { + // "$ref": "ObjectAccessControl" + // }, + // "response": { + // "$ref": "ObjectAccessControl" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.compose": + +type ObjectsComposeCall struct { + s *Service + destinationBucket string + destinationObject string + composerequest *ComposeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Compose: Concatenates a list of existing objects into a new object in +// the same bucket. +func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall { + c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.composerequest = composerequest + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. 
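+//
+// For example (value illustrative): IfMetagenerationMatch(3) makes the
+// compose fail, typically with a 412 Precondition Failed error, unless
+// the destination object's metageneration is currently 3.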
+func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsComposeCall) KmsKeyName(kmsKeyName string) *ObjectsComposeCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsComposeCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsComposeCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsComposeCall) UserProject(userProject string) *ObjectsComposeCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsComposeCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.compose" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. 
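+//
+// A minimal usage sketch (names illustrative; svc is a *Service built
+// elsewhere, and ComposeRequestSourceObjects is the source-object schema
+// generated earlier in this file):
+//
+//	req := &ComposeRequest{
+//		SourceObjects: []*ComposeRequestSourceObjects{
+//			{Name: "part-0"},
+//			{Name: "part-1"},
+//		},
+//	}
+//	obj, err := svc.Objects.Compose("my-bucket", "combined", req).Context(ctx).Do()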
+func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Concatenates a list of existing objects into a new object in the same bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.compose", + // "parameterOrder": [ + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket containing the source objects. The destination object is stored in this bucket.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{destinationBucket}/o/{destinationObject}/compose", + // "request": { + // "$ref": "ComposeRequest" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.copy": + +type ObjectsCopyCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Copy: Copies a source object to a destination object. Optionally +// overrides metadata. +func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall { + c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsCopyCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsCopyCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the destination object's +// current generation matches the given value. Setting to 0 makes the +// operation succeed only if there are no live versions of the object. +func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the destination object's current generation does not match the given +// value. If no live object exists, the precondition fails. 
Setting to 0 +// makes the operation succeed only if there is a live version of the +// object. +func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. +func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
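+//
+// For context, a hedged end-to-end copy sketch (names illustrative; an
+// empty *Object requests no metadata overrides):
+//
+//	obj, err := svc.Objects.Copy("src-bucket", "notes.txt", "dst-bucket", "notes.txt", &Object{}).
+//		SourceGeneration(gen).
+//		Do()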
+func (c *ObjectsCopyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsCopyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsCopyCall) UserProject(userProject string) *ObjectsCopyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsCopyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.copy" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
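+	// Generated pattern: a 304 is checked before err, so even when the
+	// transport also returns an error the StatusNotModified response wins;
+	// its body (if any) is closed and the status is surfaced as a
+	// *googleapi.Error, detectable with googleapi.IsNotModified.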
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Copies a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.copy", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.delete": + +type ObjectsDeleteCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an object and its metadata. Deletions are permanent +// if versioning is not enabled for the bucket, or if the generation +// parameter is used. +func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall { + c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// permanently deletes a specific revision of this object (as opposed to +// the latest version, the default). +func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
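+//
+// Billing sketch (names illustrative): for a Requester Pays bucket,
+// callers typically bill via UserProject, e.g.
+//
+//	err := svc.Objects.Delete("pays-bucket", "old.log").UserProject("my-project").Do()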
+func (c *ObjectsDeleteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsDeleteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsDeleteCall) UserProject(userProject string) *ObjectsDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.delete" call. +func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.", + // "httpMethod": "DELETE", + // "id": "storage.objects.delete", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.get": + +type ObjectsGetCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an object or its metadata. +func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall { + c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. 
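+//
+// A hedged sketch: re-fetch metadata only if the object has moved past a
+// previously observed generation gen (for reads, a failed
+// ifGenerationNotMatch precondition is generally reported as 304 Not
+// Modified, so googleapi.IsNotModified applies):
+//
+//	obj, err := svc.Objects.Get("my-bucket", "my-object").
+//		IfGenerationNotMatch(gen).
+//		Do()
+//	if googleapi.IsNotModified(err) {
+//		// object is still at generation gen
+//	}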
+func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+	return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+//   "full" - Include all properties.
+//   "noAcl" - Omit the owner, acl property.
+func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
+	c.urlParams_.Set("projection", projection)
+	return c
+}
+
+// ProvisionalUserProject sets the optional parameter
+// "provisionalUserProject": The project to be billed for this request
+// if the target bucket is requester-pays bucket.
+func (c *ObjectsGetCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsGetCall {
+	c.urlParams_.Set("provisionalUserProject", provisionalUserProject)
+	return c
+}
+
+// UserProject sets the optional parameter "userProject": The project to
+// be billed for this request. Required for Requester Pays buckets.
+func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall {
+	c.urlParams_.Set("userProject", userProject)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
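+//
+// One common use (a sketch; the byte range is illustrative) is a Range
+// request for a partial media download via Download:
+//
+//	call := svc.Objects.Get("my-bucket", "big.bin")
+//	call.Header().Set("Range", "bytes=0-1023")
+//	res, err := call.Download()
+//	// On success res.Body holds at most the first 1 KiB; the caller
+//	// must close it.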
+func (c *ObjectsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Download fetches the API endpoint's "media" value, instead of the normal +// API response value. If the returned error is nil, the Response is guaranteed to +// have a 2xx status code. Callers must close the Response.Body as usual. +func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("media") + if err != nil { + return nil, err + } + if err := googleapi.CheckMediaResponse(res); err != nil { + res.Body.Close() + return nil, err + } + return res, nil +} + +// Do executes the "storage.objects.get" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an object or its metadata.", + // "httpMethod": "GET", + // "id": "storage.objects.get", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaDownload": true, + // "useMediaDownloadService": true + // } + +} + +// method id "storage.objects.getIamPolicy": + +type ObjectsGetIamPolicyCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified object. +func (r *ObjectsService) GetIamPolicy(bucket string, object string) *ObjectsGetIamPolicyCall { + c := &ObjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsGetIamPolicyCall) Generation(generation int64) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *ObjectsGetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsGetIamPolicyCall) UserProject(userProject string) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ObjectsGetIamPolicyCall) IfNoneMatch(entityTag string) *ObjectsGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsGetIamPolicyCall) Context(ctx context.Context) *ObjectsGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...)
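+ // What follows is the generated response handling: a 304 Not Modified
+ // (possible when IfNoneMatch was set) is surfaced as a *googleapi.Error
+ // carrying the status code and headers, so callers can detect it with
+ // googleapi.IsNotModified; any other non-2xx status is rejected by
+ // googleapi.CheckResponse before the body is decoded into the Policy.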
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns an IAM policy for the specified object.", + // "httpMethod": "GET", + // "id": "storage.objects.getIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.insert": + +type ObjectsInsertCall struct { + s *Service + bucket string + object *Object + urlParams_ gensupport.URLParams + mediaInfo_ *gensupport.MediaInfo + ctx_ context.Context + header_ http.Header +} + +// Insert: Stores a new object and metadata. +func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall { + c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + return c +} + +// ContentEncoding sets the optional parameter "contentEncoding": If +// set, sets the contentEncoding property of the final object to this +// value. Setting this parameter is equivalent to setting the +// contentEncoding metadata property. This can be useful when uploading +// an object with uploadType=media to indicate the encoding of the +// content being uploaded. +func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall { + c.urlParams_.Set("contentEncoding", contentEncoding) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. 
Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// KmsKeyName sets the optional parameter "kmsKeyName": Resource name of +// the Cloud KMS key, of the form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsInsertCall) KmsKeyName(kmsKeyName string) *ObjectsInsertCall { + c.urlParams_.Set("kmsKeyName", kmsKeyName) + return c +} + +// Name sets the optional parameter "name": Name of the object. Required +// when the object metadata is not otherwise provided. Overrides the +// object metadata's name value, if any. For information about how to +// URL encode object names to be path safe, see Encoding URI Path Parts. +func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall { + c.urlParams_.Set("name", name) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. 
+// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsInsertCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsInsertCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsInsertCall) UserProject(userProject string) *ObjectsInsertCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Media specifies the media to upload in one or more chunks. The chunk +// size may be controlled by supplying a MediaOption generated by +// googleapi.ChunkSize. The chunk size defaults to +// googleapi.DefaultUploadChunkSize. The Content-Type header used in the +// upload request will be determined by sniffing the contents of r, +// unless a MediaOption generated by googleapi.ContentType is +// supplied. +// At most one of Media and ResumableMedia may be set. +func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall { + if ct := c.object.ContentType; ct != "" { + options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...) + } + c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options) + return c +} + +// ResumableMedia specifies the media to upload in chunks and can be +// canceled with ctx. +// +// Deprecated: use Media instead. +// +// At most one of Media and ResumableMedia may be set. mediaType +// identifies the MIME media type of the upload, such as "image/png". If +// mediaType is "", it will be auto-detected. The provided ctx will +// supersede any context previously provided to the Context method. +func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall { + c.ctx_ = ctx + c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType) + return c +} + +// ProgressUpdater provides a callback function that will be called +// after every chunk. It should be a low-latency function in order to +// not slow down the upload operation. This should only be called when +// using ResumableMedia (as opposed to Media). +func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall { + c.mediaInfo_.SetProgressUpdater(pu) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +// This context will supersede any context previously provided to the +// ResumableMedia method. +func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request.
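+//
+// A minimal usage sketch (illustrative only: "svc" stands for an assumed
+// authenticated *Service, "r" for an io.Reader with the object bytes, and
+// the header name is hypothetical):
+//
+//	call := svc.Objects.Insert("my-bucket", &Object{Name: "my-object"}).Media(r)
+//	call.Header().Set("X-Example-Header", "some-value")
+//	obj, err := call.Do()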
+func (c *ObjectsInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + if c.mediaInfo_ != nil { + urls = googleapi.ResolveRelative(c.s.BasePath, "/upload/storage/v1/b/{bucket}/o") + c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType()) + } + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } + body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body) + defer cleanup() + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + req.GetBody = getBody + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.insert" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
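+ // The generated flow below first issues the metadata/upload request; if
+ // the server opened a resumable session, mediaInfo_.ResumableUpload picks
+ // up the response's Location header and rx.Upload then drives the chunked
+ // upload to completion before the final Object is decoded.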
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location")) + if rx != nil { + rx.Client = c.s.client + rx.UserAgent = c.s.userAgent() + ctx := c.ctx_ + if ctx == nil { + ctx = context.TODO() + } + res, err = rx.Upload(ctx) + if err != nil { + return nil, err + } + defer res.Body.Close() + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Stores a new object and metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.insert", + // "mediaUpload": { + // "accept": [ + // "*/*" + // ], + // "protocols": { + // "resumable": { + // "multipart": true, + // "path": "/resumable/upload/storage/v1/b/{bucket}/o" + // }, + // "simple": { + // "multipart": true, + // "path": "/upload/storage/v1/b/{bucket}/o" + // } + // } + // }, + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "contentEncoding": { + // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "kmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. 
Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "name": { + // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "query", + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsMediaUpload": true + // } + +} + +// method id "storage.objects.list": + +type ObjectsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of objects matching the criteria. +func (r *ObjectsService) List(bucket string) *ObjectsListCall { + c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. 
If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in +// addition to prefixes. +func (c *ObjectsListCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsListCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsListCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsListCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsListCall) StartOffset(startOffset string) *ObjectsListCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsListCall) UserProject(userProject string) *ObjectsListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. 
See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.list" call. +// Exactly one of *Objects or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Objects.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { + gensupport.SetOptions(c.urlParams_, opts...)
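+ // The decoded Objects value below carries NextPageToken; the Pages helper
+ // defined after this method re-invokes Do with that token until it is
+ // empty, visiting every page of results.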
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Objects{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of objects matching the criteria.", + // "httpMethod": "GET", + // "id": "storage.objects.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "includeTrailingDelimiter": { + // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. 
If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o", + // "response": { + // "$ref": "Objects" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.objects.patch": + +type ObjectsPatchCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Patch: Patches an object's metadata. +func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall { + c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. 
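+//
+// For example (an illustrative note, not generated documentation):
+// chaining .IfGenerationNotMatch(0) makes the patch succeed only while a
+// live version of the object exists.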
+func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsPatchCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsPatchCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request, for Requester Pays buckets. +func (c *ObjectsPatchCall) UserProject(userProject string) *ObjectsPatchCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
+func (c *ObjectsPatchCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PATCH", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.patch" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Patches an object's metadata.", + // "httpMethod": "PATCH", + // "id": "storage.objects.patch", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. 
Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request, for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.rewrite": + +type ObjectsRewriteCall struct { + s *Service + sourceBucket string + sourceObject string + destinationBucket string + destinationObject string + object *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Rewrite: Rewrites a source object to a destination object. Optionally +// overrides metadata. 
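+//
+// Rewrites that cross locations or storage classes may not finish in a
+// single call; the response then has Done set to false and carries a
+// RewriteToken. A minimal polling sketch (a hedged illustration; "svc"
+// stands for an assumed authenticated *Service and is not defined here):
+//
+//	call := svc.Objects.Rewrite("src-bucket", "src-obj", "dst-bucket", "dst-obj", &Object{})
+//	res, err := call.Do()
+//	for err == nil && !res.Done {
+//		res, err = call.RewriteToken(res.RewriteToken).Do()
+//	}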
+func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall { + c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.sourceBucket = sourceBucket + c.sourceObject = sourceObject + c.destinationBucket = destinationBucket + c.destinationObject = destinationObject + c.object = object + return c +} + +// DestinationKmsKeyName sets the optional parameter +// "destinationKmsKeyName": Resource name of the Cloud KMS key, of the +// form +// projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, +// that will be used to encrypt the object. Overrides the object +// metadata's kms_key_name value, if any. +func (c *ObjectsRewriteCall) DestinationKmsKeyName(destinationKmsKeyName string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationKmsKeyName", destinationKmsKeyName) + return c +} + +// DestinationPredefinedAcl sets the optional parameter +// "destinationPredefinedAcl": Apply a predefined set of access controls +// to the destination object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall { + c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the destination object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the destination object's current metageneration does not +// match the given value. 
+func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// IfSourceGenerationMatch sets the optional parameter +// "ifSourceGenerationMatch": Makes the operation conditional on whether +// the source object's current generation matches the given value. +func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch)) + return c +} + +// IfSourceGenerationNotMatch sets the optional parameter +// "ifSourceGenerationNotMatch": Makes the operation conditional on +// whether the source object's current generation does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch)) + return c +} + +// IfSourceMetagenerationMatch sets the optional parameter +// "ifSourceMetagenerationMatch": Makes the operation conditional on +// whether the source object's current metageneration matches the given +// value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch)) + return c +} + +// IfSourceMetagenerationNotMatch sets the optional parameter +// "ifSourceMetagenerationNotMatch": Makes the operation conditional on +// whether the source object's current metageneration does not match the +// given value. +func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall { + c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch)) + return c +} + +// MaxBytesRewrittenPerCall sets the optional parameter +// "maxBytesRewrittenPerCall": The maximum number of bytes that will be +// rewritten per rewrite request. Most callers shouldn't need to specify +// this parameter - it is primarily in place to support testing. If +// specified the value must be an integral multiple of 1 MiB (1048576). +// Also, this only applies to requests where the source and destination +// span locations and/or storage classes. Finally, this value must not +// change across rewrite calls else you'll get an error that the +// rewriteToken is invalid. +func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall { + c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl, unless the object resource +// specifies the acl property, when it defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
+func (c *ObjectsRewriteCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsRewriteCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// RewriteToken sets the optional parameter "rewriteToken": Include this +// field (from the previous rewrite response) on each rewrite request +// after the first one, until the rewrite response 'done' flag is true. +// Calls that provide a rewriteToken can omit all other request fields, +// but if included those fields must match the values provided in the +// first rewrite request. +func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall { + c.urlParams_.Set("rewriteToken", rewriteToken) + return c +} + +// SourceGeneration sets the optional parameter "sourceGeneration": If +// present, selects a specific revision of the source object (as opposed +// to the latest version, the default). +func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall { + c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRewriteCall) UserProject(userProject string) *ObjectsRewriteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsRewriteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "sourceBucket": c.sourceBucket, + "sourceObject": c.sourceObject, + "destinationBucket": c.destinationBucket, + "destinationObject": c.destinationObject, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.rewrite" call. +// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx +// status code is an error. 
Response headers are in either +// *RewriteResponse.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RewriteResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", + // "httpMethod": "POST", + // "id": "storage.objects.rewrite", + // "parameterOrder": [ + // "sourceBucket", + // "sourceObject", + // "destinationBucket", + // "destinationObject" + // ], + // "parameters": { + // "destinationBucket": { + // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationKmsKeyName": { + // "description": "Resource name of the Cloud KMS key, of the form projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be used to encrypt the object. Overrides the object metadata's kms_key_name value, if any.", + // "location": "query", + // "type": "string" + // }, + // "destinationObject": { + // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "destinationPredefinedAcl": { + // "description": "Apply a predefined set of access controls to the destination object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. 
Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current generation does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifSourceMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "maxBytesRewrittenPerCall": { + // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "rewriteToken": { + // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. 
Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.", + // "location": "query", + // "type": "string" + // }, + // "sourceBucket": { + // "description": "Name of the bucket in which to find the source object.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "sourceGeneration": { + // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "sourceObject": { + // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "RewriteResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.setIamPolicy": + +type ObjectsSetIamPolicyCall struct { + s *Service + bucket string + object string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified object. +func (r *ObjectsService) SetIamPolicy(bucket string, object string, policy *Policy) *ObjectsSetIamPolicyCall { + c := &ObjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.policy = policy + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsSetIamPolicyCall) Generation(generation int64) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsSetIamPolicyCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsSetIamPolicyCall) UserProject(userProject string) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ObjectsSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
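The rewriteToken handshake documented in the storage.objects.rewrite section that closes above is easiest to see in caller code. A minimal sketch, not part of the vendored file: it assumes this is the generated google.golang.org/api/storage/v1 package with a *storage.Service already constructed, and that RewriteResponse exposes the Done and RewriteToken fields the comments imply.

package storageexample

import (
	"context"

	storage "google.golang.org/api/storage/v1"
)

// copyObject drives storage.objects.rewrite to completion: every response
// either reports Done or carries a RewriteToken that must be echoed back on
// the next request, per the rewriteToken description above.
func copyObject(ctx context.Context, svc *storage.Service) (*storage.RewriteResponse, error) {
	call := svc.Objects.
		Rewrite("src-bucket", "data.bin", "dst-bucket", "data.bin", &storage.Object{}).
		Context(ctx)
	for {
		resp, err := call.Do()
		if err != nil {
			return nil, err
		}
		if resp.Done {
			return resp, nil
		}
		call.RewriteToken(resp.RewriteToken) // builder mutates in place
	}
}

Copies that cross locations or storage classes are the case where several iterations are actually needed; same-location copies usually complete in a single call.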
+func (c *ObjectsSetIamPolicyCall) Context(ctx context.Context) *ObjectsSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an IAM policy for the specified object.", + // "httpMethod": "PUT", + // "id": "storage.objects.setIamPolicy", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. 
For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.testIamPermissions": + +type ObjectsTestIamPermissionsCall struct { + s *Service + bucket string + object string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given object to +// see which, if any, are held by the caller. +func (r *ObjectsService) TestIamPermissions(bucket string, object string, permissions []string) *ObjectsTestIamPermissionsCall { + c := &ObjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). +func (c *ObjectsTestIamPermissionsCall) Generation(generation int64) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsTestIamPermissionsCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsTestIamPermissionsCall) UserProject(userProject string) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ObjectsTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match.
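Since the IfNoneMatch comment above points at googleapi.IsNotModified, here is a hedged sketch of that pattern, under the same package assumptions as the earlier sketch plus the google.golang.org/api/googleapi import: pass an ETag captured from a previous response and treat a 304 as "no change" rather than as a failure.

// pollObjectPermissions re-issues the read only if the resource changed since
// the supplied ETag (etag == "" behaves like an unconditional call).
func pollObjectPermissions(ctx context.Context, svc *storage.Service, etag string) (*storage.TestIamPermissionsResponse, bool, error) {
	resp, err := svc.Objects.
		TestIamPermissions("my-bucket", "report.csv", []string{"storage.objects.get"}).
		IfNoneMatch(etag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // ETag still matches; nothing new to read
	}
	if err != nil {
		return nil, false, err
	}
	return resp, true, nil
}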
+func (c *ObjectsTestIamPermissionsCall) IfNoneMatch(entityTag string) *ObjectsTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsTestIamPermissionsCall) Context(ctx context.Context) *ObjectsTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/iam/testPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Tests a set of permissions on the given object to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.objects.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "object", + // "permissions" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/iam/testPermissions", + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.objects.update": + +type ObjectsUpdateCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Update: Updates an object's metadata. +func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall { + c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// Generation sets the optional parameter "generation": If present, +// selects a specific revision of this object (as opposed to the latest +// version, the default). 
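For the testIamPermissions call whose Do and metadata close just above: the service echoes back the subset of the requested permissions that the caller actually holds. A sketch under the same assumptions, with the response's Permissions field name inferred from the usual generated shape rather than shown in this diff:

// callerCanDelete asks about a single permission and scans the echoed subset.
func callerCanDelete(ctx context.Context, svc *storage.Service) (bool, error) {
	const perm = "storage.objects.delete" // illustrative IAM permission name
	resp, err := svc.Objects.
		TestIamPermissions("my-bucket", "report.csv", []string{perm}).
		Context(ctx).
		Do()
	if err != nil {
		return false, err
	}
	for _, p := range resp.Permissions {
		if p == perm {
			return true, nil
		}
	}
	return false, nil
}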
+func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall { + c.urlParams_.Set("generation", fmt.Sprint(generation)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's current +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// the object's current generation does not match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's current metageneration matches the given value. +func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether the object's current metageneration does not match the given +// value. +func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a +// predefined set of access controls to this object. +// +// Possible values: +// "authenticatedRead" - Object owner gets OWNER access, and +// allAuthenticatedUsers get READER access. +// "bucketOwnerFullControl" - Object owner gets OWNER access, and +// project team owners get OWNER access. +// "bucketOwnerRead" - Object owner gets OWNER access, and project +// team owners get READER access. +// "private" - Object owner gets OWNER access. +// "projectPrivate" - Object owner gets OWNER access, and project team +// members get access according to their roles. +// "publicRead" - Object owner gets OWNER access, and allUsers get +// READER access. +func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall { + c.urlParams_.Set("predefinedAcl", predefinedAcl) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. 
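The If*Match setters above turn storage.objects.update into a compare-and-swap on metadata. A small hedged example; ContentLanguage is one of the standard Object metadata fields, and a stale metageneration makes the call fail with a precondition error instead of clobbering a concurrent change:

// setContentLanguage succeeds only if nobody has changed the object's
// metadata since we last read its metageneration.
func setContentLanguage(ctx context.Context, svc *storage.Service, metageneration int64) (*storage.Object, error) {
	return svc.Objects.
		Update("my-bucket", "report.csv", &storage.Object{ContentLanguage: "en"}).
		IfMetagenerationMatch(metageneration).
		Context(ctx).
		Do()
}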
+func (c *ObjectsUpdateCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsUpdateCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsUpdateCall) UserProject(userProject string) *ObjectsUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.update" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an object's metadata.", + // "httpMethod": "PUT", + // "id": "storage.objects.update", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "generation": { + // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current generation does not match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "predefinedAcl": { + // "description": "Apply a predefined set of access controls to this object.", + // "enum": [ + // "authenticatedRead", + // "bucketOwnerFullControl", + // "bucketOwnerRead", + // "private", + // "projectPrivate", + // "publicRead" + // ], + // "enumDescriptions": [ + // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.", + // "Object owner gets OWNER access, and project team owners get OWNER access.", + // "Object owner gets OWNER access, and project team owners get READER access.", + // "Object owner gets OWNER access.", + // "Object owner gets OWNER access, and project team members get access according to their roles.", + // "Object owner gets OWNER access, and allUsers get READER access." + // ], + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. 
Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.objects.watchAll": + +type ObjectsWatchAllCall struct { + s *Service + bucket string + channel *Channel + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// WatchAll: Watch for changes on all objects in a bucket. +func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall { + c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.channel = channel + return c +} + +// Delimiter sets the optional parameter "delimiter": Returns results in +// a directory-like mode. items will contain only objects whose names, +// aside from the prefix, do not contain delimiter. Objects whose names, +// aside from the prefix, contain delimiter will have their name, +// truncated after the delimiter, returned in prefixes. Duplicate +// prefixes are omitted. +func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall { + c.urlParams_.Set("delimiter", delimiter) + return c +} + +// EndOffset sets the optional parameter "endOffset": Filter results to +// objects whose names are lexicographically before endOffset. If +// startOffset is also set, the objects listed will have names between +// startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) EndOffset(endOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("endOffset", endOffset) + return c +} + +// IncludeTrailingDelimiter sets the optional parameter +// "includeTrailingDelimiter": If true, objects that end in exactly one +// instance of delimiter will have their metadata included in items in +// addition to prefixes. +func (c *ObjectsWatchAllCall) IncludeTrailingDelimiter(includeTrailingDelimiter bool) *ObjectsWatchAllCall { + c.urlParams_.Set("includeTrailingDelimiter", fmt.Sprint(includeTrailingDelimiter)) + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items plus prefixes to return in a single page of responses. As +// duplicate prefixes are omitted, fewer total results may be returned +// than requested. The service will use this parameter or 1,000 items, +// whichever is smaller. +func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. 
+func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": Filter results to +// objects whose names begin with this prefix. +func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to noAcl. +// +// Possible values: +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { + c.urlParams_.Set("projection", projection) + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ObjectsWatchAllCall) ProvisionalUserProject(provisionalUserProject string) *ObjectsWatchAllCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// StartOffset sets the optional parameter "startOffset": Filter results +// to objects whose names are lexicographically equal to or after +// startOffset. If endOffset is also set, the objects listed will have +// names between startOffset (inclusive) and endOffset (exclusive). +func (c *ObjectsWatchAllCall) StartOffset(startOffset string) *ObjectsWatchAllCall { + c.urlParams_.Set("startOffset", startOffset) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsWatchAllCall) UserProject(userProject string) *ObjectsWatchAllCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Versions sets the optional parameter "versions": If true, lists all +// versions of an object as distinct results. The default is false. For +// more information, see Object Versioning. +func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall { + c.urlParams_.Set("versions", fmt.Sprint(versions)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
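Putting the watchAll builder together: a sketch that registers a webhook channel for changes under a single prefix. The Channel literal's Id, Type, and Address field names are assumptions from the request schema ("$ref": "Channel" in the metadata below), with "web_hook" as the delivery type Cloud Storage notification channels use:

// watchPrefix asks the service to POST change notifications for objects
// under logs/ to the given HTTPS endpoint.
func watchPrefix(ctx context.Context, svc *storage.Service) (*storage.Channel, error) {
	ch := &storage.Channel{
		Id:      "logs-watcher-1",               // caller-chosen unique channel ID
		Type:    "web_hook",                     // delivery mechanism
		Address: "https://example.com/gcs-hook", // endpoint receiving notifications
	}
	return svc.Objects.
		WatchAll("my-bucket", ch).
		Prefix("logs/").
		Context(ctx).
		Do()
}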
+func (c *ObjectsWatchAllCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.watchAll" call. +// Exactly one of *Channel or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Channel.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Channel{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Watch for changes on all objects in a bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.watchAll", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for objects.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "delimiter": { + // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.", + // "location": "query", + // "type": "string" + // }, + // "endOffset": { + // "description": "Filter results to objects whose names are lexicographically before endOffset. 
If startOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "includeTrailingDelimiter": { + // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", + // "location": "query", + // "type": "boolean" + // }, + // "maxResults": { + // "default": "1000", + // "description": "Maximum number of items plus prefixes to return in a single page of responses. As duplicate prefixes are omitted, fewer total results may be returned than requested. The service will use this parameter or 1,000 items, whichever is smaller.", + // "format": "uint32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "Filter results to objects whose names begin with this prefix.", + // "location": "query", + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to noAcl.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "startOffset": { + // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // }, + // "versions": { + // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.", + // "location": "query", + // "type": "boolean" + // } + // }, + // "path": "b/{bucket}/o/watch", + // "request": { + // "$ref": "Channel", + // "parameterName": "resource" + // }, + // "response": { + // "$ref": "Channel" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ], + // "supportsSubscription": true + // } + +} + +// method id "storage.projects.hmacKeys.create": + +type ProjectsHmacKeysCreateCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Create: Creates a new HMAC key for the specified service account. 
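A sketch of the create call introduced above. One operational point worth encoding: the key's secret comes back only in this create response, so it must be captured immediately. The Secret and Metadata field names on HmacKey, and AccessId on HmacKeyMetadata, are assumptions from the response schema:

// mintHmacKey creates an HMAC key for a service account and returns the
// credentials; the secret cannot be retrieved again later.
func mintHmacKey(ctx context.Context, svc *storage.Service) (accessID, secret string, err error) {
	key, err := svc.Projects.HmacKeys.
		Create("my-project", "uploader@my-project.iam.gserviceaccount.com").
		Context(ctx).
		Do()
	if err != nil {
		return "", "", err
	}
	return key.Metadata.AccessId, key.Secret, nil
}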
+func (r *ProjectsHmacKeysService) Create(projectId string, serviceAccountEmail string) *ProjectsHmacKeysCreateCall { + c := &ProjectsHmacKeysCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysCreateCall) UserProject(userProject string) *ProjectsHmacKeysCreateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysCreateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysCreateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysCreateCall) Context(ctx context.Context) *ProjectsHmacKeysCreateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHmacKeysCreateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.create" call. +// Exactly one of *HmacKey or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *HmacKey.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ProjectsHmacKeysCreateCall) Do(opts ...googleapi.CallOption) (*HmacKey, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HmacKey{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new HMAC key for the specified service account.", + // "httpMethod": "POST", + // "id": "storage.projects.hmacKeys.create", + // "parameterOrder": [ + // "projectId", + // "serviceAccountEmail" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID owning the service account.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "serviceAccountEmail": { + // "description": "Email address of the service account.", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys", + // "response": { + // "$ref": "HmacKey" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.projects.hmacKeys.delete": + +type ProjectsHmacKeysDeleteCall struct { + s *Service + projectId string + accessId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Deletes an HMAC key. +func (r *ProjectsHmacKeysService) Delete(projectId string, accessId string) *ProjectsHmacKeysDeleteCall { + c := &ProjectsHmacKeysDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysDeleteCall) UserProject(userProject string) *ProjectsHmacKeysDeleteCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysDeleteCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysDeleteCall) Context(ctx context.Context) *ProjectsHmacKeysDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
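hmacKeys.delete returns no body (its Do below yields only an error), and the service will not delete a key that is still active, so the usual retire flow pairs it with the hmacKeys.update call defined later in this file. A hedged sketch; the State field and the "INACTIVE" value are assumptions taken from the HMAC key resource that the update comment references:

// retireHmacKey deactivates a key and then deletes it.
func retireHmacKey(ctx context.Context, svc *storage.Service, accessID string) error {
	_, err := svc.Projects.HmacKeys.
		Update("my-project", accessID, &storage.HmacKeyMetadata{State: "INACTIVE"}).
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	// Delete's Do returns only an error: there is no response body to decode.
	return svc.Projects.HmacKeys.
		Delete("my-project", accessID).
		Context(ctx).
		Do()
}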
+func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.delete" call. +func (c *ProjectsHmacKeysDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return err + } + return nil + // { + // "description": "Deletes an HMAC key.", + // "httpMethod": "DELETE", + // "id": "storage.projects.hmacKeys.delete", + // "parameterOrder": [ + // "projectId", + // "accessId" + // ], + // "parameters": { + // "accessId": { + // "description": "Name of the HMAC key to be deleted.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID owning the requested key", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys/{accessId}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.projects.hmacKeys.get": + +type ProjectsHmacKeysGetCall struct { + s *Service + projectId string + accessId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Retrieves an HMAC key's metadata +func (r *ProjectsHmacKeysService) Get(projectId string, accessId string) *ProjectsHmacKeysGetCall { + c := &ProjectsHmacKeysGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysGetCall) UserProject(userProject string) *ProjectsHmacKeysGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysGetCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsHmacKeysGetCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysGetCall) Context(ctx context.Context) *ProjectsHmacKeysGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHmacKeysGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.get" call. +// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHmacKeysGetCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HmacKeyMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an HMAC key's metadata", + // "httpMethod": "GET", + // "id": "storage.projects.hmacKeys.get", + // "parameterOrder": [ + // "projectId", + // "accessId" + // ], + // "parameters": { + // "accessId": { + // "description": "Name of the HMAC key.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID owning the service account of the requested key.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys/{accessId}", + // "response": { + // "$ref": "HmacKeyMetadata" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only" + // ] + // } + +} + +// method id "storage.projects.hmacKeys.list": + +type ProjectsHmacKeysListCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Retrieves a list of HMAC keys matching the criteria. +func (r *ProjectsHmacKeysService) List(projectId string) *ProjectsHmacKeysListCall { + c := &ProjectsHmacKeysListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// MaxResults sets the optional parameter "maxResults": Maximum number +// of items to return in a single page of responses. The service uses +// this parameter or 250 items, whichever is smaller. The max number of +// items per page will also be limited by the number of distinct service +// accounts in the response. If the number of service accounts in a +// single response is too high, the page will be truncated and a next page +// token will be returned. +func (c *ProjectsHmacKeysListCall) MaxResults(maxResults int64) *ProjectsHmacKeysListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ProjectsHmacKeysListCall) PageToken(pageToken string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// ServiceAccountEmail sets the optional parameter +// "serviceAccountEmail": If present, only keys for the given service +// account are returned.
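The maxResults/pageToken machinery above combines into a simple loop; the Pages helper defined a little further down wraps the same thing with automatic token reset. A sketch, with the Items field name on HmacKeysMetadata assumed (NextPageToken appears verbatim in the Pages implementation below):

// collectKeys walks every page of HMAC keys for one service account by hand.
func collectKeys(ctx context.Context, svc *storage.Service, email string) ([]*storage.HmacKeyMetadata, error) {
	var all []*storage.HmacKeyMetadata
	call := svc.Projects.HmacKeys.
		List("my-project").
		ServiceAccountEmail(email).
		MaxResults(100).
		Context(ctx)
	for {
		page, err := call.Do()
		if err != nil {
			return nil, err
		}
		all = append(all, page.Items...)
		if page.NextPageToken == "" {
			return all, nil
		}
		call.PageToken(page.NextPageToken) // feed the token into the next request
	}
}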
+func (c *ProjectsHmacKeysListCall) ServiceAccountEmail(serviceAccountEmail string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("serviceAccountEmail", serviceAccountEmail) + return c +} + +// ShowDeletedKeys sets the optional parameter "showDeletedKeys": +// Whether or not to show keys in the DELETED state. +func (c *ProjectsHmacKeysListCall) ShowDeletedKeys(showDeletedKeys bool) *ProjectsHmacKeysListCall { + c.urlParams_.Set("showDeletedKeys", fmt.Sprint(showDeletedKeys)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysListCall) UserProject(userProject string) *ProjectsHmacKeysListCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysListCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ProjectsHmacKeysListCall) IfNoneMatch(entityTag string) *ProjectsHmacKeysListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysListCall) Context(ctx context.Context) *ProjectsHmacKeysListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHmacKeysListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.list" call. +// Exactly one of *HmacKeysMetadata or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *HmacKeysMetadata.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHmacKeysListCall) Do(opts ...googleapi.CallOption) (*HmacKeysMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, &googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	ret := &HmacKeysMetadata{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves a list of HMAC keys matching the criteria.",
+	//   "httpMethod": "GET",
+	//   "id": "storage.projects.hmacKeys.list",
+	//   "parameterOrder": [
+	//     "projectId"
+	//   ],
+	//   "parameters": {
+	//     "maxResults": {
+	//       "default": "250",
+	//       "description": "Maximum number of items to return in a single page of responses. The service uses this parameter or 250 items, whichever is smaller. The max number of items per page will also be limited by the number of distinct service accounts in the response. If the number of service accounts in a single response is too high, the page will be truncated and a next page token will be returned.",
+	//       "format": "uint32",
+	//       "location": "query",
+	//       "minimum": "0",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "A previously-returned page token representing part of the larger set of results to view.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "projectId": {
+	//       "description": "Name of the project in which to look for HMAC keys.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "serviceAccountEmail": {
+	//       "description": "If present, only keys for the given service account are returned.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "showDeletedKeys": {
+	//       "description": "Whether or not to show keys in the DELETED state.",
+	//       "location": "query",
+	//       "type": "boolean"
+	//     },
+	//     "userProject": {
+	//       "description": "The project to be billed for this request.",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "projects/{projectId}/hmacKeys",
+	//   "response": {
+	//     "$ref": "HmacKeysMetadata"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_only"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *ProjectsHmacKeysListCall) Pages(ctx context.Context, f func(*HmacKeysMetadata) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
+// method id "storage.projects.hmacKeys.update":
+
+type ProjectsHmacKeysUpdateCall struct {
+	s               *Service
+	projectId       string
+	accessId        string
+	hmackeymetadata *HmacKeyMetadata
+	urlParams_      gensupport.URLParams
+	ctx_            context.Context
+	header_         http.Header
+}
+
+// Update: Updates the state of an HMAC key.
See the HMAC Key resource +// descriptor for valid states. +func (r *ProjectsHmacKeysService) Update(projectId string, accessId string, hmackeymetadata *HmacKeyMetadata) *ProjectsHmacKeysUpdateCall { + c := &ProjectsHmacKeysUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + c.accessId = accessId + c.hmackeymetadata = hmackeymetadata + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsHmacKeysUpdateCall) UserProject(userProject string) *ProjectsHmacKeysUpdateCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsHmacKeysUpdateCall) Fields(s ...googleapi.Field) *ProjectsHmacKeysUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ProjectsHmacKeysUpdateCall) Context(ctx context.Context) *ProjectsHmacKeysUpdateCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023") + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.hmackeymetadata) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/hmacKeys/{accessId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "projectId": c.projectId, + "accessId": c.accessId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.projects.hmacKeys.update" call. +// Exactly one of *HmacKeyMetadata or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *HmacKeyMetadata.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ProjectsHmacKeysUpdateCall) Do(opts ...googleapi.CallOption) (*HmacKeyMetadata, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &HmacKeyMetadata{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the state of an HMAC key. See the HMAC Key resource descriptor for valid states.", + // "httpMethod": "PUT", + // "id": "storage.projects.hmacKeys.update", + // "parameterOrder": [ + // "projectId", + // "accessId" + // ], + // "parameters": { + // "accessId": { + // "description": "Name of the HMAC key being updated.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projectId": { + // "description": "Project ID owning the service account of the updated key.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/hmacKeys/{accessId}", + // "request": { + // "$ref": "HmacKeyMetadata" + // }, + // "response": { + // "$ref": "HmacKeyMetadata" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.projects.serviceAccount.get": + +type ProjectsServiceAccountGetCall struct { + s *Service + projectId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Get the email address of this project's Google Cloud Storage +// service account. +func (r *ProjectsServiceAccountService) Get(projectId string) *ProjectsServiceAccountGetCall { + c := &ProjectsServiceAccountGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.projectId = projectId + return c +} + +// ProvisionalUserProject sets the optional parameter +// "provisionalUserProject": The project to be billed for this request +// if the target bucket is requester-pays bucket. +func (c *ProjectsServiceAccountGetCall) ProvisionalUserProject(provisionalUserProject string) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("provisionalUserProject", provisionalUserProject) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. +func (c *ProjectsServiceAccountGetCall) UserProject(userProject string) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ProjectsServiceAccountGetCall) Fields(s ...googleapi.Field) *ProjectsServiceAccountGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ProjectsServiceAccountGetCall) IfNoneMatch(entityTag string) *ProjectsServiceAccountGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ProjectsServiceAccountGetCall) Context(ctx context.Context) *ProjectsServiceAccountGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *ProjectsServiceAccountGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201023")
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "projects/{projectId}/serviceAccount")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId": c.projectId,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.projects.serviceAccount.get" call.
+// Exactly one of *ServiceAccount or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *ServiceAccount.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ProjectsServiceAccountGetCall) Do(opts ...googleapi.CallOption) (*ServiceAccount, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &ServiceAccount{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Get the email address of this project's Google Cloud Storage service account.", + // "httpMethod": "GET", + // "id": "storage.projects.serviceAccount.get", + // "parameterOrder": [ + // "projectId" + // ], + // "parameters": { + // "projectId": { + // "description": "Project ID", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "provisionalUserProject": { + // "description": "The project to be billed for this request if the target bucket is requester-pays bucket.", + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "projects/{projectId}/serviceAccount", + // "response": { + // "$ref": "ServiceAccount" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go new file mode 100644 index 00000000000..c03af65fd73 --- /dev/null +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -0,0 +1,110 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cert contains certificate tools for Google API clients. +// This package is intended to be used with crypto/tls.Config.GetClientCertificate. +// +// The certificates can be used to satisfy Google's Endpoint Validation. +// See https://cloud.google.com/endpoint-verification/docs/overview +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package cert + +import ( + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "sync" +) + +const ( + metadataPath = ".secureConnect" + metadataFile = "context_aware_metadata.json" +) + +var ( + defaultSourceOnce sync.Once + defaultSource Source + defaultSourceErr error +) + +// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. +type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) + +// DefaultSource returns a certificate source that execs the command specified +// in the file at ~/.secureConnect/context_aware_metadata.json +// +// If that file does not exist, a nil source is returned. 
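DefaultSource, defined just below, execs whatever command that metadata file names. Going by the secureConnectMetadata struct that follows, a plausible ~/.secureConnect/context_aware_metadata.json would look like this; the command path is purely illustrative, not a documented install location:

```
{
  "cert_provider_command": ["/opt/google/endpoint-verification/bin/cert_provider"]
}
```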
+func DefaultSource() (Source, error) { + defaultSourceOnce.Do(func() { + defaultSource, defaultSourceErr = newSecureConnectSource() + }) + return defaultSource, defaultSourceErr +} + +type secureConnectSource struct { + metadata secureConnectMetadata +} + +type secureConnectMetadata struct { + Cmd []string `json:"cert_provider_command"` +} + +// newSecureConnectSource creates a secureConnectSource by reading the well-known file. +func newSecureConnectSource() (Source, error) { + user, err := user.Current() + if err != nil { + // Ignore. + return nil, nil + } + filename := filepath.Join(user.HomeDir, metadataPath, metadataFile) + file, err := ioutil.ReadFile(filename) + if os.IsNotExist(err) { + // Ignore. + return nil, nil + } + if err != nil { + return nil, err + } + + var metadata secureConnectMetadata + if err := json.Unmarshal(file, &metadata); err != nil { + return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err) + } + if err := validateMetadata(metadata); err != nil { + return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err) + } + return (&secureConnectSource{ + metadata: metadata, + }).getClientCertificate, nil +} + +func validateMetadata(metadata secureConnectMetadata) error { + if len(metadata.Cmd) == 0 { + return errors.New("empty cert_provider_command") + } + return nil +} + +func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { + // TODO(cbro): consider caching valid certificates rather than exec'ing every time. + command := s.metadata.Cmd + data, err := exec.Command(command[0], command[1:]...).Output() + if err != nil { + // TODO(cbro): read stderr for error message? Might contain sensitive info. + return nil, err + } + cert, err := tls.X509KeyPair(data, data) + if err != nil { + return nil, err + } + return &cert, nil +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_go113.go new file mode 100644 index 00000000000..924f2704d1a --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_go113.go @@ -0,0 +1,20 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// It returns nil if the RoundTripper can't be cloned or coerced to +// *http.Transport. +func clonedTransport(rt http.RoundTripper) *http.Transport { + t, ok := rt.(*http.Transport) + if !ok { + return nil + } + return t.Clone() +} diff --git a/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go new file mode 100644 index 00000000000..3cb16c6cb6c --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/default_transport_not_go113.go @@ -0,0 +1,15 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package http + +import "net/http" + +// clonedTransport returns the given RoundTripper as a cloned *http.Transport. +// For versions of Go <1.13, this is not supported, so return nil. 
+func clonedTransport(rt http.RoundTripper) *http.Transport { + return nil +} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 00000000000..8578cac9ef2 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,208 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "context" + "crypto/tls" + "errors" + "net" + "net/http" + "time" + + "go.opencensus.io/plugin/ochttp" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/cert" + "google.golang.org/api/transport/http/internal/propagation" + "google.golang.org/api/transport/internal/dca" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? + if settings.HTTPClient != nil { + return settings.HTTPClient, endpoint, nil + } + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base. +func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + paramTransport := ¶meterTransport{ + base: base, + userAgent: settings.UserAgent, + quotaProject: settings.QuotaProject, + requestReason: settings.RequestReason, + } + var trans http.RoundTripper = paramTransport + trans = addOCTransport(trans, settings) + switch { + case settings.NoAuth: + // Do nothing. 
+	case settings.APIKey != "":
+		trans = &transport.APIKey{
+			Transport: trans,
+			Key:       settings.APIKey,
+		}
+	default:
+		creds, err := internal.Creds(ctx, settings)
+		if err != nil {
+			return nil, err
+		}
+		if paramTransport.quotaProject == "" {
+			paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds)
+		}
+
+		ts := creds.TokenSource
+		if settings.TokenSource != nil {
+			ts = settings.TokenSource
+		}
+		trans = &oauth2.Transport{
+			Base:   trans,
+			Source: ts,
+		}
+	}
+	return trans, nil
+}
+
+func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) {
+	var o internal.DialSettings
+	for _, opt := range opts {
+		opt.Apply(&o)
+	}
+	if err := o.Validate(); err != nil {
+		return nil, err
+	}
+	if o.GRPCConn != nil {
+		return nil, errors.New("unsupported gRPC connection specified")
+	}
+	return &o, nil
+}
+
+type parameterTransport struct {
+	userAgent     string
+	quotaProject  string
+	requestReason string
+
+	base http.RoundTripper
+}
+
+func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	rt := t.base
+	if rt == nil {
+		return nil, errors.New("transport: no Transport specified")
+	}
+	newReq := *req
+	newReq.Header = make(http.Header)
+	for k, vv := range req.Header {
+		newReq.Header[k] = vv
+	}
+	if t.userAgent != "" {
+		// TODO(cbro): append to existing User-Agent header?
+		newReq.Header.Set("User-Agent", t.userAgent)
+	}
+
+	// Attach system parameters into the header
+	if t.quotaProject != "" {
+		newReq.Header.Set("X-Goog-User-Project", t.quotaProject)
+	}
+	if t.requestReason != "" {
+		newReq.Header.Set("X-Goog-Request-Reason", t.requestReason)
+	}
+
+	return rt.RoundTrip(&newReq)
+}
+
+// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
+var appengineUrlfetchHook func(context.Context) http.RoundTripper
+
+// defaultBaseTransport returns the base HTTP transport.
+// On App Engine, this is urlfetch.Transport.
+// Otherwise, use a default transport, taking most defaults from
+// http.DefaultTransport.
+// If TLSCertificate is available, set TLSClientConfig as well.
+func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper {
+	if appengineUrlfetchHook != nil {
+		return appengineUrlfetchHook(ctx)
+	}
+
+	// Copy http.DefaultTransport except for MaxIdleConnsPerHost setting,
+	// which is increased due to reported performance issues under load in the GCS
+	// client. Transport.Clone is only available in Go 1.13 and up.
+	trans := clonedTransport(http.DefaultTransport)
+	if trans == nil {
+		trans = fallbackBaseTransport()
+	}
+	trans.MaxIdleConnsPerHost = 100
+
+	if clientCertSource != nil {
+		trans.TLSClientConfig = &tls.Config{
+			GetClientCertificate: clientCertSource,
+		}
+	}
+
+	return trans
+}
+
+// fallbackBaseTransport is used in <go1.13 as well as in the rare case if
+// http.DefaultTransport has been reassigned something that's not a
+// *http.Transport.
+func fallbackBaseTransport() *http.Transport {
+	return &http.Transport{
+		DialContext: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+			DualStack: true,
+		}).DialContext,
+		MaxIdleConns:          100,
+		MaxIdleConnsPerHost:   100,
+		IdleConnTimeout:       90 * time.Second,
+		TLSHandshakeTimeout:   10 * time.Second,
+		ExpectContinueTimeout: 1 * time.Second,
+	}
+}
+
+func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
+	if settings.TelemetryDisabled {
+		return trans
+	}
+	return &ochttp.Transport{
+		Base:        trans,
+		Propagation: &propagation.HTTPFormat{},
+	}
+}
diff --git a/vendor/google.golang.org/api/transport/http/internal/propagation/http.go b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
new file mode 100644
--- /dev/null
+++ b/vendor/google.golang.org/api/transport/http/internal/propagation/http.go
@@ -0,0 +1,86 @@
+// Copyright 2018 Google LLC.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.8
+
+// Package propagation implements X-Cloud-Trace-Context header propagation used
+// by Google Cloud products.
+package propagation
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"go.opencensus.io/trace"
+	"go.opencensus.io/trace/propagation"
+)
+
+const (
+	httpHeaderMaxSize = 200
+	httpHeader        = `X-Cloud-Trace-Context`
+)
+
+var _ propagation.HTTPFormat = (*HTTPFormat)(nil)
+
+// HTTPFormat implements propagation.HTTPFormat to propagate
+// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
+type HTTPFormat struct{}
+
+// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
+func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
+	h := req.Header.Get(httpHeader)
+	// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
+	// Return if the header is empty or missing, or if the header is unreasonably
+	// large, to avoid making unnecessary copies of a large string.
+	if h == "" || len(h) > httpHeaderMaxSize {
+		return trace.SpanContext{}, false
+	}
+
+	// Parse the trace id field.
+	slash := strings.Index(h, `/`)
+	if slash == -1 {
+		return trace.SpanContext{}, false
+	}
+	tid, h := h[:slash], h[slash+1:]
+
+	buf, err := hex.DecodeString(tid)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	copy(sc.TraceID[:], buf)
+
+	// Parse the span id field.
+	spanstr := h
+	semicolon := strings.Index(h, `;`)
+	if semicolon != -1 {
+		spanstr, h = h[:semicolon], h[semicolon+1:]
+	}
+	sid, err := strconv.ParseUint(spanstr, 10, 64)
+	if err != nil {
+		return trace.SpanContext{}, false
+	}
+	binary.BigEndian.PutUint64(sc.SpanID[:], sid)
+
+	// Parse the options field, options field is optional.
+ if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/google.golang.org/api/transport/internal/dca/dca.go b/vendor/google.golang.org/api/transport/internal/dca/dca.go new file mode 100644 index 00000000000..b3be7e4e3a7 --- /dev/null +++ b/vendor/google.golang.org/api/transport/internal/dca/dca.go @@ -0,0 +1,145 @@ +// Copyright 2020 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package dca contains utils for implementing Device Certificate +// Authentication according to https://google.aip.dev/auth/4114 +// +// The overall logic for DCA is as follows: +// 1. If both endpoint override and client certificate are specified, use them as is. +// 2. If user does not specify client certificate, we will attempt to use default +// client certificate. +// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if +// client certificate is available and defaultEndpoint otherwise. +// +// Implications of the above logic: +// 1. If the user specifies a non-mTLS endpoint override but client certificate is +// available, we will pass along the cert anyway and let the server decide what to do. +// 2. If the user specifies an mTLS endpoint override but client certificate is not +// available, we will not fail-fast, but let backend throw error when connecting. +// +// We would like to avoid introducing client-side logic that parses whether the +// endpoint override is an mTLS url, since the url pattern may change at anytime. +// +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package dca + +import ( + "net/url" + "os" + "strings" + + "google.golang.org/api/internal" + "google.golang.org/api/transport/cert" +) + +const ( + mTLSModeAlways = "always" + mTLSModeNever = "never" + mTLSModeAuto = "auto" +) + +// GetClientCertificateSourceAndEndpoint is a convenience function that invokes +// getClientCertificateSource and getEndpoint sequentially and returns the client +// cert source and endpoint as a tuple. +func GetClientCertificateSourceAndEndpoint(settings *internal.DialSettings) (cert.Source, string, error) { + clientCertSource, err := getClientCertificateSource(settings) + if err != nil { + return nil, "", err + } + endpoint, err := getEndpoint(settings, clientCertSource) + if err != nil { + return nil, "", err + } + return clientCertSource, endpoint, nil +} + +// getClientCertificateSource returns a default client certificate source, if +// not provided by the user. +// +// A nil default source can be returned if the source does not exist. Any exceptions +// encountered while initializing the default source will be reported as client +// error (ex. corrupt metadata file). +// +// Important Note: For now, the environment variable GOOGLE_API_USE_CLIENT_CERTIFICATE +// must be set to "true" to allow certificate to be used (including user provided +// certificates). 
For details, see AIP-4114.
+func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) {
+	if !isClientCertificateEnabled() {
+		return nil, nil
+	} else if settings.HTTPClient != nil {
+		return nil, nil // HTTPClient is incompatible with ClientCertificateSource
+	} else if settings.ClientCertSource != nil {
+		return settings.ClientCertSource, nil
+	} else {
+		return cert.DefaultSource()
+	}
+}
+
+func isClientCertificateEnabled() bool {
+	useClientCert := os.Getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE")
+	// TODO(andyrzhao): Update default to return "true" after DCA feature is fully released.
+	return strings.ToLower(useClientCert) == "true"
+}
+
+// getEndpoint returns the endpoint for the service, taking into account the
+// user-provided endpoint override "settings.Endpoint".
+//
+// If no endpoint override is specified, we will either return the default endpoint or
+// the default mTLS endpoint if a client certificate is available.
+//
+// You can override the default endpoint choice (mtls vs. regular) by setting the
+// GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
+//
+// If the endpoint override is an address (host:port) rather than full base
+// URL (ex. https://...), then the user-provided address will be merged into
+// the default endpoint. For example, WithEndpoint("myhost:8000") and
+// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8000/bar/baz"
+func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) {
+	if settings.Endpoint == "" {
+		mtlsMode := getMTLSMode()
+		if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
+			return settings.DefaultMTLSEndpoint, nil
+		}
+		return settings.DefaultEndpoint, nil
+	}
+	if strings.Contains(settings.Endpoint, "://") {
+		// User passed in a full URL path, use it verbatim.
+		return settings.Endpoint, nil
+	}
+	if settings.DefaultEndpoint == "" {
+		// If DefaultEndpoint is not configured, use the user provided endpoint verbatim.
+		// This allows a naked "host[:port]" URL to be used with GRPC Direct Path.
+		return settings.Endpoint, nil
+	}
+
+	// Assume user-provided endpoint is host[:port], merge it with the default endpoint.
+	return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint)
+}
+
+func getMTLSMode() string {
+	mode := os.Getenv("GOOGLE_API_USE_MTLS_ENDPOINT")
+	if mode == "" {
+		mode = os.Getenv("GOOGLE_API_USE_MTLS") // Deprecated.
+	}
+	if mode == "" {
+		return mTLSModeAuto
+	}
+	return strings.ToLower(mode)
+}
+
+func mergeEndpoints(baseURL, newHost string) (string, error) {
+	u, err := url.Parse(fixScheme(baseURL))
+	if err != nil {
+		return "", err
+	}
+	return strings.Replace(baseURL, u.Host, newHost, 1), nil
+}
+
+func fixScheme(baseURL string) string {
+	if !strings.Contains(baseURL, "://") {
+		return "https://" + baseURL
+	}
+	return baseURL
+}
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
new file mode 100644
index 00000000000..289693613cc
--- /dev/null
+++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md
@@ -0,0 +1,88 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. Get the package:
+
+   `go get -d google.golang.org/appengine`
+1. Change into the checked out source:
+
+   `cd $GOPATH/src/google.golang.org/appengine`
+1. Fork the repo.
+1. Set your fork as a remote:
+
+   `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git`
+1. Make changes, commit to your fork.
+1. Send a pull request with your changes.
+   The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request.
+
+# Testing
+
+## Running system tests
+
+Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
+
+Run tests with `go test`:
+
+```
+go test -v google.golang.org/appengine/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 00000000000..5ccddd9990d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,100 @@
+# Go App Engine packages
+
+[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml)
+
+This repository supports the Go runtime on *App Engine standard*.
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [GitHub issue
+tracker](https://github.com/golang/appengine/issues).
+
+## Upgrading an App Engine app to the flexible environment
+
+This package does not work on *App Engine flexible*.
+
+There are many differences between the App Engine standard environment and
+the flexible environment.
+
+See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading).
+
+## Directory structure
+
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating from legacy (`import "appengine"`) packages
+
+If you're currently using the bare `appengine` packages
+(that is, not these ones, imported via `google.golang.org/appengine`),
+then you can use the `aefix` tool to help automate an upgrade to these packages.
+
+Run `go get google.golang.org/appengine/cmd/aefix` to install it.
+
+### 1. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
+
+### 2. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and there are some differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+  `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+  deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+  Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
+  feature you require is not present in the new
+  [blobstore package](https://google.golang.org/appengine/blobstore).
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+  Use the standard `net` package instead.
+
+## Key Encode/Decode compatibility to help with datastore library migrations
+
+Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore.
+The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type.
+
+### Enabling key conversion
+
+Enable key conversion by calling `EnableKeyConversion(ctx)` in the `/_ah/start` handler for basic and manual scaling or any handler in automatic scaling.
+
+#### 1. Basic or manual scaling
+
+This start handler will enable key conversion for all handlers in the service.
+
+```
+http.HandleFunc("/_ah/start", func(w http.ResponseWriter, r *http.Request) {
+    datastore.EnableKeyConversion(appengine.NewContext(r))
+})
+```
+
+#### 2. Automatic scaling
+
+`/_ah/start` is not supported for automatic scaling and `/_ah/warmup` is not guaranteed to run, so you must call `datastore.EnableKeyConversion(appengine.NewContext(r))`
+before you use code that needs key conversion.
+
+You may want to add this to each of your handlers, or introduce middleware where it's called.
+`EnableKeyConversion` is safe for concurrent use. Any call to it after the first is ignored.
\ No newline at end of file
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 00000000000..35ba9c89676
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,138 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine // import "google.golang.org/appengine"
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+
+	"google.golang.org/appengine/internal"
+)
+
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is the principal entry point for an app running in App Engine.
+//
+// On App Engine Flexible it installs a trivial health checker if one isn't
+// already registered, and starts listening on port 8080 (overridden by the
+// $PORT environment variable).
+//
+// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
+// for details on how to do your own health checking.
+//
+// On App Engine Standard it ensures the server has started and is prepared to
+// receive requests.
+//
+// Main never returns.
+// +// Main is designed so that the app's main package looks like this: +// +// package main +// +// import ( +// "google.golang.org/appengine" +// +// _ "myapp/package0" +// _ "myapp/package1" +// ) +// +// func main() { +// appengine.Main() +// } +// +// The "myapp/packageX" packages are expected to register HTTP handlers +// in their init functions. +func Main() { + internal.Main() +} + +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + +// IsDevAppServer reports whether the App Engine app is running in the +// development App Server. +func IsDevAppServer() bool { + return internal.IsDevAppServer() +} + +// IsStandard reports whether the App Engine app is running in the standard +// environment. This includes both the first generation runtimes (<= Go 1.9) +// and the second generation runtimes (>= Go 1.11). +func IsStandard() bool { + return internal.IsStandard() +} + +// IsFlex reports whether the App Engine app is running in the flexible environment. +func IsFlex() bool { + return internal.IsFlex() +} + +// IsAppEngine reports whether the App Engine app is running on App Engine, in either +// the standard or flexible environment. +func IsAppEngine() bool { + return internal.IsAppEngine() +} + +// IsSecondGen reports whether the App Engine app is running on the second generation +// runtimes (>= Go 1.11). +func IsSecondGen() bool { + return internal.IsSecondGen() +} + +// NewContext returns a context for an in-flight HTTP request. +// This function is cheap. +func NewContext(req *http.Request) context.Context { + return internal.ReqContext(req) +} + +// WithContext returns a copy of the parent context +// and associates it with an in-flight HTTP request. +// This function is cheap. +func WithContext(parent context.Context, req *http.Request) context.Context { + return internal.WithContext(parent, req) +} + +// BlobKey is a key for a blobstore blob. +// +// Conceptually, this type belongs in the blobstore package, but it lives in +// the appengine package to avoid a circular dependency: blobstore depends on +// datastore, and datastore needs to refer to the BlobKey type. +type BlobKey string + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. +func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +// APICallFunc defines a function type for handling an API call. +// See WithCallOverride. +type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error + +// WithAPICallFunc returns a copy of the parent context +// that will cause API calls to invoke f instead of their normal operation. +// +// This is intended for advanced users only. +func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { + return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) +} + +// APICall performs an API call. +// +// This is not intended for general use; it is exported for use in conjunction +// with WithAPICallFunc. 
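WithAPICallFunc above, together with APICall below, is the documented seam for intercepting App Engine API traffic. A hedged sketch of a test-style override, assuming nothing beyond the types shown in this file:

```
package main

import (
	"context"
	"log"

	"github.com/golang/protobuf/proto"
	"google.golang.org/appengine"
)

func main() {
	// Swallow every App Engine API call and log it; the no-op behavior is
	// illustrative, and a real test would fill `out` with a canned response.
	ctx := appengine.WithAPICallFunc(context.Background(),
		func(ctx context.Context, service, method string, in, out proto.Message) error {
			log.Printf("intercepted %s.%s", service, method)
			return nil
		})
	_ = ctx // hand this ctx to the code under test
}
```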
+func APICall(ctx context.Context, service, method string, in, out proto.Message) error { + return internal.Call(ctx, service, method, in, out) +} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go new file mode 100644 index 00000000000..6e1d041cd95 --- /dev/null +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -0,0 +1,20 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +//go:build !appengine +// +build !appengine + +package appengine + +import ( + "context" +) + +// BackgroundContext returns a context not associated with a request. +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). +func BackgroundContext() context.Context { + return context.Background() +} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go new file mode 100644 index 00000000000..16d0772e2a4 --- /dev/null +++ b/vendor/google.golang.org/appengine/errors.go @@ -0,0 +1,46 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// This file provides error functions for common API failure modes. + +package appengine + +import ( + "fmt" + + "google.golang.org/appengine/internal" +) + +// IsOverQuota reports whether err represents an API call failure +// due to insufficient available quota. +func IsOverQuota(err error) bool { + callErr, ok := err.(*internal.CallError) + return ok && callErr.Code == 4 +} + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. +type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go new file mode 100644 index 00000000000..1202fc1a531 --- /dev/null +++ b/vendor/google.golang.org/appengine/identity.go @@ -0,0 +1,141 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "context" + "time" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/app_identity" + modpb "google.golang.org/appengine/internal/modules" +) + +// AppID returns the application ID for the current application. +// The string will be a plain application ID (e.g. "appid"), with a +// domain prefix for custom domain deployments (e.g. "example.com:appid"). +func AppID(c context.Context) string { return internal.AppID(c) } + +// DefaultVersionHostname returns the standard hostname of the default version +// of the current application (e.g. "my-app.appspot.com"). This is suitable for +// use in constructing URLs. +func DefaultVersionHostname(c context.Context) string { + return internal.DefaultVersionHostname(c) +} + +// ModuleName returns the module name of the current instance. 
+func ModuleName(c context.Context) string { + return internal.ModuleName(c) +} + +// ModuleHostname returns a hostname of a module instance. +// If module is the empty string, it refers to the module of the current instance. +// If version is empty, it refers to the version of the current instance if valid, +// or the default version of the module of the current instance. +// If instance is empty, ModuleHostname returns the load-balancing hostname. +func ModuleHostname(c context.Context, module, version, instance string) (string, error) { + req := &modpb.GetHostnameRequest{} + if module != "" { + req.Module = &module + } + if version != "" { + req.Version = &version + } + if instance != "" { + req.Instance = &instance + } + res := &modpb.GetHostnameResponse{} + if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { + return "", err + } + return *res.Hostname, nil +} + +// VersionID returns the version ID for the current application. +// It will be of the form "X.Y", where X is specified in app.yaml, +// and Y is a number generated when each version of the app is uploaded. +// It does not include a module name. +func VersionID(c context.Context) string { return internal.VersionID(c) } + +// InstanceID returns a mostly-unique identifier for this instance. +func InstanceID() string { return internal.InstanceID() } + +// Datacenter returns an identifier for the datacenter that the instance is running in. +func Datacenter(c context.Context) string { return internal.Datacenter(c) } + +// ServerSoftware returns the App Engine release version. +// In production, it looks like "Google App Engine/X.Y.Z". +// In the development appserver, it looks like "Development/X.Y". +func ServerSoftware() string { return internal.ServerSoftware() } + +// RequestID returns a string that uniquely identifies the request. +func RequestID(c context.Context) string { return internal.RequestID(c) } + +// AccessToken generates an OAuth2 access token for the specified scopes on +// behalf of service account of this application. This token will expire after +// the returned time. +func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { + req := &pb.GetAccessTokenRequest{Scope: scopes} + res := &pb.GetAccessTokenResponse{} + + err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) + if err != nil { + return "", time.Time{}, err + } + return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil +} + +// Certificate represents a public certificate for the app. +type Certificate struct { + KeyName string + Data []byte // PEM-encoded X.509 certificate +} + +// PublicCertificates retrieves the public certificates for the app. +// They can be used to verify a signature returned by SignBytes. +func PublicCertificates(c context.Context) ([]Certificate, error) { + req := &pb.GetPublicCertificateForAppRequest{} + res := &pb.GetPublicCertificateForAppResponse{} + if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { + return nil, err + } + var cs []Certificate + for _, pc := range res.PublicCertificateList { + cs = append(cs, Certificate{ + KeyName: pc.GetKeyName(), + Data: []byte(pc.GetX509CertificatePem()), + }) + } + return cs, nil +} + +// ServiceAccount returns a string representing the service account name, in +// the form of an email address (typically app_id@appspot.gserviceaccount.com). 
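Rounding out these identity helpers (ServiceAccount follows below), a minimal sketch of minting a token with AccessToken from above; the OAuth scope and route are illustrative:

```
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func main() {
	http.HandleFunc("/token-expiry", func(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)
		// Mint a short-lived token for the app's own service account.
		_, expiry, err := appengine.AccessToken(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		fmt.Fprintf(w, "token expires at %v\n", expiry)
	})
	appengine.Main()
}
```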
+func ServiceAccount(c context.Context) (string, error) { + req := &pb.GetServiceAccountNameRequest{} + res := &pb.GetServiceAccountNameResponse{} + + err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) + if err != nil { + return "", err + } + return res.GetServiceAccountName(), err +} + +// SignBytes signs bytes using a private key unique to your application. +func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { + req := &pb.SignForAppRequest{BytesToSign: bytes} + res := &pb.SignForAppResponse{} + + if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { + return "", nil, err + } + return res.GetKeyName(), res.GetSignatureBytes(), nil +} + +func init() { + internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) + internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go new file mode 100644 index 00000000000..9a2ff77ab5d --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go @@ -0,0 +1,611 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto + +package app_identity + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AppIdentityServiceError_ErrorCode int32 + +const ( + AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 + AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 + AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 + AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 + AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 + AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 + AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 + AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 +) + +var AppIdentityServiceError_ErrorCode_name = map[int32]string{ + 0: "SUCCESS", + 9: "UNKNOWN_SCOPE", + 1000: "BLOB_TOO_LARGE", + 1001: "DEADLINE_EXCEEDED", + 1002: "NOT_A_VALID_APP", + 1003: "UNKNOWN_ERROR", + 1005: "NOT_ALLOWED", + 1006: "NOT_IMPLEMENTED", +} +var AppIdentityServiceError_ErrorCode_value = map[string]int32{ + "SUCCESS": 0, + "UNKNOWN_SCOPE": 9, + "BLOB_TOO_LARGE": 1000, + "DEADLINE_EXCEEDED": 1001, + "NOT_A_VALID_APP": 1002, + "UNKNOWN_ERROR": 1003, + "NOT_ALLOWED": 1005, + "NOT_IMPLEMENTED": 1006, +} + +func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { + p := new(AppIdentityServiceError_ErrorCode) + *p = x + return p +} +func (x AppIdentityServiceError_ErrorCode) String() string { + return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) +} +func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") + if err != nil { + return err + } + *x = AppIdentityServiceError_ErrorCode(value) + return nil +} +func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} +} + +type AppIdentityServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } +func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } +func (*AppIdentityServiceError) ProtoMessage() {} +func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} +} +func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) +} +func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) +} +func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) +} +func (m *AppIdentityServiceError) XXX_Size() int { + return xxx_messageInfo_AppIdentityServiceError.Size(m) +} +func (m *AppIdentityServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo + +type SignForAppRequest struct { + BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` 
+ XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } +func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } +func (*SignForAppRequest) ProtoMessage() {} +func (*SignForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} +} +func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) +} +func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) +} +func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppRequest.Merge(dst, src) +} +func (m *SignForAppRequest) XXX_Size() int { + return xxx_messageInfo_SignForAppRequest.Size(m) +} +func (m *SignForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo + +func (m *SignForAppRequest) GetBytesToSign() []byte { + if m != nil { + return m.BytesToSign + } + return nil +} + +type SignForAppResponse struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } +func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } +func (*SignForAppResponse) ProtoMessage() {} +func (*SignForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} +} +func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) +} +func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) +} +func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignForAppResponse.Merge(dst, src) +} +func (m *SignForAppResponse) XXX_Size() int { + return xxx_messageInfo_SignForAppResponse.Size(m) +} +func (m *SignForAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo + +func (m *SignForAppResponse) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *SignForAppResponse) GetSignatureBytes() []byte { + if m != nil { + return m.SignatureBytes + } + return nil +} + +type GetPublicCertificateForAppRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } +func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppRequest) ProtoMessage() {} +func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} +} +func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) +} +func (m 
*GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) +} +func (m *GetPublicCertificateForAppRequest) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) +} +func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo + +type PublicCertificate struct { + KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` + X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } +func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicCertificate) ProtoMessage() {} +func (*PublicCertificate) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} +} +func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) +} +func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) +} +func (dst *PublicCertificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PublicCertificate.Merge(dst, src) +} +func (m *PublicCertificate) XXX_Size() int { + return xxx_messageInfo_PublicCertificate.Size(m) +} +func (m *PublicCertificate) XXX_DiscardUnknown() { + xxx_messageInfo_PublicCertificate.DiscardUnknown(m) +} + +var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo + +func (m *PublicCertificate) GetKeyName() string { + if m != nil && m.KeyName != nil { + return *m.KeyName + } + return "" +} + +func (m *PublicCertificate) GetX509CertificatePem() string { + if m != nil && m.X509CertificatePem != nil { + return *m.X509CertificatePem + } + return "" +} + +type GetPublicCertificateForAppResponse struct { + PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` + MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } +func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } +func (*GetPublicCertificateForAppResponse) ProtoMessage() {} +func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} +} +func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) +} +func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) +} +func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) +} +func (m *GetPublicCertificateForAppResponse) XXX_Size() int { + return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) +} +func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo + +func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { + if m != nil { + return m.PublicCertificateList + } + return nil +} + +func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { + if m != nil && m.MaxClientCacheTimeInSecond != nil { + return *m.MaxClientCacheTimeInSecond + } + return 0 +} + +type GetServiceAccountNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } +func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameRequest) ProtoMessage() {} +func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} +} +func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) +} +func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) +} +func (m *GetServiceAccountNameRequest) XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) +} +func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo + +type GetServiceAccountNameResponse struct { + ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } +func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetServiceAccountNameResponse) ProtoMessage() {} +func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} +} +func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) +} +func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) +} +func (m *GetServiceAccountNameResponse) 
XXX_Size() int { + return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) +} +func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo + +func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenRequest struct { + Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` + ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` + ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } +func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenRequest) ProtoMessage() {} +func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} +} +func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) +} +func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) +} +func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) +} +func (m *GetAccessTokenRequest) XXX_Size() int { + return xxx_messageInfo_GetAccessTokenRequest.Size(m) +} +func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo + +func (m *GetAccessTokenRequest) GetScope() []string { + if m != nil { + return m.Scope + } + return nil +} + +func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { + if m != nil && m.ServiceAccountId != nil { + return *m.ServiceAccountId + } + return 0 +} + +func (m *GetAccessTokenRequest) GetServiceAccountName() string { + if m != nil && m.ServiceAccountName != nil { + return *m.ServiceAccountName + } + return "" +} + +type GetAccessTokenResponse struct { + AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` + ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } +func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } +func (*GetAccessTokenResponse) ProtoMessage() {} +func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} +} +func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) +} +func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) +} +func (dst 
*GetAccessTokenResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) +} +func (m *GetAccessTokenResponse) XXX_Size() int { + return xxx_messageInfo_GetAccessTokenResponse.Size(m) +} +func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo + +func (m *GetAccessTokenResponse) GetAccessToken() string { + if m != nil && m.AccessToken != nil { + return *m.AccessToken + } + return "" +} + +func (m *GetAccessTokenResponse) GetExpirationTime() int64 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type GetDefaultGcsBucketNameRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } +func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} +func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) +} +func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo + +type GetDefaultGcsBucketNameResponse struct { + DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } +func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} +func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) +} +func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo + +func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { + if m != nil && m.DefaultGcsBucketName != nil { + return *m.DefaultGcsBucketName + } + return "" +} + +func init() { + proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") + proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") + proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") + proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") + proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") + proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") + proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") + proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") + proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") + proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") + proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") + proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) +} + +var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ + // 676 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, + 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e, + 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, + 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, + 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, + 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, + 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 0x27, 0x5c, 0x28, 0x8c, + 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, + 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, + 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, + 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, + 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, + 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, + 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, + 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, + 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, + 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, + 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, 
+ 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, + 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, + 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, + 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, + 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, + 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, + 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, + 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, + 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, + 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, + 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, + 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, + 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, + 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, + 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, + 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, + 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, + 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, + 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, + 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, + 0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, + 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, + 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, + 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, + 0xf3, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto new file mode 100644 index 00000000000..19610ca5b75 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "app_identity"; + +package appengine; + +message AppIdentityServiceError { + enum ErrorCode { + SUCCESS = 0; + UNKNOWN_SCOPE = 9; + BLOB_TOO_LARGE = 1000; + DEADLINE_EXCEEDED = 1001; + NOT_A_VALID_APP = 1002; + UNKNOWN_ERROR = 1003; + NOT_ALLOWED = 1005; + NOT_IMPLEMENTED = 1006; + } +} + +message SignForAppRequest { + optional bytes bytes_to_sign = 1; +} + +message SignForAppResponse { + optional string key_name = 1; + optional bytes signature_bytes = 2; +} + +message GetPublicCertificateForAppRequest { +} + +message PublicCertificate { + optional string key_name = 1; + optional string x509_certificate_pem = 2; +} + +message GetPublicCertificateForAppResponse { + repeated PublicCertificate public_certificate_list = 1; + optional int64 
max_client_cache_time_in_second = 2; +} + +message GetServiceAccountNameRequest { +} + +message GetServiceAccountNameResponse { + optional string service_account_name = 1; +} + +message GetAccessTokenRequest { + repeated string scope = 1; + optional int64 service_account_id = 2; + optional string service_account_name = 3; +} + +message GetAccessTokenResponse { + optional string access_token = 1; + optional int64 expiration_time = 2; +} + +message GetDefaultGcsBucketNameRequest { +} + +message GetDefaultGcsBucketNameResponse { + optional string default_gcs_bucket_name = 1; +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go new file mode 100644 index 00000000000..ddfc0c04a12 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go @@ -0,0 +1,786 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/modules/modules_service.proto + +package modules + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ModulesServiceError_ErrorCode int32 + +const ( + ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 + ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 + ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 + ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 + ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 + ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 +) + +var ModulesServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_MODULE", + 2: "INVALID_VERSION", + 3: "INVALID_INSTANCES", + 4: "TRANSIENT_ERROR", + 5: "UNEXPECTED_STATE", +} +var ModulesServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_MODULE": 1, + "INVALID_VERSION": 2, + "INVALID_INSTANCES": 3, + "TRANSIENT_ERROR": 4, + "UNEXPECTED_STATE": 5, +} + +func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { + p := new(ModulesServiceError_ErrorCode) + *p = x + return p +} +func (x ModulesServiceError_ErrorCode) String() string { + return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) +} +func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") + if err != nil { + return err + } + *x = ModulesServiceError_ErrorCode(value) + return nil +} +func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} +} + +type ModulesServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} } +func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } +func (*ModulesServiceError) ProtoMessage() {} 
+func (*ModulesServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} +} +func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) +} +func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) +} +func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ModulesServiceError.Merge(dst, src) +} +func (m *ModulesServiceError) XXX_Size() int { + return xxx_messageInfo_ModulesServiceError.Size(m) +} +func (m *ModulesServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo + +type GetModulesRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } +func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } +func (*GetModulesRequest) ProtoMessage() {} +func (*GetModulesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} +} +func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) +} +func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) +} +func (dst *GetModulesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModulesRequest.Merge(dst, src) +} +func (m *GetModulesRequest) XXX_Size() int { + return xxx_messageInfo_GetModulesRequest.Size(m) +} +func (m *GetModulesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo + +type GetModulesResponse struct { + Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } +func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } +func (*GetModulesResponse) ProtoMessage() {} +func (*GetModulesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} +} +func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) +} +func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) +} +func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetModulesResponse.Merge(dst, src) +} +func (m *GetModulesResponse) XXX_Size() int { + return xxx_messageInfo_GetModulesResponse.Size(m) +} +func (m *GetModulesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo + +func (m *GetModulesResponse) GetModule() []string { + if m != nil { + return m.Module + } + return nil +} + +type GetVersionsRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } +func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionsRequest) ProtoMessage() {} +func (*GetVersionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} +} +func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) +} +func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) +} +func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsRequest.Merge(dst, src) +} +func (m *GetVersionsRequest) XXX_Size() int { + return xxx_messageInfo_GetVersionsRequest.Size(m) +} +func (m *GetVersionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo + +func (m *GetVersionsRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetVersionsResponse struct { + Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } +func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionsResponse) ProtoMessage() {} +func (*GetVersionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} +} +func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) +} +func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) +} +func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetVersionsResponse.Merge(dst, src) +} +func (m *GetVersionsResponse) XXX_Size() int { + return xxx_messageInfo_GetVersionsResponse.Size(m) +} +func (m *GetVersionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo + +func (m *GetVersionsResponse) GetVersion() []string { + if m != nil { + return m.Version + } + return nil +} + +type GetDefaultVersionRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } +func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionRequest) ProtoMessage() {} +func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} +} +func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) +} +func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) +} +func (m *GetDefaultVersionRequest) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionRequest.Size(m) +} +func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo + +func (m *GetDefaultVersionRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +type GetDefaultVersionResponse struct { + Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } +func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetDefaultVersionResponse) ProtoMessage() {} +func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} +} +func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) +} +func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) +} +func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) +} +func (m *GetDefaultVersionResponse) XXX_Size() int { + return xxx_messageInfo_GetDefaultVersionResponse.Size(m) +} +func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo + +func (m *GetDefaultVersionResponse) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } +func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesRequest) ProtoMessage() {} +func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} +} +func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) +} +func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) +} +func (m *GetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesRequest.Size(m) +} +func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesRequest proto.InternalMessageInfo + +func (m *GetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m 
*GetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type GetNumInstancesResponse struct { + Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } +func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*GetNumInstancesResponse) ProtoMessage() {} +func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} +} +func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) +} +func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) +} +func (m *GetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_GetNumInstancesResponse.Size(m) +} +func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo + +func (m *GetNumInstancesResponse) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } +func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesRequest) ProtoMessage() {} +func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} +} +func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) +} +func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) +} +func (m *SetNumInstancesRequest) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesRequest.Size(m) +} +func (m *SetNumInstancesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo + +func (m *SetNumInstancesRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *SetNumInstancesRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *SetNumInstancesRequest) GetInstances() int64 { + if m != nil && m.Instances != nil { + return *m.Instances + } + return 0 +} + +type SetNumInstancesResponse struct { + XXX_NoUnkeyedLiteral 
struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } +func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } +func (*SetNumInstancesResponse) ProtoMessage() {} +func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} +} +func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) +} +func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) +} +func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) +} +func (m *SetNumInstancesResponse) XXX_Size() int { + return xxx_messageInfo_SetNumInstancesResponse.Size(m) +} +func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo + +type StartModuleRequest struct { + Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } +func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StartModuleRequest) ProtoMessage() {} +func (*StartModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} +} +func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) +} +func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleRequest.Merge(dst, src) +} +func (m *StartModuleRequest) XXX_Size() int { + return xxx_messageInfo_StartModuleRequest.Size(m) +} +func (m *StartModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo + +func (m *StartModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StartModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StartModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } +func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StartModuleResponse) ProtoMessage() {} +func (*StartModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} +} +func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) +} +func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StartModuleResponse.Marshal(b, m, 
deterministic) +} +func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StartModuleResponse.Merge(dst, src) +} +func (m *StartModuleResponse) XXX_Size() int { + return xxx_messageInfo_StartModuleResponse.Size(m) +} +func (m *StartModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo + +type StopModuleRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } +func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } +func (*StopModuleRequest) ProtoMessage() {} +func (*StopModuleRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} +} +func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) +} +func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) +} +func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleRequest.Merge(dst, src) +} +func (m *StopModuleRequest) XXX_Size() int { + return xxx_messageInfo_StopModuleRequest.Size(m) +} +func (m *StopModuleRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo + +func (m *StopModuleRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *StopModuleRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +type StopModuleResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } +func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } +func (*StopModuleResponse) ProtoMessage() {} +func (*StopModuleResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} +} +func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) +} +func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) +} +func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StopModuleResponse.Merge(dst, src) +} +func (m *StopModuleResponse) XXX_Size() int { + return xxx_messageInfo_StopModuleResponse.Size(m) +} +func (m *StopModuleResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo + +type GetHostnameRequest struct { + Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` + Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 
`json:"-"` +} + +func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } +func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } +func (*GetHostnameRequest) ProtoMessage() {} +func (*GetHostnameRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} +} +func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) +} +func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) +} +func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameRequest.Merge(dst, src) +} +func (m *GetHostnameRequest) XXX_Size() int { + return xxx_messageInfo_GetHostnameRequest.Size(m) +} +func (m *GetHostnameRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo + +func (m *GetHostnameRequest) GetModule() string { + if m != nil && m.Module != nil { + return *m.Module + } + return "" +} + +func (m *GetHostnameRequest) GetVersion() string { + if m != nil && m.Version != nil { + return *m.Version + } + return "" +} + +func (m *GetHostnameRequest) GetInstance() string { + if m != nil && m.Instance != nil { + return *m.Instance + } + return "" +} + +type GetHostnameResponse struct { + Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } +func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } +func (*GetHostnameResponse) ProtoMessage() {} +func (*GetHostnameResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} +} +func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) +} +func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) +} +func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetHostnameResponse.Merge(dst, src) +} +func (m *GetHostnameResponse) XXX_Size() int { + return xxx_messageInfo_GetHostnameResponse.Size(m) +} +func (m *GetHostnameResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo + +func (m *GetHostnameResponse) GetHostname() string { + if m != nil && m.Hostname != nil { + return *m.Hostname + } + return "" +} + +func init() { + proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") + proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") + proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") + proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") + proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") + proto.RegisterType((*GetDefaultVersionRequest)(nil), "appengine.GetDefaultVersionRequest") + proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") + proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") + 
proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") + proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") + proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") + proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") + proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") + proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") + proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") + proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") + proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) +} + +var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ + // 457 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, + 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, + 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, + 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, + 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e, + 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, + 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, + 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, + 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, + 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, + 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, + 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, + 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, + 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, + 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, + 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, + 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41, + 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, + 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, + 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, + 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, + 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, + 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, + 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, + 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, + 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 
0x1c, 0xd8, 0x59, 0x75, 0xdd, + 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, + 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto new file mode 100644 index 00000000000..d29f0065a2f --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto @@ -0,0 +1,80 @@ +syntax = "proto2"; +option go_package = "modules"; + +package appengine; + +message ModulesServiceError { + enum ErrorCode { + OK = 0; + INVALID_MODULE = 1; + INVALID_VERSION = 2; + INVALID_INSTANCES = 3; + TRANSIENT_ERROR = 4; + UNEXPECTED_STATE = 5; + } +} + +message GetModulesRequest { +} + +message GetModulesResponse { + repeated string module = 1; +} + +message GetVersionsRequest { + optional string module = 1; +} + +message GetVersionsResponse { + repeated string version = 1; +} + +message GetDefaultVersionRequest { + optional string module = 1; +} + +message GetDefaultVersionResponse { + required string version = 1; +} + +message GetNumInstancesRequest { + optional string module = 1; + optional string version = 2; +} + +message GetNumInstancesResponse { + required int64 instances = 1; +} + +message SetNumInstancesRequest { + optional string module = 1; + optional string version = 2; + required int64 instances = 3; +} + +message SetNumInstancesResponse {} + +message StartModuleRequest { + required string module = 1; + required string version = 2; +} + +message StartModuleResponse {} + +message StopModuleRequest { + optional string module = 1; + optional string version = 2; +} + +message StopModuleResponse {} + +message GetHostnameRequest { + optional string module = 1; + optional string version = 2; + optional string instance = 3; +} + +message GetHostnameResponse { + required string hostname = 1; +} + diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go new file mode 100644 index 00000000000..6f169be487d --- /dev/null +++ b/vendor/google.golang.org/appengine/namespace.go @@ -0,0 +1,24 @@ +// Copyright 2012 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package appengine + +import ( + "context" + "fmt" + "regexp" + + "google.golang.org/appengine/internal" +) + +// Namespace returns a replacement context that operates within the given namespace. +func Namespace(c context.Context, namespace string) (context.Context, error) { + if !validNamespace.MatchString(namespace) { + return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) + } + return internal.NamespacedContext(c, namespace), nil +} + +// validNamespace matches valid namespace names. +var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go new file mode 100644 index 00000000000..fcf3ad0a58f --- /dev/null +++ b/vendor/google.golang.org/appengine/timeout.go @@ -0,0 +1,20 @@ +// Copyright 2013 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +package appengine + +import "context" + +// IsTimeoutError reports whether err is a timeout error. +func IsTimeoutError(err error) bool { + if err == context.DeadlineExceeded { + return true + } + if t, ok := err.(interface { + IsTimeout() bool + }); ok { + return t.IsTimeout() + } + return false +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go new file mode 100644 index 00000000000..cc5d52fbcc3 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -0,0 +1,336 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/rpc/code.proto + +package code + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The canonical error codes for gRPC APIs. +// +// Sometimes multiple error codes may apply. Services should return +// the most specific error code that applies. For example, prefer +// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. +// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. +type Code int32 + +const ( + // Not an error; returned on success. + // + // HTTP Mapping: 200 OK + Code_OK Code = 0 + // The operation was cancelled, typically by the caller. + // + // HTTP Mapping: 499 Client Closed Request + Code_CANCELLED Code = 1 + // Unknown error. For example, this error may be returned when + // a `Status` value received from another address space belongs to + // an error space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + // + // HTTP Mapping: 500 Internal Server Error + Code_UNKNOWN Code = 2 + // The client specified an invalid argument. Note that this differs + // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + // + // HTTP Mapping: 400 Bad Request + Code_INVALID_ARGUMENT Code = 3 + // The deadline expired before the operation could complete. For operations + // that change the state of the system, this error may be returned + // even if the operation has completed successfully. For example, a + // successful response from a server could have been delayed long + // enough for the deadline to expire. + // + // HTTP Mapping: 504 Gateway Timeout + Code_DEADLINE_EXCEEDED Code = 4 + // Some requested entity (e.g., file or directory) was not found. 
+ // + // Note to server developers: if a request is denied for an entire class + // of users, such as gradual feature rollout or undocumented allowlist, + // `NOT_FOUND` may be used. If a request is denied for some users within + // a class of users, such as user-based access control, `PERMISSION_DENIED` + // must be used. + // + // HTTP Mapping: 404 Not Found + Code_NOT_FOUND Code = 5 + // The entity that a client attempted to create (e.g., file or directory) + // already exists. + // + // HTTP Mapping: 409 Conflict + Code_ALREADY_EXISTS Code = 6 + // The caller does not have permission to execute the specified + // operation. `PERMISSION_DENIED` must not be used for rejections + // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` + // instead for those errors). `PERMISSION_DENIED` must not be + // used if the caller can not be identified (use `UNAUTHENTICATED` + // instead for those errors). This error code does not imply the + // request is valid or the requested entity exists or satisfies + // other pre-conditions. + // + // HTTP Mapping: 403 Forbidden + Code_PERMISSION_DENIED Code = 7 + // The request does not have valid authentication credentials for the + // operation. + // + // HTTP Mapping: 401 Unauthorized + Code_UNAUTHENTICATED Code = 16 + // Some resource has been exhausted, perhaps a per-user quota, or + // perhaps the entire file system is out of space. + // + // HTTP Mapping: 429 Too Many Requests + Code_RESOURCE_EXHAUSTED Code = 8 + // The operation was rejected because the system is not in a state + // required for the operation's execution. For example, the directory + // to be deleted is non-empty, an rmdir operation is applied to + // a non-directory, etc. + // + // Service implementors can use the following guidelines to decide + // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: + // + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level. For + // example, when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence. + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. For example, if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. + // + // HTTP Mapping: 400 Bad Request + Code_FAILED_PRECONDITION Code = 9 + // The operation was aborted, typically due to a concurrency issue such as + // a sequencer check failure or transaction abort. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 409 Conflict + Code_ABORTED Code = 10 + // The operation was attempted past the valid range. E.g., seeking or + // reading past end-of-file. + // + // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate `INVALID_ARGUMENT` if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // `OUT_OF_RANGE` if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between `FAILED_PRECONDITION` and + // `OUT_OF_RANGE`. 
We recommend using `OUT_OF_RANGE` (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an `OUT_OF_RANGE` error to detect when + // they are done. + // + // HTTP Mapping: 400 Bad Request + Code_OUT_OF_RANGE Code = 11 + // The operation is not implemented or is not supported/enabled in this + // service. + // + // HTTP Mapping: 501 Not Implemented + Code_UNIMPLEMENTED Code = 12 + // Internal errors. This means that some invariants expected by the + // underlying system have been broken. This error code is reserved + // for serious errors. + // + // HTTP Mapping: 500 Internal Server Error + Code_INTERNAL Code = 13 + // The service is currently unavailable. This is most likely a + // transient condition, which can be corrected by retrying with + // a backoff. Note that it is not always safe to retry + // non-idempotent operations. + // + // See the guidelines above for deciding between `FAILED_PRECONDITION`, + // `ABORTED`, and `UNAVAILABLE`. + // + // HTTP Mapping: 503 Service Unavailable + Code_UNAVAILABLE Code = 14 + // Unrecoverable data loss or corruption. + // + // HTTP Mapping: 500 Internal Server Error + Code_DATA_LOSS Code = 15 +) + +// Enum value maps for Code. +var ( + Code_name = map[int32]string{ + 0: "OK", + 1: "CANCELLED", + 2: "UNKNOWN", + 3: "INVALID_ARGUMENT", + 4: "DEADLINE_EXCEEDED", + 5: "NOT_FOUND", + 6: "ALREADY_EXISTS", + 7: "PERMISSION_DENIED", + 16: "UNAUTHENTICATED", + 8: "RESOURCE_EXHAUSTED", + 9: "FAILED_PRECONDITION", + 10: "ABORTED", + 11: "OUT_OF_RANGE", + 12: "UNIMPLEMENTED", + 13: "INTERNAL", + 14: "UNAVAILABLE", + 15: "DATA_LOSS", + } + Code_value = map[string]int32{ + "OK": 0, + "CANCELLED": 1, + "UNKNOWN": 2, + "INVALID_ARGUMENT": 3, + "DEADLINE_EXCEEDED": 4, + "NOT_FOUND": 5, + "ALREADY_EXISTS": 6, + "PERMISSION_DENIED": 7, + "UNAUTHENTICATED": 16, + "RESOURCE_EXHAUSTED": 8, + "FAILED_PRECONDITION": 9, + "ABORTED": 10, + "OUT_OF_RANGE": 11, + "UNIMPLEMENTED": 12, + "INTERNAL": 13, + "UNAVAILABLE": 14, + "DATA_LOSS": 15, + } +) + +func (x Code) Enum() *Code { + p := new(Code) + *p = x + return p +} + +func (x Code) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Code) Descriptor() protoreflect.EnumDescriptor { + return file_google_rpc_code_proto_enumTypes[0].Descriptor() +} + +func (Code) Type() protoreflect.EnumType { + return &file_google_rpc_code_proto_enumTypes[0] +} + +func (x Code) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Code.Descriptor instead. 
+func (Code) EnumDescriptor() ([]byte, []int) { + return file_google_rpc_code_proto_rawDescGZIP(), []int{0} +} + +var File_google_rpc_code_proto protoreflect.FileDescriptor + +var file_google_rpc_code_proto_rawDesc = []byte{ + 0x0a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2a, 0xb7, 0x02, 0x0a, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x06, 0x0a, 0x02, + 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, + 0x12, 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, + 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, + 0x4e, 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, + 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, + 0x41, 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, + 0x12, 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, + 0x45, 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54, + 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x12, 0x16, 0x0a, 0x12, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, + 0x45, 0x44, 0x10, 0x08, 0x12, 0x17, 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, + 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, + 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, + 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, + 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, + 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, + 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, + 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x42, 0x58, 0x0a, + 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, + 0x09, 0x43, 0x6f, 0x64, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, + 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3b, 0x63, 0x6f, 0x64, + 0x65, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_code_proto_rawDescOnce sync.Once + file_google_rpc_code_proto_rawDescData = file_google_rpc_code_proto_rawDesc +) + +func file_google_rpc_code_proto_rawDescGZIP() []byte { + file_google_rpc_code_proto_rawDescOnce.Do(func() { + file_google_rpc_code_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_code_proto_rawDescData) + }) + return file_google_rpc_code_proto_rawDescData +} + +var file_google_rpc_code_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_rpc_code_proto_goTypes = []interface{}{ + (Code)(0), // 0: google.rpc.Code +} 
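Illustrative sketch, not part of the vendored patch: each `Code` constant above documents an HTTP mapping in its comment, and the generated `String()` method renders the canonical names from `Code_name`. The helper below mirrors a few of those documented mappings; `httpStatusFor` is a hypothetical name, not a genproto API.

package main

import (
	"fmt"
	"net/http"

	"google.golang.org/genproto/googleapis/rpc/code"
)

// httpStatusFor mirrors a handful of the "HTTP Mapping" comments above;
// it is a hypothetical helper for illustration only.
func httpStatusFor(c code.Code) int {
	switch c {
	case code.Code_OK:
		return http.StatusOK // 200
	case code.Code_NOT_FOUND:
		return http.StatusNotFound // 404
	case code.Code_PERMISSION_DENIED:
		return http.StatusForbidden // 403
	case code.Code_UNAVAILABLE:
		return http.StatusServiceUnavailable // 503
	default:
		return http.StatusInternalServerError // 500
	}
}

func main() {
	c := code.Code_NOT_FOUND
	fmt.Printf("%v -> HTTP %d\n", c, httpStatusFor(c)) // NOT_FOUND -> HTTP 404
}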
+var file_google_rpc_code_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_rpc_code_proto_init() } +func file_google_rpc_code_proto_init() { + if File_google_rpc_code_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_code_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_code_proto_goTypes, + DependencyIndexes: file_google_rpc_code_proto_depIdxs, + EnumInfos: file_google_rpc_code_proto_enumTypes, + }.Build() + File_google_rpc_code_proto = out.File + file_google_rpc_code_proto_rawDesc = nil + file_google_rpc_code_proto_goTypes = nil + file_google_rpc_code_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go new file mode 100644 index 00000000000..7bd161e48ad --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -0,0 +1,1314 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.9 +// source: google/rpc/error_details.proto + +package errdetails + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. 
This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +// Describes when the clients can retry a failed request. Clients could ignore +// the recommendation here or retry when this information is missing from error +// responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. 
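Illustrative sketch, not part of the vendored patch: clients typically honor the `RetryInfo` contract described above by scanning the status details of a failed RPC. `retryDelayFrom` is a hypothetical helper, and it assumes the server attached the detail via `grpc/status.WithDetails`.

package rpcretry // hypothetical package for illustration

import (
	"time"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

// retryDelayFrom reports the server-suggested minimum delay before the next
// attempt, and whether a RetryInfo detail was present at all.
func retryDelayFrom(err error) (time.Duration, bool) {
	s, ok := status.FromError(err)
	if !ok {
		return 0, false // not a gRPC status error
	}
	for _, d := range s.Details() {
		if ri, ok := d.(*errdetails.RetryInfo); ok {
			return ri.GetRetryDelay().AsDuration(), true
		}
	}
	return 0, false
}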
+type RetryInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clients should wait at least this long between retrying the same request. + RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` +} + +func (x *RetryInfo) Reset() { + *x = RetryInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryInfo) ProtoMessage() {} + +func (x *RetryInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. +func (*RetryInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} +} + +func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { + if x != nil { + return x.RetryDelay + } + return nil +} + +// Describes additional debugging info. +type DebugInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The stack trace entries indicating where the error occurred. + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + // Additional debugging information provided by the server. + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` +} + +func (x *DebugInfo) Reset() { + *x = DebugInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DebugInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DebugInfo) ProtoMessage() {} + +func (x *DebugInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. +func (*DebugInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} +} + +func (x *DebugInfo) GetStackEntries() []string { + if x != nil { + return x.StackEntries + } + return nil +} + +func (x *DebugInfo) GetDetail() string { + if x != nil { + return x.Detail + } + return "" +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryInfo and Help types for other details about handling a +// quota failure. 
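Illustrative sketch, not part of the vendored patch: a server can surface the quota context described above by attaching a `QuotaFailure` to a `RESOURCE_EXHAUSTED` status. `quotaExceededErr` and the subject/description strings are assumptions for illustration.

package quotas // hypothetical package for illustration

import (
	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// quotaExceededErr builds a RESOURCE_EXHAUSTED error carrying a structured
// QuotaFailure detail that clients can inspect alongside RetryInfo and Help.
func quotaExceededErr(project string) error {
	st := status.New(codes.ResourceExhausted, "daily read quota exceeded")
	st, err := st.WithDetails(&errdetails.QuotaFailure{
		Violations: []*errdetails.QuotaFailure_Violation{{
			Subject:     "project:" + project,
			Description: "Daily limit for read operations exceeded",
		}},
	})
	if err != nil {
		// WithDetails fails only if a detail cannot be marshaled; fall back
		// to the bare status in that case.
		return status.Error(codes.ResourceExhausted, "daily read quota exceeded")
	}
	return st.Err()
}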
+type QuotaFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all quota violations. + Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *QuotaFailure) Reset() { + *x = QuotaFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure) ProtoMessage() {} + +func (x *QuotaFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. +func (*QuotaFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} +} + +func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes what preconditions have failed. +// +// For example, if an RPC failed because it required the Terms of Service to be +// acknowledged, it could list the terms of service violation in the +// PreconditionFailure message. +type PreconditionFailure struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` +} + +func (x *PreconditionFailure) Reset() { + *x = PreconditionFailure{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure) ProtoMessage() {} + +func (x *PreconditionFailure) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead. +func (*PreconditionFailure) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4} +} + +func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { + if x != nil { + return x.Violations + } + return nil +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all violations in a client request. 
+ FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` +} + +func (x *BadRequest) Reset() { + *x = BadRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest) ProtoMessage() {} + +func (x *BadRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead. +func (*BadRequest) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5} +} + +func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { + if x != nil { + return x.FieldViolations + } + return nil +} + +// Contains metadata about the request that clients can attach when filing a bug +// or providing other forms of feedback. +type RequestInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An opaque string that should only be interpreted by the service generating + // it. For example, it can be used to identify requests in the service's logs. + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` +} + +func (x *RequestInfo) Reset() { + *x = RequestInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RequestInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestInfo) ProtoMessage() {} + +func (x *RequestInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead. +func (*RequestInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6} +} + +func (x *RequestInfo) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *RequestInfo) GetServingData() string { + if x != nil { + return x.ServingData + } + return "" +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A name for the type of resource being accessed, e.g. "sql table", + // "cloud storage bucket", "file", "Google calendar"; or the type URL + // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". 
+ ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + // The name of the resource being accessed. For example, a shared calendar + // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + // The owner of the resource (optional). + // For example, "user:" or "project:". + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + // Describes what error is encountered when accessing this resource. + // For example, updating a cloud project may require the `writer` permission + // on the developer console project. + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *ResourceInfo) Reset() { + *x = ResourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceInfo) ProtoMessage() {} + +func (x *ResourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead. +func (*ResourceInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7} +} + +func (x *ResourceInfo) GetResourceType() string { + if x != nil { + return x.ResourceType + } + return "" +} + +func (x *ResourceInfo) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *ResourceInfo) GetOwner() string { + if x != nil { + return x.Owner + } + return "" +} + +func (x *ResourceInfo) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Provides links to documentation or for performing an out of band action. +// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URL(s) pointing to additional information on handling the current error. 
+ Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` +} + +func (x *Help) Reset() { + *x = Help{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help) ProtoMessage() {} + +func (x *Help) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help.ProtoReflect.Descriptor instead. +func (*Help) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8} +} + +func (x *Help) GetLinks() []*Help_Link { + if x != nil { + return x.Links + } + return nil +} + +// Provides a localized error message that is safe to return to the user +// which can be attached to an RPC error. +type LocalizedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The locale used following the specification defined at + // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. + // Examples are: "en-US", "fr-CH", "es-MX" + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + // The localized error message in the above locale. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *LocalizedMessage) Reset() { + *x = LocalizedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LocalizedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LocalizedMessage) ProtoMessage() {} + +func (x *LocalizedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead. +func (*LocalizedMessage) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9} +} + +func (x *LocalizedMessage) GetLocale() string { + if x != nil { + return x.Locale + } + return "" +} + +func (x *LocalizedMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// A message type used to describe a single quota violation. For example, a +// daily quota or a custom quota that was exceeded. +type QuotaFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The subject on which the quota check failed. + // For example, "clientip:" or "project:". + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. 
+ // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *QuotaFailure_Violation) Reset() { + *x = QuotaFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuotaFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuotaFailure_Violation) ProtoMessage() {} + +func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead. +func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *QuotaFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *QuotaFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single precondition failure. +type PreconditionFailure_Violation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of PreconditionFailure. We recommend using a service-specific + // enum type to define the supported precondition violation subjects. For + // example, "TOS" for "Terms of Service violation". + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The subject, relative to the type, that failed. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *PreconditionFailure_Violation) Reset() { + *x = PreconditionFailure_Violation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PreconditionFailure_Violation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PreconditionFailure_Violation) ProtoMessage() {} + +func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead. 
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0} +} + +func (x *PreconditionFailure_Violation) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PreconditionFailure_Violation) GetSubject() string { + if x != nil { + return x.Subject + } + return "" +} + +func (x *PreconditionFailure_Violation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// A message type used to describe a single bad request field. +type BadRequest_FieldViolation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A path that leads to a field in the request body. The value will be a + // sequence of dot-separated identifiers that identify a protocol buffer + // field. + // + // Consider the following: + // + // message CreateContactRequest { + // message EmailAddress { + // enum Type { + // TYPE_UNSPECIFIED = 0; + // HOME = 1; + // WORK = 2; + // } + // + // optional string email = 1; + // repeated EmailType type = 2; + // } + // + // string full_name = 1; + // repeated EmailAddress email_addresses = 2; + // } + // + // In this example, in proto `field` could take one of the following values: + // + // - `full_name` for a violation in the `full_name` value + // - `email_addresses[1].email` for a violation in the `email` field of the + // first `email_addresses` message + // - `email_addresses[3].type[2]` for a violation in the second `type` + // value in the third `email_addresses` message. + // + // In JSON, the same values are represented as: + // + // - `fullName` for a violation in the `fullName` value + // - `emailAddresses[1].email` for a violation in the `email` field of the + // first `emailAddresses` message + // - `emailAddresses[3].type[2]` for a violation in the second `type` + // value in the third `emailAddresses` message. + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + // A description of why the request element is bad. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *BadRequest_FieldViolation) Reset() { + *x = BadRequest_FieldViolation{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BadRequest_FieldViolation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BadRequest_FieldViolation) ProtoMessage() {} + +func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead. +func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0} +} + +func (x *BadRequest_FieldViolation) GetField() string { + if x != nil { + return x.Field + } + return "" +} + +func (x *BadRequest_FieldViolation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +// Describes a URL link. 
+type Help_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes what the link offers. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The URL of the link. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *Help_Link) Reset() { + *x = Help_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Help_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Help_Link) ProtoMessage() {} + +func (x *Help_Link) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead. +func (*Help_Link) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *Help_Link) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Help_Link) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +var File_google_rpc_error_details_proto protoreflect.FileDescriptor + +var file_google_rpc_error_details_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a, + 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72, + 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64, + 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, + 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x9b, 0x01, 0x0a, 0x0c, + 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a, + 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75, + 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x47, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xbd, 0x01, 0x0a, 0x13, 0x50, 0x72, + 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, + 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, + 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x5b, 0x0a, 0x09, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, + 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, + 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, + 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, + 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, + 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, + 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, + 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, + 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, + 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, + 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_rpc_error_details_proto_rawDescOnce sync.Once + file_google_rpc_error_details_proto_rawDescData = 
file_google_rpc_error_details_proto_rawDesc +) + +func file_google_rpc_error_details_proto_rawDescGZIP() []byte { + file_google_rpc_error_details_proto_rawDescOnce.Do(func() { + file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData) + }) + return file_google_rpc_error_details_proto_rawDescData +} + +var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_google_rpc_error_details_proto_goTypes = []interface{}{ + (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo + (*RetryInfo)(nil), // 1: google.rpc.RetryInfo + (*DebugInfo)(nil), // 2: google.rpc.DebugInfo + (*QuotaFailure)(nil), // 3: google.rpc.QuotaFailure + (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure + (*BadRequest)(nil), // 5: google.rpc.BadRequest + (*RequestInfo)(nil), // 6: google.rpc.RequestInfo + (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo + (*Help)(nil), // 8: google.rpc.Help + (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage + nil, // 10: google.rpc.ErrorInfo.MetadataEntry + (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation + (*PreconditionFailure_Violation)(nil), // 12: google.rpc.PreconditionFailure.Violation + (*BadRequest_FieldViolation)(nil), // 13: google.rpc.BadRequest.FieldViolation + (*Help_Link)(nil), // 14: google.rpc.Help.Link + (*durationpb.Duration)(nil), // 15: google.protobuf.Duration +} +var file_google_rpc_error_details_proto_depIdxs = []int32{ + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation + 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation + 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation + 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_rpc_error_details_proto_init() } +func file_google_rpc_error_details_proto_init() { + if File_google_rpc_error_details_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DebugInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RequestInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LocalizedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuotaFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PreconditionFailure_Violation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BadRequest_FieldViolation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Help_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_rpc_error_details_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_rpc_error_details_proto_goTypes, + DependencyIndexes: file_google_rpc_error_details_proto_depIdxs, + MessageInfos: file_google_rpc_error_details_proto_msgTypes, + }.Build() + File_google_rpc_error_details_proto = out.File + file_google_rpc_error_details_proto_rawDesc = nil + file_google_rpc_error_details_proto_goTypes = nil + file_google_rpc_error_details_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go 
index d79560a2e26..f391744f729 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -54,13 +54,14 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { - if strings.ToLower(b.Name()) != b.Name() { + name := strings.ToLower(b.Name()) + if name != b.Name() { // TODO: Skip the use of strings.ToLower() to index the map after v1.59 // is released to switch to case sensitive balancer registry. Also, // remove this warning and update the docstrings for Register and Get. logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) } - m[strings.ToLower(b.Name())] = b + m[name] = b } // unregisterForTesting deletes the balancer with the given name from the @@ -232,8 +233,8 @@ type BuildOptions struct { // implementations which do not communicate with a remote load balancer // server can ignore this field. Authority string - // ChannelzParentID is the parent ClientConn's channelz ID. - ChannelzParentID *channelz.Identifier + // ChannelzParent is the parent ClientConn's channelz channel. + ChannelzParent channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go index b5e30cff021..af39b8a4c73 100644 --- a/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -21,7 +21,6 @@ package grpc import ( "context" "fmt" - "strings" "sync" "google.golang.org/grpc/balancer" @@ -66,19 +65,20 @@ type ccBalancerWrapper struct { } // newCCBalancerWrapper creates a new balancer wrapper in idle state. The -// underlying balancer is not created until the switchTo() method is invoked. +// underlying balancer is not created until the updateClientConnState() method +// is invoked. 
func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ cc: cc, opts: balancer.BuildOptions{ - DialCreds: cc.dopts.copts.TransportCredentials, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParent: cc.channelz, + Target: cc.parsedTarget, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -97,6 +97,11 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat if ctx.Err() != nil || ccb.balancer == nil { return } + name := gracefulswitch.ChildName(ccs.BalancerConfig) + if ccb.curBalancerName != name { + ccb.curBalancerName = name + channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name) + } err := ccb.balancer.UpdateClientConnState(*ccs) if logger.V(2) && err != nil { logger.Infof("error from balancer.UpdateClientConnState: %v", err) @@ -120,54 +125,6 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { }) } -// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the -// LB policy identified by name. -// -// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the -// first good update from the name resolver, it determines the LB policy to use -// and invokes the switchTo() method. Upon receipt of every subsequent update -// from the name resolver, it invokes this method. -// -// the ccBalancerWrapper keeps track of the current LB policy name, and skips -// the graceful balancer switching process if the name does not change. -func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.serializer.Schedule(func(ctx context.Context) { - if ctx.Err() != nil || ccb.balancer == nil { - return - } - // TODO: Other languages use case-sensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - ccb.buildLoadBalancingPolicy(name) - }) -} - -// buildLoadBalancingPolicy performs the following: -// - retrieve a balancer builder for the given name. Use the default LB -// policy, pick_first, if no LB policy with name is found in the registry. -// - instruct the gracefulswitch balancer to switch to the above builder. This -// will actually build the new balancer. -// - update the `curBalancerName` field -// -// Must be called from a serializer callback. -func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) - builder = newPickfirstBuilder() - } else { - channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) - } - - if err := ccb.balancer.SwitchTo(builder); err != nil { - channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) - return - } - ccb.curBalancerName = builder.Name() -} - // close initiates async shutdown of the wrapper. cc.mu must be held when // calling this function. 
To determine the wrapper has finished shutting down, // the channel should block on ccb.serializer.Done() without cc.mu held. @@ -175,7 +132,7 @@ func (ccb *ccBalancerWrapper) close() { ccb.mu.Lock() ccb.closed = true ccb.mu.Unlock() - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing") ccb.serializer.Schedule(func(context.Context) { if ccb.balancer == nil { return @@ -212,7 +169,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer } ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { - channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) + channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ @@ -304,7 +261,7 @@ func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { } func (acbw *acBalancerWrapper) String() string { - return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID) } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index f0b7f3200f1..e3eb44d58b7 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -67,7 +67,7 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") - // errConnIdling indicates the the connection is being closed as the channel + // errConnIdling indicates the connection is being closed as the channel // is moving to an idle mode due to inactivity. errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default @@ -101,11 +101,6 @@ const ( defaultReadBufSize = 32 * 1024 ) -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - type defaultConfigSelector struct { sc *ServiceConfig } @@ -117,13 +112,22 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// newClient returns a new client in idle mode. -func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { +// NewClient creates a new gRPC "channel" for the target URI provided. No I/O +// is performed. Use of the ClientConn for RPCs will automatically cause it to +// connect. Connect may be used to manually create a connection, but for most +// users this is unnecessary. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns +// resolver, a "dns:///" prefix should be applied to the target. +// +// The DialOptions returned by WithBlock, WithTimeout, and +// WithReturnConnectionError are ignored by this function. 
+func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), - czData: new(channelzData), } cc.retryThrottler.Store((*retryThrottler)(nil)) @@ -175,15 +179,15 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) // Determine the resolver to use. if err := cc.parseTargetAndFindResolver(); err != nil { - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil, err } if err = cc.determineAuthority(); err != nil { - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil, err } - cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz) cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. @@ -191,39 +195,36 @@ func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) return cc, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. +// Dial calls DialContext(context.Background(), target, opts...). // -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. +// Deprecated: use NewClient instead. Will be supported throughout 1.x. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is +// used, it calls Connect and WaitForStateChange until either the context +// expires or the state of the ClientConn is Ready. // -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. +// One subtle difference between NewClient and Dial and DialContext is that the +// former uses "dns" as the default name resolver, while the latter use +// "passthrough" for backward compatibility. This distinction should not matter +// to most users, but could matter to legacy users that specify a custom dialer +// and expect it to receive the target string directly. // -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +// Deprecated: use NewClient instead. Will be supported throughout 1.x. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc, err := newClient(target, opts...) + // At the end of this method, we kick the channel out of idle, rather than + // waiting for the first rpc. + opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + cc, err := NewClient(target, opts...) if err != nil { return nil, err } // We start the channel off in idle mode, but kick it out of idle now, - // instead of waiting for the first RPC. Other gRPC implementations do wait - // for the first RPC to kick the channel out of idle. 
But doing so would be - // a major behavior change for our users who are used to seeing the channel - // active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, i.e. by making newClient exported. - + // instead of waiting for the first RPC. This is the legacy behavior of + // Dial. defer func() { if err != nil { cc.Close() @@ -291,17 +292,17 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // addTraceEvent is a helper method to add a trace event on the channel. If the // channel is a nested one, the same event is also added on the parent channel. func (cc *ClientConn) addTraceEvent(msg string) { - ted := &channelz.TraceEventDesc{ + ted := &channelz.TraceEvent{ Desc: fmt.Sprintf("Channel %s", msg), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + if cc.dopts.channelzParent != nil { + ted.Parent = &channelz.TraceEvent{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg), Severity: channelz.CtInfo, } } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + channelz.AddTraceEvent(logger, cc.channelz, 0, ted) } type idler ClientConn @@ -418,14 +419,15 @@ func (cc *ClientConn) validateTransportCredentials() error { } // channelzRegistration registers the newly created ClientConn with channelz and -// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. -// A channelz trace event is emitted for ClientConn creation. If the newly -// created ClientConn is a nested one, i.e a valid parent ClientConn ID is -// specified via a dial option, the trace event is also added to the parent. +// stores the returned identifier in `cc.channelz`. A channelz trace event is +// emitted for ClientConn creation. If the newly created ClientConn is a nested +// one, i.e a valid parent ClientConn ID is specified via a dial option, the +// trace event is also added to the parent. // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. func (cc *ClientConn) channelzRegistration(target string) { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel) + cc.channelz = channelz.RegisterChannel(parentChannel, target) cc.addTraceEvent("created") } @@ -492,11 +494,11 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } // newConnectivityStateManager creates an connectivityStateManager with -// the specified id. -func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { +// the specified channel. 
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager { return &connectivityStateManager{ - channelzID: id, - pubSub: grpcsync.NewPubSub(ctx), + channelz: channel, + pubSub: grpcsync.NewPubSub(ctx), } } @@ -510,7 +512,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID *channelz.Identifier + channelz *channelz.Channel pubSub *grpcsync.PubSub } @@ -527,9 +529,10 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.channelz.ChannelMetrics.State.Store(&state) csm.pubSub.Publish(state) - channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) + channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -583,12 +586,12 @@ type ClientConn struct { cancel context.CancelFunc // Cancelled on close. // The following are initialized at dial time, and are read-only after that. - target string // User's dial target. - parsedTarget resolver.Target // See parseTargetAndFindResolver(). - authority string // See determineAuthority(). - dopts dialOptions // Default and user specified dial options. - channelzID *channelz.Identifier // Channelz identifier for the channel. - resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelz *channelz.Channel // Channelz object. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't @@ -596,7 +599,6 @@ type ClientConn struct { csMgr *connectivityStateManager pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector - czData *channelzData retryThrottler atomic.Value // Updated from service config. // mu protects the following fields. 
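The clientconn.go hunks above replace the indirect *channelz.Identifier handles with concrete *channelz.Channel objects, whose call counters are plain atomics, and split channel construction out of Dial into the exported NewClient. A minimal sketch of the new constructor in use; the target address and insecure credentials are illustrative assumptions, not taken from this patch:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// NewClient performs no I/O: the returned channel starts idle and only
	// connects on the first RPC (or an explicit Connect). Its default
	// resolver scheme is "dns"; the deprecated Dial/DialContext keep
	// "passthrough" for backward compatibility.
	cc, err := grpc.NewClient("dns:///localhost:50051", // hypothetical target
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer cc.Close()
	cc.Connect() // optional: exit idle eagerly, mirroring legacy Dial behavior
}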
@@ -690,6 +692,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error { var emptyServiceConfig *ServiceConfig func init() { + balancer.Register(pickfirstBuilder{}) cfg := parseServiceConfig("{}") if cfg.Err != nil { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) @@ -707,15 +710,15 @@ func init() { } } -func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { +func (cc *ClientConn) maybeApplyDefaultServiceConfig() { if cc.sc != nil { - cc.applyServiceConfigAndBalancer(cc.sc, nil, addrs) + cc.applyServiceConfigAndBalancer(cc.sc, nil) return } if cc.dopts.defaultServiceConfig != nil { - cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig}) } else { - cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}, addrs) + cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig}) } } @@ -733,7 +736,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) // May need to apply the initial service config in case the resolver // doesn't support service configs, or doesn't provide a service config // with the new addresses. - cc.maybeApplyDefaultServiceConfig(nil) + cc.maybeApplyDefaultServiceConfig() cc.balancerWrapper.resolverError(err) @@ -744,10 +747,10 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) var ret error if cc.dopts.disableServiceConfig { - channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) - cc.maybeApplyDefaultServiceConfig(s.Addresses) + channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig) + cc.maybeApplyDefaultServiceConfig() } else if s.ServiceConfig == nil { - cc.maybeApplyDefaultServiceConfig(s.Addresses) + cc.maybeApplyDefaultServiceConfig() // TODO: do we need to apply a failing LB policy if there is no // default, per the error handling design? } else { @@ -755,12 +758,12 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) configSelector := iresolver.GetConfigSelector(s) if configSelector != nil { if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 { - channelz.Infof(logger, cc.channelzID, "method configs in service config will be ignored due to presence of config selector") + channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector") } } else { configSelector = &defaultConfigSelector{sc} } - cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) + cc.applyServiceConfigAndBalancer(sc, configSelector) } else { ret = balancer.ErrBadResolverState if cc.sc == nil { @@ -775,7 +778,7 @@ func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) var balCfg serviceconfig.LoadBalancingConfig if cc.sc != nil && cc.sc.lbConfig != nil { - balCfg = cc.sc.lbConfig.cfg + balCfg = cc.sc.lbConfig } bw := cc.balancerWrapper cc.mu.Unlock() @@ -834,22 +837,17 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, - czData: new(channelzData), + channelz: channelz.RegisterSubChannel(cc.channelz.ID, ""), resetBackoff: make(chan struct{}), stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - var err error - ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") - if err != nil { - return nil, err - } - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel created", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID), Severity: channelz.CtInfo, }, }) @@ -872,38 +870,27 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { ac.tearDown(err) } -func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { - return &channelz.ChannelInternalMetric{ - State: cc.GetState(), - Target: cc.target, - CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)), - } -} - // Target returns the target string of the ClientConn. -// -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. func (cc *ClientConn) Target() string { return cc.target } +// CanonicalTarget returns the canonical target string of the ClientConn. +func (cc *ClientConn) CanonicalTarget() string { + return cc.parsedTarget.String() +} + func (cc *ClientConn) incrCallsStarted() { - atomic.AddInt64(&cc.czData.callsStarted, 1) - atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano()) + cc.channelz.ChannelMetrics.CallsStarted.Add(1) + cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (cc *ClientConn) incrCallsSucceeded() { - atomic.AddInt64(&cc.czData.callsSucceeded, 1) + cc.channelz.ChannelMetrics.CallsSucceeded.Add(1) } func (cc *ClientConn) incrCallsFailed() { - atomic.AddInt64(&cc.czData.callsFailed, 1) + cc.channelz.ChannelMetrics.CallsFailed.Add(1) } // connect starts creating a transport. @@ -947,7 +934,7 @@ func equalAddresses(a, b []resolver.Address) bool { // connections or connection attempts. func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) addrs = copyAddressesWithoutBalancerAttributes(addrs) if equalAddresses(ac.addrs, addrs) { @@ -1067,7 +1054,7 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st }) } -func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector, addrs []resolver.Address) { +func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) { if sc == nil { // should never reach here. 
return @@ -1088,17 +1075,6 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } else { cc.retryThrottler.Store((*retryThrottler)(nil)) } - - var newBalancerName string - if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { - // No service config or no LB policy specified in config. - newBalancerName = PickFirstBalancerName - } else if cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { // cc.sc.LB != nil - newBalancerName = *cc.sc.LB - } - cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1174,7 +1150,7 @@ func (cc *ClientConn) Close() error { // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. - channelz.RemoveEntry(cc.channelzID) + channelz.RemoveEntry(cc.channelz.ID) return nil } @@ -1206,8 +1182,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.SubChannel } // Note: this requires a lock on ac.mu. @@ -1219,10 +1194,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) close(ac.stateChan) ac.stateChan = make(chan struct{}) ac.state = s + ac.channelz.ChannelMetrics.State.Store(&s) if lastErr == nil { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s) } else { - channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) + channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } ac.acbw.updateState(s, lastErr) } @@ -1335,7 +1311,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c } ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) + channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr) err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { @@ -1388,7 +1364,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() - copts.ChannelzParentID = ac.channelzID + copts.ChannelzParent = ac.channelz newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { @@ -1397,7 +1373,7 @@ func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, } // newTr is either nil, or closed. hcancel() - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) + channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1469,7 +1445,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. 
- channelz.Error(logger, ac.channelzID, "Health check is requested but health check function is not set.") + channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.") return } @@ -1499,9 +1475,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) + channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err) } } }() @@ -1566,18 +1542,18 @@ func (ac *addrConn) tearDown(err error) { ac.cancel() ac.curAddr = resolver.Address{} - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), + Parent: &channelz.TraceEvent{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID), Severity: channelz.CtInfo, }, }) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from // being deleted right away. - channelz.RemoveEntry(ac.channelzID) + channelz.RemoveEntry(ac.channelz.ID) ac.mu.Unlock() // We have to release the lock before the call to GracefulClose/Close here @@ -1604,39 +1580,6 @@ func (ac *addrConn) tearDown(err error) { } } -func (ac *addrConn) getState() connectivity.State { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { - ac.mu.Lock() - addr := ac.curAddr.Addr - ac.mu.Unlock() - return &channelz.ChannelInternalMetric{ - State: ac.getState(), - Target: addr, - CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)), - } -} - -func (ac *addrConn) incrCallsStarted() { - atomic.AddInt64(&ac.czData.callsStarted, 1) - atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano()) -} - -func (ac *addrConn) incrCallsSucceeded() { - atomic.AddInt64(&ac.czData.callsSucceeded, 1) -} - -func (ac *addrConn) incrCallsFailed() { - atomic.AddInt64(&ac.czData.callsFailed, 1) -} - type retryThrottler struct { max float64 thresh float64 @@ -1674,12 +1617,17 @@ func (rt *retryThrottler) successfulRPC() { } } -type channelzChannel struct { - cc *ClientConn +func (ac *addrConn) incrCallsStarted() { + ac.channelz.ChannelMetrics.CallsStarted.Add(1) + ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } -func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric { - return c.cc.channelzMetric() +func (ac *addrConn) incrCallsSucceeded() { + ac.channelz.ChannelMetrics.CallsSucceeded.Add(1) +} + +func (ac *addrConn) incrCallsFailed() { + ac.channelz.ChannelMetrics.CallsFailed.Add(1) } // ErrClientConnTimeout indicates that the ClientConn cannot establish the @@ 
-1721,14 +1669,14 @@ func (cc *ClientConn) connectionError() error { // // Doesn't grab cc.mu as this method is expected to be called only at Dial time. func (cc *ClientConn) parseTargetAndFindResolver() error { - channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) + channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target) var rb resolver.Builder parsedTarget, err := parseTarget(cc.target) if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) + channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1740,17 +1688,22 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { // We are here because the user's dial target did not contain a scheme or // specified an unregistered scheme. We should fallback to the default // scheme, except when a custom dialer is specified in which case, we should - // always use passthrough scheme. - defScheme := resolver.GetDefaultScheme() - channelz.Infof(logger, cc.channelzID, "fallback to scheme %q", defScheme) + // always use passthrough scheme. For either case, we need to respect any overridden + // global defaults set by the user. + defScheme := cc.dopts.defaultScheme + if internal.UserSetDefaultScheme { + defScheme = resolver.GetDefaultScheme() + } + + channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme) canonicalTarget := defScheme + ":///" + cc.target parsedTarget, err = parseTarget(canonicalTarget) if err != nil { - channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) + channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err) return err } - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) @@ -1873,6 +1826,6 @@ func (cc *ClientConn) determineAuthority() error { } else { cc.authority = encodeAuthority(endpoint) } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority) return nil } diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 5feac3aa0e4..f6b55c68b56 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -28,9 +28,9 @@ import ( "fmt" "net" - "github.com/golang/protobuf/proto" "google.golang.org/grpc/attributes" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/protobuf/protoadapt" ) // PerRPCCredentials defines the common interface for the credentials which need to @@ -287,5 +287,5 @@ type ChannelzSecurityValue interface { type OtherChannelzSecurityValue struct { ChannelzSecurityValue Name string - Value proto.Message + Value protoadapt.MessageV1 } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index ba242618040..402493224e0 100644 
--- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -68,7 +68,7 @@ type dialOptions struct { binaryLogger binarylog.Logger copts transport.ConnectOptions callOptions []CallOption - channelzParentID *channelz.Identifier + channelzParent channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -79,6 +79,7 @@ type dialOptions struct { resolvers []resolver.Builder idleTimeout time.Duration recvBufferPool SharedBufferPool + defaultScheme string } // DialOption configures how we set up the connection. @@ -154,9 +155,7 @@ func WithSharedWriteBuffer(val bool) DialOption { } // WithWriteBufferSize determines how much data can be batched before doing a -// write on the wire. The corresponding memory allocation for this buffer will -// be twice the size to keep syscalls low. The default value for this buffer is -// 32KB. +// write on the wire. The default value for this buffer is 32KB. // // Zero or negative values will disable the write buffer such that each write // will be on underlying connection. Note: A Send call may not directly @@ -555,9 +554,9 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id *channelz.Identifier) DialOption { +func WithChannelzParentID(c channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.channelzParentID = id + o.channelzParent = c }) } @@ -645,6 +644,7 @@ func defaultDialOptions() dialOptions { healthCheckFunc: internal.HealthCheckFunc, idleTimeout: 30 * time.Minute, recvBufferPool: nopBufferPool{}, + defaultScheme: "dns", } } @@ -659,6 +659,14 @@ func withMinConnectDeadline(f func() time.Duration) DialOption { }) } +// withDefaultScheme is used to allow Dial to use "passthrough" as the default +// name resolver, while NewClient uses "dns" otherwise. +func withDefaultScheme(s string) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.defaultScheme = s + }) +} + // WithResolvers allows a list of resolver implementations to be registered // locally with the ClientConn without needing to be globally registered via // resolver.Register. They will be matched against the scheme used for the diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go new file mode 100644 index 00000000000..6bf7f87396f --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go @@ -0,0 +1,83 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package gracefulswitch + +import ( + "encoding/json" + "fmt" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/serviceconfig" +) + +type lbConfig struct { + serviceconfig.LoadBalancingConfig + + childBuilder balancer.Builder + childConfig serviceconfig.LoadBalancingConfig +} + +func ChildName(l serviceconfig.LoadBalancingConfig) string { + return l.(*lbConfig).childBuilder.Name() +} + +// ParseConfig parses a child config list and returns a LB config for the +// gracefulswitch Balancer. +// +// cfg is expected to be a json.RawMessage containing a JSON array of LB policy +// names + configs as the format of the "loadBalancingConfig" field in +// ServiceConfig. It returns a type that should be passed to +// UpdateClientConnState in the BalancerConfig field. +func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + var lbCfg []map[string]json.RawMessage + if err := json.Unmarshal(cfg, &lbCfg); err != nil { + return nil, err + } + for i, e := range lbCfg { + if len(e) != 1 { + return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i) + } + + var name string + var jsonCfg json.RawMessage + for name, jsonCfg = range e { + } + + builder := balancer.Get(name) + if builder == nil { + // Skip unregistered balancer names. + continue + } + + parser, ok := builder.(balancer.ConfigParser) + if !ok { + // This is a valid child with no config. + return &lbConfig{childBuilder: builder}, nil + } + + cfg, err := parser.ParseConfig(jsonCfg) + if err != nil { + return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err) + } + + return &lbConfig{childBuilder: builder, childConfig: cfg}, nil + } + + return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg)) +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 3c594e6e4e5..45d5e50ea9b 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -94,14 +94,23 @@ func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { // process is not complete when this method returns. This method must be called // synchronously alongside the rest of the balancer.Balancer methods this // Graceful Switch Balancer implements. +// +// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState +// to cause the Balancer to automatically change to the new child when necessary. 
func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + _, err := gsb.switchTo(builder) + return err +} + +func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) { gsb.mu.Lock() if gsb.closed { gsb.mu.Unlock() - return errBalancerClosed + return nil, errBalancerClosed } bw := &balancerWrapper{ - gsb: gsb, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -129,7 +138,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { gsb.balancerCurrent = nil } gsb.mu.Unlock() - return balancer.ErrBadResolverState + return nil, balancer.ErrBadResolverState } // This write doesn't need to take gsb.mu because this field never gets read @@ -138,7 +147,7 @@ func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { // bw.Balancer field will never be forwarded to until this SwitchTo() // function returns. bw.Balancer = newBalancer - return nil + return bw, nil } // Returns nil if the graceful switch balancer is closed. @@ -152,12 +161,33 @@ func (gsb *Balancer) latestBalancer() *balancerWrapper { } // UpdateClientConnState forwards the update to the latest balancer created. +// +// If the state's BalancerConfig is the config returned by a call to +// gracefulswitch.ParseConfig, then this function will automatically SwitchTo +// the balancer indicated by the config before forwarding its config to it, if +// necessary. func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() + + gsbCfg, ok := state.BalancerConfig.(*lbConfig) + if ok { + // Switch to the child in the config unless it is already active. + if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() { + var err error + balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder) + if err != nil { + return fmt.Errorf("could not switch to new child balancer: %w", err) + } + } + // Unwrap the child balancer's config. + state.BalancerConfig = gsbCfg.childConfig + } + if balToUpdate == nil { return errBalancerClosed } + // Perform this call without gsb.mu to prevent deadlocks if the child calls // back into the channel. The latest balancer can never be closed during a // call from the channel, even without gsb.mu held. @@ -169,6 +199,10 @@ func (gsb *Balancer) ResolverError(err error) { // The resolver data is only relevant to the most recent LB Policy. balToUpdate := gsb.latestBalancer() if balToUpdate == nil { + gsb.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) return } // Perform this call without gsb.mu to prevent deadlocks if the child calls @@ -261,7 +295,8 @@ func (gsb *Balancer) Close() { // graceful switch logic. type balancerWrapper struct { balancer.Balancer - gsb *Balancer + gsb *Balancer + builder balancer.Builder lastState balancer.State subconns map[balancer.SubConn]bool // subconns created by this balancer diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go new file mode 100644 index 00000000000..d7e9e1d54ec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go @@ -0,0 +1,255 @@ +/* + * + * Copyright 2024 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" + + "google.golang.org/grpc/connectivity" +) + +// Channel represents a channel within channelz, which includes metrics and +// internal channelz data, such as channelz id, child list, etc. +type Channel struct { + Entity + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + Parent *Channel + trace *ChannelTrace + // traceRefCount is the number of trace events that reference this channel. + // Non-zero traceRefCount means the trace of this channel cannot be deleted. + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +// Implemented to make Channel implement the Identifier interface used for +// nesting. +func (c *Channel) channelzIdentifier() {} + +func (c *Channel) String() string { + if c.Parent == nil { + return fmt.Sprintf("Channel #%d", c.ID) + } + return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID) +} + +func (c *Channel) id() int64 { + return c.ID +} + +func (c *Channel) SubChans() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(c.subChans) +} + +func (c *Channel) NestedChans() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(c.nestedChans) +} + +func (c *Channel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return c.trace.copy() +} + +type ChannelMetrics struct { + // The current connectivity state of the channel. + State atomic.Pointer[connectivity.State] + // The target this channel originally tried to connect to. May be absent. + Target atomic.Pointer[string] + // The number of calls started on the channel. + CallsStarted atomic.Int64 + // The number of calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of calls that have completed with a non-OK status. + CallsFailed atomic.Int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp atomic.Int64 +} + +// CopyFrom copies the metrics in o to c. For testing only. +func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) { + c.State.Store(o.State.Load()) + c.Target.Store(o.Target.Load()) + c.CallsStarted.Store(o.CallsStarted.Load()) + c.CallsSucceeded.Store(o.CallsSucceeded.Load()) + c.CallsFailed.Store(o.CallsFailed.Load()) + c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// Equal returns true iff the metrics of c are the same as the metrics of o. +// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool { + oc, ok := o.(*ChannelMetrics) + if !ok { + return false + } + if (c.State.Load() == nil) != (oc.State.Load() == nil) { + return false + } + if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() { + return false + } + if (c.Target.Load() == nil) != (oc.Target.Load() == nil) { + return false + } + if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() { + return false + } + return c.CallsStarted.Load() == oc.CallsStarted.Load() && + c.CallsFailed.Load() == oc.CallsFailed.Load() && + c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() && + c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load() +} + +func strFromPointer(s *string) string { + if s == nil { + return "" + } + return *s +} + +func (c *ChannelMetrics) String() string { + return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v", + c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(), + ) +} + +func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics { + c := &ChannelMetrics{} + c.State.Store(&state) + c.Target.Store(&target) + c.CallsStarted.Store(started) + c.CallsSucceeded.Store(succeeded) + c.CallsFailed.Store(failed) + c.LastCallStartedTimestamp.Store(timestamp) + return c +} + +func (c *Channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *SubChannel: + c.subChans[id] = v.RefName + case *Channel: + c.nestedChans[id] = v.RefName + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *Channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *Channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *Channel) getParentID() int64 { + if c.Parent == nil { + return -1 + } + return c.Parent.ID +} + +// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means +// deleting the channel reference from its parent's child list. +// +// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the +// corresponding grpc object has been invoked, and the channel does not have any children left. +// +// The returned boolean value indicates whether the channel has been successfully deleted from the tree. +func (c *Channel) deleteSelfFromTree() (deleted bool) { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return false + } + // not top channel + if c.Parent != nil { + c.Parent.deleteChild(c.ID) + } + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means +// deleting the channel from channelz's tracking entirely. Users can no longer use its id to query the +// channel, and its memory will be garbage collected. +// +// The trace reference count of the channel must be 0 for it to be deleted from the map. The channel +// tracing gRFC specifies that, as long as some other trace has a reference to an entity, the trace of +// the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+// +// It returns a bool to indicate whether the channel can be safely deleted from the map. +func (c *Channel) deleteSelfFromMap() (delete bool) { + return c.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the channel itself from the channelz database. +// The delete process includes two steps: +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return an entry-not-found error. +func (c *Channel) deleteSelfIfReady() { + if !c.deleteSelfFromTree() { + return + } + if !c.deleteSelfFromMap() { + return + } + db.deleteEntry(c.ID) + c.trace.clear() +} + +func (c *Channel) getChannelTrace() *ChannelTrace { + return c.trace +} + +func (c *Channel) incrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, 1) +} + +func (c *Channel) decrTraceRefCount() { + atomic.AddInt32(&c.traceRefCount, -1) +} + +func (c *Channel) getTraceRefCount() int { + i := atomic.LoadInt32(&c.traceRefCount) + return int(i) +} + +func (c *Channel) getRefName() string { + return c.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go new file mode 100644 index 00000000000..dfe18b08925 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go @@ -0,0 +1,402 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sort" + "sync" + "time" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id, to the child list. + addChild(id int64, e entry) + // deleteChild deletes the child with channelz id id from the child list. + deleteChild(id int64) + // triggerDelete tries to delete self from the channelz database. However, if + // the child list is not empty, then deletion from the database is on hold until + // the last child is deleted from the database. + triggerDelete() + // deleteSelfIfReady checks whether triggerDelete() has been called before, + // and whether the child list is now empty. If both conditions are met, it + // deletes self from the database. + deleteSelfIfReady() + // getParentID returns the parent ID of the entry. A parent ID of 0 means no parent. + getParentID() int64 + Entity +} + +// channelMap is the storage data structure for channelz. +// +// Methods of channelMap can be divided into two categories with respect to +// locking: +// +// 1. Methods that acquire the global lock. +// 2. Methods that can only be called when the global lock is held. +// +// Methods of the second type must always be called from within a method of the first type.
+type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + channels map[int64]*Channel + subChannels map[int64]*SubChannel + sockets map[int64]*Socket + servers map[int64]*Server +} + +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*Channel), + subChannels: make(map[int64]*SubChannel), + sockets: make(map[int64]*Socket), + servers: make(map[int64]*Server), + } +} + +func (c *channelMap) addServer(id int64, s *Server) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.servers[id] = s +} + +func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + cn.trace.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else if p := c.channels[pid]; p != nil { + p.addChild(id, cn) + } else { + logger.Infof("channel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) { + c.mu.Lock() + defer c.mu.Unlock() + sc.trace.cm = c + c.subChannels[id] = sc + if p := c.channels[pid]; p != nil { + p.addChild(id, sc) + } else { + logger.Infof("subchannel %d references invalid parent ID %d", id, pid) + } +} + +func (c *channelMap) addSocket(s *Socket) { + c.mu.Lock() + defer c.mu.Unlock() + s.cm = c + c.sockets[s.ID] = s + if s.Parent == nil { + logger.Infof("normal socket %d has no parent", s.ID) + } + s.Parent.(entry).addChild(s.ID, s) +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children and until no other +// entity's channel trace references it. It may lead to a chain of entry +// deletion. For example, deleting the last socket of a gracefully shutting down +// server will lead to the server being also deleted. +func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + defer c.mu.Unlock() + c.findEntry(id).triggerDelete() +} + +// tracedChannel represents tracing operations which are present on both +// channels and subChannels. +type tracedChannel interface { + getChannelTrace() *ChannelTrace + incrTraceRefCount() + decrTraceRefCount() + getRefName() string +} + +// c.mu must be held by the caller +func (c *channelMap) decrTraceRefCount(id int64) { + e := c.findEntry(id) + if v, ok := e.(tracedChannel); ok { + v.decrTraceRefCount() + e.deleteSelfIfReady() + } +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + if v, ok := c.channels[id]; ok { + return v + } + if v, ok := c.subChannels[id]; ok { + return v + } + if v, ok := c.servers[id]; ok { + return v + } + if v, ok := c.sockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// +// deleteEntry deletes an entry from the channelMap. Before calling this method, +// caller must check this entry is ready to be deleted, i.e removeEntry() has +// been called on it, and no children still exist. 
+func (c *channelMap) deleteEntry(id int64) entry { + if v, ok := c.sockets[id]; ok { + delete(c.sockets, id) + return v + } + if v, ok := c.subChannels[id]; ok { + delete(c.subChannels, id) + return v + } + if v, ok := c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return v + } + if v, ok := c.servers[id]; ok { + delete(c.servers, id) + return v + } + return &dummyEntry{idNotFound: id} +} + +func (c *channelMap) traceEvent(id int64, desc *TraceEvent) { + c.mu.Lock() + defer c.mu.Unlock() + child := c.findEntry(id) + childTC, ok := child.(tracedChannel) + if !ok { + return + } + childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) + if desc.Parent != nil { + parent := c.findEntry(child.getParentID()) + var chanType RefChannelType + switch child.(type) { + case *Channel: + chanType = RefChannel + case *SubChannel: + chanType = RefSubChannel + } + if parentTC, ok := parent.(tracedChannel); ok { + parentTC.getChannelTrace().append(&traceEvent{ + Desc: desc.Parent.Desc, + Severity: desc.Parent.Severity, + Timestamp: time.Now(), + RefID: id, + RefName: childTC.getRefName(), + RefType: chanType, + }) + childTC.incrTraceRefCount() + } + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + l := int64(len(c.topLevelChannels)) + ids := make([]int64, 0, l) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var t []*Channel + for _, v := range ids[idx:] { + if len(t) == maxResults { + end = false + break + } + if cn, ok := c.channels[v]; ok { + t = append(t, cn) + } + } + return t, end +} + +func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + ids := make([]int64, 0, len(c.servers)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + end := true + var s []*Server + for _, v := range ids[idx:] { + if len(s) == maxResults { + end = false + break + } + if svr, ok := c.servers[v]; ok { + s = append(s, svr) + } + } + return s, end +} + +func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + if maxResults <= 0 { + maxResults = EntriesPerPage + } + c.mu.RLock() + defer c.mu.RUnlock() + svr, ok := c.servers[id] + if !ok { + // server with id doesn't exist. 
+ return nil, true + } + svrskts := svr.sockets + ids := make([]int64, 0, len(svrskts)) + sks := make([]*Socket, 0, min(len(svrskts), maxResults)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) + end := true + for _, v := range ids[idx:] { + if len(sks) == maxResults { + end = false + break + } + if ns, ok := c.sockets[v]; ok { + sks = append(sks, ns) + } + } + return sks, end +} + +func (c *channelMap) getChannel(id int64) *Channel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.channels[id] +} + +func (c *channelMap) getSubChannel(id int64) *SubChannel { + c.mu.RLock() + defer c.mu.RUnlock() + return c.subChannels[id] +} + +func (c *channelMap) getSocket(id int64) *Socket { + c.mu.RLock() + defer c.mu.RUnlock() + return c.sockets[id] +} + +func (c *channelMap) getServer(id int64) *Server { + c.mu.RLock() + defer c.mu.RUnlock() + return c.servers[id] +} + +type dummyEntry struct { + // dummyEntry is a fake entry to handle entry not found case. + idNotFound int64 + Entity +} + +func (d *dummyEntry) String() string { + return fmt.Sprintf("non-existent entity #%d", d.idNotFound) +} + +func (d *dummyEntry) ID() int64 { return d.idNotFound } + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under a race + // condition. For example, there could be a race between ClientConn.Close() + // info being propagated to addrConn and http2Client. ClientConn.Close() + // cancels the context, causing http2Client to error out. The error info is + // then caught by the transport monitor before addrConn.tearDown() is called + // inside ClientConn.Close(). Therefore, the addrConn will create a new + // transport. And when registering the new transport in channelz, its parent + // addrConn could have already been torn down and deleted from channelz + // tracking, and thus reach the code here. + logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +func (*dummyEntry) getParentID() int64 { + return 0 +} + +// Entity is implemented by all channelz types. +type Entity interface { + isEntity() + fmt.Stringer + id() int64 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index fc094f3441b..f461e9bc3ba 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -16,47 +16,32 @@ * */ -// Package channelz defines APIs for enabling channelz service, entry +// Package channelz defines internal APIs for enabling channelz service, entry // registration/deletion, and accessing channelz data. It also defines channelz // metric struct formats. -// -// All APIs in this package are experimental.
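All of the paginated getters above share one contract: ids are sorted ascending, sort.Search finds the first id at or above the requested start, and end is false only when maxResults entries were returned before the id space ran out. A caller can therefore page through the full set by restarting just above the last id it saw; a sketch, assuming a caller-supplied visit callback:

// pageTopChannels is a hypothetical helper illustrating the pagination
// contract of getTopChannels/GetTopChannels; it is not part of this patch.
func pageTopChannels(visit func(*Channel)) {
	startID := int64(0)
	for {
		page, end := GetTopChannels(startID, 0) // 0 means EntriesPerPage results
		for _, ch := range page {
			visit(ch)
		}
		if end || len(page) == 0 {
			return
		}
		startID = page[len(page)-1].ID + 1 // resume above the last id seen
	}
}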
package channelz import ( - "errors" - "sort" - "sync" "sync/atomic" "time" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" ) -const ( - defaultMaxTraceEntry int32 = 30 -) - var ( // IDGen is the global channelz entity ID generator. It should not be used // outside this package except by tests. IDGen IDGenerator - db dbWrapper - // EntryPerPage defines the number of channelz entries to be shown on a web page. - EntryPerPage = int64(50) - curState int32 - maxTraceEntry = defaultMaxTraceEntry + db *channelMap = newChannelMap() + // EntriesPerPage defines the number of channelz entries to be shown on a web page. + EntriesPerPage = 50 + curState int32 ) // TurnOn turns on channelz data collection. func TurnOn() { - if !IsOn() { - db.set(newChannelMap()) - IDGen.Reset() - atomic.StoreInt32(&curState, 1) - } + atomic.StoreInt32(&curState, 1) } func init() { @@ -70,49 +55,15 @@ func IsOn() bool { return atomic.LoadInt32(&curState) == 1 } -// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). -// Setting it to 0 will disable channel tracing. -func SetMaxTraceEntry(i int32) { - atomic.StoreInt32(&maxTraceEntry, i) -} - -// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default. -func ResetMaxTraceEntryToDefault() { - atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) -} - -func getMaxTraceEntry() int { - i := atomic.LoadInt32(&maxTraceEntry) - return int(i) -} - -// dbWarpper wraps around a reference to internal channelz data storage, and -// provide synchronized functionality to set and get the reference. -type dbWrapper struct { - mu sync.RWMutex - DB *channelMap -} - -func (d *dbWrapper) set(db *channelMap) { - d.mu.Lock() - d.DB = db - d.mu.Unlock() -} - -func (d *dbWrapper) get() *channelMap { - d.mu.RLock() - defer d.mu.RUnlock() - return d.DB -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // -// The arg id specifies that only top channel with id at or above it will be included -// in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - return db.get().GetTopChannels(id, maxResults) +// The arg id specifies that only top channel with id at or above it will be +// included in the result. The returned slice is up to a length of the arg +// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending +// id order. +func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) { + return db.getTopChannels(id, maxResults) } // GetServers returns a slice of server's ServerMetric, along with a @@ -120,73 +71,69 @@ func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { // // The arg id specifies that only server with id at or above it will be included // in the result. The returned slice is up to a length of the arg maxResults or -// EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) { - return db.get().GetServers(id, maxResults) +// EntriesPerPage if maxResults is zero, and is sorted in ascending id order. 
+func GetServers(id int64, maxResults int) ([]*Server, bool) { + return db.getServers(id, maxResults) } // GetServerSockets returns a slice of server's (identified by id) normal socket's -// SocketMetric, along with a boolean indicating whether there's more sockets to +// SocketMetrics, along with a boolean indicating whether there's more sockets to // be queried for. // // The arg startID specifies that only sockets with id at or above it will be // included in the result. The returned slice is up to a length of the arg maxResults -// or EntryPerPage if maxResults is zero, and is sorted in ascending id order. -func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - return db.get().GetServerSockets(id, startID, maxResults) +// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) { + return db.getServerSockets(id, startID, maxResults) } -// GetChannel returns the ChannelMetric for the channel (identified by id). -func GetChannel(id int64) *ChannelMetric { - return db.get().GetChannel(id) +// GetChannel returns the Channel for the channel (identified by id). +func GetChannel(id int64) *Channel { + return db.getChannel(id) } -// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). -func GetSubChannel(id int64) *SubChannelMetric { - return db.get().GetSubChannel(id) +// GetSubChannel returns the SubChannel for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannel { + return db.getSubChannel(id) } -// GetSocket returns the SocketInternalMetric for the socket (identified by id). -func GetSocket(id int64) *SocketMetric { - return db.get().GetSocket(id) +// GetSocket returns the Socket for the socket (identified by id). +func GetSocket(id int64) *Socket { + return db.getSocket(id) } // GetServer returns the ServerMetric for the server (identified by id). -func GetServer(id int64) *ServerMetric { - return db.get().GetServer(id) +func GetServer(id int64) *Server { + return db.getServer(id) } // RegisterChannel registers the given channel c in the channelz database with -// ref as its reference name, and adds it to the child list of its parent -// (identified by pid). pid == nil means no parent. +// target as its target and reference name, and adds it to the child list of its +// parent. parent == nil means no parent. // // Returns a unique channelz identifier assigned to this channel. // // If channelz is not turned ON, the channelz database is not mutated. 
-func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { +func RegisterChannel(parent *Channel, target string) *Channel { id := IDGen.genID() - var parent int64 - isTopChannel := true - if pid != nil { - isTopChannel = false - parent = pid.Int() - } if !IsOn() { - return newIdentifer(RefChannel, id, pid) + return &Channel{ID: id} } - cn := &channel{ - refName: ref, - c: c, - subChans: make(map[int64]string), + isTopChannel := parent == nil + + cn := &Channel{ + ID: id, + RefName: target, nestedChans: make(map[int64]string), - id: id, - pid: parent, - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + subChans: make(map[int64]string), + Parent: parent, + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, } - db.get().addChannel(id, cn, isTopChannel, parent) - return newIdentifer(RefChannel, id, pid) + cn.ChannelMetrics.Target.Store(&target) + db.addChannel(id, cn, isTopChannel, cn.getParentID()) + return cn } // RegisterSubChannel registers the given subChannel c in the channelz database @@ -196,555 +143,66 @@ func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { // Returns a unique channelz identifier assigned to this subChannel. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a SubChannel's parent id cannot be nil") - } +func RegisterSubChannel(pid int64, ref string) *SubChannel { id := IDGen.genID() if !IsOn() { - return newIdentifer(RefSubChannel, id, pid), nil + return &SubChannel{ID: id} } - sc := &subChannel{ - refName: ref, - c: c, + sc := &SubChannel{ + RefName: ref, + ID: id, sockets: make(map[int64]string), - id: id, - pid: pid.Int(), - trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, + parent: db.getChannel(pid), + trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid.Int()) - return newIdentifer(RefSubChannel, id, pid), nil + db.addSubChannel(id, sc, pid) + return sc } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterServer(s Server, ref string) *Identifier { +func RegisterServer(ref string) *Server { id := IDGen.genID() if !IsOn() { - return newIdentifer(RefServer, id, nil) + return &Server{ID: id} } - svr := &server{ - refName: ref, - s: s, + svr := &Server{ + RefName: ref, sockets: make(map[int64]string), listenSockets: make(map[int64]string), - id: id, - } - db.get().addServer(id, svr) - return newIdentifer(RefServer, id, nil) -} - -// RegisterListenSocket registers the given listen socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this listen socket. -// -// If channelz is not turned ON, the channelz database is not mutated. 
-func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a ListenSocket's parent id cannot be 0") + ID: id, } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefListenSocket, id, pid), nil - } - - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addListenSocket(id, ls, pid.Int()) - return newIdentifer(RefListenSocket, id, pid), nil + db.addServer(id, svr) + return svr } -// RegisterNormalSocket registers the given normal socket s in channelz database +// RegisterSocket registers the given normal socket s in channelz database // with ref as its reference name, and adds it to the child list of its parent -// (identified by pid). It returns the unique channelz tracking id assigned to -// this normal socket. +// (identified by skt.Parent, which must be set). It returns the unique channelz +// tracking id assigned to this normal socket. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { - if pid == nil { - return nil, errors.New("a NormalSocket's parent id cannot be 0") - } - id := IDGen.genID() - if !IsOn() { - return newIdentifer(RefNormalSocket, id, pid), nil +func RegisterSocket(skt *Socket) *Socket { + skt.ID = IDGen.genID() + if IsOn() { + db.addSocket(skt) } - - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} - db.get().addNormalSocket(id, ns, pid.Int()) - return newIdentifer(RefNormalSocket, id, pid), nil + return skt } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. // // If channelz is not turned ON, this function is a no-op. -func RemoveEntry(id *Identifier) { +func RemoveEntry(id int64) { if !IsOn() { return } - db.get().removeEntry(id.Int()) -} - -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe -// the event to be added to the channel trace. -// -// The Parent field is optional. It is used for an event that will be recorded -// in the entity's parent trace. -type TraceEventDesc struct { - Desc string - Severity Severity - Parent *TraceEventDesc -} - -// AddTraceEvent adds trace related to the entity with specified id, using the -// provided TraceEventDesc. -// -// If channelz is not turned ON, this will simply log the event descriptions. -func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { - // Log only the trace description associated with the bottom most entity. - switch desc.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, withParens(id)+desc.Desc) - case CtWarning: - l.WarningDepth(depth+1, withParens(id)+desc.Desc) - case CtError: - l.ErrorDepth(depth+1, withParens(id)+desc.Desc) - } - - if getMaxTraceEntry() == 0 { - return - } - if IsOn() { - db.get().traceEvent(id.Int(), desc) - } -} - -// channelMap is the storage data structure for channelz. -// Methods of channelMap can be divided in two two categories with respect to locking. -// 1. Methods acquire the global lock. -// 2. Methods that can only be called when global lock is held. -// A second type of method need always to be called inside a first type of method. 
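RegisterSocket collapses the old RegisterListenSocket/RegisterNormalSocket pair: the caller now constructs the Socket value itself, including its Parent entity and SocketType, and channelz merely assigns the id and links it into the tree. A sketch of a server wiring up one listen socket and bumping its call counters (the listener address and the placement of the metric updates are assumptions, not taken from this patch):

// exampleServerSockets is a hypothetical sketch of the new registration API.
func exampleServerSockets(lisAddr net.Addr) {
	srv := RegisterServer("grpc-server")

	skt := RegisterSocket(&Socket{
		SocketType: SocketTypeListen,
		RefName:    "listener",
		Parent:     srv,     // must be set: addSocket links the child via Parent
		LocalAddr:  lisAddr, // e.g. from net.Listener.Addr()
	})

	// Metrics are now plain atomics mutated in place by the call path.
	srv.ServerMetrics.CallsStarted.Add(1)
	srv.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
	srv.ServerMetrics.CallsSucceeded.Add(1)

	// Tear-down mirrors creation: sockets first, then the server itself.
	RemoveEntry(skt.ID)
	RemoveEntry(srv.ID)
}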
-type channelMap struct { - mu sync.RWMutex - topLevelChannels map[int64]struct{} - servers map[int64]*server - channels map[int64]*channel - subChannels map[int64]*subChannel - listenSockets map[int64]*listenSocket - normalSockets map[int64]*normalSocket -} - -func newChannelMap() *channelMap { - return &channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - } -} - -func (c *channelMap) addServer(id int64, s *server) { - c.mu.Lock() - s.cm = c - c.servers[id] = s - c.mu.Unlock() -} - -func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) { - c.mu.Lock() - cn.cm = c - cn.trace.cm = c - c.channels[id] = cn - if isTopChannel { - c.topLevelChannels[id] = struct{}{} - } else { - c.findEntry(pid).addChild(id, cn) - } - c.mu.Unlock() -} - -func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) { - c.mu.Lock() - sc.cm = c - sc.trace.cm = c - c.subChannels[id] = sc - c.findEntry(pid).addChild(id, sc) - c.mu.Unlock() -} - -func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) { - c.mu.Lock() - ls.cm = c - c.listenSockets[id] = ls - c.findEntry(pid).addChild(id, ls) - c.mu.Unlock() -} - -func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) { - c.mu.Lock() - ns.cm = c - c.normalSockets[id] = ns - c.findEntry(pid).addChild(id, ns) - c.mu.Unlock() -} - -// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to -// wait on the deletion of its children and until no other entity's channel trace references it. -// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully -// shutting down server will lead to the server being also deleted. -func (c *channelMap) removeEntry(id int64) { - c.mu.Lock() - c.findEntry(id).triggerDelete() - c.mu.Unlock() -} - -// c.mu must be held by the caller -func (c *channelMap) decrTraceRefCount(id int64) { - e := c.findEntry(id) - if v, ok := e.(tracedChannel); ok { - v.decrTraceRefCount() - e.deleteSelfIfReady() - } -} - -// c.mu must be held by the caller. -func (c *channelMap) findEntry(id int64) entry { - var v entry - var ok bool - if v, ok = c.channels[id]; ok { - return v - } - if v, ok = c.subChannels[id]; ok { - return v - } - if v, ok = c.servers[id]; ok { - return v - } - if v, ok = c.listenSockets[id]; ok { - return v - } - if v, ok = c.normalSockets[id]; ok { - return v - } - return &dummyEntry{idNotFound: id} -} - -// c.mu must be held by the caller -// deleteEntry simply deletes an entry from the channelMap. Before calling this -// method, caller must check this entry is ready to be deleted, i.e removeEntry() -// has been called on it, and no children still exist. -// Conditionals are ordered by the expected frequency of deletion of each entity -// type, in order to optimize performance. 
-func (c *channelMap) deleteEntry(id int64) { - var ok bool - if _, ok = c.normalSockets[id]; ok { - delete(c.normalSockets, id) - return - } - if _, ok = c.subChannels[id]; ok { - delete(c.subChannels, id) - return - } - if _, ok = c.channels[id]; ok { - delete(c.channels, id) - delete(c.topLevelChannels, id) - return - } - if _, ok = c.listenSockets[id]; ok { - delete(c.listenSockets, id) - return - } - if _, ok = c.servers[id]; ok { - delete(c.servers, id) - return - } -} - -func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) { - c.mu.Lock() - child := c.findEntry(id) - childTC, ok := child.(tracedChannel) - if !ok { - c.mu.Unlock() - return - } - childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()}) - if desc.Parent != nil { - parent := c.findEntry(child.getParentID()) - var chanType RefChannelType - switch child.(type) { - case *channel: - chanType = RefChannel - case *subChannel: - chanType = RefSubChannel - } - if parentTC, ok := parent.(tracedChannel); ok { - parentTC.getChannelTrace().append(&TraceEvent{ - Desc: desc.Parent.Desc, - Severity: desc.Parent.Severity, - Timestamp: time.Now(), - RefID: id, - RefName: childTC.getRefName(), - RefType: chanType, - }) - childTC.incrTraceRefCount() - } - } - c.mu.Unlock() -} - -type int64Slice []int64 - -func (s int64Slice) Len() int { return len(s) } -func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } - -func copyMap(m map[int64]string) map[int64]string { - n := make(map[int64]string) - for k, v := range m { - n[k] = v - } - return n -} - -func min(a, b int64) int64 { - if a < b { - return a - } - return b -} - -func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.topLevelChannels)) - ids := make([]int64, 0, l) - cns := make([]*channel, 0, min(l, maxResults)) - - for k := range c.topLevelChannels { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var t []*ChannelMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if cn, ok := c.channels[v]; ok { - cns = append(cns, cn) - t = append(t, &ChannelMetric{ - NestedChans: copyMap(cn.nestedChans), - SubChans: copyMap(cn.subChans), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - - for i, cn := range cns { - t[i].ChannelData = cn.c.ChannelzMetric() - t[i].ID = cn.id - t[i].RefName = cn.refName - t[i].Trace = cn.trace.dumpData() - } - return t, end -} - -func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - c.mu.RLock() - l := int64(len(c.servers)) - ids := make([]int64, 0, l) - ss := make([]*server, 0, min(l, maxResults)) - for k := range c.servers { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) - count := int64(0) - var end bool - var s []*ServerMetric - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if svr, ok := c.servers[v]; ok { - ss = append(ss, svr) - s = append(s, &ServerMetric{ - ListenSockets: copyMap(svr.listenSockets), - }) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() 
- if count == 0 { - end = true - } - - for i, svr := range ss { - s[i].ServerData = svr.s.ChannelzMetric() - s[i].ID = svr.id - s[i].RefName = svr.refName - } - return s, end -} - -func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) { - if maxResults <= 0 { - maxResults = EntryPerPage - } - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - // server with id doesn't exist. - c.mu.RUnlock() - return nil, true - } - svrskts := svr.sockets - l := int64(len(svrskts)) - ids := make([]int64, 0, l) - sks := make([]*normalSocket, 0, min(l, maxResults)) - for k := range svrskts { - ids = append(ids, k) - } - sort.Sort(int64Slice(ids)) - idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID }) - count := int64(0) - var end bool - for i, v := range ids[idx:] { - if count == maxResults { - break - } - if ns, ok := c.normalSockets[v]; ok { - sks = append(sks, ns) - count++ - } - if i == len(ids[idx:])-1 { - end = true - break - } - } - c.mu.RUnlock() - if count == 0 { - end = true - } - s := make([]*SocketMetric, 0, len(sks)) - for _, ns := range sks { - sm := &SocketMetric{} - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - s = append(s, sm) - } - return s, end -} - -func (c *channelMap) GetChannel(id int64) *ChannelMetric { - cm := &ChannelMetric{} - var cn *channel - var ok bool - c.mu.RLock() - if cn, ok = c.channels[id]; !ok { - // channel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.NestedChans = copyMap(cn.nestedChans) - cm.SubChans = copyMap(cn.subChans) - // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when - // holding the lock to prevent potential data race. - chanCopy := cn.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = cn.id - cm.RefName = cn.refName - cm.Trace = cn.trace.dumpData() - return cm -} - -func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { - cm := &SubChannelMetric{} - var sc *subChannel - var ok bool - c.mu.RLock() - if sc, ok = c.subChannels[id]; !ok { - // subchannel with id doesn't exist. - c.mu.RUnlock() - return nil - } - cm.Sockets = copyMap(sc.sockets) - // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when - // holding the lock to prevent potential data race. - chanCopy := sc.c - c.mu.RUnlock() - cm.ChannelData = chanCopy.ChannelzMetric() - cm.ID = sc.id - cm.RefName = sc.refName - cm.Trace = sc.trace.dumpData() - return cm -} - -func (c *channelMap) GetSocket(id int64) *SocketMetric { - sm := &SocketMetric{} - c.mu.RLock() - if ls, ok := c.listenSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ls.s.ChannelzMetric() - sm.ID = ls.id - sm.RefName = ls.refName - return sm - } - if ns, ok := c.normalSockets[id]; ok { - c.mu.RUnlock() - sm.SocketData = ns.s.ChannelzMetric() - sm.ID = ns.id - sm.RefName = ns.refName - return sm - } - c.mu.RUnlock() - return nil -} - -func (c *channelMap) GetServer(id int64) *ServerMetric { - sm := &ServerMetric{} - var svr *server - var ok bool - c.mu.RLock() - if svr, ok = c.servers[id]; !ok { - c.mu.RUnlock() - return nil - } - sm.ListenSockets = copyMap(svr.listenSockets) - c.mu.RUnlock() - sm.ID = svr.id - sm.RefName = svr.refName - sm.ServerData = svr.s.ChannelzMetric() - return sm + db.removeEntry(id) } // IDGenerator is an incrementing atomic that tracks IDs for channelz entities. 
@@ -761,3 +219,11 @@ func (i *IDGenerator) Reset() { func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } + +// Identifier is an opaque channelz identifier used to expose channelz symbols +// outside of grpc. Currently only implemented by Channel since no other +// types require exposure outside grpc. +type Identifier interface { + Entity + channelzIdentifier() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go deleted file mode 100644 index c9a27acd371..00000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/id.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import "fmt" - -// Identifier is an opaque identifier which uniquely identifies an entity in the -// channelz database. -type Identifier struct { - typ RefChannelType - id int64 - str string - pid *Identifier -} - -// Type returns the entity type corresponding to id. -func (id *Identifier) Type() RefChannelType { - return id.typ -} - -// Int returns the integer identifier corresponding to id. -func (id *Identifier) Int() int64 { - return id.id -} - -// String returns a string representation of the entity corresponding to id. -// -// This includes some information about the parent as well. Examples: -// Top-level channel: [Channel #channel-number] -// Nested channel: [Channel #parent-channel-number Channel #channel-number] -// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] -func (id *Identifier) String() string { - return id.str -} - -// Equal returns true if other is the same as id. -func (id *Identifier) Equal(other *Identifier) bool { - if (id != nil) != (other != nil) { - return false - } - if id == nil && other == nil { - return true - } - return id.typ == other.typ && id.id == other.id && id.pid == other.pid -} - -// NewIdentifierForTesting returns a new opaque identifier to be used only for -// testing purposes. -func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { - return newIdentifer(typ, id, pid) -} - -func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { - str := fmt.Sprintf("%s #%d", typ, id) - if pid != nil { - str = fmt.Sprintf("%s %s", pid, str) - } - return &Identifier{typ: typ, id: id, str: str, pid: pid} -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index f89e6f77bbd..ee4d7212580 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,53 +26,49 @@ import ( var logger = grpclog.Component("channelz") -func withParens(id *Identifier) string { - return "[" + id.String() + "] " -} - // Info logs and adds a trace event if channelz is on. 
-func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtInfo, }) } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtWarning, }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprint(args...), Severity: CtError, }) } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { - AddTraceEvent(l, id, 1, &TraceEventDesc{ +func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) { + AddTraceEvent(l, e, 1, &TraceEvent{ Desc: fmt.Sprintf(format, args...), Severity: CtError, }) diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go new file mode 100644 index 00000000000..cdfc49d6eac --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/server.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// Server is the channelz representation of a server. +type Server struct { + Entity + ID int64 + RefName string + + ServerMetrics ServerMetrics + + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + cm *channelMap +} + +// ServerMetrics defines a struct containing metrics for servers. +type ServerMetrics struct { + // The number of incoming calls started on the server. + CallsStarted atomic.Int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded atomic.Int64 + // The number of incoming calls that have completed with a non-OK status.
+ CallsFailed atomic.Int64 + // The last time a call was started on the server. + LastCallStartedTimestamp atomic.Int64 +} + +// NewServerMetricsForTesting returns an initialized ServerMetrics. +func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics { + sm := &ServerMetrics{} + sm.CallsStarted.Store(started) + sm.CallsSucceeded.Store(succeeded) + sm.CallsFailed.Store(failed) + sm.LastCallStartedTimestamp.Store(timestamp) + return sm +} + +func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) { + sm.CallsStarted.Store(o.CallsStarted.Load()) + sm.CallsSucceeded.Store(o.CallsSucceeded.Load()) + sm.CallsFailed.Store(o.CallsFailed.Load()) + sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load()) +} + +// ListenSockets returns the listening sockets for s. +func (s *Server) ListenSockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(s.listenSockets) +} + +// String returns a printable description of s. +func (s *Server) String() string { + return fmt.Sprintf("Server #%d", s.ID) +} + +func (s *Server) id() int64 { + return s.ID +} + +func (s *Server) addChild(id int64, e entry) { + switch v := e.(type) { + case *Socket: + switch v.SocketType { + case SocketTypeNormal: + s.sockets[id] = v.RefName + case SocketTypeListen: + s.listenSockets[id] = v.RefName + } + default: + logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *Server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *Server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *Server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.ID) +} + +func (s *Server) getParentID() int64 { + return 0 +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go new file mode 100644 index 00000000000..fa64834b25d --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "net" + "sync/atomic" + + "google.golang.org/grpc/credentials" +) + +// SocketMetrics defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketMetrics struct { + // The number of streams that have been started. + StreamsStarted atomic.Int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded atomic.Int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. 
+ StreamsFailed atomic.Int64 + // The number of messages successfully sent on this socket. + MessagesSent atomic.Int64 + MessagesReceived atomic.Int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent atomic.Int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp atomic.Int64 + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp atomic.Int64 + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp atomic.Int64 + // The last time a message was received by this endpoint. + LastMessageReceivedTimestamp atomic.Int64 +} + +// EphemeralSocketMetrics are metrics that change rapidly and are tracked +// outside of channelz. +type EphemeralSocketMetrics struct { + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 +} + +type SocketType string + +const ( + SocketTypeNormal = "NormalSocket" + SocketTypeListen = "ListenSocket" +) + +type Socket struct { + Entity + SocketType SocketType + ID int64 + Parent Entity + cm *channelMap + SocketMetrics SocketMetrics + EphemeralMetrics func() *EphemeralSocketMetrics + + RefName string + // The locally bound address. Immutable. + LocalAddr net.Addr + // The remote bound address. May be absent. Immutable. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. Immutable. + RemoteName string + // Immutable. + SocketOptions *SocketOptionData + // Immutable. + Security credentials.ChannelzSecurityValue +} + +func (ls *Socket) String() string { + return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID) +} + +func (ls *Socket) id() int64 { + return ls.ID +} + +func (ls *Socket) addChild(id int64, e entry) { + logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *Socket) deleteChild(id int64) { + logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *Socket) triggerDelete() { + ls.cm.deleteEntry(ls.ID) + ls.Parent.(entry).deleteChild(ls.ID) +} + +func (ls *Socket) deleteSelfIfReady() { + logger.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +func (ls *Socket) getParentID() int64 { + return ls.Parent.id() +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go new file mode 100644 index 00000000000..3b88e4cba8e --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go @@ -0,0 +1,151 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + "sync/atomic" +) + +// SubChannel is the channelz representation of a subchannel. +type SubChannel struct { + Entity + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + closeCalled bool + sockets map[int64]string + parent *Channel + trace *ChannelTrace + traceRefCount int32 + + ChannelMetrics ChannelMetrics +} + +func (sc *SubChannel) String() string { + return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID) +} + +func (sc *SubChannel) id() int64 { + return sc.ID +} + +func (sc *SubChannel) Sockets() map[int64]string { + db.mu.RLock() + defer db.mu.RUnlock() + return copyMap(sc.sockets) +} + +func (sc *SubChannel) Trace() *ChannelTrace { + db.mu.RLock() + defer db.mu.RUnlock() + return sc.trace.copy() +} + +func (sc *SubChannel) addChild(id int64, e entry) { + if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal { + sc.sockets[id] = v.RefName + } else { + logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *SubChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *SubChannel) getParentID() int64 { + return sc.parent.ID +} + +// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which +// means deleting the subchannel reference from its parent's child list. +// +// In order for a subchannel to be deleted from the tree, it must meet two criteria: removal of +// the corresponding grpc object has been invoked, and the subchannel does not have any children left. +// +// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree. +func (sc *SubChannel) deleteSelfFromTree() (deleted bool) { + if !sc.closeCalled || len(sc.sockets) != 0 { + return false + } + sc.parent.deleteChild(sc.ID) + return true +} + +// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means +// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query +// the subchannel, and its memory will be garbage collected. +// +// The trace reference count of the subchannel must be 0 in order to be deleted from the map. The +// channel tracing gRFC specifies that as long as some other trace has a reference to an entity, +// the trace of the referenced entity must not be deleted. In order to release the resource allocated +// by grpc, the reference to the grpc object is reset to a dummy object. +// +// deleteSelfFromMap must be called after deleteSelfFromTree returns true. +// +// It returns a bool to indicate whether the subchannel can be safely deleted from the map. +func (sc *SubChannel) deleteSelfFromMap() (delete bool) { + return sc.getTraceRefCount() == 0 +} + +// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. +// The delete process includes two steps: +// 1.
delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. +func (sc *SubChannel) deleteSelfIfReady() { + if !sc.deleteSelfFromTree() { + return + } + if !sc.deleteSelfFromMap() { + return + } + db.deleteEntry(sc.ID) + sc.trace.clear() +} + +func (sc *SubChannel) getChannelTrace() *ChannelTrace { + return sc.trace +} + +func (sc *SubChannel) incrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, 1) +} + +func (sc *SubChannel) decrTraceRefCount() { + atomic.AddInt32(&sc.traceRefCount, -1) +} + +func (sc *SubChannel) getTraceRefCount() int { + i := atomic.LoadInt32(&sc.traceRefCount) + return int(i) +} + +func (sc *SubChannel) getRefName() string { + return sc.RefName +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go similarity index 83% rename from vendor/google.golang.org/grpc/internal/channelz/types_linux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go index 1b1c4cce34a..5ac73ff8339 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go @@ -49,3 +49,17 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { s.TCPInfo = v } } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(socket any) *SocketOptionData { + c, ok := socket.(syscall.Conn) + if !ok { + return nil + } + data := &SocketOptionData{} + if rawConn, err := c.SyscallConn(); err == nil { + rawConn.Control(data.Getsockopt) + return data + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go similarity index 90% rename from vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go rename to vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go index 8b06eed1ab8..d1ed8df6a51 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* * @@ -41,3 +40,8 @@ func (s *SocketOptionData) Getsockopt(fd uintptr) { logger.Warning("Channelz: socket options are not supported on non-linux environments") }) } + +// GetSocketOption gets the socket option info of the conn. +func GetSocketOption(c any) *SocketOptionData { + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go new file mode 100644 index 00000000000..36b86740323 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go @@ -0,0 +1,204 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
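GetSocketOption gives both build flavors above the same call site: on Linux it snapshots socket options through syscall.RawConn.Control, and on every other platform it compiles to a nil return. The caller side, as a sketch (the connection argument is illustrative):

// exampleSocketOptions is a hypothetical sketch of the caller side of
// GetSocketOption; on non-linux builds it simply prints nothing.
func exampleSocketOptions(conn *net.TCPConn) {
	if data := GetSocketOption(conn); data != nil {
		fmt.Printf("socket options: %+v\n", data) // linux-only details
	}
}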
+ * + */ + +package channelz + +import ( + "fmt" + "sync" + "sync/atomic" + "time" + + "google.golang.org/grpc/grpclog" +) + +const ( + defaultMaxTraceEntry int32 = 30 +) + +var maxTraceEntry = defaultMaxTraceEntry + +// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e. +// channel/subchannel). Setting it to 0 will disable channel tracing. +func SetMaxTraceEntry(i int32) { + atomic.StoreInt32(&maxTraceEntry, i) +} + +// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per +// entity to default. +func ResetMaxTraceEntryToDefault() { + atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry) +} + +func getMaxTraceEntry() int { + i := atomic.LoadInt32(&maxTraceEntry) + return int(i) +} + +// traceEvent is an internal representation of a single trace event +type traceEvent struct { + // Desc is a simple description of the trace event. + Desc string + // Severity states the severity of this trace event. + Severity Severity + // Timestamp is the event time. + Timestamp time.Time + // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is + // involved in this event. + // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) + RefID int64 + // RefName is the reference name for the entity that gets referenced in the event. + RefName string + // RefType indicates the referenced entity type, i.e Channel or SubChannel. + RefType RefChannelType +} + +// TraceEvent is what the caller of AddTraceEvent should provide to describe the +// event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. +type TraceEvent struct { + Desc string + Severity Severity + Parent *TraceEvent +} + +type ChannelTrace struct { + cm *channelMap + clearCalled bool + CreationTime time.Time + EventNum int64 + mu sync.Mutex + Events []*traceEvent +} + +func (c *ChannelTrace) copy() *ChannelTrace { + return &ChannelTrace{ + CreationTime: c.CreationTime, + EventNum: c.EventNum, + Events: append(([]*traceEvent)(nil), c.Events...), + } +} + +func (c *ChannelTrace) append(e *traceEvent) { + c.mu.Lock() + if len(c.Events) == getMaxTraceEntry() { + del := c.Events[0] + c.Events = c.Events[1:] + if del.RefID != 0 { + // start recursive cleanup in a goroutine to not block the call originated from grpc. + go func() { + // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. + c.cm.mu.Lock() + c.cm.decrTraceRefCount(del.RefID) + c.cm.mu.Unlock() + }() + } + } + e.Timestamp = time.Now() + c.Events = append(c.Events, e) + c.EventNum++ + c.mu.Unlock() +} + +func (c *ChannelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true + c.mu.Lock() + for _, e := range c.Events { + if e.RefID != 0 { + // caller should have already held the c.cm.mu lock. + c.cm.decrTraceRefCount(e.RefID) + } + } + c.mu.Unlock() +} + +// Severity is the severity level of a trace event. +// The canonical enumeration of all valid values is here: +// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. +type Severity int + +const ( + // CtUnknown indicates unknown severity of a trace event. + CtUnknown Severity = iota + // CtInfo indicates info level severity of a trace event. + CtInfo + // CtWarning indicates warning level severity of a trace event. + CtWarning + // CtError indicates error level severity of a trace event. 
+ CtError +) + +// RefChannelType is the type of the entity being referenced in a trace event. +type RefChannelType int + +const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota + // RefChannel indicates the referenced entity is a Channel. + RefChannel + // RefSubChannel indicates the referenced entity is a SubChannel. + RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. + RefNormalSocket +) + +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) { + // Log only the trace description associated with the bottom most entity. + d := fmt.Sprintf("[%s]%s", e, desc.Desc) + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, d) + case CtWarning: + l.WarningDepth(depth+1, d) + case CtError: + l.ErrorDepth(depth+1, d) + } + + if getMaxTraceEntry() == 0 { + return + } + if IsOn() { + db.traceEvent(e.id(), desc) + } +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go deleted file mode 100644 index 1d4020f5379..00000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ /dev/null @@ -1,727 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "net" - "sync" - "sync/atomic" - "time" - - "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" -) - -// entry represents a node in the channelz database. -type entry interface { - // addChild adds a child e, whose channelz id is id to child list - addChild(id int64, e entry) - // deleteChild deletes a child with channelz id to be id from child list - deleteChild(id int64) - // triggerDelete tries to delete self from channelz database. However, if child - // list is not empty, then deletion from the database is on hold until the last - // child is deleted from database. - triggerDelete() - // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child - // list is now empty. If both conditions are met, then delete self from database. - deleteSelfIfReady() - // getParentID returns parent ID of the entry. 0 value parent ID means no parent. 
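Putting trace.go together: the Infof/Warningf/Errorf wrappers shown earlier feed AddTraceEvent, which always logs and, when channelz is on, appends to the bounded per-entity ring whose oldest event is evicted once getMaxTraceEntry() is reached. A sketch of that behavior, assuming Channel exposes the same Trace() copy accessor that SubChannel does:

// exampleTracing is a hypothetical sketch of the bounded trace ring; the
// logger and channel are assumed to exist already.
func exampleTracing(l grpclog.DepthLoggerV2, ch *Channel) {
	SetMaxTraceEntry(5) // at most 5 events per entity; 0 disables tracing
	defer ResetMaxTraceEntryToDefault()

	for i := 0; i < 10; i++ {
		Infof(l, ch, "event %d", i) // logs and appends a CtInfo trace event
	}

	t := ch.Trace()
	fmt.Println(len(t.Events), t.EventNum) // 5 10: only the newest 5 survive
}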
- getParentID() int64 -} - -// dummyEntry is a fake entry to handle entry not found case. -type dummyEntry struct { - idNotFound int64 -} - -func (d *dummyEntry) addChild(id int64, e entry) { - // Note: It is possible for a normal program to reach here under race condition. - // For example, there could be a race between ClientConn.Close() info being propagated - // to addrConn and http2Client. ClientConn.Close() cancel the context and result - // in http2Client to error. The error info is then caught by transport monitor - // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, - // the addrConn will create a new transport. And when registering the new transport in - // channelz, its parent addrConn could have already been torn down and deleted - // from channelz tracking, and thus reach the code here. - logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) -} - -func (d *dummyEntry) deleteChild(id int64) { - // It is possible for a normal program to reach here under race condition. - // Refer to the example described in addChild(). - logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) -} - -func (d *dummyEntry) triggerDelete() { - logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) -} - -func (*dummyEntry) deleteSelfIfReady() { - // code should not reach here. deleteSelfIfReady is always called on an existing entry. -} - -func (*dummyEntry) getParentID() int64 { - return 0 -} - -// ChannelMetric defines the info channelz provides for a specific Channel, which -// includes ChannelInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ChannelMetric struct { - // ID is the channelz id of this channel. - ID int64 - // RefName is the human readable reference string of this channel. - RefName string - // ChannelData contains channel internal metric reported by the channel through - // ChannelzMetric(). - ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this channel in the format of - // a map from nested channel channelz id to corresponding reference string. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this channel in the format of a - // map from subchannel channelz id to corresponding reference string. - SubChans map[int64]string - // Sockets tracks the socket type children of this channel in the format of a map - // from socket channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow channel having sockets directly, - // therefore, this is field is unused. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// SubChannelMetric defines the info channelz provides for a specific SubChannel, -// which includes ChannelInternalMetric and channelz-specific data, such as -// channelz id, child list, etc. -type SubChannelMetric struct { - // ID is the channelz id of this subchannel. - ID int64 - // RefName is the human readable reference string of this subchannel. - RefName string - // ChannelData contains subchannel internal metric reported by the subchannel - // through ChannelzMetric(). 
- ChannelData *ChannelInternalMetric - // NestedChans tracks the nested channel type children of this subchannel in the format of - // a map from nested channel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have nested channels - // as children, therefore, this field is unused. - NestedChans map[int64]string - // SubChans tracks the subchannel type children of this subchannel in the format of a - // map from subchannel channelz id to corresponding reference string. - // Note current grpc implementation doesn't allow subchannel to have subchannels - // as children, therefore, this field is unused. - SubChans map[int64]string - // Sockets tracks the socket type children of this subchannel in the format of a map - // from socket channelz id to corresponding reference string. - Sockets map[int64]string - // Trace contains the most recent traced events. - Trace *ChannelTrace -} - -// ChannelInternalMetric defines the struct that the implementor of Channel interface -// should return from ChannelzMetric(). -type ChannelInternalMetric struct { - // current connectivity state of the channel. - State connectivity.State - // The target this channel originally tried to connect to. May be absent - Target string - // The number of calls started on the channel. - CallsStarted int64 - // The number of calls that have completed with an OK status. - CallsSucceeded int64 - // The number of calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the channel. - LastCallStartedTimestamp time.Time -} - -// ChannelTrace stores traced events on a channel/subchannel and related info. -type ChannelTrace struct { - // EventNum is the number of events that ever got traced (i.e. including those that have been deleted) - EventNum int64 - // CreationTime is the creation time of the trace. - CreationTime time.Time - // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the - // oldest one) - Events []*TraceEvent -} - -// TraceEvent represent a single trace event -type TraceEvent struct { - // Desc is a simple description of the trace event. - Desc string - // Severity states the severity of this trace event. - Severity Severity - // Timestamp is the event time. - Timestamp time.Time - // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is - // involved in this event. - // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside []) - RefID int64 - // RefName is the reference name for the entity that gets referenced in the event. - RefName string - // RefType indicates the referenced entity type, i.e Channel or SubChannel. - RefType RefChannelType -} - -// Channel is the interface that should be satisfied in order to be tracked by -// channelz as Channel or SubChannel. -type Channel interface { - ChannelzMetric() *ChannelInternalMetric -} - -type dummyChannel struct{} - -func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric { - return &ChannelInternalMetric{} -} - -type channel struct { - refName string - c Channel - closeCalled bool - nestedChans map[int64]string - subChans map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - // traceRefCount is the number of trace events that reference this channel. - // Non-zero traceRefCount means the trace of this channel cannot be deleted. 
- traceRefCount int32 -} - -func (c *channel) addChild(id int64, e entry) { - switch v := e.(type) { - case *subChannel: - c.subChans[id] = v.refName - case *channel: - c.nestedChans[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) - } -} - -func (c *channel) deleteChild(id int64) { - delete(c.subChans, id) - delete(c.nestedChans, id) - c.deleteSelfIfReady() -} - -func (c *channel) triggerDelete() { - c.closeCalled = true - c.deleteSelfIfReady() -} - -func (c *channel) getParentID() int64 { - return c.pid -} - -// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means -// deleting the channel reference from its parent's child list. -// -// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the -// corresponding grpc object has been invoked, and the channel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (c *channel) deleteSelfFromTree() (deleted bool) { - if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { - return false - } - // not top channel - if c.pid != 0 { - c.cm.findEntry(c.pid).deleteChild(c.id) - } - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means -// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the -// channel, and its memory will be garbage collected. -// -// The trace reference count of the channel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (c *channel) deleteSelfFromMap() (delete bool) { - if c.getTraceRefCount() != 0 { - c.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the channel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. 
-func (c *channel) deleteSelfIfReady() { - if !c.deleteSelfFromTree() { - return - } - if !c.deleteSelfFromMap() { - return - } - c.cm.deleteEntry(c.id) - c.trace.clear() -} - -func (c *channel) getChannelTrace() *channelTrace { - return c.trace -} - -func (c *channel) incrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, 1) -} - -func (c *channel) decrTraceRefCount() { - atomic.AddInt32(&c.traceRefCount, -1) -} - -func (c *channel) getTraceRefCount() int { - i := atomic.LoadInt32(&c.traceRefCount) - return int(i) -} - -func (c *channel) getRefName() string { - return c.refName -} - -type subChannel struct { - refName string - c Channel - closeCalled bool - sockets map[int64]string - id int64 - pid int64 - cm *channelMap - trace *channelTrace - traceRefCount int32 -} - -func (sc *subChannel) addChild(id int64, e entry) { - if v, ok := e.(*normalSocket); ok { - sc.sockets[id] = v.refName - } else { - logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) - } -} - -func (sc *subChannel) deleteChild(id int64) { - delete(sc.sockets, id) - sc.deleteSelfIfReady() -} - -func (sc *subChannel) triggerDelete() { - sc.closeCalled = true - sc.deleteSelfIfReady() -} - -func (sc *subChannel) getParentID() int64 { - return sc.pid -} - -// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which -// means deleting the subchannel reference from its parent's child list. -// -// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of -// the corresponding grpc object has been invoked, and the subchannel does not have any children left. -// -// The returned boolean value indicates whether the channel has been successfully deleted from tree. -func (sc *subChannel) deleteSelfFromTree() (deleted bool) { - if !sc.closeCalled || len(sc.sockets) != 0 { - return false - } - sc.cm.findEntry(sc.pid).deleteChild(sc.id) - return true -} - -// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means -// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query -// the subchannel, and its memory will be garbage collected. -// -// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is -// specified in the channel tracing gRFC that as long as some other trace has reference to an entity, -// the trace of the referenced entity must not be deleted. In order to release the resource allocated -// by grpc, the reference to the grpc object is reset to a dummy object. -// -// deleteSelfFromMap must be called after deleteSelfFromTree returns true. -// -// It returns a bool to indicate whether the channel can be safely deleted from map. -func (sc *subChannel) deleteSelfFromMap() (delete bool) { - if sc.getTraceRefCount() != 0 { - // free the grpc struct (i.e. addrConn) - sc.c = &dummyChannel{} - return false - } - return true -} - -// deleteSelfIfReady tries to delete the subchannel itself from the channelz database. -// The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. 
-func (sc *subChannel) deleteSelfIfReady() { - if !sc.deleteSelfFromTree() { - return - } - if !sc.deleteSelfFromMap() { - return - } - sc.cm.deleteEntry(sc.id) - sc.trace.clear() -} - -func (sc *subChannel) getChannelTrace() *channelTrace { - return sc.trace -} - -func (sc *subChannel) incrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, 1) -} - -func (sc *subChannel) decrTraceRefCount() { - atomic.AddInt32(&sc.traceRefCount, -1) -} - -func (sc *subChannel) getTraceRefCount() int { - i := atomic.LoadInt32(&sc.traceRefCount) - return int(i) -} - -func (sc *subChannel) getRefName() string { - return sc.refName -} - -// SocketMetric defines the info channelz provides for a specific Socket, which -// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. -type SocketMetric struct { - // ID is the channelz id of this socket. - ID int64 - // RefName is the human readable reference string of this socket. - RefName string - // SocketData contains socket internal metric reported by the socket through - // ChannelzMetric(). - SocketData *SocketInternalMetric -} - -// SocketInternalMetric defines the struct that the implementor of Socket interface -// should return from ChannelzMetric(). -type SocketInternalMetric struct { - // The number of streams that have been started. - StreamsStarted int64 - // The number of streams that have ended successfully: - // On client side, receiving frame with eos bit set. - // On server side, sending frame with eos bit set. - StreamsSucceeded int64 - // The number of streams that have ended unsuccessfully: - // On client side, termination without receiving frame with eos bit set. - // On server side, termination without sending frame with eos bit set. - StreamsFailed int64 - // The number of messages successfully sent on this socket. - MessagesSent int64 - MessagesReceived int64 - // The number of keep alives sent. This is typically implemented with HTTP/2 - // ping messages. - KeepAlivesSent int64 - // The last time a stream was created by this endpoint. Usually unset for - // servers. - LastLocalStreamCreatedTimestamp time.Time - // The last time a stream was created by the remote endpoint. Usually unset - // for clients. - LastRemoteStreamCreatedTimestamp time.Time - // The last time a message was sent by this endpoint. - LastMessageSentTimestamp time.Time - // The last time a message was received by this endpoint. - LastMessageReceivedTimestamp time.Time - // The amount of window, granted to the local endpoint by the remote endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - LocalFlowControlWindow int64 - // The amount of window, granted to the remote endpoint by the local endpoint. - // This may be slightly out of date due to network latency. This does NOT - // include stream level or TCP level flow control info. - RemoteFlowControlWindow int64 - // The locally bound address. - LocalAddr net.Addr - // The remote bound address. May be absent. - RemoteAddr net.Addr - // Optional, represents the name of the remote endpoint, if different than - // the original target name. - RemoteName string - SocketOptions *SocketOptionData - Security credentials.ChannelzSecurityValue -} - -// Socket is the interface that should be satisfied in order to be tracked by -// channelz as Socket. 
-type Socket interface { - ChannelzMetric() *SocketInternalMetric -} - -type listenSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ls *listenSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) -} - -func (ls *listenSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a listen socket", id) -} - -func (ls *listenSocket) triggerDelete() { - ls.cm.deleteEntry(ls.id) - ls.cm.findEntry(ls.pid).deleteChild(ls.id) -} - -func (ls *listenSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a listen socket") -} - -func (ls *listenSocket) getParentID() int64 { - return ls.pid -} - -type normalSocket struct { - refName string - s Socket - id int64 - pid int64 - cm *channelMap -} - -func (ns *normalSocket) addChild(id int64, e entry) { - logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) -} - -func (ns *normalSocket) deleteChild(id int64) { - logger.Errorf("cannot delete a child (id = %d) from a normal socket", id) -} - -func (ns *normalSocket) triggerDelete() { - ns.cm.deleteEntry(ns.id) - ns.cm.findEntry(ns.pid).deleteChild(ns.id) -} - -func (ns *normalSocket) deleteSelfIfReady() { - logger.Errorf("cannot call deleteSelfIfReady on a normal socket") -} - -func (ns *normalSocket) getParentID() int64 { - return ns.pid -} - -// ServerMetric defines the info channelz provides for a specific Server, which -// includes ServerInternalMetric and channelz-specific data, such as channelz id, -// child list, etc. -type ServerMetric struct { - // ID is the channelz id of this server. - ID int64 - // RefName is the human readable reference string of this server. - RefName string - // ServerData contains server internal metric reported by the server through - // ChannelzMetric(). - ServerData *ServerInternalMetric - // ListenSockets tracks the listener socket type children of this server in the - // format of a map from socket channelz id to corresponding reference string. - ListenSockets map[int64]string -} - -// ServerInternalMetric defines the struct that the implementor of Server interface -// should return from ChannelzMetric(). -type ServerInternalMetric struct { - // The number of incoming calls started on the server. - CallsStarted int64 - // The number of incoming calls that have completed with an OK status. - CallsSucceeded int64 - // The number of incoming calls that have a completed with a non-OK status. - CallsFailed int64 - // The last time a call was started on the server. - LastCallStartedTimestamp time.Time -} - -// Server is the interface to be satisfied in order to be tracked by channelz as -// Server. 
-type Server interface { - ChannelzMetric() *ServerInternalMetric -} - -type server struct { - refName string - s Server - closeCalled bool - sockets map[int64]string - listenSockets map[int64]string - id int64 - cm *channelMap -} - -func (s *server) addChild(id int64, e entry) { - switch v := e.(type) { - case *normalSocket: - s.sockets[id] = v.refName - case *listenSocket: - s.listenSockets[id] = v.refName - default: - logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) - } -} - -func (s *server) deleteChild(id int64) { - delete(s.sockets, id) - delete(s.listenSockets, id) - s.deleteSelfIfReady() -} - -func (s *server) triggerDelete() { - s.closeCalled = true - s.deleteSelfIfReady() -} - -func (s *server) deleteSelfIfReady() { - if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { - return - } - s.cm.deleteEntry(s.id) -} - -func (s *server) getParentID() int64 { - return 0 -} - -type tracedChannel interface { - getChannelTrace() *channelTrace - incrTraceRefCount() - decrTraceRefCount() - getRefName() string -} - -type channelTrace struct { - cm *channelMap - clearCalled bool - createdTime time.Time - eventCount int64 - mu sync.Mutex - events []*TraceEvent -} - -func (c *channelTrace) append(e *TraceEvent) { - c.mu.Lock() - if len(c.events) == getMaxTraceEntry() { - del := c.events[0] - c.events = c.events[1:] - if del.RefID != 0 { - // start recursive cleanup in a goroutine to not block the call originated from grpc. - go func() { - // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func. - c.cm.mu.Lock() - c.cm.decrTraceRefCount(del.RefID) - c.cm.mu.Unlock() - }() - } - } - e.Timestamp = time.Now() - c.events = append(c.events, e) - c.eventCount++ - c.mu.Unlock() -} - -func (c *channelTrace) clear() { - if c.clearCalled { - return - } - c.clearCalled = true - c.mu.Lock() - for _, e := range c.events { - if e.RefID != 0 { - // caller should have already held the c.cm.mu lock. - c.cm.decrTraceRefCount(e.RefID) - } - } - c.mu.Unlock() -} - -// Severity is the severity level of a trace event. -// The canonical enumeration of all valid values is here: -// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126. -type Severity int - -const ( - // CtUnknown indicates unknown severity of a trace event. - CtUnknown Severity = iota - // CtInfo indicates info level severity of a trace event. - CtInfo - // CtWarning indicates warning level severity of a trace event. - CtWarning - // CtError indicates error level severity of a trace event. - CtError -) - -// RefChannelType is the type of the entity being referenced in a trace event. -type RefChannelType int - -const ( - // RefUnknown indicates an unknown entity type, the zero value for this type. - RefUnknown RefChannelType = iota - // RefChannel indicates the referenced entity is a Channel. - RefChannel - // RefSubChannel indicates the referenced entity is a SubChannel. - RefSubChannel - // RefServer indicates the referenced entity is a Server. - RefServer - // RefListenSocket indicates the referenced entity is a ListenSocket. - RefListenSocket - // RefNormalSocket indicates the referenced entity is a NormalSocket. 
- RefNormalSocket -) - -var refChannelTypeToString = map[RefChannelType]string{ - RefUnknown: "Unknown", - RefChannel: "Channel", - RefSubChannel: "SubChannel", - RefServer: "Server", - RefListenSocket: "ListenSocket", - RefNormalSocket: "NormalSocket", -} - -func (r RefChannelType) String() string { - return refChannelTypeToString[r] -} - -func (c *channelTrace) dumpData() *ChannelTrace { - c.mu.Lock() - ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} - ct.Events = c.events[:len(c.events)] - c.mu.Unlock() - return ct -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go deleted file mode 100644 index 98288c3f866..00000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -import ( - "syscall" -) - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket any) *SocketOptionData { - c, ok := socket.(syscall.Conn) - if !ok { - return nil - } - data := &SocketOptionData{} - if rawConn, err := c.SyscallConn(); err == nil { - rawConn.Control(data.Getsockopt) - return data - } - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go deleted file mode 100644 index b5568b22e20..00000000000 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ /dev/null @@ -1,27 +0,0 @@ -//go:build !linux -// +build !linux - -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package channelz - -// GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c any) *SocketOptionData { - return nil -} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 6c7ea6a5336..48d24bdb4e6 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -190,12 +190,16 @@ var ( // function makes events more predictable than relying on timer events. 
TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error - // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton - // to invoke resource not found for a resource type name and resource name. + // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client + // singleton to invoke resource not found for a resource type name and + // resource name. TriggerXDSResourceNameNotFoundClient any // func(string, string) error // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) + + // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme. + UserSetDefaultScheme bool = false ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 52cfab1b93d..dbee7a60d78 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -24,9 +24,8 @@ import ( "encoding/json" "fmt" - protov1 "github.com/golang/protobuf/proto" "google.golang.org/protobuf/encoding/protojson" - protov2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/protoadapt" ) const jsonIndent = " " @@ -35,21 +34,14 @@ const jsonIndent = " " // // If marshal fails, it falls back to fmt.Sprintf("%+v"). func ToJSON(e any) string { - switch ee := e.(type) { - case protov1.Message: - mm := protojson.MarshalOptions{Indent: jsonIndent} - ret, err := mm.Marshal(protov1.MessageV2(ee)) - if err != nil { - // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 - // messages are not imported, and this will fail because the message - // is not found. - return fmt.Sprintf("%+v", ee) - } - return string(ret) - case protov2.Message: + if ee, ok := e.(protoadapt.MessageV1); ok { + e = protoadapt.MessageV2Of(ee) + } + + if ee, ok := e.(protoadapt.MessageV2); ok { mm := protojson.MarshalOptions{ - Multiline: true, Indent: jsonIndent, + Multiline: true, } ret, err := mm.Marshal(ee) if err != nil { @@ -59,13 +51,13 @@ func ToJSON(e any) string { return fmt.Sprintf("%+v", ee) } return string(ret) - default: - ret, err := json.MarshalIndent(ee, "", jsonIndent) - if err != nil { - return fmt.Sprintf("%+v", ee) - } - return string(ret) } + + ret, err := json.MarshalIndent(e, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", e) + } + return string(ret) } // FormatJSON formats the input json bytes with indentation. diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index b66dcb21327..abab35e250e 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -45,6 +45,13 @@ import ( // addresses from SRV records. Must not be changed after init time. var EnableSRVLookups = false +// ResolvingTimeout specifies the maximum duration for a DNS resolution request. +// If the timeout expires before a response is received, the request will be canceled. +// +// It is recommended to set this value at application startup. Avoid modifying this variable +// after initialization as it's not thread-safe for concurrent modification. 
+var ResolvingTimeout = 30 * time.Second + var logger = grpclog.Component("dns") func init() { @@ -221,18 +228,18 @@ func (d *dnsResolver) watcher() { } } -func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { +func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) { if !EnableSRVLookups { return nil, nil } var newAddrs []resolver.Address - _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) + _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host) if err != nil { err = handleDNSError(err, "SRV") // may become nil return nil, err } for _, s := range srvs { - lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) + lbAddrs, err := d.resolver.LookupHost(ctx, s.Target) if err != nil { err = handleDNSError(err, "A") // may become nil if err == nil { @@ -269,8 +276,8 @@ func handleDNSError(err error, lookupType string) error { return err } -func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { - ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) +func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult { + ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host) if err != nil { if envconfig.TXTErrIgnore { return nil @@ -297,8 +304,8 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { - addrs, err := d.resolver.LookupHost(d.ctx, d.host) +func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) { + addrs, err := d.resolver.LookupHost(ctx, d.host) if err != nil { err = handleDNSError(err, "A") return nil, err @@ -316,8 +323,10 @@ func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { } func (d *dnsResolver) lookup() (*resolver.State, error) { - srv, srvErr := d.lookupSRV() - addrs, hostErr := d.lookupHost() + ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout) + defer cancel() + srv, srvErr := d.lookupSRV(ctx) + addrs, hostErr := d.lookupHost(ctx) if hostErr != nil && (srvErr != nil || len(srv) == 0) { return nil, hostErr } @@ -327,7 +336,7 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv}) } if !d.disableServiceConfig { - state.ServiceConfig = d.lookupTXT() + state.ServiceConfig = d.lookupTXT(ctx) } return &state, nil } diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index bd39ff9a229..4a3ddce29a4 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -51,14 +51,10 @@ import ( // inside an http.Handler, or writes an HTTP error to w and returns an error. // It requires that the http Server supports HTTP/2. 
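The NewServerHandlerTransport hunk that follows changes the status codes seen by non-conforming clients: a non-POST request now gets 405 with an Allow: POST header, and an HTTP/1.x request gets 505 instead of a generic 400. As a sketch of the path that reaches this code, serving a *grpc.Server through net/http (the address and certificate paths are placeholders):

    package main

    import (
        "net/http"

        "google.golang.org/grpc"
    )

    func main() {
        gs := grpc.NewServer()
        // (*grpc.Server).ServeHTTP routes through NewServerHandlerTransport;
        // HTTP/2 support comes from net/http's TLS server.
        _ = http.ListenAndServeTLS(":8443", "server.crt", "server.key", gs)
    }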
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { - if r.ProtoMajor != 2 { - msg := "gRPC requires HTTP/2" - http.Error(w, msg, http.StatusBadRequest) - return nil, errors.New(msg) - } - if r.Method != "POST" { + if r.Method != http.MethodPost { + w.Header().Set("Allow", http.MethodPost) msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) - http.Error(w, msg, http.StatusBadRequest) + http.Error(w, msg, http.StatusMethodNotAllowed) return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") @@ -69,6 +65,11 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s http.Error(w, msg, http.StatusUnsupportedMediaType) return nil, errors.New(msg) } + if r.ProtoMajor != 2 { + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusHTTPVersionNotSupported) + return nil, errors.New(msg) + } if _, ok := w.(http.Flusher); !ok { msg := "gRPC requires a ResponseWriter supporting http.Flusher" http.Error(w, msg, http.StatusInternalServerError) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index eff8799640c..deba0c4d9ef 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -140,9 +140,7 @@ type http2Client struct { // variable. kpDormant bool - // Fields below are for channelz metric collection. - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket onClose func(GoAwayReason) @@ -319,6 +317,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.MaxHeaderListSize != nil { maxHeaderListSize = *opts.MaxHeaderListSize } + t := &http2Client{ ctx: ctx, ctxDone: ctx.Done(), // Cache Done chan. @@ -346,11 +345,25 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), - czData: new(channelzData), keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), onClose: onClose, } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: opts.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }) t.logger = prefixLoggerForClientTransport(t) // Add peer information to the http2client context. 
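The channelz.RegisterSocket call above replaces the old RegisterNormalSocket/identifier flow. One way to observe the resulting Socket entries is to expose the standard channelz service; a minimal sketch, assuming a local listener address:

    package main

    import (
        "net"

        "google.golang.org/grpc"
        channelzsvc "google.golang.org/grpc/channelz/service"
    )

    func main() {
        s := grpc.NewServer()
        // Sockets registered via channelz.RegisterSocket become queryable
        // through this service (e.g. with the grpcdebug CLI).
        channelzsvc.RegisterChannelzServiceToServer(s)
        lis, err := net.Listen("tcp", "localhost:50051")
        if err != nil {
            panic(err)
        }
        _ = s.Serve(lis)
    }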
t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -381,10 +394,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } sh.HandleConn(t.ctx, connBegin) } - t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) - if err != nil { - return nil, err - } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() @@ -756,8 +765,8 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return ErrConnClosing } if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano()) } // If the keepalive goroutine has gone dormant, wake it up. if t.kpDormant { @@ -928,9 +937,9 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. t.mu.Unlock() if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } }, @@ -985,7 +994,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. _, goAwayDebugMessage := t.GetGoAwayReason() @@ -1708,7 +1717,7 @@ func (t *http2Client) keepalive() { // keepalive timer expired. In both cases, we need to send a ping. 
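Each keepalive ping sent by the loop above is now counted through SocketMetrics.KeepAlivesSent rather than czData.kpCount. A sketch of enabling those pings from the client API (target and intervals are placeholders):

    package main

    import (
        "time"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/keepalive"
    )

    func main() {
        conn, err := grpc.NewClient("localhost:50051",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithKeepaliveParams(keepalive.ClientParameters{
                Time:    30 * time.Second, // ping after 30s without activity
                Timeout: 10 * time.Second, // wait this long for the ping ack
            }))
        if err != nil {
            panic(err)
        }
        defer conn.Close()
    }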
if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) timeoutLeft = t.kp.Timeout @@ -1738,40 +1747,23 @@ func (t *http2Client) GoAway() <-chan struct{} { return t.goAway } -func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, - // RemoteName : - } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } } func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr } func (t *http2Client) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Client) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3839c1ade27..d582e047109 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -118,8 +118,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. 
- channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Socket bufferPool *bufferPool connectionID uint64 @@ -262,9 +261,24 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, idle: time.Now(), kep: kep, initialWindowSize: iwz, - czData: new(channelzData), bufferPool: newBufferPool(), } + var czSecurity credentials.ChannelzSecurityValue + if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok { + czSecurity = au.GetSecurityValue() + } + t.channelz = channelz.RegisterSocket( + &channelz.Socket{ + SocketType: channelz.SocketTypeNormal, + Parent: config.ChannelzParent, + SocketMetrics: channelz.SocketMetrics{}, + EphemeralMetrics: t.socketMetrics, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, + SocketOptions: channelz.GetSocketOption(t.conn), + Security: czSecurity, + }, + ) t.logger = prefixLoggerForServerTransport(t) t.controlBuf = newControlBuffer(t.done) @@ -274,10 +288,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) - if err != nil { - return nil, err - } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) t.framer.writer.Flush() @@ -334,9 +344,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // closed, would lead to a TCP RST instead of FIN, and the client // encountering errors. For more info: // https://github.com/grpc/grpc-go/issues/5358 + timer := time.NewTimer(time.Second) + defer timer.Stop() select { case <-t.readerDone: - case <-time.After(time.Second): + case <-timer.C: } t.conn.Close() } @@ -592,8 +604,8 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade } t.mu.Unlock() if channelz.IsOn() { - atomic.AddInt64(&t.czData.streamsStarted, 1) - atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.StreamsStarted.Add(1) + t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano()) } s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) @@ -658,8 +670,14 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { switch frame := frame.(type) { case *http2.MetaHeadersFrame: if err := t.operateHeaders(ctx, frame, handle); err != nil { - t.Close(err) - break + // Any error processing client headers, e.g. invalid stream ID, + // is considered a protocol violation. + t.controlBuf.put(&goAway{ + code: http2.ErrCodeProtocol, + debugData: []byte(err.Error()), + closeConn: err, + }) + continue } case *http2.DataFrame: t.handleData(frame) @@ -1195,7 +1213,7 @@ func (t *http2Server) keepalive() { } if !outstandingPing { if channelz.IsOn() { - atomic.AddInt64(&t.czData.kpCount, 1) + t.channelz.SocketMetrics.KeepAlivesSent.Add(1) } t.controlBuf.put(p) kpTimeoutLeft = t.kp.Timeout @@ -1235,7 +1253,7 @@ func (t *http2Server) Close(err error) { if err := t.conn.Close(); err != nil && t.logger.V(logLevel) { t.logger.Infof("Error closing underlying net.Conn during Close: %v", err) } - channelz.RemoveEntry(t.channelzID) + channelz.RemoveEntry(t.channelz.ID) // Cancel all active streams. 
for _, s := range streams { s.cancel() @@ -1256,9 +1274,9 @@ func (t *http2Server) deleteStream(s *Stream, eosReceived bool) { if channelz.IsOn() { if eosReceived { - atomic.AddInt64(&t.czData.streamsSucceeded, 1) + t.channelz.SocketMetrics.StreamsSucceeded.Add(1) } else { - atomic.AddInt64(&t.czData.streamsFailed, 1) + t.channelz.SocketMetrics.StreamsFailed.Add(1) } } } @@ -1375,38 +1393,21 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { return false, nil } -func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { - s := channelz.SocketInternalMetric{ - StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted), - StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded), - StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed), - MessagesSent: atomic.LoadInt64(&t.czData.msgSent), - MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv), - KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount), - LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)), - LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)), - LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), - LocalFlowControlWindow: int64(t.fc.getSize()), - SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.peer.LocalAddr, - RemoteAddr: t.peer.Addr, - // RemoteName : - } - if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { - s.Security = au.GetSecurityValue() - } - s.RemoteFlowControlWindow = t.getOutFlowWindow() - return &s +func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics { + return &channelz.EphemeralSocketMetrics{ + LocalFlowControlWindow: int64(t.fc.getSize()), + RemoteFlowControlWindow: t.getOutFlowWindow(), + } } func (t *http2Server) IncrMsgSent() { - atomic.AddInt64(&t.czData.msgSent, 1) - atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesSent.Add(1) + t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano()) } func (t *http2Server) IncrMsgRecv() { - atomic.AddInt64(&t.czData.msgRecv, 1) - atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano()) + t.channelz.SocketMetrics.MessagesReceived.Add(1) + t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano()) } func (t *http2Server) getOutFlowWindow() int64 { diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index dc29d590e91..39cef3bd442 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -418,10 +418,9 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu return f } -func getWriteBufferPool(writeBufferSize int) *sync.Pool { +func getWriteBufferPool(size int) *sync.Pool { writeBufferMutex.Lock() defer writeBufferMutex.Unlock() - size := writeBufferSize * 2 pool, ok := writeBufferPoolMap[size] if ok { return pool diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index d3796c256e2..0d2a6e47f67 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -571,7 +571,7 @@ type ServerConfig struct { WriteBufferSize int ReadBufferSize int SharedWriteBuffer bool - ChannelzParentID *channelz.Identifier + ChannelzParent *channelz.Server
MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -606,8 +606,8 @@ type ConnectOptions struct { ReadBufferSize int // SharedWriteBuffer indicates whether connections should reuse write buffer SharedWriteBuffer bool - // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID *channelz.Identifier + // ChannelzParent sets the addrConn id which initiated the creation of this client transport. + ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -820,30 +820,6 @@ const ( GoAwayTooManyPings GoAwayReason = 2 ) -// channelzData is used to store channelz related data for http2Client and http2Server. -// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - kpCount int64 - // The number of streams that have started, including already finished ones. - streamsStarted int64 - // Client side: The number of streams that have ended successfully by receiving - // EoS bit set frame from server. - // Server side: The number of streams that have ended successfully by sending - // frame with EoS bit set. - streamsSucceeded int64 - streamsFailed int64 - // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type - // instead of time.Time since it's more costly to atomically update time.Time variable than int64 - // variable. The same goes for lastMsgSentTime and lastMsgRecvTime. - lastStreamCreatedTime int64 - msgSent int64 - msgRecv int64 - lastMsgSentTime int64 - lastMsgRecvTime int64 -} - // ContextErr converts the error from context package into a status error. func ContextErr(err error) error { switch err { diff --git a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go b/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go deleted file mode 100644 index e8b492774d1..00000000000 --- a/vendor/google.golang.org/grpc/internal/xds_handshake_cluster.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package internal - -import ( - "google.golang.org/grpc/attributes" - "google.golang.org/grpc/resolver" -) - -// handshakeClusterNameKey is the type used as the key to store cluster name in -// the Attributes field of resolver.Address. -type handshakeClusterNameKey struct{} - -// SetXDSHandshakeClusterName returns a copy of addr in which the Attributes field -// is updated with the cluster name. 
-func SetXDSHandshakeClusterName(addr resolver.Address, clusterName string) resolver.Address { - addr.Attributes = addr.Attributes.WithValue(handshakeClusterNameKey{}, clusterName) - return addr -} - -// GetXDSHandshakeClusterName returns cluster name stored in attr. -func GetXDSHandshakeClusterName(attr *attributes.Attributes) (string, bool) { - v := attr.Value(handshakeClusterNameKey{}) - name, ok := v.(string) - return name, ok -} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5128f9364dd..e3ea42ba962 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -38,19 +38,15 @@ const ( logPrefix = "[pick-first-lb %p] " ) -func newPickfirstBuilder() balancer.Builder { - return &pickfirstBuilder{} -} - type pickfirstBuilder struct{} -func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { +func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { b := &pickfirstBalancer{cc: cc} b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) return b } -func (*pickfirstBuilder) Name() string { +func (pickfirstBuilder) Name() string { return PickFirstBalancerName } @@ -63,7 +59,7 @@ type pfConfig struct { ShuffleAddressList bool `json:"shuffleAddressList"` } -func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { +func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { var cfg pfConfig if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) @@ -243,7 +239,3 @@ func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } - -func init() { - balancer.Register(newPickfirstBuilder()) -} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go index 14aa6f20ae0..b54a3a3225d 100644 --- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -24,10 +24,28 @@ package dns import ( + "time" + "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" ) +// SetResolvingTimeout sets the maximum duration for DNS resolution requests. +// +// This function affects the global timeout used by all channels using the DNS +// name resolver scheme. +// +// It must be called only at application startup, before any gRPC calls are +// made. Modifying this value after initialization is not thread-safe. +// +// The default value is 30 seconds. Setting the timeout too low may result in +// premature timeouts during resolution, while setting it too high may lead to +// unnecessary delays in service discovery. Choose a value appropriate for your +// specific needs and network environment. +func SetResolvingTimeout(timeout time.Duration) { + dns.ResolvingTimeout = timeout +} + // NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. // // Deprecated: import grpc and use resolver.Get("dns") instead. 
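The new public SetResolvingTimeout above is the supported way to tune the internal dns.ResolvingTimeout default of 30 seconds shown earlier. A minimal usage sketch (the 10-second value is illustrative):

    package main

    import (
        "time"

        "google.golang.org/grpc/resolver/dns"
    )

    func init() {
        // Call once at startup, before creating any channels; the setting is
        // global to the "dns" scheme and not safe to mutate afterwards.
        dns.SetResolvingTimeout(10 * time.Second)
    }

    func main() {}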
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index d72f21c1b32..202854511b8 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -29,6 +29,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -63,16 +64,18 @@ func Get(scheme string) Builder { } // SetDefaultScheme sets the default scheme that will be used. The default -// default scheme is "passthrough". +// scheme is initially set to "passthrough". // // NOTE: this function must only be called during initialization time (i.e. in // an init() function), and is not thread-safe. The scheme set last overrides // previously set values. func SetDefaultScheme(scheme string) { defaultScheme = scheme + internal.UserSetDefaultScheme = true } -// GetDefaultScheme gets the default scheme that will be used. +// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If +// SetDefaultScheme is never called, the default scheme used by grpc.NewClient is "dns" instead. func GetDefaultScheme() string { return defaultScheme } @@ -284,9 +287,9 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } -// String returns a string representation of Target. +// String returns the canonical string representation of Target. func (t Target) String() string { - return t.URL.String() + return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint() } // Builder creates a resolver that will be used to watch name resolution updates. diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go index f845ac95893..9dcc9780f89 100644 --- a/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -97,7 +97,7 @@ func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { // finished shutting down, the channel should block on ccr.serializer.Done() // without cc.mu held. 
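The resolver.go hunk above makes a user override of the default scheme observable via internal.UserSetDefaultScheme, so grpc.NewClient honors it instead of defaulting to "dns". A sketch (the scheme choice is illustrative):

    package main

    import "google.golang.org/grpc/resolver"

    func init() {
        // Must run at init time; with the change above, grpc.NewClient keeps
        // this override rather than falling back to its own "dns" default.
        resolver.SetDefaultScheme("passthrough")
    }

    func main() {}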
func (ccr *ccResolverWrapper) close() { - channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver") ccr.mu.Lock() ccr.closed = true ccr.mu.Unlock() @@ -147,7 +147,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { return } ccr.mu.Unlock() - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err) ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) } @@ -194,5 +194,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 82493d237bc..998e251ddc4 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -962,19 +962,6 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// channelzData is used to store channelz related data for ClientConn, addrConn and Server. -// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic -// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment. -// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment. -type channelzData struct { - callsStarted int64 - callsFailed int64 - callsSucceeded int64 - // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of - // time.Time since it's more costly to atomically update time.Time variable than int64 variable. - lastCallStartedTime int64 -} - // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 7. diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index a6a11704b34..fd4558daa52 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -137,8 +137,7 @@ type Server struct { serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop handlersWG sync.WaitGroup // counts active method handler goroutines - channelzID *channelz.Identifier - czData *channelzData + channelz *channelz.Server serverWorkerChannel chan func() serverWorkerChannelClose func() @@ -249,11 +248,9 @@ func SharedWriteBuffer(val bool) ServerOption { } // WriteBufferSize determines how much data can be batched before doing a write -// on the wire. The corresponding memory allocation for this buffer will be -// twice the size to keep syscalls low. The default value for this buffer is -// 32KB. Zero or negative values will disable the write buffer such that each -// write will be on underlying connection. -// Note: A Send call may not directly translate to a write. +// on the wire. The default value for this buffer is 32KB. Zero or negative +// values will disable the write buffer such that each write will be on underlying +// connection. 
Note: A Send call may not directly translate to a write. func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.writeBufferSize = s @@ -661,7 +658,7 @@ func NewServer(opt ...ServerOption) *Server { services: make(map[string]*serviceInfo), quit: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - czData: new(channelzData), + channelz: channelz.RegisterServer(""), } chainUnaryServerInterceptors(s) chainStreamServerInterceptors(s) @@ -675,8 +672,7 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - channelz.Info(logger, s.channelzID, "Server created") + channelz.Info(logger, s.channelz, "Server created") return s } @@ -802,20 +798,13 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID *channelz.Identifier -} - -func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { - return &channelz.SocketInternalMetric{ - SocketOptions: channelz.GetSocketOption(l.Listener), - LocalAddr: l.Listener.Addr(), - } + channelz *channelz.Socket } func (l *listenSocket) Close() error { err := l.Listener.Close() - channelz.RemoveEntry(l.channelzID) - channelz.Info(logger, l.channelzID, "ListenSocket deleted") + channelz.RemoveEntry(l.channelz.ID) + channelz.Info(logger, l.channelz, "ListenSocket deleted") return err } @@ -857,7 +846,16 @@ func (s *Server) Serve(lis net.Listener) error { } }() - ls := &listenSocket{Listener: lis} + ls := &listenSocket{ + Listener: lis, + channelz: channelz.RegisterSocket(&channelz.Socket{ + SocketType: channelz.SocketTypeListen, + Parent: s.channelz, + RefName: lis.Addr().String(), + LocalAddr: lis.Addr(), + SocketOptions: channelz.GetSocketOption(lis)}, + ), + } s.lis[ls] = true defer func() { @@ -869,14 +867,8 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var err error - ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - if err != nil { - s.mu.Unlock() - return err - } s.mu.Unlock() - channelz.Info(logger, ls.channelzID, "ListenSocket created") + channelz.Info(logger, ls.channelz, "ListenSocket created") var tempDelay time.Duration // how long to sleep on accept failure for { @@ -975,7 +967,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, SharedWriteBuffer: s.opts.sharedWriteBuffer, - ChannelzParentID: s.channelzID, + ChannelzParent: s.channelz, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, } @@ -989,7 +981,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { if err != credentials.ErrConnDispatched { // Don't log on ErrConnDispatched and io.EOF to prevent log spam. 
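The reworded WriteBufferSize documentation above describes the server-side write-batching knob; a minimal usage sketch (the option value is illustrative):

    package main

    import "google.golang.org/grpc"

    func main() {
        // Zero (or any negative value) disables the write buffer entirely, so
        // each write goes straight to the underlying connection; omitting the
        // option keeps the 32KB default described above.
        s := grpc.NewServer(grpc.WriteBufferSize(0))
        defer s.Stop()
    }
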
if err != io.EOF { - channelz.Info(logger, s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err) } c.Close() } @@ -1121,37 +1113,28 @@ func (s *Server) removeConn(addr string, st transport.ServerTransport) { } } -func (s *Server) channelzMetric() *channelz.ServerInternalMetric { - return &channelz.ServerInternalMetric{ - CallsStarted: atomic.LoadInt64(&s.czData.callsStarted), - CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded), - CallsFailed: atomic.LoadInt64(&s.czData.callsFailed), - LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)), - } -} - func (s *Server) incrCallsStarted() { - atomic.AddInt64(&s.czData.callsStarted, 1) - atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano()) + s.channelz.ServerMetrics.CallsStarted.Add(1) + s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano()) } func (s *Server) incrCallsSucceeded() { - atomic.AddInt64(&s.czData.callsSucceeded, 1) + s.channelz.ServerMetrics.CallsSucceeded.Add(1) } func (s *Server) incrCallsFailed() { - atomic.AddInt64(&s.czData.callsFailed, 1) + s.channelz.ServerMetrics.CallsFailed.Add(1) } func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err) return err } compData, err := compress(data, cp, comp) if err != nil { - channelz.Error(logger, s.channelzID, "grpc: server failed to compress response: ", err) + channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -1346,7 +1329,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1397,7 +1380,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if len(binlogs) != 0 { if h, _ := stream.Header(); h.Len() > 0 { @@ -1437,7 +1420,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor } if sts, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, sts); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1765,7 +1748,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream 
*transport.Str ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } if ti != nil { ti.tr.Finish() @@ -1822,7 +1805,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ti.tr.SetError() } - channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err) } if ti != nil { ti.tr.Finish() @@ -1894,8 +1877,7 @@ func (s *Server) stop(graceful bool) { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) }) s.mu.Lock() s.closeListenersLocked() // Wait for serving threads to be ready to exit. Only then can we be sure no @@ -2150,14 +2132,6 @@ func Method(ctx context.Context) (string, bool) { return s.Method(), true } -type channelzServer struct { - s *Server -} - -func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric { - return c.s.channelzMetric() -} - // validateSendCompressor returns an error when given compressor name cannot be // handled by the server or the client based on the advertised compressors. func validateSendCompressor(name string, clientCompressors []string) error { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 0df11fc0988..2b35c5d2130 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -25,8 +25,10 @@ import ( "reflect" "time" + "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/balancer/gracefulswitch" internalserviceconfig "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/serviceconfig" ) @@ -41,11 +43,6 @@ const maxInt = int(^uint(0) >> 1) // https://github.com/grpc/grpc/blob/master/doc/service_config.md type MethodConfig = internalserviceconfig.MethodConfig -type lbConfig struct { - name string - cfg serviceconfig.LoadBalancingConfig -} - // ServiceConfig is provided by the service provider and contains parameters for how // clients that connect to the service should behave. // @@ -55,14 +52,9 @@ type lbConfig struct { type ServiceConfig struct { serviceconfig.Config - // LB is the load balancer the service providers recommends. This is - // deprecated; lbConfigs is preferred. If lbConfig and LB are both present, - // lbConfig will be used. - LB *string - // lbConfig is the service config's load balancing configuration. If // lbConfig and LB are both present, lbConfig will be used. - lbConfig *lbConfig + lbConfig serviceconfig.LoadBalancingConfig // Methods contains a map for the methods in this service. If there is an // exact match for a method (i.e. /service/method) in the map, use the @@ -164,7 +156,7 @@ type jsonMC struct { // TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
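With the deprecated LB field and the private name/config pair removed above, the parsed load-balancing policy is stored as a single serviceconfig.LoadBalancingConfig produced by gracefulswitch.ParseConfig. A sketch of the canonical one-element JSON form that parsing consumes, using the pick_first fallback the new code synthesizes when no policy is given:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        // A loadBalancingConfig is a list of single-key objects: the key is
        // the policy name and the value is that policy's own configuration.
        cfg := []map[string]any{{"pick_first": struct{}{}}}
        b, err := json.Marshal(cfg)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // [{"pick_first":{}}]
    }
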
type jsonSC struct { LoadBalancingPolicy *string - LoadBalancingConfig *internalserviceconfig.BalancerConfig + LoadBalancingConfig *json.RawMessage MethodConfig *[]jsonMC RetryThrottling *retryThrottlingPolicy HealthCheckConfig *healthCheckConfig @@ -184,18 +176,33 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ - LB: rsc.LoadBalancingPolicy, Methods: make(map[string]MethodConfig), retryThrottling: rsc.RetryThrottling, healthCheckConfig: rsc.HealthCheckConfig, rawJSONString: js, } - if c := rsc.LoadBalancingConfig; c != nil { - sc.lbConfig = &lbConfig{ - name: c.Name, - cfg: c.Config, + c := rsc.LoadBalancingConfig + if c == nil { + name := PickFirstBalancerName + if rsc.LoadBalancingPolicy != nil { + name = *rsc.LoadBalancingPolicy + } + if balancer.Get(name) == nil { + name = PickFirstBalancerName } + cfg := []map[string]any{{name: struct{}{}}} + strCfg, err := json.Marshal(cfg) + if err != nil { + return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)} + } + r := json.RawMessage(strCfg) + c = &r + } + cfg, err := gracefulswitch.ParseConfig(*c) + if err != nil { + return &serviceconfig.ParseResult{Err: err} } + sc.lbConfig = cfg if rsc.MethodConfig == nil { return &serviceconfig.ParseResult{Config: &sc} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 814e998354a..d939ffc6348 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -655,13 +655,13 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - channelz.Infof(logger, cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return false, err } hasPushback = true } else if len(sps) > 1 { - channelz.Warningf(logger, cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. return false, err } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 46ad8113ff4..eaf5dbceacf 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.62.1" +const Version = "1.63.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 7a33c215b58..7e6b92e491a 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -83,6 +83,10 @@ git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Exampl # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" +# - Do not use time.After except in tests. It has the potential to leak the +# timer since there is no way to stop it early. +git grep -l 'time.After(' -- "*.go" | not grep -v '_test.go\|test_utils\|testutils' + # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. 
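The vet.sh check added above bans time.After outside tests because the timer it allocates cannot be stopped early; if the other select case wins, the timer stays live until it fires. A sketch of the stoppable-timer pattern the rule pushes code toward:

    package main

    import (
        "context"
        "fmt"
        "time"
    )

    // wait blocks for d or until ctx is done. Unlike select-ing on
    // time.After(d), the explicit timer is stopped promptly when ctx wins,
    // releasing its resources instead of leaving the timer to fire later.
    func wait(ctx context.Context, d time.Duration) error {
        t := time.NewTimer(d)
        defer t.Stop()
        select {
        case <-t.C:
            return nil
        case <-ctx.Done():
            return ctx.Err()
        }
    }

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
        defer cancel()
        fmt.Println(wait(ctx, time.Second)) // context deadline exceeded
    }
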
git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' @@ -172,6 +176,7 @@ UpdateAddresses is deprecated: UpdateSubConnState is deprecated: balancer.ErrTransientFailure is deprecated: grpc/reflection/v1alpha/reflection.proto +SwitchTo is deprecated: XXXXX xDS deprecated fields we support .ExactMatch .PrefixMatch diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh index 9676fac313b..415b0b67c64 100644 --- a/vendor/k8s.io/code-generator/generate-internal-groups.sh +++ b/vendor/k8s.io/code-generator/generate-internal-groups.sh @@ -55,6 +55,12 @@ echo "WARNING: $(basename "$0") is deprecated." echo "WARNING: Please use k8s.io/code-generator/kube_codegen.sh instead." echo +# If verification only is requested, avoid deleting files +verify_only="" +for ((i = 1; i <= $#; i++)); do + if [ "${!i}" = --verify-only ]; then verify_only=1; fi +done + if [ "${GENS}" = "all" ] || grep -qw "all" <<<"${GENS}"; then ALL="client,conversion,deepcopy,defaulter,informer,lister,openapi" echo "WARNING: Specifying \"all\" as a generator is deprecated." @@ -124,12 +130,14 @@ CLIENTSET_PKG="${CLIENTSET_PKG_NAME:-clientset}" CLIENTSET_NAME="${CLIENTSET_NAME_VERSIONED:-versioned}" if grep -qw "deepcopy" <<<"${GENS}"; then - # Nuke existing files - for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${ALL_FQ_APIS[@]}"); do - pushd "${dir}" >/dev/null - git_find -z ':(glob)**'/zz_generated.deepcopy.go | xargs -0 rm -f - popd >/dev/null - done + if [ ! "$verify_only" ]; then + # Nuke existing files + for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${ALL_FQ_APIS[@]}"); do + pushd "${dir}" >/dev/null + git_find -z ':(glob)**'/zz_generated.deepcopy.go | xargs -0 rm -f + popd >/dev/null + done + fi echo "Generating deepcopy funcs" "${gobin}/deepcopy-gen" \ @@ -139,12 +147,14 @@ if grep -qw "deepcopy" <<<"${GENS}"; then fi if grep -qw "defaulter" <<<"${GENS}"; then - # Nuke existing files - for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${ALL_FQ_APIS[@]}"); do - pushd "${dir}" >/dev/null - git_find -z ':(glob)**'/zz_generated.defaults.go | xargs -0 rm -f - popd >/dev/null - done + if [ ! "$verify_only" ]; then + # Nuke existing files + for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${ALL_FQ_APIS[@]}"); do + pushd "${dir}" >/dev/null + git_find -z ':(glob)**'/zz_generated.defaults.go | xargs -0 rm -f + popd >/dev/null + done + fi echo "Generating defaulters" "${gobin}/defaulter-gen" \ @@ -154,12 +164,14 @@ if grep -qw "defaulter" <<<"${GENS}"; then fi if grep -qw "conversion" <<<"${GENS}"; then - # Nuke existing files - for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${ALL_FQ_APIS[@]}"); do - pushd "${dir}" >/dev/null - git_find -z ':(glob)**'/zz_generated.conversion.go | xargs -0 rm -f - popd >/dev/null - done + if [ ! 
"$verify_only" ]; then + # Nuke existing files + for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${ALL_FQ_APIS[@]}"); do + pushd "${dir}" >/dev/null + git_find -z ':(glob)**'/zz_generated.conversion.go | xargs -0 rm -f + popd >/dev/null + done + fi echo "Generating conversions" "${gobin}/conversion-gen" \ @@ -171,15 +183,17 @@ fi if grep -qw "applyconfiguration" <<<"${GENS}"; then APPLY_CONFIGURATION_PACKAGE="${OUTPUT_PKG}/${APPLYCONFIGURATION_PKG_NAME:-applyconfiguration}" - # Nuke existing files - root="$(GO111MODULE=on go list -f '{{.Dir}}' "${APPLY_CONFIGURATION_PACKAGE}" 2>/dev/null || true)" - if [ -n "${root}" ]; then - pushd "${root}" >/dev/null - git_grep -l --null \ - -e '^// Code generated by applyconfiguration-gen. DO NOT EDIT.$' \ - ':(glob)**/*.go' \ - | xargs -0 rm -f - popd >/dev/null + if [ ! "$verify_only" ]; then + # Nuke existing files + root="$(GO111MODULE=on go list -f '{{.Dir}}' "${APPLY_CONFIGURATION_PACKAGE}" 2>/dev/null || true)" + if [ -n "${root}" ]; then + pushd "${root}" >/dev/null + git_grep -l --null \ + -e '^// Code generated by applyconfiguration-gen. DO NOT EDIT.$' \ + ':(glob)**/*.go' \ + | xargs -0 rm -f + popd >/dev/null + fi fi echo "Generating apply configuration for ${GROUPS_WITH_VERSIONS} at ${APPLY_CONFIGURATION_PACKAGE}" @@ -190,15 +204,17 @@ if grep -qw "applyconfiguration" <<<"${GENS}"; then fi if grep -qw "client" <<<"${GENS}"; then - # Nuke existing files - root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/${CLIENTSET_PKG}/${CLIENTSET_NAME}" 2>/dev/null || true)" - if [ -n "${root}" ]; then - pushd "${root}" >/dev/null - git_grep -l --null \ - -e '^// Code generated by client-gen. DO NOT EDIT.$' \ - ':(glob)**/*.go' \ - | xargs -0 rm -f - popd >/dev/null + if [ ! "$verify_only" ]; then + # Nuke existing files + root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/${CLIENTSET_PKG}/${CLIENTSET_NAME}" 2>/dev/null || true)" + if [ -n "${root}" ]; then + pushd "${root}" >/dev/null + git_grep -l --null \ + -e '^// Code generated by client-gen. DO NOT EDIT.$' \ + ':(glob)**/*.go' \ + | xargs -0 rm -f + popd >/dev/null + fi fi echo "Generating clientset for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/${CLIENTSET_PKG}" @@ -212,18 +228,20 @@ if grep -qw "client" <<<"${GENS}"; then fi if grep -qw "lister" <<<"${GENS}"; then - # Nuke existing files - for gv in "${GROUP_VERSIONS[@]}"; do - root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/listers/${gv}" 2>/dev/null || true)" - if [ -n "${root}" ]; then - pushd "${root}" >/dev/null - git_grep -l --null \ - -e '^// Code generated by lister-gen. DO NOT EDIT.$' \ - ':(glob)**/*.go' \ - | xargs -0 rm -f - popd >/dev/null - fi - done + if [ ! "$verify_only" ]; then + # Nuke existing files + for gv in "${GROUP_VERSIONS[@]}"; do + root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/listers/${gv}" 2>/dev/null || true)" + if [ -n "${root}" ]; then + pushd "${root}" >/dev/null + git_grep -l --null \ + -e '^// Code generated by lister-gen. 
DO NOT EDIT.$' \ + ':(glob)**/*.go' \ + | xargs -0 rm -f + popd >/dev/null + fi + done + fi echo "Generating listers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/listers" "${gobin}/lister-gen" \ @@ -233,15 +251,17 @@ if grep -qw "lister" <<<"${GENS}"; then fi if grep -qw "informer" <<<"${GENS}"; then - # Nuke existing files - root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/informers/externalversions" 2>/dev/null || true)" - if [ -n "${root}" ]; then - pushd "${root}" >/dev/null - git_grep -l --null \ - -e '^// Code generated by informer-gen. DO NOT EDIT.$' \ - ':(glob)**/*.go' \ - | xargs -0 rm -f - popd >/dev/null + if [ ! "$verify_only" ]; then + # Nuke existing files + root="$(GO111MODULE=on go list -f '{{.Dir}}' "${OUTPUT_PKG}/informers/externalversions" 2>/dev/null || true)" + if [ -n "${root}" ]; then + pushd "${root}" >/dev/null + git_grep -l --null \ + -e '^// Code generated by informer-gen. DO NOT EDIT.$' \ + ':(glob)**/*.go' \ + | xargs -0 rm -f + popd >/dev/null + fi fi echo "Generating informers for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/informers" @@ -254,12 +274,14 @@ if grep -qw "informer" <<<"${GENS}"; then fi if grep -qw "openapi" <<<"${GENS}"; then - # Nuke existing files - for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${FQ_APIS[@]}"); do - pushd "${dir}" >/dev/null - git_find -z ':(glob)**'/zz_generated.openapi.go | xargs -0 rm -f - popd >/dev/null - done + if [ ! "$verify_only" ]; then + # Nuke existing files + for dir in $(GO111MODULE=on go list -f '{{.Dir}}' "${FQ_APIS[@]}"); do + pushd "${dir}" >/dev/null + git_find -z ':(glob)**'/zz_generated.openapi.go | xargs -0 rm -f + popd >/dev/null + done + fi echo "Generating OpenAPI definitions for ${GROUPS_WITH_VERSIONS} at ${OUTPUT_PKG}/openapi" declare -a OPENAPI_EXTRA_PACKAGES diff --git a/vendor/modules.txt b/vendor/modules.txt index 15ee27f64a8..90567dee2ae 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go/compute v1.25.0 +# cloud.google.com/go/compute v1.25.1 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 @@ -7,10 +7,31 @@ cloud.google.com/go/compute/metadata # dario.cat/mergo v1.0.0 ## explicit; go 1.13 dario.cat/mergo +# github.com/Azure/azure-pipeline-go v0.2.3 +## explicit; go 1.14 +github.com/Azure/azure-pipeline-go/pipeline +# github.com/Azure/azure-storage-blob-go v0.15.0 +## explicit; go 1.15 +github.com/Azure/azure-storage-blob-go/azblob # github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm +# github.com/Azure/go-autorest v14.2.0+incompatible +## explicit +github.com/Azure/go-autorest +# github.com/Azure/go-autorest/autorest/adal v0.9.23 +## explicit; go 1.15 +github.com/Azure/go-autorest/autorest/adal +# github.com/Azure/go-autorest/autorest/date v0.3.0 +## explicit; go 1.12 +github.com/Azure/go-autorest/autorest/date +# github.com/Azure/go-autorest/logger v0.2.1 +## explicit; go 1.12 +github.com/Azure/go-autorest/logger +# github.com/Azure/go-autorest/tracing v0.6.0 +## explicit; go 1.12 +github.com/Azure/go-autorest/tracing # github.com/MakeNowJust/heredoc v1.0.0 ## explicit; go 1.12 github.com/MakeNowJust/heredoc @@ -41,12 +62,72 @@ github.com/ProtonMail/go-crypto/openpgp/internal/ecc github.com/ProtonMail/go-crypto/openpgp/internal/encoding github.com/ProtonMail/go-crypto/openpgp/packet github.com/ProtonMail/go-crypto/openpgp/s2k +# github.com/Unknwon/goconfig v1.0.0 +## explicit 
+github.com/Unknwon/goconfig +# github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3 +## explicit; go 1.13 +github.com/aalpar/deheap +# github.com/abbot/go-http-auth v0.4.0 +## explicit +github.com/abbot/go-http-auth # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.50.14 +# github.com/aws/aws-sdk-go v1.51.14 ## explicit; go 1.19 -# github.com/aws/aws-sdk-go-v2 v1.25.2 +github.com/aws/aws-sdk-go/aws +github.com/aws/aws-sdk-go/aws/arn +github.com/aws/aws-sdk-go/aws/auth/bearer +github.com/aws/aws-sdk-go/aws/awserr +github.com/aws/aws-sdk-go/aws/awsutil +github.com/aws/aws-sdk-go/aws/client +github.com/aws/aws-sdk-go/aws/client/metadata +github.com/aws/aws-sdk-go/aws/corehandlers +github.com/aws/aws-sdk-go/aws/credentials +github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds +github.com/aws/aws-sdk-go/aws/credentials/endpointcreds +github.com/aws/aws-sdk-go/aws/credentials/processcreds +github.com/aws/aws-sdk-go/aws/credentials/ssocreds +github.com/aws/aws-sdk-go/aws/credentials/stscreds +github.com/aws/aws-sdk-go/aws/csm +github.com/aws/aws-sdk-go/aws/defaults +github.com/aws/aws-sdk-go/aws/ec2metadata +github.com/aws/aws-sdk-go/aws/endpoints +github.com/aws/aws-sdk-go/aws/request +github.com/aws/aws-sdk-go/aws/session +github.com/aws/aws-sdk-go/aws/signer/v4 +github.com/aws/aws-sdk-go/internal/context +github.com/aws/aws-sdk-go/internal/ini +github.com/aws/aws-sdk-go/internal/s3shared +github.com/aws/aws-sdk-go/internal/s3shared/arn +github.com/aws/aws-sdk-go/internal/s3shared/s3err +github.com/aws/aws-sdk-go/internal/sdkio +github.com/aws/aws-sdk-go/internal/sdkmath +github.com/aws/aws-sdk-go/internal/sdkrand +github.com/aws/aws-sdk-go/internal/sdkuri +github.com/aws/aws-sdk-go/internal/shareddefaults +github.com/aws/aws-sdk-go/internal/strings +github.com/aws/aws-sdk-go/internal/sync/singleflight +github.com/aws/aws-sdk-go/private/checksum +github.com/aws/aws-sdk-go/private/protocol +github.com/aws/aws-sdk-go/private/protocol/eventstream +github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi +github.com/aws/aws-sdk-go/private/protocol/json/jsonutil +github.com/aws/aws-sdk-go/private/protocol/jsonrpc +github.com/aws/aws-sdk-go/private/protocol/query +github.com/aws/aws-sdk-go/private/protocol/query/queryutil +github.com/aws/aws-sdk-go/private/protocol/rest +github.com/aws/aws-sdk-go/private/protocol/restjson +github.com/aws/aws-sdk-go/private/protocol/restxml +github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil +github.com/aws/aws-sdk-go/service/s3 +github.com/aws/aws-sdk-go/service/sso +github.com/aws/aws-sdk-go/service/sso/ssoiface +github.com/aws/aws-sdk-go/service/ssooidc +github.com/aws/aws-sdk-go/service/sts +github.com/aws/aws-sdk-go/service/sts/stsiface +# github.com/aws/aws-sdk-go-v2 v1.26.1 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/defaults @@ -72,10 +153,10 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/config v1.27.6 +# github.com/aws/aws-sdk-go-v2/config v1.27.10 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.17.6 +# github.com/aws/aws-sdk-go-v2/credentials v1.17.10 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/credentials 
github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -84,56 +165,56 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.8 +# github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue v1.13.13 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.2 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.1 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.2 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.5 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.2 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.5 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 # github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.30.3 +# github.com/aws/aws-sdk-go-v2/service/dynamodb v1.31.1 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/dynamodb github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints github.com/aws/aws-sdk-go-v2/service/dynamodb/types -# github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.1 +# github.com/aws/aws-sdk-go-v2/service/dynamodbstreams v1.20.4 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/dynamodbstreams/types -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.1 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.3 +# github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.6 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.4 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.7 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/sso v1.20.1 +# github.com/aws/aws-sdk-go-v2/service/sso v1.20.4 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.1 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.23.4 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.28.3 +# github.com/aws/aws-sdk-go-v2/service/sts v1.28.6 ## explicit; go 1.20 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.20.1 +# github.com/aws/smithy-go v1.20.2 ## explicit; go 1.20 github.com/aws/smithy-go github.com/aws/smithy-go/auth @@ -171,6 +252,9 @@ 
github.com/c9s/goprocinfo/linux # github.com/cenkalti/backoff v2.2.1+incompatible ## explicit github.com/cenkalti/backoff +# github.com/cenkalti/backoff/v4 v4.3.0 +## explicit; go 1.18 +github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 @@ -194,6 +278,9 @@ github.com/containers/image/v5/docker/reference # github.com/containers/storage v1.53.0 ## explicit; go 1.20 github.com/containers/storage/pkg/regexp +# github.com/coreos/go-semver v0.3.1 +## explicit; go 1.8 +github.com/coreos/go-semver/semver # github.com/coreos/go-systemd/v22 v22.5.0 ## explicit; go 1.12 github.com/coreos/go-systemd/v22/dbus @@ -204,7 +291,7 @@ github.com/cyphar/filepath-securejoin # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/emicklei/go-restful/v3 v3.11.3 +# github.com/emicklei/go-restful/v3 v3.12.0 ## explicit; go 1.13 github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log @@ -242,7 +329,7 @@ github.com/go-git/go-billy/v5/helper/polyfill github.com/go-git/go-billy/v5/memfs github.com/go-git/go-billy/v5/osfs github.com/go-git/go-billy/v5/util -# github.com/go-git/go-git/v5 v5.11.0 +# github.com/go-git/go-git/v5 v5.12.0 ## explicit; go 1.19 github.com/go-git/go-git/v5 github.com/go-git/go-git/v5/config @@ -297,8 +384,8 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/analysis v0.22.3 -## explicit; go 1.19 +# github.com/go-openapi/analysis v0.23.0 +## explicit; go 1.20 github.com/go-openapi/analysis github.com/go-openapi/analysis/internal/debug github.com/go-openapi/analysis/internal/flatten/normalize @@ -306,21 +393,21 @@ github.com/go-openapi/analysis/internal/flatten/operations github.com/go-openapi/analysis/internal/flatten/replace github.com/go-openapi/analysis/internal/flatten/schutils github.com/go-openapi/analysis/internal/flatten/sortref -# github.com/go-openapi/errors v0.21.1 -## explicit; go 1.19 +# github.com/go-openapi/errors v0.22.0 +## explicit; go 1.20 github.com/go-openapi/errors -# github.com/go-openapi/jsonpointer v0.20.3 -## explicit; go 1.19 +# github.com/go-openapi/jsonpointer v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonpointer -# github.com/go-openapi/jsonreference v0.20.5 -## explicit; go 1.19 +# github.com/go-openapi/jsonreference v0.21.0 +## explicit; go 1.20 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/loads v0.21.6 -## explicit; go 1.19 +# github.com/go-openapi/loads v0.22.0 +## explicit; go 1.20 github.com/go-openapi/loads -# github.com/go-openapi/runtime v0.27.2 -## explicit; go 1.19 +# github.com/go-openapi/runtime v0.28.0 +## explicit; go 1.20 github.com/go-openapi/runtime github.com/go-openapi/runtime/client github.com/go-openapi/runtime/logger @@ -330,17 +417,17 @@ github.com/go-openapi/runtime/middleware/header github.com/go-openapi/runtime/middleware/untyped github.com/go-openapi/runtime/security github.com/go-openapi/runtime/yamlpc -# github.com/go-openapi/spec v0.20.15 -## explicit; go 1.19 +# github.com/go-openapi/spec v0.21.0 +## explicit; go 1.20 github.com/go-openapi/spec -# github.com/go-openapi/strfmt v0.22.2 -## explicit; go 1.19 +# github.com/go-openapi/strfmt v0.23.0 +## explicit; go 1.20 github.com/go-openapi/strfmt -# github.com/go-openapi/swag v0.22.10 -## explicit; go 1.19 +# github.com/go-openapi/swag v0.23.0 +## explicit; go 1.20 github.com/go-openapi/swag -# 
github.com/go-openapi/validate v0.23.2 -## explicit; go 1.19 +# github.com/go-openapi/validate v0.24.0 +## explicit; go 1.20 github.com/go-openapi/validate # github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 ## explicit; go 1.13 @@ -358,7 +445,7 @@ github.com/gobwas/glob/syntax/ast github.com/gobwas/glob/syntax/lexer github.com/gobwas/glob/util/runes github.com/gobwas/glob/util/strings -# github.com/gocql/gocql v1.6.0 => github.com/scylladb/gocql v1.12.0 +# github.com/gocql/gocql v1.6.0 => github.com/scylladb/gocql v1.13.0 ## explicit; go 1.13 github.com/gocql/gocql github.com/gocql/gocql/internal/lru @@ -374,6 +461,9 @@ github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys +# github.com/golang-jwt/jwt/v4 v4.5.0 +## explicit; go 1.16 +github.com/golang-jwt/jwt/v4 # github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da ## explicit github.com/golang/groupcache/lru @@ -409,7 +499,7 @@ github.com/google/go-cmp/cmp/internal/value ## explicit; go 1.12 github.com/google/gofuzz github.com/google/gofuzz/bytesource -# github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 +# github.com/google/pprof v0.0.0-20240402174815-29b9bb013b0f ## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 @@ -418,6 +508,13 @@ github.com/google/shlex # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid +# github.com/googleapis/gax-go/v2 v2.12.3 +## explicit; go 1.19 +github.com/googleapis/gax-go/v2 +github.com/googleapis/gax-go/v2/apierror +github.com/googleapis/gax-go/v2/apierror/internal/proto +github.com/googleapis/gax-go/v2/callctx +github.com/googleapis/gax-go/v2/internal # github.com/gorilla/websocket v1.5.1 ## explicit; go 1.20 github.com/gorilla/websocket @@ -459,6 +556,9 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go +# github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 +## explicit +github.com/jzelinskie/whirlpool # github.com/kevinburke/ssh_config v1.2.0 ## explicit github.com/kevinburke/ssh_config @@ -480,12 +580,18 @@ github.com/mailru/easyjson/jwriter # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.17 +# github.com/mattn/go-ieproxy v0.0.11 +## explicit; go 1.17 +github.com/mattn/go-ieproxy +# github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth +# github.com/mitchellh/go-homedir v1.1.0 +## explicit +github.com/mitchellh/go-homedir # github.com/mitchellh/go-wordwrap v1.0.1 ## explicit; go 1.14 github.com/mitchellh/go-wordwrap @@ -515,10 +621,13 @@ github.com/munnerz/goautoneg # github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f ## explicit github.com/mxk/go-flowrate/flowrate +# github.com/ncw/swift v1.0.53 +## explicit +github.com/ncw/swift # github.com/oklog/ulid v1.3.1 ## explicit github.com/oklog/ulid -# github.com/onsi/ginkgo/v2 v2.16.0 +# github.com/onsi/ginkgo/v2 v2.17.1 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 github.com/onsi/ginkgo/v2/config @@ -540,7 +649,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support github.com/onsi/ginkgo/v2/internal/testingtproxy github.com/onsi/ginkgo/v2/reporters github.com/onsi/ginkgo/v2/types -# github.com/onsi/gomega v1.31.1 +# github.com/onsi/gomega v1.32.0 ## explicit; 
go 1.20 github.com/onsi/gomega github.com/onsi/gomega/format @@ -579,10 +688,10 @@ github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.6.0 +# github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.50.0 +# github.com/prometheus/common v0.52.2 ## explicit; go 1.21 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -592,35 +701,128 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/rclone/rclone v1.66.0 => github.com/scylladb/rclone v1.54.1-0.20240312172628-afe1fd2aa65e +## explicit; go 1.21 +github.com/rclone/rclone/backend/azureblob +github.com/rclone/rclone/backend/crypt +github.com/rclone/rclone/backend/crypt/pkcs7 +github.com/rclone/rclone/backend/googlecloudstorage +github.com/rclone/rclone/backend/local +github.com/rclone/rclone/backend/s3 +github.com/rclone/rclone/cmd/serve/httplib +github.com/rclone/rclone/cmd/serve/httplib/serve/data +github.com/rclone/rclone/fs +github.com/rclone/rclone/fs/accounting +github.com/rclone/rclone/fs/asyncreader +github.com/rclone/rclone/fs/cache +github.com/rclone/rclone/fs/config +github.com/rclone/rclone/fs/config/configmap +github.com/rclone/rclone/fs/config/configstruct +github.com/rclone/rclone/fs/config/obscure +github.com/rclone/rclone/fs/dirtree +github.com/rclone/rclone/fs/driveletter +github.com/rclone/rclone/fs/filter +github.com/rclone/rclone/fs/fserrors +github.com/rclone/rclone/fs/fshttp +github.com/rclone/rclone/fs/fspath +github.com/rclone/rclone/fs/hash +github.com/rclone/rclone/fs/list +github.com/rclone/rclone/fs/march +github.com/rclone/rclone/fs/object +github.com/rclone/rclone/fs/operations +github.com/rclone/rclone/fs/rc +github.com/rclone/rclone/fs/rc/jobs +github.com/rclone/rclone/fs/sync +github.com/rclone/rclone/fs/walk +github.com/rclone/rclone/lib/atexit +github.com/rclone/rclone/lib/bucket +github.com/rclone/rclone/lib/cache +github.com/rclone/rclone/lib/encoder +github.com/rclone/rclone/lib/env +github.com/rclone/rclone/lib/errors +github.com/rclone/rclone/lib/file +github.com/rclone/rclone/lib/mmap +github.com/rclone/rclone/lib/oauthutil +github.com/rclone/rclone/lib/pacer +github.com/rclone/rclone/lib/pool +github.com/rclone/rclone/lib/random +github.com/rclone/rclone/lib/readers +github.com/rclone/rclone/lib/rest +github.com/rclone/rclone/lib/structs +github.com/rclone/rclone/lib/terminal +# github.com/rfjakob/eme v1.1.2 +## explicit; go 1.16 +github.com/rfjakob/eme # github.com/rivo/uniseg v0.4.7 ## explicit; go 1.18 github.com/rivo/uniseg +# github.com/robfig/cron/v3 v3.0.1 +## explicit; go 1.12 +github.com/robfig/cron/v3 # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/scylladb/go-log v0.0.7 +## explicit; go 1.15 +github.com/scylladb/go-log # github.com/scylladb/go-reflectx v1.0.1 ## explicit github.com/scylladb/go-reflectx # github.com/scylladb/go-set v1.0.2 ## explicit +github.com/scylladb/go-set/b16set github.com/scylladb/go-set/strset # github.com/scylladb/gocqlx/v2 v2.8.0 ## explicit; go 1.17 github.com/scylladb/gocqlx/v2 github.com/scylladb/gocqlx/v2/qb github.com/scylladb/gocqlx/v2/table -# 
github.com/scylladb/scylla-manager/v3 v3.2.6 +# github.com/scylladb/scylla-manager/v3 v3.2.7 ## explicit; go 1.21 +github.com/scylladb/scylla-manager/v3/pkg +github.com/scylladb/scylla-manager/v3/pkg/auth +github.com/scylladb/scylla-manager/v3/pkg/dht github.com/scylladb/scylla-manager/v3/pkg/managerclient github.com/scylladb/scylla-manager/v3/pkg/managerclient/table +github.com/scylladb/scylla-manager/v3/pkg/metrics +github.com/scylladb/scylla-manager/v3/pkg/rclone +github.com/scylladb/scylla-manager/v3/pkg/rclone/backend/localdir +github.com/scylladb/scylla-manager/v3/pkg/rclone/operations +github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver +github.com/scylladb/scylla-manager/v3/pkg/rclone/rcserver/internal +github.com/scylladb/scylla-manager/v3/pkg/scheduler +github.com/scylladb/scylla-manager/v3/pkg/scheduler/trigger +github.com/scylladb/scylla-manager/v3/pkg/schema/table +github.com/scylladb/scylla-manager/v3/pkg/scyllaclient github.com/scylladb/scylla-manager/v3/pkg/service +github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec +github.com/scylladb/scylla-manager/v3/pkg/service/scheduler +github.com/scylladb/scylla-manager/v3/pkg/store +github.com/scylladb/scylla-manager/v3/pkg/util/duration +github.com/scylladb/scylla-manager/v3/pkg/util/httpx github.com/scylladb/scylla-manager/v3/pkg/util/inexlist +github.com/scylladb/scylla-manager/v3/pkg/util/inexlist/ksfilter +github.com/scylladb/scylla-manager/v3/pkg/util/jsonutil +github.com/scylladb/scylla-manager/v3/pkg/util/parallel +github.com/scylladb/scylla-manager/v3/pkg/util/pathparser github.com/scylladb/scylla-manager/v3/pkg/util/pointer +github.com/scylladb/scylla-manager/v3/pkg/util/prom +github.com/scylladb/scylla-manager/v3/pkg/util/retry +github.com/scylladb/scylla-manager/v3/pkg/util/slice github.com/scylladb/scylla-manager/v3/pkg/util/timeutc github.com/scylladb/scylla-manager/v3/pkg/util/uuid github.com/scylladb/scylla-manager/v3/pkg/util/version +github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client +github.com/scylladb/scylla-manager/v3/swagger/gen/agent/client/operations +github.com/scylladb/scylla-manager/v3/swagger/gen/agent/models github.com/scylladb/scylla-manager/v3/swagger/gen/scylla-manager/client/operations github.com/scylladb/scylla-manager/v3/swagger/gen/scylla-manager/models +github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client +github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/client/operations +github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v1/models +github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client +github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/client/config +github.com/scylladb/scylla-manager/v3/swagger/gen/scylla/v2/models # github.com/scylladb/scylladb-swagger-go-client v0.2.0 ## explicit; go 1.20 github.com/scylladb/scylladb-swagger-go-client/scylladb/gen/v1/client @@ -633,8 +835,8 @@ github.com/scylladb/scylladb-swagger-go-client/scylladb/gen/v2/models ## explicit github.com/scylladb/termtables github.com/scylladb/termtables/term -# github.com/sergi/go-diff v1.3.1 -## explicit; go 1.12 +# github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 +## explicit; go 1.13 github.com/sergi/go-diff/diffmatchpatch # github.com/shurcooL/githubv4 v0.0.0-20240120211514-18a1ae0e79dc ## explicit; go 1.19 @@ -644,15 +846,25 @@ github.com/shurcooL/githubv4 github.com/shurcooL/graphql github.com/shurcooL/graphql/ident github.com/shurcooL/graphql/internal/jsonutil -# github.com/skeema/knownhosts v1.2.1 +# 
github.com/sirupsen/logrus v1.9.3 +## explicit; go 1.13 +github.com/sirupsen/logrus +# github.com/skeema/knownhosts v1.2.2 ## explicit; go 1.17 github.com/skeema/knownhosts +# github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 +## explicit +github.com/skratchdot/open-golang/open +# github.com/smartystreets/goconvey v1.8.1 +## explicit; go 1.18 # github.com/spf13/cobra v1.8.0 ## explicit; go 1.15 github.com/spf13/cobra # github.com/spf13/pflag v1.0.5 ## explicit; go 1.12 github.com/spf13/pflag +# github.com/stretchr/objx v0.5.2 +## explicit; go 1.20 # github.com/xanzy/ssh-agent v0.3.3 ## explicit; go 1.16 github.com/xanzy/ssh-agent @@ -668,6 +880,24 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore +# go.opencensus.io v0.24.0 +## explicit; go 1.13 +go.opencensus.io +go.opencensus.io/internal +go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricdata +go.opencensus.io/metric/metricproducer +go.opencensus.io/plugin/ochttp +go.opencensus.io/plugin/ochttp/propagation/b3 +go.opencensus.io/resource +go.opencensus.io/stats +go.opencensus.io/stats/internal +go.opencensus.io/stats/view +go.opencensus.io/tag +go.opencensus.io/trace +go.opencensus.io/trace/internal +go.opencensus.io/trace/propagation +go.opencensus.io/trace/tracestate # go.opentelemetry.io/otel v1.24.0 ## explicit; go 1.20 go.opentelemetry.io/otel @@ -690,8 +920,8 @@ go.opentelemetry.io/otel/metric/embedded ## explicit; go 1.20 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded -# go.starlark.net v0.0.0-20240123142251-f86470692795 -## explicit; go 1.18 +# go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a +## explicit; go 1.19 go.starlark.net/internal/compile go.starlark.net/internal/spell go.starlark.net/resolve @@ -714,9 +944,21 @@ go.uber.org/config/internal/unreachable # go.uber.org/multierr v1.11.0 ## explicit; go 1.19 go.uber.org/multierr +# go.uber.org/zap v1.27.0 +## explicit; go 1.19 +go.uber.org/zap +go.uber.org/zap/buffer +go.uber.org/zap/internal +go.uber.org/zap/internal/bufferpool +go.uber.org/zap/internal/color +go.uber.org/zap/internal/exit +go.uber.org/zap/internal/pool +go.uber.org/zap/internal/stacktrace +go.uber.org/zap/zapcore # golang.org/x/crypto v0.21.0 ## explicit; go 1.18 golang.org/x/crypto/argon2 +golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 @@ -726,11 +968,18 @@ golang.org/x/crypto/curve25519/internal/field golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 +golang.org/x/crypto/nacl/secretbox +golang.org/x/crypto/pbkdf2 +golang.org/x/crypto/pkcs12 +golang.org/x/crypto/pkcs12/internal/rc2 +golang.org/x/crypto/salsa20/salsa +golang.org/x/crypto/scrypt golang.org/x/crypto/sha3 golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts +golang.org/x/crypto/ssh/terminal # golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 ## explicit; go 1.11 golang.org/x/lint @@ -740,24 +989,34 @@ golang.org/x/lint/golint golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.22.0 +# golang.org/x/net v0.23.0 ## explicit; go 1.18 golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/html/charset golang.org/x/net/http/httpguts +golang.org/x/net/http/httpproxy 
golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy +golang.org/x/net/publicsuffix golang.org/x/net/trace # golang.org/x/oauth2 v0.18.0 ## explicit; go 1.18 golang.org/x/oauth2 +golang.org/x/oauth2/authhandler +golang.org/x/oauth2/google +golang.org/x/oauth2/google/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/impersonate +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal +golang.org/x/oauth2/jws +golang.org/x/oauth2/jwt # golang.org/x/sync v0.6.0 ## explicit; go 1.18 golang.org/x/sync/errgroup @@ -768,6 +1027,7 @@ golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/windows/registry # golang.org/x/term v0.18.0 ## explicit; go 1.18 golang.org/x/term @@ -823,19 +1083,39 @@ golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions +# google.golang.org/api v0.172.0 => github.com/scylladb/google-api-go-client v0.34.1-patched +## explicit; go 1.11 +google.golang.org/api/googleapi +google.golang.org/api/googleapi/transport +google.golang.org/api/internal +google.golang.org/api/internal/gensupport +google.golang.org/api/internal/impersonate +google.golang.org/api/internal/third_party/uritemplates +google.golang.org/api/option +google.golang.org/api/option/internaloption +google.golang.org/api/storage/v1 +google.golang.org/api/transport/cert +google.golang.org/api/transport/http +google.golang.org/api/transport/http/internal/propagation +google.golang.org/api/transport/internal/dca # google.golang.org/appengine v1.6.8 ## explicit; go 1.11 +google.golang.org/appengine google.golang.org/appengine/internal +google.golang.org/appengine/internal/app_identity google.golang.org/appengine/internal/base google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240304212257-790db918fca8 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda ## explicit; go 1.19 +google.golang.org/genproto/googleapis/rpc/code +google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.62.1 +# google.golang.org/grpc v1.63.0 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes @@ -941,7 +1221,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.29.2 +# k8s.io/api v0.29.3 ## explicit; go 1.21 k8s.io/api/admission/v1 k8s.io/api/admissionregistration/v1 @@ -996,12 +1276,12 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.29.2 +# k8s.io/apiextensions-apiserver v0.29.3 ## explicit; go 1.21 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1 -# k8s.io/apimachinery v0.29.2 +# k8s.io/apimachinery v0.29.3 ## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -1060,18 +1340,18 @@ 
k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.29.2 +# k8s.io/apiserver v0.29.3 ## explicit; go 1.21 k8s.io/apiserver/pkg/server/dynamiccertificates k8s.io/apiserver/pkg/storage/names k8s.io/apiserver/pkg/util/feature -# k8s.io/cli-runtime v0.29.2 +# k8s.io/cli-runtime v0.29.3 ## explicit; go 1.21 k8s.io/cli-runtime/pkg/genericclioptions k8s.io/cli-runtime/pkg/genericiooptions k8s.io/cli-runtime/pkg/printers k8s.io/cli-runtime/pkg/resource -# k8s.io/client-go v0.29.2 +# k8s.io/client-go v0.29.3 ## explicit; go 1.21 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1390,7 +1670,7 @@ k8s.io/client-go/util/jsonpath k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/code-generator v0.29.2 +# k8s.io/code-generator v0.29.3 ## explicit; go 1.21 k8s.io/code-generator k8s.io/code-generator/cmd/applyconfiguration-gen @@ -1428,7 +1708,7 @@ k8s.io/code-generator/cmd/set-gen k8s.io/code-generator/pkg/namer k8s.io/code-generator/pkg/util k8s.io/code-generator/third_party/forked/golang/reflect -# k8s.io/component-base v0.29.2 +# k8s.io/component-base v0.29.3 ## explicit; go 1.21 k8s.io/component-base/cli/flag k8s.io/component-base/featuregate @@ -1438,12 +1718,12 @@ k8s.io/component-base/metrics/legacyregistry k8s.io/component-base/metrics/prometheus/feature k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/version -# k8s.io/component-helpers v0.29.2 +# k8s.io/component-helpers v0.29.3 ## explicit; go 1.21 k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/cri-api v0.29.2 +# k8s.io/cri-api v0.29.3 ## explicit; go 1.21 k8s.io/cri-api/pkg/apis/runtime/v1 # k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 @@ -1474,7 +1754,7 @@ k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity k8s.io/klog/v2/internal/sloghandler -# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 +# k8s.io/kube-openapi v0.0.0-20240403164606-bc84c2ddaf99 ## explicit; go 1.20 k8s.io/kube-openapi/cmd/openapi-gen/args k8s.io/kube-openapi/pkg/cached @@ -1489,15 +1769,15 @@ k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/kubectl v0.29.2 +# k8s.io/kubectl v0.29.3 ## explicit; go 1.21 k8s.io/kubectl/pkg/util/interrupt k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term -# k8s.io/kubelet v0.29.2 +# k8s.io/kubelet v0.29.3 ## explicit; go 1.21 k8s.io/kubelet/pkg/apis/podresources/v1 -# k8s.io/utils v0.0.0-20240102154912-e7106e64919e +# k8s.io/utils v0.0.0-20240310230437-4693a0247e57 ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock @@ -1625,4 +1905,6 @@ sigs.k8s.io/structured-merge-diff/v4/value sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/gocql/gocql => github.com/scylladb/gocql v1.12.0 +# github.com/gocql/gocql => github.com/scylladb/gocql v1.13.0 +# github.com/rclone/rclone => github.com/scylladb/rclone v1.54.1-0.20240312172628-afe1fd2aa65e +# google.golang.org/api => github.com/scylladb/google-api-go-client v0.34.1-patched
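
The three replace markers at the end of modules.txt record module replacements that go mod vendor copies from go.mod; a sketch of the corresponding go.mod stanza (standard replace syntax, module paths and versions taken from the entries above):

    replace (
        github.com/gocql/gocql => github.com/scylladb/gocql v1.13.0
        github.com/rclone/rclone => github.com/scylladb/rclone v1.54.1-0.20240312172628-afe1fd2aa65e
        google.golang.org/api => github.com/scylladb/google-api-go-client v0.34.1-patched
    )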